Diffstat (limited to 'lib')
-rw-r--r-- lib/ARCMigrate/ARCMT.cpp | 7
-rw-r--r-- lib/ARCMigrate/FileRemapper.cpp | 2
-rw-r--r-- lib/ARCMigrate/ObjCMT.cpp | 52
-rw-r--r-- lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp | 4
-rw-r--r-- lib/ARCMigrate/TransGCAttrs.cpp | 4
-rw-r--r-- lib/ARCMigrate/TransProperties.cpp | 26
-rw-r--r-- lib/ARCMigrate/TransRetainReleaseDealloc.cpp | 28
-rw-r--r-- lib/ARCMigrate/TransformActions.cpp | 7
-rw-r--r-- lib/ARCMigrate/Transforms.cpp | 10
-rw-r--r-- lib/AST/ASTContext.cpp | 502
-rw-r--r-- lib/AST/ASTDiagnostic.cpp | 65
-rw-r--r-- lib/AST/ASTDumper.cpp | 29
-rw-r--r-- lib/AST/ASTImporter.cpp | 19
-rw-r--r-- lib/AST/CMakeLists.txt | 5
-rw-r--r-- lib/AST/CXXABI.h | 12
-rw-r--r-- lib/AST/CXXInheritance.cpp | 85
-rw-r--r-- lib/AST/Decl.cpp | 584
-rw-r--r-- lib/AST/DeclBase.cpp | 60
-rw-r--r-- lib/AST/DeclCXX.cpp | 43
-rw-r--r-- lib/AST/DeclFriend.cpp | 7
-rw-r--r-- lib/AST/DeclGroup.cpp | 7
-rw-r--r-- lib/AST/DeclObjC.cpp | 90
-rw-r--r-- lib/AST/DeclOpenMP.cpp | 10
-rw-r--r-- lib/AST/DeclPrinter.cpp | 52
-rw-r--r-- lib/AST/DeclTemplate.cpp | 148
-rw-r--r-- lib/AST/DeclarationName.cpp | 2
-rw-r--r-- lib/AST/Expr.cpp | 660
-rw-r--r-- lib/AST/ExprCXX.cpp | 293
-rw-r--r-- lib/AST/ExprClassification.cpp | 6
-rw-r--r-- lib/AST/ExprConstant.cpp | 702
-rw-r--r-- lib/AST/ExprObjC.cpp | 379
-rw-r--r-- lib/AST/ExternalASTSource.cpp | 31
-rw-r--r-- lib/AST/ItaniumCXXABI.cpp | 14
-rw-r--r-- lib/AST/ItaniumMangle.cpp | 285
-rw-r--r-- lib/AST/Mangle.cpp | 3
-rw-r--r-- lib/AST/MicrosoftCXXABI.cpp | 45
-rw-r--r-- lib/AST/MicrosoftMangle.cpp | 502
-rw-r--r-- lib/AST/NSAPI.cpp | 11
-rw-r--r-- lib/AST/NestedNameSpecifier.cpp | 7
-rw-r--r-- lib/AST/OpenMPClause.cpp | 465
-rw-r--r-- lib/AST/RawCommentList.cpp | 86
-rw-r--r-- lib/AST/RecordLayoutBuilder.cpp | 535
-rw-r--r-- lib/AST/Stmt.cpp | 1328
-rw-r--r-- lib/AST/StmtCXX.cpp | 86
-rw-r--r-- lib/AST/StmtIterator.cpp | 2
-rw-r--r-- lib/AST/StmtObjC.cpp | 73
-rw-r--r-- lib/AST/StmtOpenMP.cpp | 884
-rw-r--r-- lib/AST/StmtPrinter.cpp | 208
-rw-r--r-- lib/AST/StmtProfile.cpp | 119
-rw-r--r-- lib/AST/TemplateBase.cpp | 117
-rw-r--r-- lib/AST/TemplateName.cpp | 54
-rw-r--r-- lib/AST/Type.cpp | 273
-rw-r--r-- lib/AST/TypeLoc.cpp | 34
-rw-r--r-- lib/AST/TypePrinter.cpp | 29
-rw-r--r-- lib/AST/VTableBuilder.cpp | 324
-rw-r--r-- lib/ASTMatchers/ASTMatchFinder.cpp | 25
-rw-r--r-- lib/ASTMatchers/ASTMatchersInternal.cpp | 10
-rw-r--r-- lib/ASTMatchers/Dynamic/Diagnostics.cpp | 8
-rw-r--r-- lib/ASTMatchers/Dynamic/Marshallers.h | 22
-rw-r--r-- lib/ASTMatchers/Dynamic/Parser.cpp | 2
-rw-r--r-- lib/ASTMatchers/Dynamic/Registry.cpp | 120
-rw-r--r-- lib/ASTMatchers/Dynamic/VariantValue.cpp | 2
-rw-r--r-- lib/Analysis/AnalysisDeclContext.cpp | 17
-rw-r--r-- lib/Analysis/BodyFarm.cpp | 5
-rw-r--r-- lib/Analysis/CFG.cpp | 187
-rw-r--r-- lib/Analysis/Consumed.cpp | 127
-rw-r--r-- lib/Analysis/ThreadSafety.cpp | 135
-rw-r--r-- lib/Analysis/ThreadSafetyCommon.cpp | 53
-rw-r--r-- lib/Basic/Attributes.cpp | 4
-rw-r--r-- lib/Basic/Builtins.cpp | 86
-rw-r--r-- lib/Basic/Diagnostic.cpp | 24
-rw-r--r-- lib/Basic/DiagnosticIDs.cpp | 38
-rw-r--r-- lib/Basic/FileManager.cpp | 92
-rw-r--r-- lib/Basic/IdentifierTable.cpp | 9
-rw-r--r-- lib/Basic/Module.cpp | 20
-rw-r--r-- lib/Basic/ObjCRuntime.cpp | 3
-rw-r--r-- lib/Basic/OpenMPKinds.cpp | 168
-rw-r--r-- lib/Basic/SanitizerBlacklist.cpp | 2
-rw-r--r-- lib/Basic/SourceManager.cpp | 97
-rw-r--r-- lib/Basic/TargetInfo.cpp | 144
-rw-r--r-- lib/Basic/Targets.cpp | 3323
-rw-r--r-- lib/Basic/Version.cpp | 2
-rw-r--r-- lib/Basic/VirtualFileSystem.cpp | 631
-rw-r--r-- lib/CodeGen/ABIInfo.h | 22
-rw-r--r-- lib/CodeGen/Address.h | 126
-rw-r--r-- lib/CodeGen/BackendUtil.cpp | 188
-rw-r--r-- lib/CodeGen/CGAtomic.cpp | 641
-rw-r--r-- lib/CodeGen/CGBlocks.cpp | 899
-rw-r--r-- lib/CodeGen/CGBlocks.h | 33
-rw-r--r-- lib/CodeGen/CGBuilder.h | 263
-rw-r--r-- lib/CodeGen/CGBuiltin.cpp | 1422
-rw-r--r-- lib/CodeGen/CGCUDANV.cpp | 22
-rw-r--r-- lib/CodeGen/CGCXX.cpp | 49
-rw-r--r-- lib/CodeGen/CGCXXABI.cpp | 64
-rw-r--r-- lib/CodeGen/CGCXXABI.h | 106
-rw-r--r-- lib/CodeGen/CGCall.cpp | 989
-rw-r--r-- lib/CodeGen/CGCall.h | 26
-rw-r--r-- lib/CodeGen/CGClass.cpp | 784
-rw-r--r-- lib/CodeGen/CGCleanup.cpp | 235
-rw-r--r-- lib/CodeGen/CGCleanup.h | 125
-rw-r--r-- lib/CodeGen/CGDebugInfo.cpp | 565
-rw-r--r-- lib/CodeGen/CGDebugInfo.h | 96
-rw-r--r-- lib/CodeGen/CGDecl.cpp | 373
-rw-r--r-- lib/CodeGen/CGDeclCXX.cpp | 68
-rw-r--r-- lib/CodeGen/CGException.cpp | 427
-rw-r--r-- lib/CodeGen/CGExpr.cpp | 1357
-rw-r--r-- lib/CodeGen/CGExprAgg.cpp | 182
-rw-r--r-- lib/CodeGen/CGExprCXX.cpp | 400
-rw-r--r-- lib/CodeGen/CGExprComplex.cpp | 142
-rw-r--r-- lib/CodeGen/CGExprConstant.cpp | 64
-rw-r--r-- lib/CodeGen/CGExprScalar.cpp | 348
-rw-r--r-- lib/CodeGen/CGLoopInfo.cpp | 178
-rw-r--r-- lib/CodeGen/CGLoopInfo.h | 55
-rw-r--r-- lib/CodeGen/CGObjC.cpp | 347
-rw-r--r-- lib/CodeGen/CGObjCGNU.cpp | 267
-rw-r--r-- lib/CodeGen/CGObjCMac.cpp | 1419
-rw-r--r-- lib/CodeGen/CGObjCRuntime.cpp | 56
-rw-r--r-- lib/CodeGen/CGObjCRuntime.h | 36
-rw-r--r-- lib/CodeGen/CGOpenCLRuntime.cpp | 34
-rw-r--r-- lib/CodeGen/CGOpenMPRuntime.cpp | 1619
-rw-r--r-- lib/CodeGen/CGOpenMPRuntime.h | 108
-rw-r--r-- lib/CodeGen/CGRecordLayoutBuilder.cpp | 2
-rw-r--r-- lib/CodeGen/CGStmt.cpp | 243
-rw-r--r-- lib/CodeGen/CGStmtOpenMP.cpp | 1040
-rw-r--r-- lib/CodeGen/CGVTT.cpp | 1
-rw-r--r-- lib/CodeGen/CGVTables.cpp | 175
-rw-r--r-- lib/CodeGen/CGVTables.h | 4
-rw-r--r-- lib/CodeGen/CGValue.h | 182
-rw-r--r-- lib/CodeGen/CodeGenABITypes.cpp | 41
-rw-r--r-- lib/CodeGen/CodeGenAction.cpp | 245
-rw-r--r-- lib/CodeGen/CodeGenFunction.cpp | 312
-rw-r--r-- lib/CodeGen/CodeGenFunction.h | 750
-rw-r--r-- lib/CodeGen/CodeGenModule.cpp | 687
-rw-r--r-- lib/CodeGen/CodeGenModule.h | 299
-rw-r--r-- lib/CodeGen/CodeGenPGO.cpp | 81
-rw-r--r-- lib/CodeGen/CodeGenPGO.h | 6
-rw-r--r-- lib/CodeGen/CodeGenTBAA.cpp | 2
-rw-r--r-- lib/CodeGen/CodeGenTypeCache.h | 108
-rw-r--r-- lib/CodeGen/CodeGenTypes.cpp | 151
-rw-r--r-- lib/CodeGen/CodeGenTypes.h | 21
-rw-r--r-- lib/CodeGen/CoverageMappingGen.cpp | 67
-rw-r--r-- lib/CodeGen/EHScopeStack.h | 29
-rw-r--r-- lib/CodeGen/ItaniumCXXABI.cpp | 687
-rw-r--r-- lib/CodeGen/MicrosoftCXXABI.cpp | 645
-rw-r--r-- lib/CodeGen/ModuleBuilder.cpp | 15
-rw-r--r-- lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 153
-rw-r--r-- lib/CodeGen/TargetInfo.cpp | 2022
-rw-r--r-- lib/CodeGen/TargetInfo.h | 6
-rw-r--r-- lib/Driver/Action.cpp | 16
-rw-r--r-- lib/Driver/Compilation.cpp | 5
-rw-r--r-- lib/Driver/CrossWindowsToolChain.cpp | 6
-rw-r--r-- lib/Driver/Driver.cpp | 367
-rw-r--r-- lib/Driver/DriverOptions.cpp | 2
-rw-r--r-- lib/Driver/Job.cpp | 32
-rw-r--r-- lib/Driver/MSVCToolChain.cpp | 324
-rw-r--r-- lib/Driver/MinGWToolChain.cpp | 16
-rw-r--r-- lib/Driver/Multilib.cpp | 5
-rw-r--r-- lib/Driver/SanitizerArgs.cpp | 67
-rw-r--r-- lib/Driver/ToolChain.cpp | 233
-rw-r--r-- lib/Driver/ToolChains.cpp | 1582
-rw-r--r-- lib/Driver/ToolChains.h | 342
-rw-r--r-- lib/Driver/Tools.cpp | 3351
-rw-r--r-- lib/Driver/Tools.h | 919
-rw-r--r-- lib/Driver/Types.cpp | 13
-rw-r--r-- lib/Edit/Commit.cpp | 2
-rw-r--r-- lib/Edit/EditedSource.cpp | 82
-rw-r--r-- lib/Format/ContinuationIndenter.cpp | 68
-rw-r--r-- lib/Format/Encoding.h | 2
-rw-r--r-- lib/Format/Format.cpp | 616
-rw-r--r-- lib/Format/FormatToken.cpp | 23
-rw-r--r-- lib/Format/FormatToken.h | 28
-rw-r--r-- lib/Format/TokenAnnotator.cpp | 385
-rw-r--r-- lib/Format/TokenAnnotator.h | 11
-rw-r--r-- lib/Format/UnwrappedLineFormatter.cpp | 41
-rw-r--r-- lib/Format/UnwrappedLineParser.cpp | 126
-rw-r--r-- lib/Format/UnwrappedLineParser.h | 2
-rw-r--r-- lib/Format/WhitespaceManager.cpp | 255
-rw-r--r-- lib/Format/WhitespaceManager.h | 16
-rw-r--r-- lib/Frontend/ASTMerge.cpp | 1
-rw-r--r-- lib/Frontend/ASTUnit.cpp | 134
-rw-r--r-- lib/Frontend/CMakeLists.txt | 2
-rw-r--r-- lib/Frontend/CacheTokens.cpp | 3
-rw-r--r-- lib/Frontend/ChainedIncludesSource.cpp | 20
-rw-r--r-- lib/Frontend/CompilerInstance.cpp | 254
-rw-r--r-- lib/Frontend/CompilerInvocation.cpp | 267
-rw-r--r-- lib/Frontend/CreateInvocationFromCommandLine.cpp | 6
-rw-r--r-- lib/Frontend/DependencyFile.cpp | 77
-rw-r--r-- lib/Frontend/DiagnosticRenderer.cpp | 317
-rw-r--r-- lib/Frontend/FrontendAction.cpp | 18
-rw-r--r-- lib/Frontend/FrontendActions.cpp | 58
-rw-r--r-- lib/Frontend/HeaderIncludeGen.cpp | 64
-rw-r--r-- lib/Frontend/InitHeaderSearch.cpp | 90
-rw-r--r-- lib/Frontend/InitPreprocessor.cpp | 59
-rw-r--r-- lib/Frontend/LogDiagnosticPrinter.cpp | 4
-rw-r--r-- lib/Frontend/ModuleDependencyCollector.cpp | 9
-rw-r--r-- lib/Frontend/MultiplexConsumer.cpp | 10
-rw-r--r-- lib/Frontend/PCHContainerOperations.cpp | 21
-rw-r--r-- lib/Frontend/Rewrite/FrontendActions.cpp | 2
-rw-r--r-- lib/Frontend/Rewrite/InclusionRewriter.cpp | 5
-rw-r--r-- lib/Frontend/Rewrite/RewriteModernObjC.cpp | 69
-rw-r--r-- lib/Frontend/Rewrite/RewriteObjC.cpp | 47
-rw-r--r-- lib/Frontend/SerializedDiagnosticPrinter.cpp | 74
-rw-r--r-- lib/Frontend/TestModuleFileExtension.cpp | 123
-rw-r--r-- lib/Frontend/TestModuleFileExtension.h | 72
-rw-r--r-- lib/Frontend/TextDiagnostic.cpp | 10
-rw-r--r-- lib/Frontend/VerifyDiagnosticConsumer.cpp | 4
-rw-r--r-- lib/Headers/CMakeLists.txt | 14
-rw-r--r-- lib/Headers/Intrin.h | 34
-rw-r--r-- lib/Headers/__clang_cuda_runtime_wrapper.h | 216
-rw-r--r-- lib/Headers/__wmmintrin_aes.h | 10
-rw-r--r-- lib/Headers/__wmmintrin_pclmul.h | 6
-rw-r--r-- lib/Headers/adxintrin.h | 6
-rw-r--r-- lib/Headers/altivec.h | 747
-rw-r--r-- lib/Headers/ammintrin.h | 108
-rw-r--r-- lib/Headers/arm_acle.h | 16
-rw-r--r-- lib/Headers/avx2intrin.h | 451
-rw-r--r-- lib/Headers/avx512bwintrin.h | 390
-rw-r--r-- lib/Headers/avx512dqintrin.h | 538
-rw-r--r-- lib/Headers/avx512erintrin.h | 26
-rw-r--r-- lib/Headers/avx512fintrin.h | 701
-rw-r--r-- lib/Headers/avx512vlbwintrin.h | 433
-rw-r--r-- lib/Headers/avx512vldqintrin.h | 606
-rw-r--r-- lib/Headers/avx512vlintrin.h | 2666
-rw-r--r-- lib/Headers/avxintrin.h | 186
-rw-r--r-- lib/Headers/bmi2intrin.h | 6
-rw-r--r-- lib/Headers/bmiintrin.h | 18
-rw-r--r-- lib/Headers/emmintrin.h | 81
-rw-r--r-- lib/Headers/f16cintrin.h | 26
-rw-r--r-- lib/Headers/fma4intrin.h | 8
-rw-r--r-- lib/Headers/fmaintrin.h | 8
-rw-r--r-- lib/Headers/fxsrintrin.h | 2
-rw-r--r-- lib/Headers/htmxlintrin.h | 2
-rw-r--r-- lib/Headers/immintrin.h | 95
-rw-r--r-- lib/Headers/lzcntintrin.h | 6
-rw-r--r-- lib/Headers/mm3dnow.h | 6
-rw-r--r-- lib/Headers/mmintrin.h | 48
-rw-r--r-- lib/Headers/module.modulemap | 25
-rw-r--r-- lib/Headers/nmmintrin.h | 5
-rw-r--r-- lib/Headers/pmmintrin.h | 10
-rw-r--r-- lib/Headers/popcntintrin.h | 18
-rw-r--r-- lib/Headers/prfchwintrin.h | 6
-rw-r--r-- lib/Headers/rdseedintrin.h | 5
-rw-r--r-- lib/Headers/rtmintrin.h | 2
-rw-r--r-- lib/Headers/shaintrin.h | 8
-rw-r--r-- lib/Headers/smmintrin.h | 181
-rw-r--r-- lib/Headers/stdint.h | 14
-rw-r--r-- lib/Headers/tbmintrin.h | 14
-rw-r--r-- lib/Headers/tgmath.h | 2
-rw-r--r-- lib/Headers/tmmintrin.h | 19
-rw-r--r-- lib/Headers/wmmintrin.h | 9
-rw-r--r-- lib/Headers/x86intrin.h | 24
-rw-r--r-- lib/Headers/xmmintrin.h | 42
-rw-r--r-- lib/Headers/xopintrin.h | 91
-rw-r--r-- lib/Headers/xsavecintrin.h | 48
-rw-r--r-- lib/Headers/xsaveintrin.h | 58
-rw-r--r-- lib/Headers/xsaveoptintrin.h | 48
-rw-r--r-- lib/Headers/xsavesintrin.h | 58
-rw-r--r-- lib/Index/CommentToXML.cpp | 5
-rw-r--r-- lib/Index/SimpleFormatContext.h | 12
-rw-r--r-- lib/Index/USRGeneration.cpp | 13
-rw-r--r-- lib/Lex/HeaderSearch.cpp | 296
-rw-r--r-- lib/Lex/Lexer.cpp | 25
-rw-r--r-- lib/Lex/LiteralSupport.cpp | 45
-rw-r--r-- lib/Lex/MacroInfo.cpp | 14
-rw-r--r-- lib/Lex/ModuleMap.cpp | 191
-rw-r--r-- lib/Lex/PPDirectives.cpp | 90
-rw-r--r-- lib/Lex/PPExpressions.cpp | 6
-rw-r--r-- lib/Lex/PPLexerChange.cpp | 7
-rw-r--r-- lib/Lex/PPMacroExpansion.cpp | 38
-rw-r--r-- lib/Lex/Pragma.cpp | 8
-rw-r--r-- lib/Lex/PreprocessingRecord.cpp | 2
-rw-r--r-- lib/Lex/Preprocessor.cpp | 37
-rw-r--r-- lib/Lex/TokenLexer.cpp | 17
-rw-r--r-- lib/Parse/ParseAST.cpp | 17
-rw-r--r-- lib/Parse/ParseCXXInlineMethods.cpp | 10
-rw-r--r-- lib/Parse/ParseDecl.cpp | 77
-rw-r--r-- lib/Parse/ParseDeclCXX.cpp | 365
-rw-r--r-- lib/Parse/ParseExpr.cpp | 44
-rw-r--r-- lib/Parse/ParseExprCXX.cpp | 55
-rw-r--r-- lib/Parse/ParseObjc.cpp | 102
-rw-r--r-- lib/Parse/ParseOpenMP.cpp | 377
-rw-r--r-- lib/Parse/ParsePragma.cpp | 47
-rw-r--r-- lib/Parse/ParseStmt.cpp | 63
-rw-r--r-- lib/Parse/ParseStmtAsm.cpp | 40
-rw-r--r-- lib/Parse/ParseTemplate.cpp | 13
-rw-r--r-- lib/Parse/ParseTentative.cpp | 3
-rw-r--r-- lib/Parse/Parser.cpp | 86
-rw-r--r-- lib/Rewrite/Rewriter.cpp | 28
-rw-r--r-- lib/Sema/AnalysisBasedWarnings.cpp | 184
-rw-r--r-- lib/Sema/AttributeList.cpp | 31
-rw-r--r-- lib/Sema/CMakeLists.txt | 1
-rw-r--r-- lib/Sema/DeclSpec.cpp | 120
-rw-r--r-- lib/Sema/JumpDiagnostics.cpp | 7
-rw-r--r-- lib/Sema/MultiplexExternalSemaSource.cpp | 12
-rw-r--r-- lib/Sema/ScopeInfo.cpp | 4
-rw-r--r-- lib/Sema/Sema.cpp | 84
-rw-r--r-- lib/Sema/SemaAccess.cpp | 13
-rw-r--r-- lib/Sema/SemaCUDA.cpp | 144
-rw-r--r-- lib/Sema/SemaCXXScopeSpec.cpp | 24
-rw-r--r-- lib/Sema/SemaCast.cpp | 110
-rw-r--r-- lib/Sema/SemaChecking.cpp | 423
-rw-r--r-- lib/Sema/SemaCodeComplete.cpp | 49
-rw-r--r-- lib/Sema/SemaCoroutine.cpp | 448
-rw-r--r-- lib/Sema/SemaDecl.cpp | 1228
-rw-r--r-- lib/Sema/SemaDeclAttr.cpp | 695
-rw-r--r-- lib/Sema/SemaDeclCXX.cpp | 593
-rw-r--r-- lib/Sema/SemaDeclObjC.cpp | 172
-rw-r--r-- lib/Sema/SemaExceptionSpec.cpp | 68
-rw-r--r-- lib/Sema/SemaExpr.cpp | 948
-rw-r--r-- lib/Sema/SemaExprCXX.cpp | 226
-rw-r--r-- lib/Sema/SemaExprMember.cpp | 83
-rw-r--r-- lib/Sema/SemaExprObjC.cpp | 262
-rw-r--r-- lib/Sema/SemaFixItUtils.cpp | 6
-rw-r--r-- lib/Sema/SemaInit.cpp | 76
-rw-r--r-- lib/Sema/SemaLambda.cpp | 115
-rw-r--r-- lib/Sema/SemaLookup.cpp | 329
-rw-r--r-- lib/Sema/SemaObjCProperty.cpp | 743
-rw-r--r-- lib/Sema/SemaOpenMP.cpp | 2746
-rw-r--r-- lib/Sema/SemaOverload.cpp | 810
-rw-r--r-- lib/Sema/SemaPseudoObject.cpp | 323
-rw-r--r-- lib/Sema/SemaStmt.cpp | 191
-rw-r--r-- lib/Sema/SemaStmtAsm.cpp | 224
-rw-r--r-- lib/Sema/SemaStmtAttr.cpp | 75
-rw-r--r-- lib/Sema/SemaTemplate.cpp | 200
-rw-r--r-- lib/Sema/SemaTemplateDeduction.cpp | 129
-rw-r--r-- lib/Sema/SemaTemplateInstantiate.cpp | 45
-rw-r--r-- lib/Sema/SemaTemplateInstantiateDecl.cpp | 57
-rw-r--r-- lib/Sema/SemaTemplateVariadic.cpp | 5
-rw-r--r-- lib/Sema/SemaType.cpp | 630
-rw-r--r-- lib/Sema/TreeTransform.h | 732
-rw-r--r-- lib/Serialization/ASTCommon.cpp | 201
-rw-r--r-- lib/Serialization/ASTCommon.h | 2
-rw-r--r-- lib/Serialization/ASTReader.cpp | 2047
-rw-r--r-- lib/Serialization/ASTReaderDecl.cpp | 437
-rw-r--r-- lib/Serialization/ASTReaderInternals.h | 99
-rw-r--r-- lib/Serialization/ASTReaderStmt.cpp | 331
-rw-r--r-- lib/Serialization/ASTWriter.cpp | 1373
-rw-r--r-- lib/Serialization/ASTWriterDecl.cpp | 250
-rw-r--r-- lib/Serialization/ASTWriterStmt.cpp | 219
-rw-r--r-- lib/Serialization/CMakeLists.txt | 1
-rw-r--r-- lib/Serialization/GeneratePCH.cpp | 16
-rw-r--r-- lib/Serialization/GlobalModuleIndex.cpp | 4
-rw-r--r-- lib/Serialization/Module.cpp | 8
-rw-r--r-- lib/Serialization/ModuleFileExtension.cpp | 22
-rw-r--r-- lib/Serialization/ModuleManager.cpp | 122
-rw-r--r-- lib/Serialization/MultiOnDiskHashTable.h | 330
-rw-r--r-- lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp | 12
-rw-r--r-- lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp | 44
-rw-r--r-- lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 77
-rw-r--r-- lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp | 46
-rw-r--r-- lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp | 5
-rw-r--r-- lib/StaticAnalyzer/Checkers/CMakeLists.txt | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 199
-rw-r--r-- lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | 48
-rw-r--r-- lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 7
-rw-r--r-- lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp | 33
-rw-r--r-- lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 22
-rw-r--r-- lib/StaticAnalyzer/Checkers/Checkers.td | 118
-rw-r--r-- lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | 14
-rw-r--r-- lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp | 71
-rw-r--r-- lib/StaticAnalyzer/Checkers/DebugCheckers.cpp | 36
-rw-r--r-- lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 43
-rw-r--r-- lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp | 13
-rw-r--r-- lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp | 213
-rw-r--r-- lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 711
-rw-r--r-- lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 62
-rw-r--r-- lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | 31
-rw-r--r-- lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp | 25
-rw-r--r-- lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp | 1
-rw-r--r-- lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 1201
-rw-r--r-- lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 33
-rw-r--r-- lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/Makefile | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 226
-rw-r--r-- lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp | 134
-rw-r--r-- lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp | 7
-rw-r--r-- lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | 42
-rw-r--r-- lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 1066
-rw-r--r-- lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp | 5
-rw-r--r-- lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp | 21
-rw-r--r-- lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 314
-rw-r--r-- lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 38
-rw-r--r-- lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 132
-rw-r--r-- lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | 45
-rw-r--r-- lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 34
-rw-r--r-- lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | 6
-rw-r--r-- lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | 16
-rw-r--r-- lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp | 4
-rw-r--r-- lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 16
-rw-r--r-- lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp | 13
-rw-r--r-- lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp | 10
-rw-r--r-- lib/StaticAnalyzer/Checkers/VforkChecker.cpp | 218
-rw-r--r-- lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | 24
-rw-r--r-- lib/StaticAnalyzer/Core/AnalysisManager.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 21
-rw-r--r-- lib/StaticAnalyzer/Core/BlockCounter.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Core/BugReporter.cpp | 110
-rw-r--r-- lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 74
-rw-r--r-- lib/StaticAnalyzer/Core/CMakeLists.txt | 3
-rw-r--r-- lib/StaticAnalyzer/Core/CallEvent.cpp | 120
-rw-r--r-- lib/StaticAnalyzer/Core/Checker.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Core/CheckerContext.cpp | 10
-rw-r--r-- lib/StaticAnalyzer/Core/CheckerHelpers.cpp | 24
-rw-r--r-- lib/StaticAnalyzer/Core/CheckerManager.cpp | 60
-rw-r--r-- lib/StaticAnalyzer/Core/CheckerRegistry.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Core/ConstraintManager.cpp | 2
-rw-r--r-- lib/StaticAnalyzer/Core/CoreEngine.cpp | 8
-rw-r--r-- lib/StaticAnalyzer/Core/DynamicTypeMap.cpp | 51
-rw-r--r-- lib/StaticAnalyzer/Core/Environment.cpp | 17
-rw-r--r-- lib/StaticAnalyzer/Core/ExplodedGraph.cpp | 10
-rw-r--r-- lib/StaticAnalyzer/Core/ExprEngine.cpp | 221
-rw-r--r-- lib/StaticAnalyzer/Core/ExprEngineC.cpp | 207
-rw-r--r-- lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 188
-rw-r--r-- lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 42
-rw-r--r-- lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 119
-rw-r--r-- lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 33
-rw-r--r-- lib/StaticAnalyzer/Core/IssueHash.cpp | 196
-rw-r--r-- lib/StaticAnalyzer/Core/LoopWidening.cpp | 68
-rw-r--r-- lib/StaticAnalyzer/Core/Makefile | 6
-rw-r--r-- lib/StaticAnalyzer/Core/MemRegion.cpp | 67
-rw-r--r-- lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 40
-rw-r--r-- lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 53
-rw-r--r-- lib/StaticAnalyzer/Core/ProgramState.cpp | 75
-rw-r--r-- lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 188
-rw-r--r-- lib/StaticAnalyzer/Core/RegionStore.cpp | 244
-rw-r--r-- lib/StaticAnalyzer/Core/SValBuilder.cpp | 46
-rw-r--r-- lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp | 67
-rw-r--r-- lib/StaticAnalyzer/Core/SimpleConstraintManager.h | 21
-rw-r--r-- lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 11
-rw-r--r-- lib/StaticAnalyzer/Core/Store.cpp | 14
-rw-r--r-- lib/StaticAnalyzer/Core/SymbolManager.cpp | 26
-rw-r--r-- lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 29
-rw-r--r-- lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp | 14
-rw-r--r-- lib/StaticAnalyzer/Frontend/Makefile | 4
-rw-r--r-- lib/Tooling/ArgumentsAdjusters.cpp | 12
-rw-r--r-- lib/Tooling/CommonOptionsParser.cpp | 24
-rw-r--r-- lib/Tooling/CompilationDatabase.cpp | 6
-rw-r--r-- lib/Tooling/Core/CMakeLists.txt | 2
-rw-r--r-- lib/Tooling/Core/Lookup.cpp | 113
-rw-r--r-- lib/Tooling/Core/Replacement.cpp | 198
-rw-r--r-- lib/Tooling/JSONCompilationDatabase.cpp | 93
-rw-r--r-- lib/Tooling/Tooling.cpp | 115
464 files changed, 58105 insertions(+), 27257 deletions(-)
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index e32884218597..8c04c8371cef 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -153,6 +153,9 @@ static bool HasARCRuntime(CompilerInvocation &origCI) {
if (triple.isiOS())
return triple.getOSMajorVersion() >= 5;
+ if (triple.isWatchOS())
+ return true;
+
if (triple.getOS() == llvm::Triple::Darwin)
return triple.getOSMajorVersion() >= 11;
@@ -206,7 +209,8 @@ createInvocationForMigration(CompilerInvocation &origCI,
WarnOpts.push_back("error=arc-unsafe-retained-assign");
CInvok->getDiagnosticOpts().Warnings = std::move(WarnOpts);
- CInvok->getLangOpts()->ObjCARCWeak = HasARCRuntime(origCI);
+ CInvok->getLangOpts()->ObjCWeakRuntime = HasARCRuntime(origCI);
+ CInvok->getLangOpts()->ObjCWeak = CInvok->getLangOpts()->ObjCWeakRuntime;
return CInvok.release();
}
@@ -600,7 +604,6 @@ bool MigrationProcess::applyTransform(TransformFn trans,
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
- vecOS.flush();
std::unique_ptr<llvm::MemoryBuffer> memBuf(
llvm::MemoryBuffer::getMemBufferCopy(
StringRef(newText.data(), newText.size()), newFname));
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index 72a55da5d50b..2cf20699aeef 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -144,7 +144,7 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
SmallString<64> tempPath;
int fd;
if (fs::createTemporaryFile(path::filename(origFE->getName()),
- path::extension(origFE->getName()), fd,
+ path::extension(origFE->getName()).drop_front(), fd,
tempPath))
return report("Could not create file: " + tempPath.str(), Diag);
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index b61a421ce415..50b113660d3a 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -214,25 +214,15 @@ namespace {
// FIXME. This duplicates one in RewriteObjCFoundationAPI.cpp
bool subscriptOperatorNeedsParens(const Expr *FullExpr) {
const Expr* Expr = FullExpr->IgnoreImpCasts();
- if (isa<ArraySubscriptExpr>(Expr) ||
- isa<CallExpr>(Expr) ||
- isa<DeclRefExpr>(Expr) ||
- isa<CXXNamedCastExpr>(Expr) ||
- isa<CXXConstructExpr>(Expr) ||
- isa<CXXThisExpr>(Expr) ||
- isa<CXXTypeidExpr>(Expr) ||
- isa<CXXUnresolvedConstructExpr>(Expr) ||
- isa<ObjCMessageExpr>(Expr) ||
- isa<ObjCPropertyRefExpr>(Expr) ||
- isa<ObjCProtocolExpr>(Expr) ||
- isa<MemberExpr>(Expr) ||
- isa<ObjCIvarRefExpr>(Expr) ||
- isa<ParenExpr>(FullExpr) ||
- isa<ParenListExpr>(Expr) ||
- isa<SizeOfPackExpr>(Expr))
- return false;
-
- return true;
+ return !(isa<ArraySubscriptExpr>(Expr) || isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) || isa<CXXNamedCastExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) || isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) || isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) || isa<MemberExpr>(Expr) ||
+ isa<ObjCIvarRefExpr>(Expr) || isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) || isa<SizeOfPackExpr>(Expr));
}
/// \brief - Rewrite message expression for Objective-C setter and getters into
@@ -665,9 +655,7 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
return false;
}
}
- if (HasAtleastOneRequiredProperty || HasAtleastOneRequiredMethod)
- return true;
- return false;
+ return HasAtleastOneRequiredProperty || HasAtleastOneRequiredMethod;
}
static bool rewriteToObjCInterfaceDecl(const ObjCInterfaceDecl *IDecl,
@@ -736,7 +724,7 @@ static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
SourceLocation EndOfEnumDclLoc = EnumDcl->getLocEnd();
EndOfEnumDclLoc = trans::findSemiAfterLocation(EndOfEnumDclLoc,
NS.getASTContext(), /*IsDecl*/true);
- if (!EndOfEnumDclLoc.isInvalid()) {
+ if (EndOfEnumDclLoc.isValid()) {
SourceRange EnumDclRange(EnumDcl->getLocStart(), EndOfEnumDclLoc);
commit.insertFromRange(TypedefDcl->getLocStart(), EnumDclRange);
}
@@ -746,7 +734,7 @@ static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
SourceLocation EndTypedefDclLoc = TypedefDcl->getLocEnd();
EndTypedefDclLoc = trans::findSemiAfterLocation(EndTypedefDclLoc,
NS.getASTContext(), /*IsDecl*/true);
- if (!EndTypedefDclLoc.isInvalid()) {
+ if (EndTypedefDclLoc.isValid()) {
SourceRange TDRange(TypedefDcl->getLocStart(), EndTypedefDclLoc);
commit.remove(TDRange);
}
@@ -755,7 +743,7 @@ static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
EndOfEnumDclLoc = trans::findLocationAfterSemi(EnumDcl->getLocEnd(), NS.getASTContext(),
/*IsDecl*/true);
- if (!EndOfEnumDclLoc.isInvalid()) {
+ if (EndOfEnumDclLoc.isValid()) {
SourceLocation BeginOfEnumDclLoc = EnumDcl->getLocStart();
// FIXME. This assumes that enum decl; is immediately preceded by eoln.
// It is trying to remove the enum decl. lines entirely.
@@ -1536,7 +1524,7 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
FuncDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
FuncDecl->hasAttr<NSReturnsAutoreleasedAttr>());
- // Trivial case of when funciton is annotated and has no argument.
+ // Trivial case of when function is annotated and has no argument.
if (FuncIsReturnAnnotated && FuncDecl->getNumParams() == 0)
return CF_BRIDGING_NONE;
@@ -1665,7 +1653,7 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
Editor->commit(commit);
}
- // Trivial case of when funciton is annotated and has no argument.
+ // Trivial case of when function is annotated and has no argument.
if (MethodIsReturnAnnotated &&
(MethodDecl->param_begin() == MethodDecl->param_end()))
return;
@@ -1805,7 +1793,7 @@ private:
FileID FID;
unsigned Offset;
std::tie(FID, Offset) = SourceMgr.getDecomposedLoc(Loc);
- assert(!FID.isInvalid());
+ assert(FID.isValid());
SmallString<200> Path =
StringRef(SourceMgr.getFileEntryForID(FID)->getName());
llvm::sys::fs::make_absolute(Path);
@@ -1862,8 +1850,8 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
for (DeclContext::decl_iterator D = TU->decls_begin(), DEnd = TU->decls_end();
D != DEnd; ++D) {
FileID FID = PP.getSourceManager().getFileID((*D)->getLocation());
- if (!FID.isInvalid())
- if (!FileId.isInvalid() && FileId != FID) {
+ if (FID.isValid())
+ if (FileId.isValid() && FileId != FID) {
if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation)
AnnotateImplicitBridging(Ctx);
}
@@ -1982,7 +1970,6 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
- vecOS.flush();
std::unique_ptr<llvm::MemoryBuffer> memBuf(
llvm::MemoryBuffer::getMemBufferCopy(
StringRef(newText.data(), newText.size()), file->getName()));
@@ -2215,12 +2202,11 @@ static std::string applyEditsToTemp(const FileEntry *FE,
SmallString<512> NewText;
llvm::raw_svector_ostream OS(NewText);
Buf->write(OS);
- OS.flush();
SmallString<64> TempPath;
int FD;
if (fs::createTemporaryFile(path::filename(FE->getName()),
- path::extension(FE->getName()), FD,
+ path::extension(FE->getName()).drop_front(), FD,
TempPath)) {
reportDiag("Could not create file: " + TempPath.str(), Diag);
return std::string();
diff --git a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index 9689f40760cd..d45d5d60b78a 100644
--- a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -104,9 +104,7 @@ public:
return false;
if (!S->getThen() || !Visit(S->getThen()))
return false;
- if (S->getElse() && !Visit(S->getElse()))
- return false;
- return true;
+ return !S->getElse() || Visit(S->getElse());
}
bool VisitWhileStmt(WhileStmt *S) {
if (S->getConditionVariable())
diff --git a/lib/ARCMigrate/TransGCAttrs.cpp b/lib/ARCMigrate/TransGCAttrs.cpp
index 10fce19b6f19..2ae6b78a4634 100644
--- a/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/lib/ARCMigrate/TransGCAttrs.cpp
@@ -152,9 +152,7 @@ public:
return ID->getImplementation() != nullptr;
if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContD))
return CD->getImplementation() != nullptr;
- if (isa<ObjCImplDecl>(ContD))
- return true;
- return false;
+ return isa<ObjCImplDecl>(ContD);
}
return false;
}
diff --git a/lib/ARCMigrate/TransProperties.cpp b/lib/ARCMigrate/TransProperties.cpp
index ab128844b45f..8667bc2a37da 100644
--- a/lib/ARCMigrate/TransProperties.cpp
+++ b/lib/ARCMigrate/TransProperties.cpp
@@ -96,6 +96,10 @@ public:
collectProperties(iface, AtProps);
+ // Look through extensions.
+ for (auto *Ext : iface->visible_extensions())
+ collectProperties(Ext, AtProps);
+
typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
prop_impl_iterator;
for (prop_impl_iterator
@@ -137,19 +141,6 @@ public:
Transaction Trans(Pass.TA);
rewriteProperty(props, atLoc);
}
-
- AtPropDeclsTy AtExtProps;
- // Look through extensions.
- for (auto *Ext : iface->visible_extensions())
- collectProperties(Ext, AtExtProps, &AtProps);
-
- for (AtPropDeclsTy::iterator
- I = AtExtProps.begin(), E = AtExtProps.end(); I != E; ++I) {
- SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
- PropsTy &props = I->second;
- Transaction Trans(Pass.TA);
- doActionForExtensionProp(props, atLoc);
- }
}
private:
@@ -177,15 +168,6 @@ private:
}
}
- void doActionForExtensionProp(PropsTy &props, SourceLocation atLoc) {
- llvm::DenseMap<IdentifierInfo *, PropActionKind>::iterator I;
- I = ActionOnProp.find(props[0].PropD->getIdentifier());
- if (I == ActionOnProp.end())
- return;
-
- doPropAction(I->second, props, atLoc, false);
- }
-
void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index 7db1a1c378cc..f81133f3aad3 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -150,11 +150,8 @@ public:
return true;
}
- if (!hasSideEffects(rec, Pass.Ctx)) {
- if (tryRemoving(RecContainer))
- return true;
- }
- Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
+ if (hasSideEffects(rec, Pass.Ctx) || !tryRemoving(RecContainer))
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
return true;
}
@@ -174,11 +171,8 @@ private:
/// return var;
///
bool isCommonUnusedAutorelease(ObjCMessageExpr *E) {
- if (isPlusOneAssignBeforeOrAfterAutorelease(E))
- return true;
- if (isReturnedAfterAutorelease(E))
- return true;
- return false;
+ return isPlusOneAssignBeforeOrAfterAutorelease(E) ||
+ isReturnedAfterAutorelease(E);
}
bool isReturnedAfterAutorelease(ObjCMessageExpr *E) {
@@ -225,11 +219,7 @@ private:
// Check for "RefD = [+1 retained object];".
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
- if (RefD != getReferencedDecl(Bop->getLHS()))
- return false;
- if (isPlusOneAssign(Bop))
- return true;
- return false;
+ return (RefD == getReferencedDecl(Bop->getLHS())) && isPlusOneAssign(Bop);
}
if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
@@ -359,16 +349,16 @@ private:
return;
Stmt::child_range StmtExprChild = StmtE->children();
- if (!StmtExprChild)
+ if (StmtExprChild.begin() == StmtExprChild.end())
return;
- CompoundStmt *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild);
+ auto *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild.begin());
if (!CompS)
return;
Stmt::child_range CompStmtChild = CompS->children();
- if (!CompStmtChild)
+ if (CompStmtChild.begin() == CompStmtChild.end())
return;
- DeclStmt *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild);
+ auto *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild.begin());
if (!DeclS)
return;
if (!DeclS->isSingleDecl())
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index 9fb2f1d3eea8..c628b54ed414 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -505,11 +505,10 @@ void TransformActionsImpl::commitClearDiagnostic(ArrayRef<unsigned> IDs,
void TransformActionsImpl::addInsertion(SourceLocation loc, StringRef text) {
SourceManager &SM = Ctx.getSourceManager();
loc = SM.getExpansionLoc(loc);
- for (std::list<CharRange>::reverse_iterator
- I = Removals.rbegin(), E = Removals.rend(); I != E; ++I) {
- if (!SM.isBeforeInTranslationUnit(loc, I->End))
+ for (const CharRange &I : llvm::reverse(Removals)) {
+ if (!SM.isBeforeInTranslationUnit(loc, I.End))
break;
- if (I->Begin.isBeforeInTranslationUnitThan(loc))
+ if (I.Begin.isBeforeInTranslationUnitThan(loc))
return;
}
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index 56d3af7233bf..3fd36ff310f3 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -42,7 +42,7 @@ bool MigrationPass::CFBridgingFunctionsDefined() {
bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
bool AllowOnUnknownClass) {
- if (!Ctx.getLangOpts().ObjCARCWeak)
+ if (!Ctx.getLangOpts().ObjCWeakRuntime)
return false;
QualType T = type;
@@ -50,7 +50,8 @@ bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
return false;
// iOS is always safe to use 'weak'.
- if (Ctx.getTargetInfo().getTriple().isiOS())
+ if (Ctx.getTargetInfo().getTriple().isiOS() ||
+ Ctx.getTargetInfo().getTriple().isWatchOS())
AllowOnUnknownClass = true;
while (const PointerType *ptr = T->getAs<PointerType>())
@@ -112,10 +113,7 @@ bool trans::isPlusOne(const Expr *E) {
while (implCE && implCE->getCastKind() == CK_BitCast)
implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr());
- if (implCE && implCE->getCastKind() == CK_ARCConsumeObject)
- return true;
-
- return false;
+ return implCE && implCE->getCastKind() == CK_ARCConsumeObject;
}
/// \brief 'Loc' is the end of a statement range. This returns the location
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index fb9630180dca..108677abb8a4 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
@@ -327,7 +328,7 @@ const Decl *adjustDeclToTemplate(const Decl *D) {
// FIXME: Adjust alias templates?
return D;
}
-} // unnamed namespace
+} // anonymous namespace
const RawComment *ASTContext::getRawCommentForAnyRedecl(
const Decl *D,
@@ -366,8 +367,10 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
OriginalDeclForRC = I;
RawCommentAndCacheFlags Raw;
if (RC) {
- Raw.setRaw(RC);
+ // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
+ // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
Raw.setKind(RawCommentAndCacheFlags::FromDecl);
+ Raw.setRaw(RC);
} else
Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
Raw.setOriginalDecl(I);
@@ -428,7 +431,6 @@ comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
new (*this) comments::FullComment(FC->getBlocks(),
ThisDeclInfo);
return CFC;
-
}
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
@@ -659,8 +661,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
nullptr,
TemplateParameterList::Create(*this, SourceLocation(),
SourceLocation(),
- CanonParams.data(),
- CanonParams.size(),
+ CanonParams,
SourceLocation()));
// Get the new insert position for the node we care about.
@@ -681,9 +682,11 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
case TargetCXXABI::GenericARM: // Same as Itanium at this level
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
+ case TargetCXXABI::WatchOS:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::WebAssembly:
return CreateItaniumCXXABI(*this);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(*this);
@@ -732,19 +735,20 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
SubstTemplateTemplateParmPacks(this_()),
GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
UInt128Decl(nullptr), Float128StubDecl(nullptr),
- BuiltinVaListDecl(nullptr), ObjCIdDecl(nullptr), ObjCSelDecl(nullptr),
- ObjCClassDecl(nullptr), ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
+ BuiltinVaListDecl(nullptr), BuiltinMSVaListDecl(nullptr),
+ ObjCIdDecl(nullptr), ObjCSelDecl(nullptr), ObjCClassDecl(nullptr),
+ ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
CFConstantStringTypeDecl(nullptr), ObjCInstanceTypeDecl(nullptr),
FILEDecl(nullptr), jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr),
ucontext_tDecl(nullptr), BlockDescriptorType(nullptr),
BlockDescriptorExtendedType(nullptr), cudaConfigureCallDecl(nullptr),
FirstLocalImport(), LastLocalImport(), ExternCContext(nullptr),
- SourceMgr(SM), LangOpts(LOpts),
+ MakeIntegerSeqDecl(nullptr), SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
- AddrSpaceMap(nullptr), Target(nullptr), PrintingPolicy(LOpts),
- Idents(idents), Selectors(sels), BuiltinInfo(builtins),
- DeclarationNames(*this), ExternalSource(nullptr), Listener(nullptr),
- Comments(SM), CommentsLoaded(false),
+ AddrSpaceMap(nullptr), Target(nullptr), AuxTarget(nullptr),
+ PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
+ BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr),
+ Listener(nullptr), Comments(SM), CommentsLoaded(false),
CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
}
@@ -757,10 +761,8 @@ ASTContext::~ASTContext() {
ReleaseDeclContextMaps();
// Call all of the deallocation functions on all of their targets.
- for (DeallocationMap::const_iterator I = Deallocations.begin(),
- E = Deallocations.end(); I != E; ++I)
- for (unsigned J = 0, N = I->second.size(); J != N; ++J)
- (I->first)((I->second)[J]);
+ for (auto &Pair : Deallocations)
+ (Pair.first)(Pair.second);
// ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
// because they can contain DenseMaps.
@@ -783,23 +785,33 @@ ASTContext::~ASTContext() {
A != AEnd; ++A)
A->second->~AttrVec();
+ for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
+ MaterializedTemporaryValues)
+ MTVPair.second->~APValue();
+
llvm::DeleteContainerSeconds(MangleNumberingContexts);
}
void ASTContext::ReleaseParentMapEntries() {
- if (!AllParents) return;
- for (const auto &Entry : *AllParents) {
+ if (!PointerParents) return;
+ for (const auto &Entry : *PointerParents) {
if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else {
- assert(Entry.second.is<ParentVector *>());
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ for (const auto &Entry : *OtherParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
delete Entry.second.get<ParentVector *>();
}
}
}
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
- Deallocations[Callback].push_back(Data);
+ Deallocations.push_back({Callback, Data});
}
void
@@ -898,6 +910,24 @@ ExternCContextDecl *ASTContext::getExternCContextDecl() const {
return ExternCContext;
}
+BuiltinTemplateDecl *
+ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
+ const IdentifierInfo *II) const {
+ auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
+ BuiltinTemplate->setImplicit();
+ TUDecl->addDecl(BuiltinTemplate);
+
+ return BuiltinTemplate;
+}
+
+BuiltinTemplateDecl *
+ASTContext::getMakeIntegerSeqDecl() const {
+ if (!MakeIntegerSeqDecl)
+ MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
+ getMakeIntegerSeqName());
+ return MakeIntegerSeqDecl;
+}
+
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
RecordDecl::TagKind TK) const {
SourceLocation Loc;
@@ -950,13 +980,15 @@ void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
Types.push_back(Ty);
}
-void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
+void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
assert((!this->Target || this->Target == &Target) &&
"Incorrect target reinitialization");
assert(VoidTy.isNull() && "Context reinitialized?");
this->Target = &Target;
-
+ this->AuxTarget = AuxTarget;
+
ABI.reset(createCXXABI(Target));
AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
@@ -1043,6 +1075,10 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
// Placeholder type for builtin functions.
InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
+ // Placeholder type for OMP array sections.
+ if (LangOpts.OpenMP)
+ InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
DoubleComplexTy = getComplexType(DoubleTy);
@@ -1059,10 +1095,21 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
InitBuiltinType(OCLImage1dBufferTy, BuiltinType::OCLImage1dBuffer);
InitBuiltinType(OCLImage2dTy, BuiltinType::OCLImage2d);
InitBuiltinType(OCLImage2dArrayTy, BuiltinType::OCLImage2dArray);
+ InitBuiltinType(OCLImage2dDepthTy, BuiltinType::OCLImage2dDepth);
+ InitBuiltinType(OCLImage2dArrayDepthTy, BuiltinType::OCLImage2dArrayDepth);
+ InitBuiltinType(OCLImage2dMSAATy, BuiltinType::OCLImage2dMSAA);
+ InitBuiltinType(OCLImage2dArrayMSAATy, BuiltinType::OCLImage2dArrayMSAA);
+ InitBuiltinType(OCLImage2dMSAADepthTy, BuiltinType::OCLImage2dMSAADepth);
+ InitBuiltinType(OCLImage2dArrayMSAADepthTy,
+ BuiltinType::OCLImage2dArrayMSAADepth);
InitBuiltinType(OCLImage3dTy, BuiltinType::OCLImage3d);
InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
+ InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
+ InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
+ InitBuiltinType(OCLNDRangeTy, BuiltinType::OCLNDRange);
+ InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
}
// Builtin type for __objc_yes and __objc_no
@@ -1083,7 +1130,7 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
InitBuiltinType(HalfTy, BuiltinType::Half);
// Builtin type used to help define __builtin_va_list.
- VaListTagTy = QualType();
+ VaListTagDecl = nullptr;
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
@@ -1629,11 +1676,21 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getIntAlign();
break;
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::OCLImage1d:
case BuiltinType::OCLImage1dArray:
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
// Currently these types are pointers to opaque types.
Width = Target->getPointerWidth(0);
@@ -1861,7 +1918,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
-unsigned ASTContext::getTargetDefaultAlignForAttributeAligned(void) const {
+unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
return getTargetInfo().getDefaultAlignForAttributeAligned();
}
@@ -2006,6 +2063,17 @@ void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
ObjCImpls[CatD] = ImplD;
}
+const ObjCMethodDecl *
+ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
+ return ObjCMethodRedecls.lookup(MD);
+}
+
+void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
+ const ObjCMethodDecl *Redecl) {
+ assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
+ ObjCMethodRedecls[MD] = Redecl;
+}
+
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
const NamedDecl *ND) const {
if (const ObjCInterfaceDecl *ID =
@@ -2759,9 +2827,10 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
QualType canon = getQualifiedType(QualType(canonTy,0),
canonElementType.Quals);
- // If we didn't need extra canonicalization for the element type,
- // then just use that as our result.
- if (QualType(canonElementType.Ty, 0) == elementType)
+ // If we didn't need extra canonicalization for the element type or the size
+ // expression, then just use that as our result.
+ if (QualType(canonElementType.Ty, 0) == elementType &&
+ canonTy->getSizeExpr() == numElements)
return canon;
// Otherwise, we need to build a type which follows the spelling
@@ -2956,6 +3025,21 @@ static bool isCanonicalResultType(QualType T) {
T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}
+CanQualType
+ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
+ CanQualType CanResultType = getCanonicalType(ResultType);
+
+ // Canonical result types do not have ARC lifetime qualifiers.
+ if (CanResultType.getQualifiers().hasObjCLifetime()) {
+ Qualifiers Qs = CanResultType.getQualifiers();
+ Qs.removeObjCLifetime();
+ return CanQualType::CreateUnsafe(
+ getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
+ }
+
+ return CanResultType;
+}
+
QualType
ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
const FunctionProtoType::ExtProtoInfo &EPI) const {
@@ -2993,14 +3077,8 @@ ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
CanonicalEPI.HasTrailingReturn = false;
CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
- // Result types do not have ARC lifetime qualifiers.
- QualType CanResultTy = getCanonicalType(ResultTy);
- if (ResultTy.getQualifiers().hasObjCLifetime()) {
- Qualifiers Qs = CanResultTy.getQualifiers();
- Qs.removeObjCLifetime();
- CanResultTy = getQualifiedType(CanResultTy.getUnqualifiedType(), Qs);
- }
-
+ // Adjust the canonical function result type.
+ CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI);
// Get the new insert position for the node we care about.
@@ -3164,7 +3242,6 @@ QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
return QualType(type, 0);
}
-
/// \brief Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
@@ -3595,20 +3672,18 @@ static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
return true;
}
-static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
- unsigned &NumProtocols) {
- ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;
-
+static void
+SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
// Sort protocols, keyed by name.
- llvm::array_pod_sort(Protocols, ProtocolsEnd, CmpProtocolNames);
+ llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
// Canonicalize.
- for (unsigned I = 0, N = NumProtocols; I != N; ++I)
- Protocols[I] = Protocols[I]->getCanonicalDecl();
-
+ for (ObjCProtocolDecl *&P : Protocols)
+ P = P->getCanonicalDecl();
+
// Remove duplicates.
- ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
- NumProtocols = ProtocolsEnd-Protocols;
+ auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
+ Protocols.erase(ProtocolsEnd, Protocols.end());
}
QualType ASTContext::getObjCObjectType(QualType BaseType,
@@ -3673,12 +3748,9 @@ QualType ASTContext::getObjCObjectType(
ArrayRef<ObjCProtocolDecl *> canonProtocols;
SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
if (!protocolsSorted) {
- canonProtocolsVec.insert(canonProtocolsVec.begin(),
- protocols.begin(),
- protocols.end());
- unsigned uniqueCount = protocols.size();
- SortAndUniqueProtocols(&canonProtocolsVec[0], uniqueCount);
- canonProtocols = llvm::makeArrayRef(&canonProtocolsVec[0], uniqueCount);
+ canonProtocolsVec.append(protocols.begin(), protocols.end());
+ SortAndUniqueProtocols(canonProtocolsVec);
+ canonProtocols = canonProtocolsVec;
} else {
canonProtocols = protocols;
}
@@ -3869,7 +3941,6 @@ QualType ASTContext::getTypeOfType(QualType tofType) const {
return QualType(tot, 0);
}
-
/// \brief Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
@@ -3921,20 +3992,20 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
-QualType ASTContext::getAutoType(QualType DeducedType, bool IsDecltypeAuto,
+QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
bool IsDependent) const {
- if (DeducedType.isNull() && !IsDecltypeAuto && !IsDependent)
+ if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
return getAutoDeductType();
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- AutoType::Profile(ID, DeducedType, IsDecltypeAuto, IsDependent);
+ AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType,
- IsDecltypeAuto,
+ Keyword,
IsDependent);
Types.push_back(AT);
if (InsertPos)
@@ -3974,7 +4045,7 @@ QualType ASTContext::getAtomicType(QualType T) const {
QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
AutoDeductTy = QualType(
- new (*this, TypeAlignment) AutoType(QualType(), /*decltype(auto)*/false,
+ new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
/*dependent*/false),
0);
return AutoDeductTy;
@@ -4310,7 +4381,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
A != AEnd; (void)++A, ++Idx)
CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
- return TemplateArgument(CanonArgs, Arg.pack_size());
+ return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
}
}
@@ -4374,7 +4445,6 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
-
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// Handle the non-qualified case efficiently.
if (!T.hasLocalQualifiers()) {
@@ -4909,8 +4979,6 @@ bool ASTContext::BlockRequiresCopying(QualType Ty,
// If we have lifetime, that dominates.
if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
- assert(getLangOpts().ObjCAutoRefCount);
-
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("impossible");
@@ -4944,14 +5012,14 @@ bool ASTContext::getByrefLifetime(QualType Ty,
if (Ty->isRecordType()) {
HasByrefExtendedLayout = true;
LifeTime = Qualifiers::OCL_None;
- }
- else if (getLangOpts().ObjCAutoRefCount)
- LifeTime = Ty.getObjCLifetime();
- // MRR.
- else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
+ } else if ((LifeTime = Ty.getObjCLifetime())) {
+ // Honor the ARC qualifiers.
+ } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
+ // The MRR rule.
LifeTime = Qualifiers::OCL_ExplicitNone;
- else
+ } else {
LifeTime = Qualifiers::OCL_None;
+ }
return true;
}
@@ -4990,9 +5058,10 @@ CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
}
bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
- return getLangOpts().MSVCCompat && VD->isStaticDataMember() &&
+ return getTargetInfo().getCXXABI().isMicrosoft() &&
+ VD->isStaticDataMember() &&
VD->getType()->isIntegralOrEnumerationType() &&
- VD->isFirstDecl() && !VD->isOutOfLine() && VD->hasInit();
+ !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
}
static inline
@@ -5364,8 +5433,18 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::OCLSampler:
case BuiltinType::Dependent:
#define BUILTIN_TYPE(KIND, ID)
@@ -5779,7 +5858,6 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Just ignore it.
case Type::Auto:
return;
-
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
@@ -5998,10 +6076,19 @@ ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
// __builtin_va_list Construction Functions
//===----------------------------------------------------------------------===//
-static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
- // typedef char* __builtin_va_list;
+static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
+ StringRef Name) {
+ // typedef char* __builtin[_ms]_va_list;
QualType T = Context->getPointerType(Context->CharTy);
- return Context->buildImplicitTypedef(T, "__builtin_va_list");
+ return Context->buildImplicitTypedef(T, Name);
+}
+
+static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
+ return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list");
+}
+
+static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
+ return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list");
}
static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
@@ -6067,8 +6154,8 @@ CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
VaListTagDecl->addDecl(Field);
}
VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
QualType VaListTagType = Context->getRecordType(VaListTagDecl);
- Context->VaListTagTy = VaListTagType;
// } __builtin_va_list;
return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
@@ -6119,8 +6206,8 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
VaListTagDecl->addDecl(Field);
}
VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
QualType VaListTagType = Context->getRecordType(VaListTagDecl);
- Context->VaListTagTy = VaListTagType;
// } __va_list_tag;
TypedefDecl *VaListTagTypedefDecl =
@@ -6139,7 +6226,7 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
- // typedef struct __va_list_tag {
+ // struct __va_list_tag {
RecordDecl *VaListTagDecl;
VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
VaListTagDecl->startDefinition();
@@ -6179,21 +6266,15 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
VaListTagDecl->addDecl(Field);
}
VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
QualType VaListTagType = Context->getRecordType(VaListTagDecl);
- Context->VaListTagTy = VaListTagType;
-
- // } __va_list_tag;
- TypedefDecl *VaListTagTypedefDecl =
- Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
- QualType VaListTagTypedefType =
- Context->getTypedefType(VaListTagTypedefDecl);
+ // };
- // typedef __va_list_tag __builtin_va_list[1];
+ // typedef struct __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
- QualType VaListTagArrayType
- = Context->getConstantArrayType(VaListTagTypedefType,
- Size, ArrayType::Normal,0);
+ QualType VaListTagArrayType =
+ Context->getConstantArrayType(VaListTagType, Size, ArrayType::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
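
For orientation (not part of the patch): after the hunk above, the x86-64 builtin no longer manufactures a __va_list_tag typedef; the tag stays a plain struct and only the array typedef survives. The C-level shape, with fields as defined by the SysV AMD64 ABI:

    struct __va_list_tag {
      unsigned int gp_offset;    // bytes into the GP register save area
      unsigned int fp_offset;    // bytes into the FP register save area
      void *overflow_arg_area;   // next argument passed on the stack
      void *reg_save_area;       // start of the register save area
    };
    typedef struct __va_list_tag __builtin_va_list[1];
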
@@ -6248,7 +6329,7 @@ CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
- // typedef struct __va_list_tag {
+ // struct __va_list_tag {
RecordDecl *VaListTagDecl;
VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
VaListTagDecl->startDefinition();
@@ -6288,20 +6369,15 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
VaListTagDecl->addDecl(Field);
}
VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
QualType VaListTagType = Context->getRecordType(VaListTagDecl);
- Context->VaListTagTy = VaListTagType;
- // } __va_list_tag;
- TypedefDecl *VaListTagTypedefDecl =
- Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
- QualType VaListTagTypedefType =
- Context->getTypedefType(VaListTagTypedefDecl);
+ // };
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
- QualType VaListTagArrayType
- = Context->getConstantArrayType(VaListTagTypedefType,
- Size, ArrayType::Normal,0);
+ QualType VaListTagArrayType =
+ Context->getConstantArrayType(VaListTagType, Size, ArrayType::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
@@ -6339,13 +6415,20 @@ TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
return BuiltinVaListDecl;
}
-QualType ASTContext::getVaListTagType() const {
- // Force the creation of VaListTagTy by building the __builtin_va_list
+Decl *ASTContext::getVaListTagDecl() const {
+ // Force the creation of VaListTagDecl by building the __builtin_va_list
// declaration.
- if (VaListTagTy.isNull())
- (void) getBuiltinVaListDecl();
+ if (!VaListTagDecl)
+ (void)getBuiltinVaListDecl();
+
+ return VaListTagDecl;
+}
+
+TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
+ if (!BuiltinMSVaListDecl)
+ BuiltinMSVaListDecl = CreateMSVaListDecl(this);
- return VaListTagTy;
+ return BuiltinMSVaListDecl;
}
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
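
A minimal caller sketch for the accessors in the hunk above (the helper is hypothetical). Both are lazy: the first call builds the corresponding declaration. getVaListTagDecl() can return null on targets whose va_list is a bare pointer and therefore has no tag:

    static void dumpVaListDecls(ASTContext &Ctx) {
      if (Decl *Tag = Ctx.getVaListTagDecl())   // built on demand
        Tag->dump();
      Ctx.getBuiltinMSVaListDecl()->dump();     // char *__builtin_ms_va_list
    }
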
@@ -7734,6 +7817,10 @@ bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
return true;
}
+void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
+ ObjCLayouts[CD] = nullptr;
+}
+
/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
/// 'RHS' attributes and returns the merged version; including for function
/// return types.
@@ -8128,7 +8215,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
QualType ASTContext::GetBuiltinType(unsigned Id,
GetBuiltinTypeError &Error,
unsigned *IntegerConstantArgs) const {
- const char *TypeStr = BuiltinInfo.GetTypeString(Id);
+ const char *TypeStr = BuiltinInfo.getTypeString(Id);
SmallVector<QualType, 8> ArgTypes;
@@ -8212,7 +8299,8 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
if (!FD->isInlined())
return External;
- if ((!Context.getLangOpts().CPlusPlus && !Context.getLangOpts().MSVCCompat &&
+ if ((!Context.getLangOpts().CPlusPlus &&
+ !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
!FD->hasAttr<DLLExportAttr>()) ||
FD->hasAttr<GNUInlineAttr>()) {
// FIXME: This doesn't match gcc's behavior for dllexport inline functions.
@@ -8235,13 +8323,13 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
return GVA_DiscardableODR;
}
-static GVALinkage adjustGVALinkageForDLLAttribute(GVALinkage L, const Decl *D) {
+static GVALinkage adjustGVALinkageForAttributes(GVALinkage L, const Decl *D) {
// See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
// dllexport/dllimport on inline functions.
if (D->hasAttr<DLLImportAttr>()) {
if (L == GVA_DiscardableODR || L == GVA_StrongODR)
return GVA_AvailableExternally;
- } else if (D->hasAttr<DLLExportAttr>()) {
+ } else if (D->hasAttr<DLLExportAttr>() || D->hasAttr<CUDAGlobalAttr>()) {
if (L == GVA_DiscardableODR)
return GVA_StrongODR;
}
@@ -8249,8 +8337,8 @@ static GVALinkage adjustGVALinkageForDLLAttribute(GVALinkage L, const Decl *D) {
}
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
- return adjustGVALinkageForDLLAttribute(basicGVALinkageForFunction(*this, FD),
- FD);
+ return adjustGVALinkageForAttributes(basicGVALinkageForFunction(*this, FD),
+ FD);
}
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
@@ -8285,9 +8373,14 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
switch (VD->getTemplateSpecializationKind()) {
case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
return GVA_StrongExternal;
+ case TSK_ExplicitSpecialization:
+ return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ VD->isStaticDataMember()
+ ? GVA_StrongODR
+ : GVA_StrongExternal;
+
case TSK_ExplicitInstantiationDefinition:
return GVA_StrongODR;
@@ -8302,8 +8395,8 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
}
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
- return adjustGVALinkageForDLLAttribute(basicGVALinkageForVariable(*this, VD),
- VD);
+ return adjustGVALinkageForAttributes(basicGVALinkageForVariable(*this, VD),
+ VD);
}
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
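
Sketch of the two behavior changes in the GVA hunks above (declarations are illustrative, not from the patch): under the Microsoft C++ ABI an explicitly specialized static data member is now GVA_StrongODR instead of GVA_StrongExternal, and a CUDA __global__ kernel is treated like dllexport, so an otherwise discardable instantiation is still emitted:

    template <typename T> struct S { static int x; };
    template <> int S<int>::x = 42;   // Microsoft ABI: now GVA_StrongODR

    template <typename T> __global__ void kernel() {}
    // An instantiation would be GVA_DiscardableODR; CUDAGlobalAttr
    // promotes it to GVA_StrongODR so the kernel is emitted.
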
@@ -8313,6 +8406,9 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
// Global named register variables (GNU extension) are never emitted.
if (VD->getStorageClass() == SC_Register)
return false;
+ if (VD->getDescribedVarTemplate() ||
+ isa<VarTemplatePartialSpecializationDecl>(VD))
+ return false;
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// We never need to emit an uninstantiated function template.
if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
@@ -8385,7 +8481,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
// Variables that have initialization with side-effects are required.
- if (VD->getInit() && VD->getInit()->HasSideEffects(*this))
+ if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
+ !VD->evaluateValue())
return true;
return false;
@@ -8425,6 +8522,8 @@ MangleContext *ASTContext::createMangleContext() {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
+ case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::WatchOS:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -8543,6 +8642,25 @@ Expr *ASTContext::getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx);
}
+void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) {
+ return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
+}
+
+TypedefNameDecl *
+ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getTypedefNameForUnnamedTagDecl(TD);
+}
+
+void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) {
+ return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
+}
+
+DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getDeclaratorForUnnamedTagDecl(TD);
+}
+
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
ParamIndices[D] = index;
}
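
The forwarding functions in the hunk above route through new C++ ABI hooks that remember a name for an unnamed tag type; the Microsoft mangler, for one, names anonymous tags after an associated typedef or declarator. Illustrative C++ (comments are mine):

    typedef struct { int x, y; } Point;  // unnamed struct; 'Point' is its
                                         // typedef name for linkage purposes
    struct { int v; } g;                 // unnamed struct; the declarator
                                         // 'g' can serve as its handle
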
@@ -8559,12 +8677,14 @@ ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E,
bool MayCreate) {
assert(E && E->getStorageDuration() == SD_Static &&
"don't need to cache the computed value for this temporary");
- if (MayCreate)
- return &MaterializedTemporaryValues[E];
+ if (MayCreate) {
+ APValue *&MTVI = MaterializedTemporaryValues[E];
+ if (!MTVI)
+ MTVI = new (*this) APValue;
+ return MTVI;
+ }
- llvm::DenseMap<const MaterializeTemporaryExpr *, APValue>::iterator I =
- MaterializedTemporaryValues.find(E);
- return I == MaterializedTemporaryValues.end() ? nullptr : &I->second;
+ return MaterializedTemporaryValues.lookup(E);
}
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
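
For context on the getMaterializedTemporaryValue change above: the cache now hands out an ASTContext-allocated APValue* rather than a pointer into the DenseMap itself, so the pointer stays valid if the map later rehashes. A minimal example of code whose value lands in this cache:

    // Lifetime extension at namespace scope: the temporary holding 42 has
    // static storage duration (SD_Static), so its value may be cached.
    const int &Answer = 42;
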
@@ -8587,6 +8707,32 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
namespace {
+ast_type_traits::DynTypedNode getSingleDynTypedNodeFromParentMap(
+ ASTContext::ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return ast_type_traits::DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return ast_type_traits::DynTypedNode::create(*S);
+ return *U.get<ast_type_traits::DynTypedNode *>();
+}
+
+/// Template specializations to abstract away from pointers and TypeLocs.
+/// @{
+template <typename T>
+ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
+ return ast_type_traits::DynTypedNode::create(*Node);
+}
+template <>
+ast_type_traits::DynTypedNode createDynTypedNode(const TypeLoc &Node) {
+ return ast_type_traits::DynTypedNode::create(Node);
+}
+template <>
+ast_type_traits::DynTypedNode
+createDynTypedNode(const NestedNameSpecifierLoc &Node) {
+ return ast_type_traits::DynTypedNode::create(Node);
+}
+/// @}
+
/// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
/// parents as defined by the \c RecursiveASTVisitor.
///
@@ -8596,22 +8742,25 @@ namespace {
///
/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
-
public:
/// \brief Builds and returns the translation unit's parent map.
///
-    ///  The caller takes ownership of the returned \c ParentMap.
+    ///  The caller takes ownership of both returned maps.
- static ASTContext::ParentMap *buildMap(TranslationUnitDecl &TU) {
- ParentMapASTVisitor Visitor(new ASTContext::ParentMap);
+ static std::pair<ASTContext::ParentMapPointers *,
+ ASTContext::ParentMapOtherNodes *>
+ buildMap(TranslationUnitDecl &TU) {
+ ParentMapASTVisitor Visitor(new ASTContext::ParentMapPointers,
+ new ASTContext::ParentMapOtherNodes);
Visitor.TraverseDecl(&TU);
- return Visitor.Parents;
+ return std::make_pair(Visitor.Parents, Visitor.OtherParents);
}
private:
typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase;
- ParentMapASTVisitor(ASTContext::ParentMap *Parents) : Parents(Parents) {
- }
+ ParentMapASTVisitor(ASTContext::ParentMapPointers *Parents,
+ ASTContext::ParentMapOtherNodes *OtherParents)
+ : Parents(Parents), OtherParents(OtherParents) {}
bool shouldVisitTemplateInstantiations() const {
return true;
@@ -8619,14 +8768,11 @@ namespace {
bool shouldVisitImplicitCode() const {
return true;
}
- // Disables data recursion. We intercept Traverse* methods in the RAV, which
- // are not triggered during data recursion.
- bool shouldUseDataRecursionFor(clang::Stmt *S) const {
- return false;
- }
- template <typename T>
- bool TraverseNode(T *Node, bool(VisitorBase:: *traverse) (T *)) {
+ template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+ typename MapTy>
+ bool TraverseNode(T Node, MapNodeTy MapNode,
+ BaseTraverseFn BaseTraverse, MapTy *Parents) {
if (!Node)
return true;
if (ParentStack.size() > 0) {
@@ -8640,18 +8786,25 @@ namespace {
// map. The main problem there is to implement hash functions /
// comparison operators for all types that DynTypedNode supports that
// do not have pointer identity.
- auto &NodeOrVector = (*Parents)[Node];
+ auto &NodeOrVector = (*Parents)[MapNode];
if (NodeOrVector.isNull()) {
- NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector =
+ new ast_type_traits::DynTypedNode(ParentStack.back());
} else {
- if (NodeOrVector.template is<ast_type_traits::DynTypedNode *>()) {
- auto *Node =
- NodeOrVector.template get<ast_type_traits::DynTypedNode *>();
- auto *Vector = new ASTContext::ParentVector(1, *Node);
+ if (!NodeOrVector.template is<ASTContext::ParentVector *>()) {
+ auto *Vector = new ASTContext::ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ if (auto *Node =
+ NodeOrVector
+ .template dyn_cast<ast_type_traits::DynTypedNode *>())
+ delete Node;
NodeOrVector = Vector;
- delete Node;
}
- assert(NodeOrVector.template is<ASTContext::ParentVector *>());
auto *Vector =
NodeOrVector.template get<ASTContext::ParentVector *>();
@@ -8666,47 +8819,74 @@ namespace {
Vector->push_back(ParentStack.back());
}
}
- ParentStack.push_back(ast_type_traits::DynTypedNode::create(*Node));
- bool Result = (this ->* traverse) (Node);
+ ParentStack.push_back(createDynTypedNode(Node));
+ bool Result = BaseTraverse();
ParentStack.pop_back();
return Result;
}
bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(DeclNode, &VisitorBase::TraverseDecl);
+ return TraverseNode(DeclNode, DeclNode,
+ [&] { return VisitorBase::TraverseDecl(DeclNode); },
+ Parents);
}
bool TraverseStmt(Stmt *StmtNode) {
- return TraverseNode(StmtNode, &VisitorBase::TraverseStmt);
+ return TraverseNode(StmtNode, StmtNode,
+ [&] { return VisitorBase::TraverseStmt(StmtNode); },
+ Parents);
+ }
+
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ return TraverseNode(
+ TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
+ [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ OtherParents);
+ }
+
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+ return TraverseNode(
+ NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
+ [&] {
+ return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode);
+ },
+ OtherParents);
}
- ASTContext::ParentMap *Parents;
+ ASTContext::ParentMapPointers *Parents;
+ ASTContext::ParentMapOtherNodes *OtherParents;
llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
friend class RecursiveASTVisitor<ParentMapASTVisitor>;
};
-} // end namespace
+} // anonymous namespace
-ArrayRef<ast_type_traits::DynTypedNode>
+template <typename NodeTy, typename MapTy>
+static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+ const MapTy &Map) {
+ auto I = Map.find(Node);
+ if (I == Map.end()) {
+ return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
+ }
+ if (auto *V = I->second.template dyn_cast<ASTContext::ParentVector *>()) {
+ return llvm::makeArrayRef(*V);
+ }
+ return getSingleDynTypedNodeFromParentMap(I->second);
+}
+
+ASTContext::DynTypedNodeList
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- assert(Node.getMemoizationData() &&
- "Invariant broken: only nodes that support memoization may be "
- "used in the parent map.");
- if (!AllParents) {
+ if (!PointerParents) {
// We always need to run over the whole translation unit, as
// hasAncestor can escape any subtree.
- AllParents.reset(
- ParentMapASTVisitor::buildMap(*getTranslationUnitDecl()));
- }
- ParentMap::const_iterator I = AllParents->find(Node.getMemoizationData());
- if (I == AllParents->end()) {
- return None;
- }
- if (auto *N = I->second.dyn_cast<ast_type_traits::DynTypedNode *>()) {
- return llvm::makeArrayRef(N, 1);
+ auto Maps = ParentMapASTVisitor::buildMap(*getTranslationUnitDecl());
+ PointerParents.reset(Maps.first);
+ OtherParents.reset(Maps.second);
}
- return *I->second.get<ParentVector *>();
+ if (Node.getNodeKind().hasPointerIdentity())
+ return getDynNodeFromMap(Node.getMemoizationData(), *PointerParents);
+ return getDynNodeFromMap(Node, *OtherParents);
}
bool
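
A small usage sketch of the reworked parent map above (hypothetical helper; this assumes DynTypedNodeList is range-iterable, as its ArrayRef-style construction suggests). Nodes with pointer identity (Decl, Stmt) are served from PointerParents, while TypeLoc and NestedNameSpecifierLoc parents come from the new OtherParents map:

    static void dumpParentKinds(ASTContext &Ctx, const Stmt &S) {
      auto N = ast_type_traits::DynTypedNode::create(S);
      for (const auto &Parent : Ctx.getParents(N))
        llvm::errs() << Parent.getNodeKind().asStringRef() << "\n";
    }
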
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index dddaa5af6fb0..0ab1fa788603 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -66,11 +66,63 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
continue;
}
- // Don't desugar template specializations, unless it's an alias template.
- if (const TemplateSpecializationType *TST
- = dyn_cast<TemplateSpecializationType>(Ty))
- if (!TST->isTypeAlias())
+ // Desugar FunctionType if return type or any parameter type should be
+ // desugared. Preserve nullability attribute on desugared types.
+ if (const FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
+ bool DesugarReturn = false;
+ QualType SugarRT = FT->getReturnType();
+ QualType RT = Desugar(Context, SugarRT, DesugarReturn);
+ if (auto nullability = AttributedType::stripOuterNullability(SugarRT)) {
+ RT = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability), RT, RT);
+ }
+
+ bool DesugarArgument = false;
+ SmallVector<QualType, 4> Args;
+ const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT);
+ if (FPT) {
+ for (QualType SugarPT : FPT->param_types()) {
+ QualType PT = Desugar(Context, SugarPT, DesugarArgument);
+ if (auto nullability =
+ AttributedType::stripOuterNullability(SugarPT)) {
+ PT = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*nullability), PT, PT);
+ }
+ Args.push_back(PT);
+ }
+ }
+
+ if (DesugarReturn || DesugarArgument) {
+ ShouldAKA = true;
+ QT = FPT ? Context.getFunctionType(RT, Args, FPT->getExtProtoInfo())
+ : Context.getFunctionNoProtoType(RT, FT->getExtInfo());
+ break;
+ }
+ }
+
+ // Desugar template specializations if any template argument should be
+ // desugared.
+ if (const TemplateSpecializationType *TST =
+ dyn_cast<TemplateSpecializationType>(Ty)) {
+ if (!TST->isTypeAlias()) {
+ bool DesugarArgument = false;
+ SmallVector<TemplateArgument, 4> Args;
+ for (unsigned I = 0, N = TST->getNumArgs(); I != N; ++I) {
+ const TemplateArgument &Arg = TST->getArg(I);
+ if (Arg.getKind() == TemplateArgument::Type)
+ Args.push_back(Desugar(Context, Arg.getAsType(), DesugarArgument));
+ else
+ Args.push_back(Arg);
+ }
+
+ if (DesugarArgument) {
+ ShouldAKA = true;
+ QT = Context.getTemplateSpecializationType(
+ TST->getTemplateName(), Args.data(), Args.size(), QT);
+ }
break;
+ }
+ }
// Don't desugar magic Objective-C types.
if (QualType(Ty,0) == Context.getObjCIdType() ||
@@ -80,7 +132,8 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
break;
// Don't desugar va_list.
- if (QualType(Ty,0) == Context.getBuiltinVaListType())
+ if (QualType(Ty, 0) == Context.getBuiltinVaListType() ||
+ QualType(Ty, 0) == Context.getBuiltinMSVaListType())
break;
// Otherwise, do a single-step desugar.
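
Net effect of the Desugar changes above, sketched: diagnostics can now desugar through function types and non-alias template specializations while re-attaching nullability attributes, so a typedef buried in a function type gets an 'aka' note. Illustrative only; the exact diagnostic text will vary:

    typedef int MyInt;
    void take(void (*fp)(MyInt));
    // A mismatched call could now be noted along the lines of:
    //   ... 'void (*)(MyInt)' (aka 'void (*)(int)') ...
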
@@ -393,8 +446,6 @@ void clang::FormatASTNodeDiagnosticArgument(
}
- OS.flush();
-
if (NeedQuotes) {
Output.insert(Output.begin()+OldEnd, '\'');
Output.push_back('\'');
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index c95922b141e0..e7fee0316b69 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -21,8 +21,10 @@
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/LocInfoType.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::comments;
@@ -447,6 +449,7 @@ namespace {
const ClassTemplatePartialSpecializationDecl *D);
void VisitClassScopeFunctionSpecializationDecl(
const ClassScopeFunctionSpecializationDecl *D);
+ void VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D);
void VisitVarTemplateDecl(const VarTemplateDecl *D);
void VisitVarTemplateSpecializationDecl(
const VarTemplateSpecializationDecl *D);
@@ -653,6 +656,15 @@ void ASTDumper::dumpTypeAsChild(const Type *T) {
OS << "<<<NULL>>>";
return;
}
+ if (const LocInfoType *LIT = llvm::dyn_cast<LocInfoType>(T)) {
+ {
+ ColorScope Color(*this, TypeColor);
+ OS << "LocInfo Type";
+ }
+ dumpPointer(T);
+ dumpTypeAsChild(LIT->getTypeSourceInfo()->getType());
+ return;
+ }
{
ColorScope Color(*this, TypeColor);
@@ -1333,6 +1345,11 @@ void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
VisitTemplateDecl(D, false);
}
+void ASTDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+}
+
void ASTDumper::VisitVarTemplateSpecializationDecl(
const VarTemplateSpecializationDecl *D) {
dumpTemplateArgumentList(D->getTemplateArgs());
@@ -1378,20 +1395,23 @@ void ASTDumper::VisitTemplateTemplateParmDecl(
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
OS << ' ';
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
}
void ASTDumper::VisitUnresolvedUsingTypenameDecl(
const UnresolvedUsingTypenameDecl *D) {
OS << ' ';
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
}
void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
OS << ' ';
- D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
+ if (D->getQualifier())
+ D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
dumpType(D->getType());
}
@@ -2017,6 +2037,9 @@ void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
VisitExpr(Node);
dumpPointer(Node->getPack());
dumpName(Node->getPack());
+ if (Node->isPartiallySubstituted())
+ for (const auto &A : Node->getPartialArguments())
+ dumpTemplateArgument(A);
}
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 35c0f690db82..359db1ba81b3 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -1746,7 +1746,7 @@ QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
return QualType();
}
- return Importer.getToContext().getAutoType(ToDeduced, T->isDecltypeAuto(),
+ return Importer.getToContext().getAutoType(ToDeduced, T->getKeyword(),
/*IsDependent*/false);
}
@@ -2144,7 +2144,7 @@ TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
return TemplateParameterList::Create(Importer.getToContext(),
Importer.Import(Params->getTemplateLoc()),
Importer.Import(Params->getLAngleLoc()),
- ToParams.data(), ToParams.size(),
+ ToParams,
Importer.Import(Params->getRAngleLoc()));
}
@@ -2210,11 +2210,9 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
ToPack.reserve(From.pack_size());
if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
return TemplateArgument();
-
- TemplateArgument *ToArgs
- = new (Importer.getToContext()) TemplateArgument[ToPack.size()];
- std::copy(ToPack.begin(), ToPack.end(), ToArgs);
- return TemplateArgument(ToArgs, ToPack.size());
+
+ return TemplateArgument(
+ llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
}
}
@@ -4917,13 +4915,14 @@ Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
if (!ToBody && S->getBody())
return nullptr;
SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+ SourceLocation ToCoawaitLoc = Importer.Import(S->getCoawaitLoc());
SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
return new (Importer.getToContext()) CXXForRangeStmt(ToRange, ToBeginEnd,
ToCond, ToInc,
ToLoopVar, ToBody,
- ToForLoc, ToColonLoc,
- ToRParenLoc);
+ ToForLoc, ToCoawaitLoc,
+ ToColonLoc, ToRParenLoc);
}
Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
@@ -5331,7 +5330,7 @@ Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
return new (Importer.getToContext())
CallExpr(Importer.getToContext(), ToCallee,
- ArrayRef<Expr*>(ToArgs_Copied, NumArgs), T, E->getValueKind(),
+ llvm::makeArrayRef(ToArgs_Copied, NumArgs), T, E->getValueKind(),
Importer.Import(E->getRParenLoc()));
}
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 6ce347b4a31c..e28bd2e16d17 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -30,6 +30,7 @@ add_clang_library(clangAST
ExprClassification.cpp
ExprConstant.cpp
ExprCXX.cpp
+ ExprObjC.cpp
ExternalASTSource.cpp
InheritViz.cpp
ItaniumCXXABI.cpp
@@ -39,13 +40,17 @@ add_clang_library(clangAST
MicrosoftMangle.cpp
NestedNameSpecifier.cpp
NSAPI.cpp
+ OpenMPClause.cpp
ParentMap.cpp
RawCommentList.cpp
RecordLayout.cpp
RecordLayoutBuilder.cpp
SelectorLocationsKind.cpp
Stmt.cpp
+ StmtCXX.cpp
StmtIterator.cpp
+ StmtObjC.cpp
+ StmtOpenMP.cpp
StmtPrinter.cpp
StmtProfile.cpp
StmtViz.cpp
diff --git a/lib/AST/CXXABI.h b/lib/AST/CXXABI.h
index dad226474fa7..c23b9191c7ab 100644
--- a/lib/AST/CXXABI.h
+++ b/lib/AST/CXXABI.h
@@ -21,6 +21,7 @@ namespace clang {
class ASTContext;
class CXXConstructorDecl;
+class DeclaratorDecl;
class Expr;
class MemberPointerType;
class MangleNumberingContext;
@@ -57,6 +58,17 @@ public:
virtual Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
unsigned ParmIdx) = 0;
+
+ virtual void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) = 0;
+
+ virtual TypedefNameDecl *
+ getTypedefNameForUnnamedTagDecl(const TagDecl *TD) = 0;
+
+ virtual void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) = 0;
+
+ virtual DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) = 0;
};
/// Creates an instance of a C++ ABI class.
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index 800c8f83b880..6785a0c2935a 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -31,16 +31,16 @@ void CXXBasePaths::ComputeDeclsFound() {
Decls.insert(Path->Decls.front());
NumDeclsFound = Decls.size();
- DeclsFound = new NamedDecl * [NumDeclsFound];
- std::copy(Decls.begin(), Decls.end(), DeclsFound);
+ DeclsFound = llvm::make_unique<NamedDecl *[]>(NumDeclsFound);
+ std::copy(Decls.begin(), Decls.end(), DeclsFound.get());
}
CXXBasePaths::decl_range CXXBasePaths::found_decls() {
if (NumDeclsFound == 0)
ComputeDeclsFound();
- return decl_range(decl_iterator(DeclsFound),
- decl_iterator(DeclsFound + NumDeclsFound));
+ return decl_range(decl_iterator(DeclsFound.get()),
+ decl_iterator(DeclsFound.get() + NumDeclsFound));
}
/// isAmbiguous - Determines whether the set of paths provided is
@@ -85,9 +85,14 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
return false;
Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
- return lookupInBases(&FindBaseClass,
- const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()),
- Paths);
+
+ const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
+ // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
+ return lookupInBases(
+ [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return FindBaseClass(Specifier, Path, BaseDecl);
+ },
+ Paths);
}
bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
@@ -102,20 +107,20 @@ bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
- const void *BasePtr = static_cast<const void*>(Base->getCanonicalDecl());
- return lookupInBases(&FindVirtualBaseClass,
- const_cast<void *>(BasePtr),
- Paths);
-}
-
-static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
- // OpaqueTarget is a CXXRecordDecl*.
- return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget;
+ const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
+ // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
+ return lookupInBases(
+ [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return FindVirtualBaseClass(Specifier, Path, BaseDecl);
+ },
+ Paths);
}
bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const {
- return forallBases(BaseIsNot,
- const_cast<CXXRecordDecl *>(Base->getCanonicalDecl()));
+ const CXXRecordDecl *TargetDecl = Base->getCanonicalDecl();
+ return forallBases([TargetDecl](const CXXRecordDecl *Base) {
+ return Base->getCanonicalDecl() != TargetDecl;
+ });
}
bool
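
With the callback rewrites above, base-class walks take callable objects instead of a function pointer plus a void* cookie, so callers can pass lambdas directly. A minimal sketch mirroring isDerivedFrom (hypothetical helper):

    static bool derivesFrom(const CXXRecordDecl *RD, const CXXRecordDecl *Base) {
      CXXBasePaths Paths;
      const CXXRecordDecl *Target = Base->getCanonicalDecl();
      return RD->lookupInBases(
          [Target](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
            return CXXRecordDecl::FindBaseClass(Specifier, Path, Target);
          },
          Paths);
    }
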
@@ -129,8 +134,7 @@ CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const {
return false;
}
-bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
- void *OpaqueData,
+bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
bool AllowShortCircuit) const {
SmallVector<const CXXRecordDecl*, 8> Queue;
@@ -156,7 +160,7 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
}
Queue.push_back(Base);
- if (!BaseMatches(Base, OpaqueData)) {
+ if (!BaseMatches(Base)) {
if (AllowShortCircuit) return false;
AllMatches = false;
continue;
@@ -171,10 +175,9 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
return AllMatches;
}
-bool CXXBasePaths::lookupInBases(ASTContext &Context,
- const CXXRecordDecl *Record,
- CXXRecordDecl::BaseMatchesCallback *BaseMatches,
- void *UserData) {
+bool CXXBasePaths::lookupInBases(
+ ASTContext &Context, const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback BaseMatches) {
bool FoundPath = false;
// The access of the path down to this record.
@@ -248,7 +251,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
// Track whether there's a path involving this specific base.
bool FoundPathThroughBase = false;
- if (BaseMatches(&BaseSpec, ScratchPath, UserData)) {
+ if (BaseMatches(&BaseSpec, ScratchPath)) {
// We've found a path that terminates at this base.
FoundPath = FoundPathThroughBase = true;
if (isRecordingPaths()) {
@@ -263,7 +266,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
CXXRecordDecl *BaseRecord
= cast<CXXRecordDecl>(BaseSpec.getType()->castAs<RecordType>()
->getDecl());
- if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
+ if (lookupInBases(Context, BaseRecord, BaseMatches)) {
// C++ [class.member.lookup]p2:
// A member name f in one sub-object B hides a member name f in
// a sub-object A if A is a base class sub-object of B. Any
@@ -296,11 +299,10 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
return FoundPath;
}
-bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
- void *UserData,
+bool CXXRecordDecl::lookupInBases(BaseMatchesCallback BaseMatches,
CXXBasePaths &Paths) const {
// If we didn't find anything, report that.
- if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData))
+ if (!Paths.lookupInBases(getASTContext(), this, BaseMatches))
return false;
// If we're not recording paths or we won't ever find ambiguities,
@@ -353,8 +355,8 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
- void *BaseRecord) {
- assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+ const CXXRecordDecl *BaseRecord) {
+ assert(BaseRecord->getCanonicalDecl() == BaseRecord &&
"User data for FindBaseClass is not canonical!");
return Specifier->getType()->castAs<RecordType>()->getDecl()
->getCanonicalDecl() == BaseRecord;
@@ -362,8 +364,8 @@ bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
-                                          void *BaseRecord) {
-  assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
-         "User data for FindBaseClass is not canonical!");
+                                          const CXXRecordDecl *BaseRecord) {
+  assert(BaseRecord->getCanonicalDecl() == BaseRecord &&
+         "User data for FindVirtualBaseClass is not canonical!");
return Specifier->isVirtual() &&
Specifier->getType()->castAs<RecordType>()->getDecl()
@@ -372,12 +374,11 @@ bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
- void *Name) {
+ DeclarationName Name) {
RecordDecl *BaseRecord =
Specifier->getType()->castAs<RecordType>()->getDecl();
- DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
- for (Path.Decls = BaseRecord->lookup(N);
+ for (Path.Decls = BaseRecord->lookup(Name);
!Path.Decls.empty();
Path.Decls = Path.Decls.slice(1)) {
if (Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
@@ -389,13 +390,12 @@ bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
- void *Name) {
+ DeclarationName Name) {
RecordDecl *BaseRecord =
Specifier->getType()->castAs<RecordType>()->getDecl();
const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
- DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
- for (Path.Decls = BaseRecord->lookup(N);
+ for (Path.Decls = BaseRecord->lookup(Name);
!Path.Decls.empty();
Path.Decls = Path.Decls.slice(1)) {
if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
@@ -408,12 +408,11 @@ bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
bool CXXRecordDecl::
FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
- void *Name) {
+ DeclarationName Name) {
RecordDecl *BaseRecord =
Specifier->getType()->castAs<RecordType>()->getDecl();
- DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
- for (Path.Decls = BaseRecord->lookup(N);
+ for (Path.Decls = BaseRecord->lookup(Name);
!Path.Decls.empty();
Path.Decls = Path.Decls.slice(1)) {
// FIXME: Refactor the "is it a nested-name-specifier?" check
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index ea4b2f517cd0..42bebc543e3e 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -207,13 +207,13 @@ static Optional<Visibility> getVisibilityOf(const NamedDecl *D,
// If we're ultimately computing the visibility of a type, look for
// a 'type_visibility' attribute before looking for 'visibility'.
if (kind == NamedDecl::VisibilityForType) {
- if (const TypeVisibilityAttr *A = D->getAttr<TypeVisibilityAttr>()) {
+ if (const auto *A = D->getAttr<TypeVisibilityAttr>()) {
return getVisibilityFromAttr(A);
}
}
// If this declaration has an explicit visibility attribute, use it.
- if (const VisibilityAttr *A = D->getAttr<VisibilityAttr>()) {
+ if (const auto *A = D->getAttr<VisibilityAttr>()) {
return getVisibilityFromAttr(A);
}
@@ -252,8 +252,7 @@ getLVForTemplateParameterList(const TemplateParameterList *Params,
// template <enum X> class A { ... };
// We have to be careful here, though, because we can be dealing with
// dependent types.
- if (const NonTypeTemplateParmDecl *NTTP =
- dyn_cast<NonTypeTemplateParmDecl>(P)) {
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
// Handle the non-pack case first.
if (!NTTP->isExpandedParameterPack()) {
if (!NTTP->getType()->isDependentType()) {
@@ -273,7 +272,7 @@ getLVForTemplateParameterList(const TemplateParameterList *Params,
// Template template parameters can be restricted by their
// template parameters, recursively.
- const TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(P);
+ const auto *TTP = cast<TemplateTemplateParmDecl>(P);
// Handle the non-pack case first.
if (!TTP->isExpandedParameterPack()) {
@@ -329,7 +328,7 @@ static LinkageInfo getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args,
continue;
case TemplateArgument::Declaration:
- if (NamedDecl *ND = dyn_cast<NamedDecl>(Arg.getAsDecl())) {
+ if (const auto *ND = dyn_cast<NamedDecl>(Arg.getAsDecl())) {
assert(!usesTypeVisibility(ND));
LV.merge(getLVForDecl(ND, computation));
}
@@ -541,7 +540,7 @@ static bool useInlineVisibilityHidden(const NamedDecl *D) {
if (!Opts.CPlusPlus || !Opts.InlineVisibilityHidden)
return false;
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const auto *FD = dyn_cast<FunctionDecl>(D);
if (!FD)
return false;
@@ -569,7 +568,7 @@ template <typename T> static bool isFirstInExternCContext(T *D) {
}
static bool isSingleLineLanguageLinkage(const Decl &D) {
- if (const LinkageSpecDecl *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
+ if (const auto *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
if (!SD->hasBraces())
return true;
return false;
@@ -587,7 +586,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// - an object, reference, function or function template that is
// explicitly declared static; or,
// (This bullet corresponds to C99 6.2.2p3.)
- if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
// Explicitly declared static.
if (Var->getStorageClass() == SC_Static)
return LinkageInfo::internal();
@@ -634,8 +633,10 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");
if (D->isInAnonymousNamespace()) {
- const VarDecl *Var = dyn_cast<VarDecl>(D);
- const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
+ const auto *Var = dyn_cast<VarDecl>(D);
+ const auto *Func = dyn_cast<FunctionDecl>(D);
+ // FIXME: In C++11 onwards, anonymous namespaces should give decls
+ // within them internal linkage, not unique external linkage.
if ((!Var || !isFirstInExternCContext(Var)) &&
(!Func || !isFirstInExternCContext(Func)))
return LinkageInfo::uniqueExternal();
@@ -658,7 +659,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
for (const DeclContext *DC = D->getDeclContext();
!isa<TranslationUnitDecl>(DC);
DC = DC->getParent()) {
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ const auto *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND) continue;
if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) {
LV.mergeVisibility(*Vis, true);
@@ -692,7 +693,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// name of
//
// - an object or reference, unless it has internal linkage; or
- if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
// GCC applies the following optimization to variables and static
// data members, but not to functions:
//
@@ -732,13 +733,12 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// As per function and class template specializations (below),
// consider LV for the template and template arguments. We're at file
// scope, so we do not need to worry about nested specializations.
- if (const VarTemplateSpecializationDecl *spec
- = dyn_cast<VarTemplateSpecializationDecl>(Var)) {
+ if (const auto *spec = dyn_cast<VarTemplateSpecializationDecl>(Var)) {
mergeTemplateLV(LV, spec, computation);
}
// - a function, unless it has internal linkage; or
- } else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
// In theory, we can modify the function's LV by the LV of its
// type unless it has C linkage (see comment above about variables
// for justification). In practice, GCC doesn't do this, so it's
@@ -785,7 +785,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// - a named enumeration (7.2), or an unnamed enumeration
// defined in a typedef declaration in which the enumeration
// has the typedef name for linkage purposes (7.1.3); or
- } else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
+ } else if (const auto *Tag = dyn_cast<TagDecl>(D)) {
// Unnamed tags have no linkage.
if (!Tag->hasNameForLinkage())
return LinkageInfo::none();
@@ -793,8 +793,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// If this is a class template specialization, consider the
// linkage of the template and template arguments. We're at file
// scope, so we do not need to worry about nested specializations.
- if (const ClassTemplateSpecializationDecl *spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
mergeTemplateLV(LV, spec, computation);
}
@@ -808,7 +807,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
// - a template, unless it is a function template that has
// internal linkage (Clause 14);
- } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
+ } else if (const auto *temp = dyn_cast<TemplateDecl>(D)) {
bool considerVisibility = !hasExplicitVisibilityAlready(computation);
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
@@ -824,10 +823,14 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
} else if (isa<ObjCInterfaceDecl>(D)) {
// fallout
+ } else if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // A typedef declaration has linkage if it gives a type a name for
+ // linkage purposes.
+ if (!TD->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
+ return LinkageInfo::none();
+
// Everything not covered here has no linkage.
} else {
- // FIXME: A typedef declaration has linkage if it gives a type a name for
- // linkage purposes.
return LinkageInfo::none();
}
@@ -897,7 +900,7 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
// Specifically, if this decl exists and has an explicit attribute.
const NamedDecl *explicitSpecSuppressor = nullptr;
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
// If the type of the function uses a type with unique-external
// linkage, it's not legally usable from outside this translation unit.
// But only look at the type-as-written. If this function has an
@@ -928,9 +931,8 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
explicitSpecSuppressor = MD;
}
- } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (const ClassTemplateSpecializationDecl *spec
- = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ } else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
mergeTemplateLV(LV, spec, computation);
if (spec->isExplicitSpecialization()) {
explicitSpecSuppressor = spec;
@@ -945,9 +947,8 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
}
// Static data members.
- } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (const VarTemplateSpecializationDecl *spec
- = dyn_cast<VarTemplateSpecializationDecl>(VD))
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *spec = dyn_cast<VarTemplateSpecializationDecl>(VD))
mergeTemplateLV(LV, spec, computation);
// Modify the variable's linkage by its type, but ignore the
@@ -962,7 +963,7 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
}
// Template members.
- } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
+ } else if (const auto *temp = dyn_cast<TemplateDecl>(D)) {
bool considerVisibility =
(!LV.isVisibilityExplicit() &&
!classLV.isVisibilityExplicit() &&
@@ -971,8 +972,7 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
- if (const RedeclarableTemplateDecl *redeclTemp =
- dyn_cast<RedeclarableTemplateDecl>(temp)) {
+ if (const auto *redeclTemp = dyn_cast<RedeclarableTemplateDecl>(temp)) {
if (isExplicitMemberSpecialization(redeclTemp)) {
explicitSpecSuppressor = temp->getTemplatedDecl();
}
@@ -1048,7 +1048,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
// If this is a member class of a specialization of a class template
// and the corresponding decl has explicit visibility, use that.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND)) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) {
CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
if (InstantiatedFrom)
return getVisibilityOf(InstantiatedFrom, kind);
@@ -1057,8 +1057,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
// If there wasn't explicit visibility there, and this is a
// specialization of a class template, check for visibility
// on the pattern.
- if (const ClassTemplateSpecializationDecl *spec
- = dyn_cast<ClassTemplateSpecializationDecl>(ND))
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(ND))
return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl(),
kind);
@@ -1069,7 +1068,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
return getExplicitVisibilityAux(MostRecent, kind, true);
}
- if (const VarDecl *Var = dyn_cast<VarDecl>(ND)) {
+ if (const auto *Var = dyn_cast<VarDecl>(ND)) {
if (Var->isStaticDataMember()) {
VarDecl *InstantiatedFrom = Var->getInstantiatedFromStaticDataMember();
if (InstantiatedFrom)
@@ -1083,7 +1082,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
return None;
}
// Also handle function template specializations.
- if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND)) {
+ if (const auto *fn = dyn_cast<FunctionDecl>(ND)) {
// If the function is a specialization of a template with an
// explicit visibility attribute, use that.
if (FunctionTemplateSpecializationInfo *templateInfo
@@ -1101,7 +1100,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
}
// The visibility of a template is stored in the templated decl.
- if (const TemplateDecl *TD = dyn_cast<TemplateDecl>(ND))
+ if (const auto *TD = dyn_cast<TemplateDecl>(ND))
return getVisibilityOf(TD->getTemplatedDecl(), kind);
return None;
@@ -1122,7 +1121,7 @@ static LinkageInfo getLVForClosure(const DeclContext *DC, Decl *ContextDecl,
return getLVForDecl(cast<NamedDecl>(ContextDecl), computation);
}
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
+ if (const auto *ND = dyn_cast<NamedDecl>(DC))
return getLVForDecl(ND, computation);
return LinkageInfo::external();
@@ -1130,7 +1129,7 @@ static LinkageInfo getLVForClosure(const DeclContext *DC, Decl *ContextDecl,
static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
LVComputationKind computation) {
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace() &&
!Function->isInExternCContext())
return LinkageInfo::uniqueExternal();
@@ -1153,7 +1152,7 @@ static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
return LV;
}
- if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (const auto *Var = dyn_cast<VarDecl>(D)) {
if (Var->hasExternalStorage()) {
if (Var->isInAnonymousNamespace() && !Var->isInExternCContext())
return LinkageInfo::uniqueExternal();
@@ -1189,14 +1188,14 @@ static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
return LinkageInfo::none();
LinkageInfo LV;
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(OuterD)) {
+ if (const auto *BD = dyn_cast<BlockDecl>(OuterD)) {
if (!BD->getBlockManglingNumber())
return LinkageInfo::none();
LV = getLVForClosure(BD->getDeclContext()->getRedeclContext(),
BD->getBlockManglingContextDecl(), computation);
} else {
- const FunctionDecl *FD = cast<FunctionDecl>(OuterD);
+ const auto *FD = cast<FunctionDecl>(OuterD);
if (!FD->isInlined() &&
!isTemplateInstantiation(FD->getTemplateSpecializationKind()))
return LinkageInfo::none();
@@ -1224,13 +1223,45 @@ getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
static LinkageInfo computeLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
+ // Internal_linkage attribute overrides other considerations.
+ if (D->hasAttr<InternalLinkageAttr>())
+ return LinkageInfo::internal();
+
// Objective-C: treat all Objective-C declarations as having external
// linkage.
switch (D->getKind()) {
default:
break;
+
+ // Per C++ [basic.link]p2, only the names of objects, references,
+ // functions, types, templates, namespaces, and values ever have linkage.
+ //
+ // Note that the name of a typedef, namespace alias, using declaration,
+ // and so on are not the name of the corresponding type, namespace, or
+ // declaration, so they do *not* have linkage.
+ case Decl::ImplicitParam:
+ case Decl::Label:
+ case Decl::NamespaceAlias:
case Decl::ParmVar:
+ case Decl::Using:
+ case Decl::UsingShadow:
+ case Decl::UsingDirective:
return LinkageInfo::none();
+
+ case Decl::EnumConstant:
+ // C++ [basic.link]p4: an enumerator has the linkage of its enumeration.
+ return getLVForDecl(cast<EnumDecl>(D->getDeclContext()), computation);
+
+ case Decl::Typedef:
+ case Decl::TypeAlias:
+ // A typedef declaration has linkage if it gives a type a name for
+ // linkage purposes.
+ if (!D->getASTContext().getLangOpts().CPlusPlus ||
+ !cast<TypedefNameDecl>(D)
+ ->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
+ return LinkageInfo::none();
+ break;
+
case Decl::TemplateTemplateParm: // count these as external
case Decl::NonTypeTemplateParm:
case Decl::ObjCAtDefsField:
@@ -1245,7 +1276,7 @@ static LinkageInfo computeLVForDecl(const NamedDecl *D,
return LinkageInfo::external();
case Decl::CXXRecord: {
- const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
+ const auto *Record = cast<CXXRecordDecl>(D);
if (Record->isLambda()) {
if (!Record->getLambdaManglingNumber()) {
// This lambda has no mangling number, so it's internal.
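
Two of the linkage changes above, illustrated (declarations are mine, not the patch's): the internal_linkage attribute now overrides every other consideration, and in C++ a typedef gains linkage when it gives an unnamed type its name for linkage purposes:

    __attribute__((internal_linkage)) void helper();  // always internal

    typedef struct { int v; } Box;  // 'Box' names the unnamed struct for
                                    // linkage, so the typedef has linkage
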
@@ -1314,6 +1345,10 @@ class LinkageComputer {
public:
static LinkageInfo getLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
+ // Internal_linkage attribute overrides other considerations.
+ if (D->hasAttr<InternalLinkageAttr>())
+ return LinkageInfo::internal();
+
if (computation == LVForLinkageOnly && D->hasCachedLinkage())
return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false);
@@ -1336,7 +1371,7 @@ public:
// computed also does.
NamedDecl *Old = nullptr;
for (auto I : D->redecls()) {
- NamedDecl *T = cast<NamedDecl>(I);
+ auto *T = cast<NamedDecl>(I);
if (T == D)
continue;
if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
@@ -1388,28 +1423,29 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend();
I != E; ++I) {
- if (const ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
OS << Spec->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
TemplateSpecializationType::PrintTemplateArgumentList(OS,
TemplateArgs.data(),
TemplateArgs.size(),
P);
- } else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
+ } else if (const auto *ND = dyn_cast<NamespaceDecl>(*I)) {
if (P.SuppressUnwrittenScope &&
(ND->isAnonymousNamespace() || ND->isInline()))
continue;
- if (ND->isAnonymousNamespace())
- OS << "(anonymous namespace)";
+ if (ND->isAnonymousNamespace()) {
+ OS << (P.MSVCFormatting ? "`anonymous namespace\'"
+ : "(anonymous namespace)");
+ }
else
OS << *ND;
- } else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
+ } else if (const auto *RD = dyn_cast<RecordDecl>(*I)) {
if (!RD->getIdentifier())
OS << "(anonymous " << RD->getKindName() << ')';
else
OS << *RD;
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(*I)) {
const FunctionProtoType *FT = nullptr;
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(FD->getType()->castAs<FunctionType>());
@@ -1430,6 +1466,15 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
}
}
OS << ')';
+ } else if (const auto *ED = dyn_cast<EnumDecl>(*I)) {
+ // C++ [dcl.enum]p10: Each enum-name and each unscoped
+ // enumerator is declared in the scope that immediately contains
+ // the enum-specifier. Each scoped enumerator is declared in the
+ // scope of the enumeration.
+ if (ED->isScoped() || ED->getIdentifier())
+ OS << *ED;
+ else
+ continue;
} else {
OS << *cast<NamedDecl>(*I);
}
@@ -1451,32 +1496,6 @@ void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
printName(OS);
}
-static bool isKindReplaceableBy(Decl::Kind OldK, Decl::Kind NewK) {
- // For method declarations, we never replace.
- if (ObjCMethodDecl::classofKind(NewK))
- return false;
-
- if (OldK == NewK)
- return true;
-
- // A compatibility alias for a class can be replaced by an interface.
- if (ObjCCompatibleAliasDecl::classofKind(OldK) &&
- ObjCInterfaceDecl::classofKind(NewK))
- return true;
-
- // A typedef-declaration, alias-declaration, or Objective-C class declaration
- // can replace another declaration of the same type. Semantic analysis checks
- // that we have matching types.
- if ((TypedefNameDecl::classofKind(OldK) ||
- ObjCInterfaceDecl::classofKind(OldK)) &&
- (TypedefNameDecl::classofKind(NewK) ||
- ObjCInterfaceDecl::classofKind(NewK)))
- return true;
-
- // Otherwise, a kind mismatch implies that the declaration is not replaced.
- return false;
-}
-
template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) {
return true;
}
@@ -1500,9 +1519,19 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
if (OldD->isFromASTFile() && isFromASTFile())
return false;
- if (!isKindReplaceableBy(OldD->getKind(), getKind()))
+ // A kind mismatch implies that the declaration is not replaced.
+ if (OldD->getKind() != getKind())
+ return false;
+
+ // For method declarations, we never replace. (Why?)
+ if (isa<ObjCMethodDecl>(this))
return false;
+ // For parameters, pick the newer one. This is either an error or (in
+ // Objective-C) permitted as an extension.
+ if (isa<ParmVarDecl>(this))
+ return true;
+
// Inline namespaces can give us two declarations with the same
// name and kind in the same scope but different contexts; we should
// keep both declarations in this case.
@@ -1510,28 +1539,8 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
OldD->getDeclContext()->getRedeclContext()))
return false;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
- // For function declarations, we keep track of redeclarations.
- // FIXME: This returns false for functions that should in fact be replaced.
- // Instead, perform some kind of type check?
- if (FD->getPreviousDecl() != OldD)
- return false;
-
- // For function templates, the underlying function declarations are linked.
- if (const FunctionTemplateDecl *FunctionTemplate =
- dyn_cast<FunctionTemplateDecl>(this))
- return FunctionTemplate->getTemplatedDecl()->declarationReplaces(
- cast<FunctionTemplateDecl>(OldD)->getTemplatedDecl());
-
- // Using shadow declarations can be overloaded on their target declarations
- // if they introduce functions.
- // FIXME: If our target replaces the old target, can we replace the old
- // shadow declaration?
- if (auto *USD = dyn_cast<UsingShadowDecl>(this))
- if (USD->getTargetDecl() != cast<UsingShadowDecl>(OldD)->getTargetDecl())
- return false;
-
- // Using declarations can be overloaded if they introduce functions.
+ // Using declarations can be replaced if they import the same name from the
+ // same context.
if (auto *UD = dyn_cast<UsingDecl>(this)) {
ASTContext &Context = getASTContext();
return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
@@ -1546,13 +1555,20 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
}
-  // UsingDirectiveDecl's are not really NamedDecl's, and all have same name.
-  // We want to keep it, unless it nominates same namespace.
+  // UsingDirectiveDecls are not really NamedDecls, and they all have the
+  // same name. They can be replaced if they nominate the same namespace.
+ // FIXME: Is this true even if they have different module visibility?
if (auto *UD = dyn_cast<UsingDirectiveDecl>(this))
return UD->getNominatedNamespace()->getOriginalNamespace() ==
cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
->getOriginalNamespace();
- if (!IsKnownNewer && isRedeclarable(getKind())) {
+ if (isRedeclarable(getKind())) {
+ if (getCanonicalDecl() != OldD->getCanonicalDecl())
+ return false;
+
+ if (IsKnownNewer)
+ return true;
+
// Check whether this is actually newer than OldD. We want to keep the
// newer declaration. This loop will usually only iterate once, because
// OldD is usually the previous declaration.
@@ -1567,11 +1583,16 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
if (D->isCanonicalDecl())
return false;
}
+
+ // It's a newer declaration of the same kind of declaration in the same
+ // scope: we want this decl instead of the existing one.
+ return true;
}
- // It's a newer declaration of the same kind of declaration in the same scope,
- // and not an overload: we want this decl instead of the existing one.
- return true;
+ // In all other cases, we need to keep both declarations in case they have
+ // different visibility. Any attempt to use the name will result in an
+ // ambiguity if more than one is visible.
+ return false;
}
bool NamedDecl::hasLinkage() const {
@@ -1580,12 +1601,15 @@ bool NamedDecl::hasLinkage() const {
NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
NamedDecl *ND = this;
- while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
+ while (auto *UD = dyn_cast<UsingShadowDecl>(ND))
ND = UD->getTargetDecl();
- if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
+ if (auto *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
return AD->getClassInterface();
+ if (auto *AD = dyn_cast<NamespaceAliasDecl>(ND))
+ return AD->getNamespace();
+
return ND;
}
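
A minimal sketch of what the added NamespaceAliasDecl case buys (example
names are hypothetical):

    namespace A { void f(); }
    namespace B = A;  // a NamespaceAliasDecl
    // getUnderlyingDecl() on B now yields the NamespaceDecl for A, mirroring
    // the existing handling of using-shadow and ObjC compatibility aliases.
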
@@ -1599,8 +1623,7 @@ bool NamedDecl::isCXXInstanceMember() const {
if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
return true;
- if (const CXXMethodDecl *MD =
- dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
return MD->isInstance();
return false;
}
@@ -1628,7 +1651,7 @@ void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
// Make sure the extended decl info is allocated.
if (!hasExtInfo()) {
// Save (non-extended) type source info pointer.
- TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ auto *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
// Allocate external info struct.
DeclInfo = new (getASTContext()) ExtInfo;
// Restore savedTInfo into (extended) decl info.
@@ -1653,22 +1676,20 @@ void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
}
}
-void
-DeclaratorDecl::setTemplateParameterListsInfo(ASTContext &Context,
- unsigned NumTPLists,
- TemplateParameterList **TPLists) {
- assert(NumTPLists > 0);
+void DeclaratorDecl::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
+ assert(!TPLists.empty());
// Make sure the extended decl info is allocated.
if (!hasExtInfo()) {
// Save (non-extended) type source info pointer.
- TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ auto *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
// Allocate external info struct.
DeclInfo = new (getASTContext()) ExtInfo;
// Restore savedTInfo into (extended) decl info.
getExtInfo()->TInfo = savedTInfo;
}
// Set the template parameter lists info.
- getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+ getExtInfo()->setTemplateParameterListsInfo(Context, TPLists);
}
SourceLocation DeclaratorDecl::getOuterLocStart() const {
@@ -1726,13 +1747,8 @@ SourceRange DeclaratorDecl::getSourceRange() const {
return SourceRange(getOuterLocStart(), RangeEnd);
}
-void
-QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
- unsigned NumTPLists,
- TemplateParameterList **TPLists) {
- assert((NumTPLists == 0 || TPLists != nullptr) &&
- "Empty array of template parameters with positive size!");
-
+void QualifierInfo::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
// Free previous template parameters (if any).
if (NumTemplParamLists > 0) {
Context.Deallocate(TemplParamLists);
@@ -1740,10 +1756,10 @@ QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
NumTemplParamLists = 0;
}
// Set info on matched template parameter lists (if any).
- if (NumTPLists > 0) {
- TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
- NumTemplParamLists = NumTPLists;
- std::copy(TPLists, TPLists + NumTPLists, TemplParamLists);
+ if (!TPLists.empty()) {
+ TemplParamLists = new (Context) TemplateParameterList *[TPLists.size()];
+ NumTemplParamLists = TPLists.size();
+ std::copy(TPLists.begin(), TPLists.end(), TemplParamLists);
}
}
@@ -1756,7 +1772,6 @@ const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
case SC_None: break;
case SC_Auto: return "auto";
case SC_Extern: return "extern";
- case SC_OpenCLWorkGroupLocal: return "<<work-group-local>>";
case SC_PrivateExtern: return "__private_extern__";
case SC_Register: return "register";
case SC_Static: return "static";
@@ -1995,7 +2010,7 @@ VarDecl *VarDecl::getDefinition(ASTContext &C) {
VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
DefinitionKind Kind = DeclarationOnly;
-
+
const VarDecl *First = getFirstDecl();
for (auto I : First->redecls()) {
Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
@@ -2016,6 +2031,31 @@ const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
return nullptr;
}
+bool VarDecl::hasInit() const {
+ if (auto *P = dyn_cast<ParmVarDecl>(this))
+ if (P->hasUnparsedDefaultArg() || P->hasUninstantiatedDefaultArg())
+ return false;
+
+ return !Init.isNull();
+}
+
+Expr *VarDecl::getInit() {
+ if (!hasInit())
+ return nullptr;
+
+ if (auto *S = Init.dyn_cast<Stmt *>())
+ return cast<Expr>(S);
+
+ return cast_or_null<Expr>(Init.get<EvaluatedStmt *>()->Value);
+}
+
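These accessors rely on Init being a PointerUnion that starts out holding the
raw initializer and is later upgraded to an EvaluatedStmt cache. A standalone
sketch of that idiom, with hypothetical payload types:

    #include "llvm/ADT/PointerUnion.h"

    struct EvalCache { /* cached value and flags */ };

    // One pointer-sized slot: either the unevaluated payload or the cache.
    llvm::PointerUnion<int *, EvalCache *> Slot;

    int *raw() { return Slot.dyn_cast<int *>(); } // null once upgraded
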
+Stmt **VarDecl::getInitAddress() {
+ if (auto *ES = Init.dyn_cast<EvaluatedStmt *>())
+ return &ES->Value;
+
+ return Init.getAddrOfPtr1();
+}
+
bool VarDecl::isOutOfLine() const {
if (Decl::isOutOfLine())
return true;
@@ -2045,7 +2085,7 @@ VarDecl *VarDecl::getOutOfLineDefinition() {
}
void VarDecl::setInit(Expr *I) {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
+ if (auto *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
Eval->~EvaluatedStmt();
getASTContext().Deallocate(Eval);
}
@@ -2084,15 +2124,14 @@ bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
/// form, which contains extra information on the evaluated value of the
/// initializer.
EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
- EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
+ auto *Eval = Init.dyn_cast<EvaluatedStmt *>();
if (!Eval) {
- Stmt *S = Init.get<Stmt *>();
// Note: EvaluatedStmt contains an APValue, which usually holds
// resources not allocated from the ASTContext. We need to do some
// work to avoid leaking those, but we do so in VarDecl::evaluateValue
// where we can detect whether there's anything to clean up or not.
Eval = new (getASTContext()) EvaluatedStmt;
- Eval->Value = S;
+ Eval->Value = Init.get<Stmt *>();
Init = Eval;
}
return Eval;
@@ -2120,7 +2159,7 @@ APValue *VarDecl::evaluateValue(
if (Eval->WasEvaluated)
return Eval->Evaluated.isUninit() ? nullptr : &Eval->Evaluated;
- const Expr *Init = cast<Expr>(Eval->Value);
+ const auto *Init = cast<Expr>(Eval->Value);
assert(!Init->isValueDependent());
if (Eval->IsEvaluating) {
@@ -2156,6 +2195,27 @@ APValue *VarDecl::evaluateValue(
return Result ? &Eval->Evaluated : nullptr;
}
+APValue *VarDecl::getEvaluatedValue() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ if (Eval->WasEvaluated)
+ return &Eval->Evaluated;
+
+ return nullptr;
+}
+
+bool VarDecl::isInitKnownICE() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ return Eval->CheckedICE;
+
+ return false;
+}
+
+bool VarDecl::isInitICE() const {
+ assert(isInitKnownICE() &&
+ "Check whether we already know that the initializer is an ICE");
+ return Init.get<EvaluatedStmt *>()->IsICE;
+}
+
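For context, the cached IsICE bit records distinctions of this kind (a
sketch, not from the patch):

    const int a = 40 + 2; // integral constant expression: isInitICE() true
    extern int f();
    const int b = f();    // not an ICE: checkInitIsICE() caches false
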
bool VarDecl::checkInitIsICE() const {
// Initializers of weak variables are never ICEs.
if (isWeak())
@@ -2167,7 +2227,7 @@ bool VarDecl::checkInitIsICE() const {
// integral constant expression.
return Eval->IsICE;
- const Expr *Init = cast<Expr>(Eval->Value);
+ const auto *Init = cast<Expr>(Eval->Value);
assert(!Init->isValueDependent());
// In C++11, evaluate the initializer to check whether it's a constant
@@ -2200,8 +2260,7 @@ VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
}
TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
- if (const VarTemplateSpecializationDecl *Spec =
- dyn_cast<VarTemplateSpecializationDecl>(this))
+ if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
return Spec->getSpecializationKind();
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
@@ -2211,8 +2270,7 @@ TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
}
SourceLocation VarDecl::getPointOfInstantiation() const {
- if (const VarTemplateSpecializationDecl *Spec =
- dyn_cast<VarTemplateSpecializationDecl>(this))
+ if (const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(this))
return Spec->getPointOfInstantiation();
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
@@ -2285,7 +2343,7 @@ ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
QualType ParmVarDecl::getOriginalType() const {
TypeSourceInfo *TSI = getTypeSourceInfo();
QualType T = TSI ? TSI->getType() : getType();
- if (const DecayedType *DT = dyn_cast<DecayedType>(T))
+ if (const auto *DT = dyn_cast<DecayedType>(T))
return DT->getOriginalType();
return T;
}
@@ -2315,22 +2373,56 @@ Expr *ParmVarDecl::getDefaultArg() {
assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
assert(!hasUninstantiatedDefaultArg() &&
"Default argument is not yet instantiated!");
-
+
Expr *Arg = getInit();
- if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ if (auto *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
return E->getSubExpr();
return Arg;
}
+void ParmVarDecl::setDefaultArg(Expr *defarg) {
+ ParmVarDeclBits.DefaultArgKind = DAK_Normal;
+ Init = defarg;
+}
+
SourceRange ParmVarDecl::getDefaultArgRange() const {
- if (const Expr *E = getInit())
- return E->getSourceRange();
+ switch (ParmVarDeclBits.DefaultArgKind) {
+ case DAK_None:
+ case DAK_Unparsed:
+ // Nothing we can do here.
+ return SourceRange();
- if (hasUninstantiatedDefaultArg())
+ case DAK_Uninstantiated:
return getUninstantiatedDefaultArg()->getSourceRange();
- return SourceRange();
+ case DAK_Normal:
+ if (const Expr *E = getInit())
+ return E->getSourceRange();
+
+ // Missing an actual expression; the declaration may be invalid.
+ return SourceRange();
+ }
+ llvm_unreachable("Invalid default argument kind.");
+}
+
+void ParmVarDecl::setUninstantiatedDefaultArg(Expr *arg) {
+ ParmVarDeclBits.DefaultArgKind = DAK_Uninstantiated;
+ Init = arg;
+}
+
+Expr *ParmVarDecl::getUninstantiatedDefaultArg() {
+ assert(hasUninstantiatedDefaultArg() &&
+ "Wrong kind of initialization expression!");
+ return cast_or_null<Expr>(Init.get<Stmt *>());
+}
+
+bool ParmVarDecl::hasDefaultArg() const {
+ // FIXME: We should just return false for DAK_None here once callers are
+ // prepared for the case that we encountered an invalid default argument and
+ // were unable to even build an invalid expression.
+ return hasUnparsedDefaultArg() || hasUninstantiatedDefaultArg() ||
+ !Init.isNull();
}
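
A rough sketch of the three states the DefaultArgKind discriminator tells
apart (simplified; the exact triggers live in Sema):

    void f(int x = 42);            // DAK_Normal: a parsed expression
    template <typename T> struct S {
      void g(T t = T());           // DAK_Uninstantiated in instantiations
    };                             //   until the default arg is needed
    struct R {
      void h(int y = k);           // DAK_Unparsed while R's body is still
      static const int k = 7;      //   being parsed (late-parsed default)
    };
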
bool ParmVarDecl::isParameterPack() const {
@@ -2360,7 +2452,7 @@ void FunctionDecl::getNameForDiagnostic(
}
bool FunctionDecl::isVariadic() const {
- if (const FunctionProtoType *FT = getType()->getAs<FunctionProtoType>())
+ if (const auto *FT = getType()->getAs<FunctionProtoType>())
return FT->isVariadic();
return false;
}
@@ -2421,7 +2513,7 @@ void FunctionDecl::setBody(Stmt *B) {
void FunctionDecl::setPure(bool P) {
IsPure = P;
if (P)
- if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ if (auto *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
Parent->markedVirtualFunctionPure();
}
@@ -2476,7 +2568,7 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
- const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
+ const auto *proto = getType()->castAs<FunctionProtoType>();
if (proto->getNumParams() != 2 || proto->isVariadic())
return false;
@@ -2505,7 +2597,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
- const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
+ const auto *FPT = getType()->castAs<FunctionProtoType>();
if (FPT->getNumParams() == 0 || FPT->getNumParams() > 2 || FPT->isVariadic())
return false;
@@ -2547,7 +2639,7 @@ bool FunctionDecl::isInExternCXXContext() const {
}
bool FunctionDecl::isGlobal() const {
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(this))
return Method->isStatic();
if (getCanonicalDecl()->getStorageClass() == SC_Static)
@@ -2556,7 +2648,7 @@ bool FunctionDecl::isGlobal() const {
for (const DeclContext *DC = getDeclContext();
DC->isNamespace();
DC = DC->getParent()) {
- if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
+ if (const auto *Namespace = cast<NamespaceDecl>(DC)) {
if (!Namespace->getDeclName())
return false;
break;
@@ -2608,8 +2700,8 @@ unsigned FunctionDecl::getBuiltinID() const {
ASTContext &Context = getASTContext();
if (Context.getLangOpts().CPlusPlus) {
- const LinkageSpecDecl *LinkageDecl = dyn_cast<LinkageSpecDecl>(
- getFirstDecl()->getDeclContext());
+ const auto *LinkageDecl =
+ dyn_cast<LinkageSpecDecl>(getFirstDecl()->getDeclContext());
// In C++, the first declaration of a builtin is always inside an implicit
// extern "C".
// FIXME: A recognised library function may not be directly in an extern "C"
@@ -2649,7 +2741,7 @@ unsigned FunctionDecl::getBuiltinID() const {
/// based on its FunctionType. This is the length of the ParamInfo array
/// after it has been created.
unsigned FunctionDecl::getNumParams() const {
- const FunctionProtoType *FPT = getType()->getAs<FunctionProtoType>();
+ const auto *FPT = getType()->getAs<FunctionProtoType>();
return FPT ? FPT->getNumParams() : 0;
}
@@ -2711,7 +2803,8 @@ bool FunctionDecl::isMSExternInline() const {
assert(isInlined() && "expected to get called on an inlined function!");
const ASTContext &Context = getASTContext();
- if (!Context.getLangOpts().MSVCCompat && !hasAttr<DLLExportAttr>())
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !hasAttr<DLLExportAttr>())
return false;
for (const FunctionDecl *FD = getMostRecentDecl(); FD;
@@ -2840,7 +2933,7 @@ bool FunctionDecl::hasUnusedResultAttr() const {
QualType RetType = getReturnType();
if (RetType->isRecordType()) {
const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl();
- const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(this);
+ const auto *MD = dyn_cast<CXXMethodDecl>(this);
if (Ret && Ret->hasAttr<WarnUnusedResultAttr>() &&
!(MD && MD->getCorrespondingMethodInClass(Ret, true)))
return true;
@@ -2952,6 +3045,10 @@ FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
return nullptr;
}
+MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
+ return TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>();
+}
+
void
FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
FunctionDecl *FD,
@@ -2963,6 +3060,14 @@ FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
TemplateOrSpecialization = Info;
}
+FunctionTemplateDecl *FunctionDecl::getDescribedFunctionTemplate() const {
+ return TemplateOrSpecialization.dyn_cast<FunctionTemplateDecl *>();
+}
+
+void FunctionDecl::setDescribedFunctionTemplate(FunctionTemplateDecl *Template) {
+ TemplateOrSpecialization = Template;
+}
+
bool FunctionDecl::isImplicitlyInstantiable() const {
// If the function is invalid, it can't be implicitly instantiated.
if (isInvalidDecl())
@@ -3069,6 +3174,12 @@ FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
return getASTContext().getClassScopeSpecializationPattern(this);
}
+FunctionTemplateSpecializationInfo *
+FunctionDecl::getTemplateSpecializationInfo() const {
+ return TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo *>();
+}
+
const TemplateArgumentList *
FunctionDecl::getTemplateSpecializationArgs() const {
if (FunctionTemplateSpecializationInfo *Info
@@ -3115,33 +3226,41 @@ FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs) {
assert(TemplateOrSpecialization.isNull());
- size_t Size = sizeof(DependentFunctionTemplateSpecializationInfo);
- Size += Templates.size() * sizeof(FunctionTemplateDecl*);
- Size += TemplateArgs.size() * sizeof(TemplateArgumentLoc);
- void *Buffer = Context.Allocate(Size);
DependentFunctionTemplateSpecializationInfo *Info =
- new (Buffer) DependentFunctionTemplateSpecializationInfo(Templates,
- TemplateArgs);
+ DependentFunctionTemplateSpecializationInfo::Create(Context, Templates,
+ TemplateArgs);
TemplateOrSpecialization = Info;
}
+DependentFunctionTemplateSpecializationInfo *
+FunctionDecl::getDependentSpecializationInfo() const {
+ return TemplateOrSpecialization
+ .dyn_cast<DependentFunctionTemplateSpecializationInfo *>();
+}
+
+DependentFunctionTemplateSpecializationInfo *
+DependentFunctionTemplateSpecializationInfo::Create(
+ ASTContext &Context, const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs) {
+ void *Buffer = Context.Allocate(
+ totalSizeToAlloc<TemplateArgumentLoc, FunctionTemplateDecl *>(
+ TArgs.size(), Ts.size()));
+ return new (Buffer) DependentFunctionTemplateSpecializationInfo(Ts, TArgs);
+}
+
DependentFunctionTemplateSpecializationInfo::
DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
const TemplateArgumentListInfo &TArgs)
: AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
- static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
- "Trailing data is unaligned!");
- d.NumTemplates = Ts.size();
- d.NumArgs = TArgs.size();
+ NumTemplates = Ts.size();
+ NumArgs = TArgs.size();
- FunctionTemplateDecl **TsArray =
- const_cast<FunctionTemplateDecl**>(getTemplates());
+ FunctionTemplateDecl **TsArray = getTrailingObjects<FunctionTemplateDecl *>();
for (unsigned I = 0, E = Ts.size(); I != E; ++I)
TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
- TemplateArgumentLoc *ArgsArray =
- const_cast<TemplateArgumentLoc*>(getTemplateArgs());
+ TemplateArgumentLoc *ArgsArray = getTrailingObjects<TemplateArgumentLoc>();
for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
}
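
Several hunks in this patch swap manual `this + 1` arithmetic for
llvm::TrailingObjects. A minimal self-contained sketch of the idiom, with a
hypothetical Node class:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/TrailingObjects.h"
    #include <memory>
    #include <new>

    class Node final : private llvm::TrailingObjects<Node, int> {
      friend TrailingObjects;
      unsigned NumInts;

      Node(llvm::ArrayRef<int> Ints) : NumInts(Ints.size()) {
        // Copy the payload into the storage allocated past the object.
        std::uninitialized_copy(Ints.begin(), Ints.end(),
                                getTrailingObjects<int>());
      }

    public:
      static Node *create(llvm::ArrayRef<int> Ints) {
        void *Mem = ::operator new(totalSizeToAlloc<int>(Ints.size()));
        return new (Mem) Node(Ints);
      }
      llvm::ArrayRef<int> ints() const {
        return llvm::makeArrayRef(getTrailingObjects<int>(), NumInts);
      }
    };
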
@@ -3335,7 +3454,7 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
if (!isImplicit() || getDeclName())
return false;
- if (const RecordType *Record = getType()->getAs<RecordType>())
+ if (const auto *Record = getType()->getAs<RecordType>())
return Record->getDecl()->isAnonymousStructOrUnion();
return false;
@@ -3343,7 +3462,7 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
assert(isBitField() && "not a bitfield");
- Expr *BitWidth = static_cast<Expr *>(InitStorage.getPointer());
+ auto *BitWidth = static_cast<Expr *>(InitStorage.getPointer());
return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
}
@@ -3372,7 +3491,7 @@ SourceRange FieldDecl::getSourceRange() const {
case ISK_BitWidthOrNothing:
case ISK_InClassCopyInit:
case ISK_InClassListInit:
- if (const Expr *E = static_cast<const Expr *>(InitStorage.getPointer()))
+ if (const auto *E = static_cast<const Expr *>(InitStorage.getPointer()))
return SourceRange(getInnerLocStart(), E->getLocEnd());
// FALLTHROUGH
@@ -3408,7 +3527,7 @@ SourceRange TagDecl::getSourceRange() const {
TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
- NamedDeclOrQualifier = TDD;
+ TypedefNameDeclOrQualifier = TDD;
if (const Type *T = getTypeForDecl()) {
(void)T;
assert(T->isLinkageValid());
@@ -3419,7 +3538,7 @@ void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
void TagDecl::startDefinition() {
IsBeingDefined = true;
- if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(this)) {
+ if (auto *D = dyn_cast<CXXRecordDecl>(this)) {
struct CXXRecordDecl::DefinitionData *Data =
new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
for (auto I : redecls())
@@ -3452,7 +3571,7 @@ TagDecl *TagDecl::getDefinition() const {
}
}
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
for (auto R : redecls())
@@ -3466,7 +3585,7 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
if (QualifierLoc) {
// Make sure the extended qualifier info is allocated.
if (!hasExtInfo())
- NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
// Set qualifier info.
getExtInfo()->QualifierLoc = QualifierLoc;
} else {
@@ -3474,7 +3593,7 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
if (hasExtInfo()) {
if (getExtInfo()->NumTemplParamLists == 0) {
getASTContext().Deallocate(getExtInfo());
- NamedDeclOrQualifier = (TypedefNameDecl*)nullptr;
+ TypedefNameDeclOrQualifier = (TypedefNameDecl *)nullptr;
}
else
getExtInfo()->QualifierLoc = QualifierLoc;
@@ -3482,16 +3601,15 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
}
}
-void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
- unsigned NumTPLists,
- TemplateParameterList **TPLists) {
- assert(NumTPLists > 0);
+void TagDecl::setTemplateParameterListsInfo(
+ ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
+ assert(!TPLists.empty());
// Make sure the extended decl info is allocated.
if (!hasExtInfo())
// Allocate external info struct.
- NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
// Set the template parameter lists info.
- getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+ getExtInfo()->setTemplateParameterListsInfo(Context, TPLists);
}
//===----------------------------------------------------------------------===//
@@ -3505,9 +3623,8 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
EnumDecl *PrevDecl, bool IsScoped,
bool IsScopedUsingClassTag, bool IsFixed) {
- EnumDecl *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
- IsScoped, IsScopedUsingClassTag,
- IsFixed);
+ auto *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
+ IsScoped, IsScopedUsingClassTag, IsFixed);
Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
C.getTypeDeclType(Enum, PrevDecl);
return Enum;
@@ -3647,10 +3764,6 @@ bool RecordDecl::isMsStruct(const ASTContext &C) const {
return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
}
-static bool isFieldOrIndirectField(Decl::Kind K) {
- return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
-}
-
void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource *Source = getASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
@@ -3659,16 +3772,10 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource::Deserializing TheFields(Source);
SmallVector<Decl*, 64> Decls;
- LoadedFieldsFromExternalStorage = true;
- switch (Source->FindExternalLexicalDecls(this, isFieldOrIndirectField,
- Decls)) {
- case ELR_Success:
- break;
-
- case ELR_AlreadyLoaded:
- case ELR_Failure:
- return;
- }
+ LoadedFieldsFromExternalStorage = true;
+ Source->FindExternalLexicalDecls(this, [](Decl::Kind K) {
+ return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
+ }, Decls);
#ifndef NDEBUG
// Check that all decls we got were FieldDecls.
@@ -3690,7 +3797,7 @@ bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
!Context.getLangOpts().SanitizeAddressFieldPadding)
return false;
const auto &Blacklist = Context.getSanitizerBlacklist();
- const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this);
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(this);
// We may be able to relax some of these requirements.
int ReasonToReject = -1;
if (!CXXRD || CXXRD->isExternCContext())
@@ -3731,9 +3838,9 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
if (I->getIdentifier())
return I;
- if (const RecordType *RT = I->getType()->getAs<RecordType>())
+ if (const auto *RT = I->getType()->getAs<RecordType>())
if (const FieldDecl *NamedDataMember =
- RT->getDecl()->findFirstNamedDataMember())
+ RT->getDecl()->findFirstNamedDataMember())
return NamedDataMember;
}
@@ -3757,26 +3864,17 @@ void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
}
}
-void BlockDecl::setCaptures(ASTContext &Context,
- const Capture *begin,
- const Capture *end,
- bool capturesCXXThis) {
- CapturesCXXThis = capturesCXXThis;
+void BlockDecl::setCaptures(ASTContext &Context, ArrayRef<Capture> Captures,
+ bool CapturesCXXThis) {
+ this->CapturesCXXThis = CapturesCXXThis;
+ this->NumCaptures = Captures.size();
- if (begin == end) {
- NumCaptures = 0;
- Captures = nullptr;
+ if (Captures.empty()) {
+ this->Captures = nullptr;
return;
}
- NumCaptures = end - begin;
-
- // Avoid new Capture[] because we don't want to provide a default
- // constructor.
- size_t allocationSize = NumCaptures * sizeof(Capture);
- void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
- memcpy(buffer, begin, allocationSize);
- Captures = static_cast<Capture*>(buffer);
+ this->Captures = Captures.copy(Context).data();
}
bool BlockDecl::capturesVariable(const VarDecl *variable) const {
@@ -3889,18 +3987,28 @@ BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) BlockDecl(nullptr, SourceLocation());
}
+CapturedDecl::CapturedDecl(DeclContext *DC, unsigned NumParams)
+ : Decl(Captured, DC, SourceLocation()), DeclContext(Captured),
+ NumParams(NumParams), ContextParam(0), BodyAndNothrow(nullptr, false) {}
+
CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
unsigned NumParams) {
- return new (C, DC, NumParams * sizeof(ImplicitParamDecl *))
+ return new (C, DC, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams))
CapturedDecl(DC, NumParams);
}
CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumParams) {
- return new (C, ID, NumParams * sizeof(ImplicitParamDecl *))
+ return new (C, ID, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams))
CapturedDecl(nullptr, NumParams);
}
+Stmt *CapturedDecl::getBody() const { return BodyAndNothrow.getPointer(); }
+void CapturedDecl::setBody(Stmt *B) { BodyAndNothrow.setPointer(B); }
+
+bool CapturedDecl::isNothrow() const { return BodyAndNothrow.getInt(); }
+void CapturedDecl::setNothrow(bool Nothrow) { BodyAndNothrow.setInt(Nothrow); }
+
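BodyAndNothrow packs the body pointer and the nothrow flag into a single
word using llvm::PointerIntPair, which stores the flag in the pointer's
spare alignment bits. A sketch with a hypothetical payload:

    #include "llvm/ADT/PointerIntPair.h"

    llvm::PointerIntPair<int *, 1, bool> PtrAndFlag;
    // PtrAndFlag.setPointer(p);   PtrAndFlag.setInt(true);
    // PtrAndFlag.getPointer();    PtrAndFlag.getInt();
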
EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
SourceLocation L,
IdentifierInfo *Id, QualType T,
@@ -4042,9 +4150,9 @@ ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
NextLocalImport()
{
assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
- SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1);
- memcpy(StoredLocs, IdentifierLocs.data(),
- IdentifierLocs.size() * sizeof(SourceLocation));
+ auto *StoredLocs = getTrailingObjects<SourceLocation>();
+ std::uninitialized_copy(IdentifierLocs.begin(), IdentifierLocs.end(),
+ StoredLocs);
}
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
@@ -4052,13 +4160,14 @@ ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
: Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
NextLocalImport()
{
- *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
+ *getTrailingObjects<SourceLocation>() = EndLoc;
}
ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs) {
- return new (C, DC, IdentifierLocs.size() * sizeof(SourceLocation))
+ return new (C, DC,
+ additionalSizeToAlloc<SourceLocation>(IdentifierLocs.size()))
ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
}
@@ -4066,16 +4175,15 @@ ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
Module *Imported,
SourceLocation EndLoc) {
- ImportDecl *Import =
- new (C, DC, sizeof(SourceLocation)) ImportDecl(DC, StartLoc,
- Imported, EndLoc);
+ ImportDecl *Import = new (C, DC, additionalSizeToAlloc<SourceLocation>(1))
+ ImportDecl(DC, StartLoc, Imported, EndLoc);
Import->setImplicit();
return Import;
}
ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumLocations) {
- return new (C, ID, NumLocations * sizeof(SourceLocation))
+ return new (C, ID, additionalSizeToAlloc<SourceLocation>(NumLocations))
ImportDecl(EmptyShell());
}
@@ -4083,16 +4191,14 @@ ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
if (!ImportedAndComplete.getInt())
return None;
- const SourceLocation *StoredLocs
- = reinterpret_cast<const SourceLocation *>(this + 1);
+ const auto *StoredLocs = getTrailingObjects<SourceLocation>();
return llvm::makeArrayRef(StoredLocs,
getNumModuleIdentifiers(getImportedModule()));
}
SourceRange ImportDecl::getSourceRange() const {
if (!ImportedAndComplete.getInt())
- return SourceRange(getLocation(),
- *reinterpret_cast<const SourceLocation *>(this + 1));
-
+ return SourceRange(getLocation(), *getTrailingObjects<SourceLocation>());
+
return SourceRange(getLocation(), getIdentifierLocs().back());
}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 4fcec53d6eb9..16394e865eb1 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -45,10 +45,19 @@ void Decl::updateOutOfDate(IdentifierInfo &II) const {
getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
}
+#define DECL(DERIVED, BASE) \
+ static_assert(Decl::DeclObjAlignment >= \
+ llvm::AlignOf<DERIVED##Decl>::Alignment, \
+ "Alignment sufficient after objects prepended to " #DERIVED);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
void *Decl::operator new(std::size_t Size, const ASTContext &Context,
unsigned ID, std::size_t Extra) {
// Allocate an extra 8 bytes worth of storage, which ensures that the
- // resulting pointer will still be 8-byte aligned.
+ // resulting pointer will still be 8-byte aligned.
+ static_assert(sizeof(unsigned) * 2 >= DeclObjAlignment,
+ "Decl won't be misaligned");
void *Start = Context.Allocate(Size + Extra + 8);
void *Result = (char*)Start + 8;
@@ -69,7 +78,13 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
// With local visibility enabled, we track the owning module even for local
// declarations.
if (Ctx.getLangOpts().ModulesLocalVisibility) {
- void *Buffer = ::operator new(sizeof(Module *) + Size + Extra, Ctx);
+ // Ensure required alignment of the resulting object by adding extra
+ // padding at the start if required.
+ size_t ExtraAlign =
+ llvm::OffsetToAlignment(sizeof(Module *), DeclObjAlignment);
+ char *Buffer = reinterpret_cast<char *>(
+ ::operator new(ExtraAlign + sizeof(Module *) + Size + Extra, Ctx));
+ Buffer += ExtraAlign;
return new (Buffer) Module*(nullptr) + 1;
}
return ::operator new(Size + Extra, Ctx);
@@ -251,6 +266,18 @@ void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
}
}
+bool Decl::isLexicallyWithinFunctionOrMethod() const {
+ const DeclContext *LDC = getLexicalDeclContext();
+ while (true) {
+ if (LDC->isFunctionOrMethod())
+ return true;
+ if (!isa<TagDecl>(LDC))
+ return false;
+ LDC = LDC->getLexicalParent();
+ }
+}
+
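A quick illustration of what the new predicate answers (sketch only):

    void g() {
      struct Local {
        void m();  // isLexicallyWithinFunctionOrMethod() is true for m:
      };           // the walk climbs Local (a TagDecl) to reach g()
    }
    int global;    // false: no function in the lexical chain
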
bool Decl::isInAnonymousNamespace() const {
const DeclContext *DC = getDeclContext();
do {
@@ -612,6 +639,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ExternCContext:
case UsingDirective:
+ case BuiltinTemplate:
case ClassTemplateSpecialization:
case ClassTemplatePartialSpecialization:
case ClassScopeFunctionSpecialization:
@@ -1044,14 +1072,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
// Load the external declarations, if any.
SmallVector<Decl*, 64> Decls;
ExternalLexicalStorage = false;
- switch (Source->FindExternalLexicalDecls(this, Decls)) {
- case ELR_Success:
- break;
-
- case ELR_Failure:
- case ELR_AlreadyLoaded:
- return false;
- }
+ Source->FindExternalLexicalDecls(this, Decls);
if (Decls.empty())
return false;
@@ -1189,13 +1210,16 @@ void DeclContext::removeDecl(Decl *D) {
// Remove only decls that have a name
if (!ND->getDeclName()) return;
- StoredDeclsMap *Map = getPrimaryContext()->LookupPtr;
- if (!Map) return;
-
- StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
- assert(Pos != Map->end() && "no lookup entry for decl");
- if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
- Pos->second.remove(ND);
+ auto *DC = this;
+ do {
+ StoredDeclsMap *Map = DC->getPrimaryContext()->LookupPtr;
+ if (Map) {
+ StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
+ assert(Pos != Map->end() && "no lookup entry for decl");
+ if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ Pos->second.remove(ND);
+ }
+ } while (DC->isTransparentContext() && (DC = DC->getParent()));
}
}
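
The enclosing-context walk added above matters for transparent contexts,
whose members are also registered in outer lookup tables. For example:

    enum Color { Red, Green };  // unscoped enums are transparent contexts
    Color c = Red;              // Red resolves via the enclosing scope, so
                                // removing it must update that table too
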
@@ -1213,7 +1237,7 @@ void DeclContext::addHiddenDecl(Decl *D) {
}
// Notify a C++ record declaration that we've added a member, so it can
- // update it's class-specific state.
+ // update its class-specific state.
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
Record->addedMember(D);
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index d905fcf13a45..4f24fdc28f71 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -385,17 +385,11 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
}
}
-/// Callback function for CXXRecordDecl::forallBases that acknowledges
-/// that it saw a base class.
-static bool SawBase(const CXXRecordDecl *, void *) {
- return true;
-}
-
bool CXXRecordDecl::hasAnyDependentBases() const {
if (!isDependentContext())
return false;
- return !forallBases(SawBase, nullptr);
+ return !forallBases([](const CXXRecordDecl *) { return true; });
}
bool CXXRecordDecl::isTriviallyCopyable() const {
@@ -1224,6 +1218,10 @@ CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
return nullptr;
}
+MemberSpecializationInfo *CXXRecordDecl::getMemberSpecializationInfo() const {
+ return TemplateOrInstantiation.dyn_cast<MemberSpecializationInfo *>();
+}
+
void
CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
TemplateSpecializationKind TSK) {
@@ -1234,6 +1232,14 @@ CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
= new (getASTContext()) MemberSpecializationInfo(RD, TSK);
}
+ClassTemplateDecl *CXXRecordDecl::getDescribedClassTemplate() const {
+ return TemplateOrInstantiation.dyn_cast<ClassTemplateDecl *>();
+}
+
+void CXXRecordDecl::setDescribedClassTemplate(ClassTemplateDecl *Template) {
+ TemplateOrInstantiation = Template;
+}
+
TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
if (const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(this))
@@ -1681,8 +1687,8 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
{
- VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
- memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
+ std::uninitialized_copy(Indices, Indices + NumIndices,
+ getTrailingObjects<VarDecl *>());
}
CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
@@ -1692,8 +1698,7 @@ CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
SourceLocation R,
VarDecl **Indices,
unsigned NumIndices) {
- void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) +
- sizeof(VarDecl *) * NumIndices,
+ void *Mem = Context.Allocate(totalSizeToAlloc<VarDecl *>(NumIndices),
llvm::alignOf<CXXCtorInitializer>());
return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
Indices, NumIndices);
@@ -2023,6 +2028,22 @@ NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
SourceLocation(), nullptr, nullptr);
}
+NamespaceDecl *NamespaceDecl::getOriginalNamespace() {
+ if (isFirstDecl())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+}
+
+const NamespaceDecl *NamespaceDecl::getOriginalNamespace() const {
+ if (isFirstDecl())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+}
+
+bool NamespaceDecl::isOriginalNamespace() const { return isFirstDecl(); }
+
NamespaceDecl *NamespaceDecl::getNextRedeclarationImpl() {
return getNextRedeclaration();
}
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index a996cab093af..121403b07e57 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -46,7 +46,9 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
}
#endif
- std::size_t Extra = FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
+ std::size_t Extra =
+ FriendDecl::additionalSizeToAlloc<TemplateParameterList *>(
+ FriendTypeTPLists.size());
FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
FriendTypeTPLists);
cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
@@ -55,7 +57,8 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned FriendTypeNumTPLists) {
- std::size_t Extra = FriendTypeNumTPLists * sizeof(TemplateParameterList*);
+ std::size_t Extra =
+ additionalSizeToAlloc<TemplateParameterList *>(FriendTypeNumTPLists);
return new (C, ID, Extra) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
}
diff --git a/lib/AST/DeclGroup.cpp b/lib/AST/DeclGroup.cpp
index 512837fdf3f4..f162e6d40c48 100644
--- a/lib/AST/DeclGroup.cpp
+++ b/lib/AST/DeclGroup.cpp
@@ -18,10 +18,8 @@
using namespace clang;
DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
- static_assert(sizeof(DeclGroup) % llvm::AlignOf<void *>::Alignment == 0,
- "Trailing data is unaligned!");
assert(NumDecls > 1 && "Invalid DeclGroup");
- unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
+ unsigned Size = totalSizeToAlloc<Decl *>(NumDecls);
void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
new (Mem) DeclGroup(NumDecls, Decls);
return static_cast<DeclGroup*>(Mem);
@@ -30,5 +28,6 @@ DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
assert(numdecls > 0);
assert(decls);
- memcpy(this+1, decls, numdecls * sizeof(*decls));
+ std::uninitialized_copy(decls, decls + numdecls,
+ getTrailingObjects<Decl *>());
}
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 280c412ae8ff..050a0f53f1e5 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -161,6 +161,15 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
return nullptr;
}
+ // If context is class, then lookup property in its extensions.
+ // This comes before property is looked up in primary class.
+ if (auto *IDecl = dyn_cast<ObjCInterfaceDecl>(DC)) {
+ for (const auto *Ext : IDecl->known_extensions())
+ if (ObjCPropertyDecl *PD = ObjCPropertyDecl::findPropertyDecl(Ext,
+ propertyID))
+ return PD;
+ }
+
DeclContext::lookup_result R = DC->lookup(propertyID);
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
++I)
@@ -190,6 +199,15 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
if (Def->isHidden())
return nullptr;
}
+
+ // Search the extensions of a class first; they override what's in
+ // the class itself.
+ if (const auto *ClassDecl = dyn_cast<ObjCInterfaceDecl>(this)) {
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (auto *P = Ext->FindPropertyDeclaration(PropertyId))
+ return P;
+ }
+ }
if (ObjCPropertyDecl *PD =
ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
@@ -207,7 +225,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
}
case Decl::ObjCInterface: {
const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
- // Look through categories (but not extensions).
+ // Look through categories (but not extensions; they were handled above).
for (const auto *Cat : OID->visible_categories()) {
if (!Cat->IsClassExtension())
if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
@@ -327,6 +345,13 @@ void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
PM[Prop->getIdentifier()] = Prop;
PO.push_back(Prop);
}
+ for (const auto *Ext : known_extensions()) {
+ const ObjCCategoryDecl *ClassExt = Ext;
+ for (auto *Prop : ClassExt->properties()) {
+ PM[Prop->getIdentifier()] = Prop;
+ PO.push_back(Prop);
+ }
+ }
for (const auto *PI : all_referenced_protocols())
PI->collectPropertiesToImplement(PM, PO);
// Note, the properties declared only in class extensions are still copied
@@ -747,6 +772,10 @@ void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
if (Params.empty() && SelLocs.empty())
return;
+ static_assert(llvm::AlignOf<ParmVarDecl *>::Alignment >=
+ llvm::AlignOf<SourceLocation>::Alignment,
+ "Alignment not sufficient for SourceLocation");
+
unsigned Size = sizeof(ParmVarDecl *) * NumParams +
sizeof(SourceLocation) * SelLocs.size();
ParamsAndSelLocs = C.Allocate(Size);
@@ -1182,18 +1211,47 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
if (isPropertyAccessor()) {
const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent());
- // If container is class extension, find its primary class.
- if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(Container))
- if (CatDecl->IsClassExtension())
- Container = CatDecl->getClassInterface();
-
bool IsGetter = (NumArgs == 0);
- for (const auto *I : Container->properties()) {
- Selector NextSel = IsGetter ? I->getGetterName()
- : I->getSetterName();
- if (NextSel == Sel)
- return I;
+ /// Local function that attempts to find a matching property within the
+ /// given Objective-C container.
+ auto findMatchingProperty =
+ [&](const ObjCContainerDecl *Container) -> const ObjCPropertyDecl * {
+
+ for (const auto *I : Container->properties()) {
+ Selector NextSel = IsGetter ? I->getGetterName()
+ : I->getSetterName();
+ if (NextSel == Sel)
+ return I;
+ }
+
+ return nullptr;
+ };
+
+ // Look in the container we were given.
+ if (const auto *Found = findMatchingProperty(Container))
+ return Found;
+
+ // If we're in a category or extension, look in the main class.
+ const ObjCInterfaceDecl *ClassDecl = nullptr;
+ if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ ClassDecl = Category->getClassInterface();
+ if (const auto *Found = findMatchingProperty(ClassDecl))
+ return Found;
+ } else {
+ // Determine whether the container is a class.
+ ClassDecl = dyn_cast<ObjCInterfaceDecl>(Container);
+ }
+
+ // If we have a class, check its visible extensions.
+ if (ClassDecl) {
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (Ext == Container)
+ continue;
+
+ if (const auto *Found = findMatchingProperty(Ext))
+ return Found;
+ }
}
llvm_unreachable("Marked as a property accessor but no property found!");
@@ -1272,13 +1330,9 @@ ObjCTypeParamList *ObjCTypeParamList::create(
SourceLocation lAngleLoc,
ArrayRef<ObjCTypeParamDecl *> typeParams,
SourceLocation rAngleLoc) {
- unsigned size = sizeof(ObjCTypeParamList)
- + sizeof(ObjCTypeParamDecl *) * typeParams.size();
- static_assert(llvm::AlignOf<ObjCTypeParamList>::Alignment >=
- llvm::AlignOf<ObjCTypeParamDecl *>::Alignment,
- "type parameter list needs greater alignment");
- unsigned align = llvm::alignOf<ObjCTypeParamList>();
- void *mem = ctx.Allocate(size, align);
+ void *mem =
+ ctx.Allocate(totalSizeToAlloc<ObjCTypeParamDecl *>(typeParams.size()),
+ llvm::alignOf<ObjCTypeParamList>());
return new (mem) ObjCTypeParamList(lAngleLoc, typeParams, rAngleLoc);
}
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index 5f8b42b3f964..493e2cd41226 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -29,8 +29,9 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL) {
- OMPThreadPrivateDecl *D = new (C, DC, VL.size() * sizeof(Expr *))
- OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
+ OMPThreadPrivateDecl *D =
+ new (C, DC, additionalSizeToAlloc<Expr *>(VL.size()))
+ OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
D->NumVars = VL.size();
D->setVars(VL);
return D;
@@ -39,7 +40,7 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
unsigned N) {
- OMPThreadPrivateDecl *D = new (C, ID, N * sizeof(Expr *))
+ OMPThreadPrivateDecl *D = new (C, ID, additionalSizeToAlloc<Expr *>(N))
OMPThreadPrivateDecl(OMPThreadPrivate, nullptr, SourceLocation());
D->NumVars = N;
return D;
@@ -48,7 +49,6 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
- Expr **Vars = reinterpret_cast<Expr **>(this + 1);
- std::copy(VL.begin(), VL.end(), Vars);
+ std::uninitialized_copy(VL.begin(), VL.end(), getTrailingObjects<Expr *>());
}
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 3202d8c75436..5c6002d55c0f 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -96,6 +96,7 @@ namespace {
void PrintTemplateParameters(const TemplateParameterList *Params,
const TemplateArgumentList *Args = nullptr);
void prettyPrintAttributes(Decl *D);
+ void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
};
}
@@ -197,12 +198,40 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
void DeclPrinter::prettyPrintAttributes(Decl *D) {
if (Policy.PolishForDeclaration)
return;
-
+
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (auto *A : Attrs) {
+ switch (A->getKind()) {
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) case attr::X:
+#include "clang/Basic/AttrList.inc"
+ break;
+ default:
+ A->printPretty(Out, Policy);
+ break;
+ }
+ }
+ }
+}
+
+void DeclPrinter::prettyPrintPragmas(Decl *D) {
+ if (Policy.PolishForDeclaration)
+ return;
+
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
- for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
- Attr *A = *i;
- A->printPretty(Out, Policy);
+ for (auto *A : Attrs) {
+ switch (A->getKind()) {
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) case attr::X:
+#include "clang/Basic/AttrList.inc"
+ A->printPretty(Out, Policy);
+ Indent();
+ break;
+ default:
+ break;
+ }
}
}
}
@@ -408,6 +437,10 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
}
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
+ if (!D->getDescribedFunctionTemplate() &&
+ !D->isFunctionTemplateSpecialization())
+ prettyPrintPragmas(D);
+
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
if (!Policy.SuppressSpecifiers) {
@@ -416,7 +449,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
case SC_Extern: Out << "extern "; break;
case SC_Static: Out << "static "; break;
case SC_PrivateExtern: Out << "__private_extern__ "; break;
- case SC_Auto: case SC_Register: case SC_OpenCLWorkGroupLocal:
+ case SC_Auto: case SC_Register:
llvm_unreachable("invalid for functions");
}
@@ -643,6 +676,7 @@ void DeclPrinter::VisitFriendDecl(FriendDecl *D) {
}
void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
+ // FIXME: add printing of pragma attributes if required.
if (!Policy.SuppressSpecifiers && D->isMutable())
Out << "mutable ";
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
@@ -672,6 +706,7 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
}
void DeclPrinter::VisitVarDecl(VarDecl *D) {
+ prettyPrintPragmas(D);
if (!Policy.SuppressSpecifiers) {
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
@@ -779,6 +814,7 @@ void DeclPrinter::VisitEmptyDecl(EmptyDecl *D) {
}
void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ // FIXME: add printing of pragma attributes if required.
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
Out << D->getKindName();
@@ -914,11 +950,13 @@ void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (PrintInstantiation) {
TemplateParameterList *Params = D->getTemplateParameters();
for (auto *I : D->specializations()) {
+ prettyPrintPragmas(I);
PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
Visit(I);
}
}
+ prettyPrintPragmas(D->getTemplatedDecl());
return VisitRedeclarableTemplateDecl(D);
}
@@ -1088,7 +1126,7 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
}
if (SID)
- Out << " : " << OID->getSuperClass()->getName();
+ Out << " : " << QualType(OID->getSuperClassType(), 0).getAsString(Policy);
// Protocols?
const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
@@ -1299,7 +1337,7 @@ void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
if (!D->isAccessDeclaration())
Out << "using ";
D->getQualifier()->print(Out, Policy);
- Out << D->getName();
+ Out << D->getDeclName();
}
void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) {
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index cde497b012e2..de3ebd23ef4f 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
#include <memory>
@@ -29,10 +30,10 @@ using namespace clang;
TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
- NamedDecl **Params, unsigned NumParams,
+ ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc)
: TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
- NumParams(NumParams), ContainsUnexpandedParameterPack(false) {
+ NumParams(Params.size()), ContainsUnexpandedParameterPack(false) {
assert(this->NumParams == Params.size() && "Too many template parameters");
for (unsigned Idx = 0; Idx < NumParams; ++Idx) {
NamedDecl *P = Params[Idx];
@@ -53,17 +54,13 @@ TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
}
}
-TemplateParameterList *
-TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
- SourceLocation LAngleLoc, NamedDecl **Params,
- unsigned NumParams, SourceLocation RAngleLoc) {
- unsigned Size = sizeof(TemplateParameterList)
- + sizeof(NamedDecl *) * NumParams;
- unsigned Align = std::max(llvm::alignOf<TemplateParameterList>(),
- llvm::alignOf<NamedDecl*>());
- void *Mem = C.Allocate(Size, Align);
+TemplateParameterList *TemplateParameterList::Create(
+ const ASTContext &C, SourceLocation TemplateLoc, SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<NamedDecl *>(Params.size()),
+ llvm::alignOf<TemplateParameterList>());
return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
- NumParams, RAngleLoc);
+ RAngleLoc);
}
unsigned TemplateParameterList::getMinRequiredArguments() const {
@@ -240,8 +237,8 @@ static void GenerateInjectedTemplateArgs(ASTContext &Context,
}
if ((*Param)->isTemplateParameterPack())
- Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1);
-
+ Arg = TemplateArgument::CreatePackCopy(Context, Arg);
+
*Args++ = Arg;
}
}
@@ -552,10 +549,11 @@ NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
TemplateParmPosition(D, P), ParameterPack(true),
ExpandedParameterPack(true), NumExpandedTypes(NumExpandedTypes) {
if (ExpandedTypes && ExpandedTInfos) {
- void **TypesAndInfos = reinterpret_cast<void **>(this + 1);
+ auto TypesAndInfos =
+ getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
for (unsigned I = 0; I != NumExpandedTypes; ++I) {
- TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr();
- TypesAndInfos[2*I + 1] = ExpandedTInfos[I];
+ new (&TypesAndInfos[I].first) QualType(ExpandedTypes[I]);
+ TypesAndInfos[I].second = ExpandedTInfos[I];
}
}
}
@@ -579,10 +577,11 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
const QualType *ExpandedTypes,
unsigned NumExpandedTypes,
TypeSourceInfo **ExpandedTInfos) {
- unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
- return new (C, DC, Extra) NonTypeTemplateParmDecl(
- DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
- ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
+ return new (C, DC,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
+ NumExpandedTypes))
+ NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
}
NonTypeTemplateParmDecl *
@@ -595,10 +594,12 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpandedTypes) {
- unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
- return new (C, ID, Extra) NonTypeTemplateParmDecl(
- nullptr, SourceLocation(), SourceLocation(), 0, 0, nullptr, QualType(),
- nullptr, nullptr, NumExpandedTypes, nullptr);
+ return new (C, ID,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
+ NumExpandedTypes))
+ NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(), 0, 0,
+ nullptr, QualType(), nullptr, nullptr,
+ NumExpandedTypes, nullptr);
}
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
@@ -628,8 +629,8 @@ TemplateTemplateParmDecl::TemplateTemplateParmDecl(
TemplateParmPosition(D, P), ParameterPack(true),
ExpandedParameterPack(true), NumExpandedParams(NumExpansions) {
if (Expansions)
- std::memcpy(reinterpret_cast<void*>(this + 1), Expansions,
- sizeof(TemplateParameterList*) * NumExpandedParams);
+ std::uninitialized_copy(Expansions, Expansions + NumExpandedParams,
+ getTrailingObjects<TemplateParameterList *>());
}
TemplateTemplateParmDecl *
@@ -647,9 +648,10 @@ TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
TemplateParameterList *Params,
ArrayRef<TemplateParameterList *> Expansions) {
- return new (C, DC, sizeof(TemplateParameterList*) * Expansions.size())
- TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
- Expansions.size(), Expansions.data());
+ return new (C, DC,
+ additionalSizeToAlloc<TemplateParameterList *>(Expansions.size()))
+ TemplateTemplateParmDecl(DC, L, D, P, Id, Params, Expansions.size(),
+ Expansions.data());
}
TemplateTemplateParmDecl *
@@ -661,7 +663,8 @@ TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpansions) {
- return new (C, ID, sizeof(TemplateParameterList*) * NumExpansions)
+ return new (C, ID,
+ additionalSizeToAlloc<TemplateParameterList *>(NumExpansions))
TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr,
nullptr, NumExpansions, nullptr);
}
@@ -682,18 +685,19 @@ void TemplateTemplateParmDecl::setDefaultArgument(
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
+TemplateArgumentList::TemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs)
+ : Arguments(getTrailingObjects<TemplateArgument>()), NumArguments(NumArgs) {
+ std::uninitialized_copy(Args, Args + NumArgs,
+ getTrailingObjects<TemplateArgument>());
+}
+
TemplateArgumentList *
TemplateArgumentList::CreateCopy(ASTContext &Context,
const TemplateArgument *Args,
unsigned NumArgs) {
- std::size_t Size = sizeof(TemplateArgumentList)
- + NumArgs * sizeof(TemplateArgument);
- void *Mem = Context.Allocate(Size);
- TemplateArgument *StoredArgs
- = reinterpret_cast<TemplateArgument *>(
- static_cast<TemplateArgumentList *>(Mem) + 1);
- std::uninitialized_copy(Args, Args + NumArgs, StoredArgs);
- return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true);
+ void *Mem = Context.Allocate(totalSizeToAlloc<TemplateArgument>(NumArgs));
+ return new (Mem) TemplateArgumentList(Args, NumArgs);
}
FunctionTemplateSpecializationInfo *
@@ -1187,3 +1191,69 @@ VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) VarTemplatePartialSpecializationDecl(C);
}
+
+static TemplateParameterList *
+createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
+ // typename T
+ auto *T = TemplateTypeParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/1, /*Position=*/0,
+ /*Id=*/nullptr, /*Typename=*/true, /*ParameterPack=*/false);
+ T->setImplicit(true);
+
+ // T ...Ints
+ TypeSourceInfo *TI =
+ C.getTrivialTypeSourceInfo(QualType(T->getTypeForDecl(), 0));
+ auto *N = NonTypeTemplateParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/1, /*Position=*/1,
+ /*Id=*/nullptr, TI->getType(), /*ParameterPack=*/true, TI);
+ N->setImplicit(true);
+
+ // <typename T, T ...Ints>
+ NamedDecl *P[2] = {T, N};
+ auto *TPL = TemplateParameterList::Create(
+ C, SourceLocation(), SourceLocation(), P, SourceLocation());
+
+ // template <typename T, ...Ints> class IntSeq
+ auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create(
+ C, DC, SourceLocation(), /*Depth=*/0, /*Position=*/0,
+ /*ParameterPack=*/false, /*Id=*/nullptr, TPL);
+ TemplateTemplateParm->setImplicit(true);
+
+ // typename T
+ auto *TemplateTypeParm = TemplateTypeParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/0, /*Position=*/1,
+ /*Id=*/nullptr, /*Typename=*/true, /*ParameterPack=*/false);
+ TemplateTypeParm->setImplicit(true);
+
+ // T N
+ TypeSourceInfo *TInfo = C.getTrivialTypeSourceInfo(
+ QualType(TemplateTypeParm->getTypeForDecl(), 0));
+ auto *NonTypeTemplateParm = NonTypeTemplateParmDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Depth=*/0, /*Position=*/2,
+ /*Id=*/nullptr, TInfo->getType(), /*ParameterPack=*/false, TInfo);
+ NamedDecl *Params[] = {TemplateTemplateParm, TemplateTypeParm,
+ NonTypeTemplateParm};
+
+ // template <template <typename T, T ...Ints> class IntSeq, typename T, T N>
+ return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(),
+ Params, SourceLocation());
+}
+
+static TemplateParameterList *createBuiltinTemplateParameterList(
+ const ASTContext &C, DeclContext *DC, BuiltinTemplateKind BTK) {
+ switch (BTK) {
+ case BTK__make_integer_seq:
+ return createMakeIntegerSeqParameterList(C, DC);
+ }
+
+ llvm_unreachable("unhandled BuiltinTemplateKind!");
+}
+
+void BuiltinTemplateDecl::anchor() {}
+
+BuiltinTemplateDecl::BuiltinTemplateDecl(const ASTContext &C, DeclContext *DC,
+ DeclarationName Name,
+ BuiltinTemplateKind BTK)
+ : TemplateDecl(BuiltinTemplate, DC, SourceLocation(), Name,
+ createBuiltinTemplateParameterList(C, DC, BTK)),
+ BTK(BTK) {}
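Spelled as C++ source, the parameter list built above declares the builtin as

    template <template <typename T, T ...Ints> class IntSeq, typename T, T N>
    struct __make_integer_seq;

which instantiates to IntSeq<T, 0, 1, ..., N-1>. A usage sketch, assuming a compiler that provides the builtin:

    #include <type_traits>
    #include <utility>

    using Seq = __make_integer_seq<std::integer_sequence, int, 3>;
    static_assert(std::is_same<Seq, std::integer_sequence<int, 0, 1, 2>>::value,
                  "expands to integer_sequence<int, 0, 1, 2>");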
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index b7c287720027..b2f27275f49c 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -182,7 +182,7 @@ raw_ostream &operator<<(raw_ostream &OS, DeclarationName N) {
}
case DeclarationName::CXXLiteralOperatorName:
- return OS << "operator \"\" " << N.getCXXLiteralIdentifier()->getName();
+ return OS << "operator\"\"" << N.getCXXLiteralIdentifier()->getName();
case DeclarationName::CXXConversionFunctionName: {
OS << "operator ";
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 2e066b2c42c6..bdd7a45f0850 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -331,7 +331,8 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
if (QualifierLoc) {
- getInternalQualifierLoc() = QualifierLoc;
+ new (getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(QualifierLoc);
auto *NNS = QualifierLoc.getNestedNameSpecifier();
if (NNS->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
@@ -340,7 +341,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
}
DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
if (FoundD)
- getInternalFoundDecl() = FoundD;
+ *getTrailingObjects<NamedDecl *>() = FoundD;
DeclRefExprBits.HasTemplateKWAndArgsInfo
= (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
@@ -349,15 +350,15 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
- Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
assert(!Dependent && "built a DeclRefExpr with dependent template args");
ExprBits.InstantiationDependent |= InstantiationDependent;
ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
} else if (TemplateKWLoc.isValid()) {
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
}
DeclRefExprBits.HadMultipleCandidates = 0;
@@ -394,15 +395,13 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
if (D == FoundD)
FoundD = nullptr;
- std::size_t Size = sizeof(DeclRefExpr);
- if (QualifierLoc)
- Size += sizeof(NestedNameSpecifierLoc);
- if (FoundD)
- Size += sizeof(NamedDecl *);
- if (TemplateArgs)
- Size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
- else if (TemplateKWLoc.isValid())
- Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<NestedNameSpecifierLoc, NamedDecl *,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ QualifierLoc ? 1 : 0, FoundD ? 1 : 0,
+ HasTemplateKWAndArgsInfo ? 1 : 0,
+ TemplateArgs ? TemplateArgs->size() : 0);
void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
@@ -415,14 +414,12 @@ DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
bool HasFoundDecl,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- std::size_t Size = sizeof(DeclRefExpr);
- if (HasQualifier)
- Size += sizeof(NestedNameSpecifierLoc);
- if (HasFoundDecl)
- Size += sizeof(NamedDecl *);
- if (HasTemplateKWAndArgsInfo)
- Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
-
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<NestedNameSpecifierLoc, NamedDecl *,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasQualifier ? 1 : 0, HasFoundDecl ? 1 : 0, HasTemplateKWAndArgsInfo,
+ NumTemplateArgs);
void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(EmptyShell());
}
@@ -490,7 +487,6 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
else
MC->mangleName(ND, Out);
- Out.flush();
if (!Buffer.empty() && Buffer.front() == '\01')
return Buffer.substr(1);
return Buffer.str();
@@ -652,7 +648,6 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
Out << Proto;
- Out.flush();
return Name.str().str();
}
if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) {
@@ -684,7 +679,6 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
MD->getSelector().print(Out);
Out << ']';
- Out.flush();
return Name.str().str();
}
if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
@@ -1002,15 +996,33 @@ void StringLiteral::setString(const ASTContext &C, StringRef Str,
/// can have escape sequences in them in addition to the usual trigraph and
/// escaped newline business. This routine handles this complexity.
///
-SourceLocation StringLiteral::
-getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
- const LangOptions &Features, const TargetInfo &Target) const {
+/// If non-null, *StartToken gives the first token to search and
+/// *StartTokenByteOffset gives that token's byte offset within the string.
+/// Before returning, the function updates *StartToken to the TokNo of the
+/// token that was found and *StartTokenByteOffset to that token's byte offset
+/// in the string.
+/// Threading these two parameters through successive calls reduces the total
+/// cost of finding the location of every byte in a string from O(n^2) to
+/// O(n).
+///
+SourceLocation
+StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features,
+ const TargetInfo &Target, unsigned *StartToken,
+ unsigned *StartTokenByteOffset) const {
assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
"Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
// contains the byte we're looking for.
unsigned TokNo = 0;
+ unsigned StringOffset = 0;
+ if (StartToken)
+ TokNo = *StartToken;
+ if (StartTokenByteOffset) {
+ StringOffset = *StartTokenByteOffset;
+ ByteNo -= StringOffset;
+ }
while (1) {
assert(TokNo < getNumConcatenated() && "Invalid byte number!");
SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
@@ -1019,14 +1031,20 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
// the string literal, not the identifier for the macro it is potentially
// expanded through.
SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);
-
+
// Re-lex the token to get its length and original spelling.
- std::pair<FileID, unsigned> LocInfo =SM.getDecomposedLoc(StrTokSpellingLoc);
+ std::pair<FileID, unsigned> LocInfo =
+ SM.getDecomposedLoc(StrTokSpellingLoc);
bool Invalid = false;
StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
- if (Invalid)
+ if (Invalid) {
+ if (StartTokenByteOffset != nullptr)
+ *StartTokenByteOffset = StringOffset;
+ if (StartToken != nullptr)
+ *StartToken = TokNo;
return StrTokSpellingLoc;
-
+ }
+
const char *StrData = Buffer.data()+LocInfo.second;
// Create a lexer starting at the beginning of this token.
@@ -1042,14 +1060,19 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
// If the byte is in this token, return the location of the byte.
if (ByteNo < TokNumBytes ||
(ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
- unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
-
+ unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
+
// Now that we know the offset of the token in the spelling, use the
// preprocessor to get the offset in the original source.
+ if (StartTokenByteOffset != nullptr)
+ *StartTokenByteOffset = StringOffset;
+ if (StartToken != nullptr)
+ *StartToken = TokNo;
return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
}
-
+
// Move to the next string token.
+ StringOffset += TokNumBytes;
++TokNo;
ByteNo -= TokNumBytes;
}
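A caller sketch (visitEveryByteLoc is a hypothetical helper, not part of this change) showing how threading the two in/out parameters through a scan keeps the whole walk linear instead of re-lexing from token zero on every query:

    void visitEveryByteLoc(const StringLiteral *SL, const SourceManager &SM,
                           const LangOptions &LO, const TargetInfo &TI) {
      unsigned Tok = 0, TokByteOffset = 0; // carried across calls
      for (unsigned Byte = 0, N = SL->getByteLength(); Byte != N; ++Byte) {
        SourceLocation Loc =
            SL->getLocationOfByte(Byte, SM, LO, TI, &Tok, &TokByteOffset);
        (void)Loc; // e.g. attach a diagnostic to this byte
      }
    }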
@@ -1074,6 +1097,7 @@ StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
case UO_Real: return "__real";
case UO_Imag: return "__imag";
case UO_Extension: return "__extension__";
+ case UO_Coawait: return "co_await";
}
llvm_unreachable("Unknown unary operator");
}
@@ -1090,6 +1114,7 @@ UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
case OO_Minus: return UO_Minus;
case OO_Tilde: return UO_Not;
case OO_Exclaim: return UO_LNot;
+ case OO_Coawait: return UO_Coawait;
}
}
@@ -1103,6 +1128,7 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
case UO_Minus: return OO_Minus;
case UO_Not: return OO_Tilde;
case UO_LNot: return OO_Exclaim;
+ case UO_Coawait: return OO_Coawait;
default: return OO_None;
}
}
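The new UO_Coawait/OO_Coawait entries correspond to source like the following sketch (Coroutines TS; declarations only, awaiter bodies elided):

    struct Awaiter {
      bool await_ready() const noexcept;
      template <typename Handle> void await_suspend(Handle) noexcept;
      int await_resume() noexcept;
    };

    struct Lazy {
      Awaiter operator co_await() const; // overloaded operator: OO_Coawait
    };

    // Inside a coroutine, `co_await lazy` is built as a unary UO_Coawait
    // expression whose overload resolution may select Lazy::operator co_await.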
@@ -1288,9 +1314,8 @@ OffsetOfExpr *OffsetOfExpr::Create(const ASTContext &C, QualType type,
ArrayRef<OffsetOfNode> comps,
ArrayRef<Expr*> exprs,
SourceLocation RParenLoc) {
- void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
- sizeof(OffsetOfNode) * comps.size() +
- sizeof(Expr*) * exprs.size());
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<OffsetOfNode, Expr *>(comps.size(), exprs.size()));
return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, comps, exprs,
RParenLoc);
@@ -1298,9 +1323,8 @@ OffsetOfExpr *OffsetOfExpr::Create(const ASTContext &C, QualType type,
OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C,
unsigned numComps, unsigned numExprs) {
- void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
- sizeof(OffsetOfNode) * numComps +
- sizeof(Expr*) * numExprs);
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<OffsetOfNode, Expr *>(numComps, numExprs));
return new (Mem) OffsetOfExpr(numComps, numExprs);
}
@@ -1330,7 +1354,7 @@ OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
}
}
-IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const {
+IdentifierInfo *OffsetOfNode::getFieldName() const {
assert(getKind() == Field || getKind() == Identifier);
if (getKind() == Field)
return getField()->getIdentifier();
@@ -1382,18 +1406,17 @@ MemberExpr *MemberExpr::Create(
ValueDecl *memberdecl, DeclAccessPair founddecl,
DeclarationNameInfo nameinfo, const TemplateArgumentListInfo *targs,
QualType ty, ExprValueKind vk, ExprObjectKind ok) {
- std::size_t Size = sizeof(MemberExpr);
bool hasQualOrFound = (QualifierLoc ||
founddecl.getDecl() != memberdecl ||
founddecl.getAccess() != memberdecl->getAccess());
- if (hasQualOrFound)
- Size += sizeof(MemberNameQualifier);
- if (targs)
- Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size());
- else if (TemplateKWLoc.isValid())
- Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+ bool HasTemplateKWAndArgsInfo = targs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(hasQualOrFound ? 1 : 0,
+ HasTemplateKWAndArgsInfo ? 1 : 0,
+ targs ? targs->size() : 0);
void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
MemberExpr *E = new (Mem)
@@ -1412,7 +1435,8 @@ MemberExpr *MemberExpr::Create(
E->HasQualifierOrFoundDecl = true;
- MemberNameQualifier *NQ = E->getMemberQualifier();
+ MemberExprNameQualifier *NQ =
+ E->getTrailingObjects<MemberExprNameQualifier>();
NQ->QualifierLoc = QualifierLoc;
NQ->FoundDecl = founddecl;
}
@@ -1423,14 +1447,14 @@ MemberExpr *MemberExpr::Create(
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs,
- Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *targs, E->getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
if (InstantiationDependent)
E->setInstantiationDependent(true);
} else if (TemplateKWLoc.isValid()) {
- E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
}
return E;
@@ -1719,9 +1743,9 @@ Expr *CastExpr::getSubExprAsWritten() {
CXXBaseSpecifier **CastExpr::path_buffer() {
switch (getStmtClass()) {
#define ABSTRACT_STMT(x)
-#define CASTEXPR(Type, Base) \
- case Stmt::Type##Class: \
- return reinterpret_cast<CXXBaseSpecifier**>(static_cast<Type*>(this)+1);
+#define CASTEXPR(Type, Base) \
+ case Stmt::Type##Class: \
+ return static_cast<Type *>(this)->getTrailingObjects<CXXBaseSpecifier *>();
#define STMT(Type, Base)
#include "clang/AST/StmtNodes.inc"
default:
@@ -1729,28 +1753,23 @@ CXXBaseSpecifier **CastExpr::path_buffer() {
}
}
-void CastExpr::setCastPath(const CXXCastPath &Path) {
- assert(Path.size() == path_size());
- memcpy(path_buffer(), Path.data(), Path.size() * sizeof(CXXBaseSpecifier*));
-}
-
ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
CastKind Kind, Expr *Operand,
const CXXCastPath *BasePath,
ExprValueKind VK) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
ImplicitCastExpr *E =
new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
}
@@ -1761,18 +1780,18 @@ CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
TypeSourceInfo *WrittenTy,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CStyleCastExpr *E =
new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
}
@@ -2045,6 +2064,9 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
case UO_LNot:
case UO_Deref:
break;
+ case UO_Coawait:
+ // This is just the 'operator co_await' call inside the guts of a
+ // dependent co_await call.
case UO_PostInc:
case UO_PostDec:
case UO_PreInc:
@@ -2880,7 +2902,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
return cast<CXXDefaultInitExpr>(this)->getExpr()
->isConstantInitializer(Ctx, false, Culprit);
}
- if (isEvaluatable(Ctx))
+ // Allow certain forms of UB in constant initializers: signed integer
+ // overflow and floating-point division by zero. We'll give a warning on
+ // these, but they're common enough that we have to accept them.
+ if (isEvaluatable(Ctx, SE_AllowUndefinedBehavior))
return true;
if (Culprit)
*Culprit = this;
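File-scope initializers this now accepts (with a warning) rather than treating as non-constant, sketched:

    #include <climits>

    int Overflowed = INT_MAX + 1; // signed overflow: UB, but common in the wild
    double Inf = 1.0 / 0.0;       // floating-point division by zero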
@@ -2993,6 +3018,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
return true;
case MSPropertyRefExprClass:
+ case MSPropertySubscriptExprClass:
case CompoundAssignOperatorClass:
case VAArgExprClass:
case AtomicExprClass:
@@ -3000,6 +3026,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXNewExprClass:
case CXXDeleteExprClass:
case ExprWithCleanupsClass:
+ case CoawaitExprClass:
+ case CoyieldExprClass:
// These always have a side-effect.
return true;
@@ -3012,6 +3040,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ParenExprClass:
case ArraySubscriptExprClass:
+ case OMPArraySectionExprClass:
case MemberExprClass:
case ConditionalOperatorClass:
case BinaryConditionalOperatorClass:
@@ -3246,9 +3275,20 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
// Check that it is a cast to void*.
if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
QualType Pointee = PT->getPointeeType();
- if (!Pointee.hasQualifiers() &&
- Pointee->isVoidType() && // to void*
- CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ Qualifiers Q = Pointee.getQualifiers();
+ // In OpenCL v2.0 the generic address space acts as a placeholder
+ // and should be ignored here.
+ bool IsASValid = true;
+ if (Ctx.getLangOpts().OpenCLVersion >= 200) {
+ if (Pointee.getAddressSpace() == LangAS::opencl_generic)
+ Q.removeAddressSpace();
+ else
+ IsASValid = false;
+ }
+
+ if (IsASValid && !Q.hasQualifiers() &&
+ Pointee->isVoidType() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int.
return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
}
}
@@ -3429,6 +3469,18 @@ bool Expr::refersToVectorElement() const {
return false;
}
+bool Expr::refersToGlobalRegisterVar() const {
+ const Expr *E = this->IgnoreParenImpCasts();
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->getStorageClass() == SC_Register &&
+ VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
+ return true;
+
+ return false;
+}
+
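The pattern the new predicate matches, sketched (GNU global register variable extension; the register name is target-specific and assumed here):

    // File scope: SC_Register storage class plus an asm label, not a local.
    register unsigned long StackPtr asm("sp");

    unsigned long readSP() { return StackPtr; } // DeclRefExpr matched above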
/// isArrow - Return true if the base expression is a pointer to vector,
/// return false if the base expression is a vector.
bool ExtVectorElementExpr::isArrow() const {
@@ -3464,7 +3516,7 @@ bool ExtVectorElementExpr::containsDuplicateElements() const {
/// getEncodedElementAccess - We encode the fields as a llvm ConstantArray.
void ExtVectorElementExpr::getEncodedElementAccess(
- SmallVectorImpl<unsigned> &Elts) const {
+ SmallVectorImpl<uint32_t> &Elts) const {
StringRef Comp = Accessor->getName();
if (Comp[0] == 's' || Comp[0] == 'S')
Comp = Comp.substr(1);
@@ -3492,285 +3544,6 @@ void ExtVectorElementExpr::getEncodedElementAccess(
}
}
-ObjCMessageExpr::ObjCMessageExpr(QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- SourceLocation SuperLoc,
- bool IsInstanceSuper,
- QualType SuperType,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- SelectorLocationsKind SelLocsK,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
- /*TypeDependent=*/false, /*ValueDependent=*/false,
- /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
- : Sel.getAsOpaquePtr())),
- Kind(IsInstanceSuper? SuperInstance : SuperClass),
- HasMethod(Method != nullptr), IsDelegateInitCall(false),
- IsImplicit(isImplicit), SuperLoc(SuperLoc), LBracLoc(LBracLoc),
- RBracLoc(RBracLoc)
-{
- initArgsAndSelLocs(Args, SelLocs, SelLocsK);
- setReceiverPointer(SuperType.getAsOpaquePtr());
-}
-
-ObjCMessageExpr::ObjCMessageExpr(QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- TypeSourceInfo *Receiver,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- SelectorLocationsKind SelLocsK,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->isInstantiationDependentType(),
- T->containsUnexpandedParameterPack()),
- SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
- : Sel.getAsOpaquePtr())),
- Kind(Class),
- HasMethod(Method != nullptr), IsDelegateInitCall(false),
- IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
-{
- initArgsAndSelLocs(Args, SelLocs, SelLocsK);
- setReceiverPointer(Receiver);
-}
-
-ObjCMessageExpr::ObjCMessageExpr(QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- Expr *Receiver,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- SelectorLocationsKind SelLocsK,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
- Receiver->isTypeDependent(),
- Receiver->isInstantiationDependent(),
- Receiver->containsUnexpandedParameterPack()),
- SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
- : Sel.getAsOpaquePtr())),
- Kind(Instance),
- HasMethod(Method != nullptr), IsDelegateInitCall(false),
- IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
-{
- initArgsAndSelLocs(Args, SelLocs, SelLocsK);
- setReceiverPointer(Receiver);
-}
-
-void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
- ArrayRef<SourceLocation> SelLocs,
- SelectorLocationsKind SelLocsK) {
- setNumArgs(Args.size());
- Expr **MyArgs = getArgs();
- for (unsigned I = 0; I != Args.size(); ++I) {
- if (Args[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Args[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
- MyArgs[I] = Args[I];
- }
-
- SelLocsKind = SelLocsK;
- if (!isImplicit()) {
- if (SelLocsK == SelLoc_NonStandard)
- std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
- }
-}
-
-ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- SourceLocation SuperLoc,
- bool IsInstanceSuper,
- QualType SuperType,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit) {
- assert((!SelLocs.empty() || isImplicit) &&
- "No selector locs for non-implicit message");
- ObjCMessageExpr *Mem;
- SelectorLocationsKind SelLocsK = SelectorLocationsKind();
- if (isImplicit)
- Mem = alloc(Context, Args.size(), 0);
- else
- Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
- return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
- SuperType, Sel, SelLocs, SelLocsK,
- Method, Args, RBracLoc, isImplicit);
-}
-
-ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- TypeSourceInfo *Receiver,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit) {
- assert((!SelLocs.empty() || isImplicit) &&
- "No selector locs for non-implicit message");
- ObjCMessageExpr *Mem;
- SelectorLocationsKind SelLocsK = SelectorLocationsKind();
- if (isImplicit)
- Mem = alloc(Context, Args.size(), 0);
- else
- Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
- return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
- SelLocs, SelLocsK, Method, Args, RBracLoc,
- isImplicit);
-}
-
-ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
- ExprValueKind VK,
- SourceLocation LBracLoc,
- Expr *Receiver,
- Selector Sel,
- ArrayRef<SourceLocation> SelLocs,
- ObjCMethodDecl *Method,
- ArrayRef<Expr *> Args,
- SourceLocation RBracLoc,
- bool isImplicit) {
- assert((!SelLocs.empty() || isImplicit) &&
- "No selector locs for non-implicit message");
- ObjCMessageExpr *Mem;
- SelectorLocationsKind SelLocsK = SelectorLocationsKind();
- if (isImplicit)
- Mem = alloc(Context, Args.size(), 0);
- else
- Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
- return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
- SelLocs, SelLocsK, Method, Args, RBracLoc,
- isImplicit);
-}
-
-ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(const ASTContext &Context,
- unsigned NumArgs,
- unsigned NumStoredSelLocs) {
- ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
- return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
-}
-
-ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
- ArrayRef<Expr *> Args,
- SourceLocation RBraceLoc,
- ArrayRef<SourceLocation> SelLocs,
- Selector Sel,
- SelectorLocationsKind &SelLocsK) {
- SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
- unsigned NumStoredSelLocs = (SelLocsK == SelLoc_NonStandard) ? SelLocs.size()
- : 0;
- return alloc(C, Args.size(), NumStoredSelLocs);
-}
-
-ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
- unsigned NumArgs,
- unsigned NumStoredSelLocs) {
- unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
- NumArgs * sizeof(Expr *) + NumStoredSelLocs * sizeof(SourceLocation);
- return (ObjCMessageExpr *)C.Allocate(Size,
- llvm::AlignOf<ObjCMessageExpr>::Alignment);
-}
-
-void ObjCMessageExpr::getSelectorLocs(
- SmallVectorImpl<SourceLocation> &SelLocs) const {
- for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
- SelLocs.push_back(getSelectorLoc(i));
-}
-
-SourceRange ObjCMessageExpr::getReceiverRange() const {
- switch (getReceiverKind()) {
- case Instance:
- return getInstanceReceiver()->getSourceRange();
-
- case Class:
- return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
-
- case SuperInstance:
- case SuperClass:
- return getSuperLoc();
- }
-
- llvm_unreachable("Invalid ReceiverKind!");
-}
-
-Selector ObjCMessageExpr::getSelector() const {
- if (HasMethod)
- return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
- ->getSelector();
- return Selector(SelectorOrMethod);
-}
-
-QualType ObjCMessageExpr::getReceiverType() const {
- switch (getReceiverKind()) {
- case Instance:
- return getInstanceReceiver()->getType();
- case Class:
- return getClassReceiver();
- case SuperInstance:
- case SuperClass:
- return getSuperType();
- }
-
- llvm_unreachable("unexpected receiver kind");
-}
-
-ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
- QualType T = getReceiverType();
-
- if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
- return Ptr->getInterfaceDecl();
-
- if (const ObjCObjectType *Ty = T->getAs<ObjCObjectType>())
- return Ty->getInterface();
-
- return nullptr;
-}
-
-QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
- if (isClassReceiver())
- return ctx.getObjCInterfaceType(getClassReceiver());
-
- if (isSuperReceiver())
- return getSuperReceiverType();
-
- return getBase()->getType();
-}
-
-StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
- switch (getBridgeKind()) {
- case OBC_Bridge:
- return "__bridge";
- case OBC_BridgeTransfer:
- return "__bridge_transfer";
- case OBC_BridgeRetained:
- return "__bridge_retained";
- }
-
- llvm_unreachable("Invalid BridgeKind!");
-}
-
ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
@@ -3883,7 +3656,7 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
this->Designators = new (C) Designator[NumDesignators];
// Record the initializer itself.
- child_range Child = children();
+ child_iterator Child = child_begin();
*Child++ = Init;
// Copy the designators and their subexpressions, computing
@@ -3939,7 +3712,8 @@ DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
SourceLocation ColonOrEqualLoc,
bool UsesColonSyntax, Expr *Init) {
void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
- sizeof(Stmt *) * (IndexExprs.size() + 1), 8);
+ sizeof(Stmt *) * (IndexExprs.size() + 1),
+ llvm::alignOf<DesignatedInitExpr>());
return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
ColonOrEqualLoc, UsesColonSyntax,
IndexExprs, Init);
@@ -4154,19 +3928,6 @@ PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
}
//===----------------------------------------------------------------------===//
-// ExprIterator.
-//===----------------------------------------------------------------------===//
-
-Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); }
-Expr* ExprIterator::operator*() const { return cast<Expr>(*I); }
-Expr* ExprIterator::operator->() const { return cast<Expr>(*I); }
-const Expr* ConstExprIterator::operator[](size_t idx) const {
- return cast<Expr>(I[idx]);
-}
-const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); }
-const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
-
-//===----------------------------------------------------------------------===//
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
@@ -4179,134 +3940,11 @@ Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
getArgumentType().getTypePtr()))
return child_range(child_iterator(T), child_iterator());
- return child_range();
+ return child_range(child_iterator(), child_iterator());
}
return child_range(&Argument.Ex, &Argument.Ex + 1);
}
-// ObjCMessageExpr
-Stmt::child_range ObjCMessageExpr::children() {
- Stmt **begin;
- if (getReceiverKind() == Instance)
- begin = reinterpret_cast<Stmt **>(this + 1);
- else
- begin = reinterpret_cast<Stmt **>(getArgs());
- return child_range(begin,
- reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
-}
-
-ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements,
- QualType T, ObjCMethodDecl *Method,
- SourceRange SR)
- : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary,
- false, false, false, false),
- NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method)
-{
- Expr **SaveElements = getElements();
- for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
- if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Elements[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Elements[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
- SaveElements[I] = Elements[I];
- }
-}
-
-ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
- ArrayRef<Expr *> Elements,
- QualType T, ObjCMethodDecl * Method,
- SourceRange SR) {
- void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
- + Elements.size() * sizeof(Expr *));
- return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
-}
-
-ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
- unsigned NumElements) {
-
- void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
- + NumElements * sizeof(Expr *));
- return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
-}
-
-ObjCDictionaryLiteral::ObjCDictionaryLiteral(
- ArrayRef<ObjCDictionaryElement> VK,
- bool HasPackExpansions,
- QualType T, ObjCMethodDecl *method,
- SourceRange SR)
- : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
- NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
- DictWithObjectsMethod(method)
-{
- KeyValuePair *KeyValues = getKeyValues();
- ExpansionData *Expansions = getExpansionData();
- for (unsigned I = 0; I < NumElements; I++) {
- if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
- VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
- ExprBits.ValueDependent = true;
- if (VK[I].Key->isInstantiationDependent() ||
- VK[I].Value->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (VK[I].EllipsisLoc.isInvalid() &&
- (VK[I].Key->containsUnexpandedParameterPack() ||
- VK[I].Value->containsUnexpandedParameterPack()))
- ExprBits.ContainsUnexpandedParameterPack = true;
-
- KeyValues[I].Key = VK[I].Key;
- KeyValues[I].Value = VK[I].Value;
- if (Expansions) {
- Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
- if (VK[I].NumExpansions)
- Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
- else
- Expansions[I].NumExpansionsPlusOne = 0;
- }
- }
-}
-
-ObjCDictionaryLiteral *
-ObjCDictionaryLiteral::Create(const ASTContext &C,
- ArrayRef<ObjCDictionaryElement> VK,
- bool HasPackExpansions,
- QualType T, ObjCMethodDecl *method,
- SourceRange SR) {
- unsigned ExpansionsSize = 0;
- if (HasPackExpansions)
- ExpansionsSize = sizeof(ExpansionData) * VK.size();
-
- void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
- sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
- return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
-}
-
-ObjCDictionaryLiteral *
-ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
- bool HasPackExpansions) {
- unsigned ExpansionsSize = 0;
- if (HasPackExpansions)
- ExpansionsSize = sizeof(ExpansionData) * NumElements;
- void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
- sizeof(KeyValuePair) * NumElements + ExpansionsSize);
- return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements,
- HasPackExpansions);
-}
-
-ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(const ASTContext &C,
- Expr *base,
- Expr *key, QualType T,
- ObjCMethodDecl *getMethod,
- ObjCMethodDecl *setMethod,
- SourceLocation RB) {
- void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
- return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue,
- OK_ObjCSubscript,
- getMethod, setMethod, RB);
-}
-
AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
QualType t, AtomicOp op, SourceLocation RP)
: Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
@@ -4373,3 +4011,29 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
}
llvm_unreachable("unknown atomic op");
}
+
+QualType OMPArraySectionExpr::getBaseOriginalType(Expr *Base) {
+ unsigned ArraySectionCount = 0;
+ while (auto *OASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParens())) {
+ Base = OASE->getBase();
+ ++ArraySectionCount;
+ }
+ while (auto *ASE = dyn_cast<ArraySubscriptExpr>(Base->IgnoreParens())) {
+ Base = ASE->getBase();
+ ++ArraySectionCount;
+ }
+ auto OriginalTy = Base->getType();
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Base))
+ if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
+ OriginalTy = PVD->getOriginalType().getNonReferenceType();
+
+ for (unsigned Cnt = 0; Cnt < ArraySectionCount; ++Cnt) {
+ if (OriginalTy->isAnyPointerType())
+ OriginalTy = OriginalTy->getPointeeType();
+ else {
+ assert(OriginalTy->isArrayType());
+ OriginalTy = OriginalTy->castAsArrayTypeUnsafe()->getElementType();
+ }
+ }
+ return OriginalTy;
+}
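The peeling loop above, sketched with standard type traits as an analogy (PeelSections is illustrative only, not Clang API): for a section arr[0:2][1:3] on a parameter whose original type is int[10][20], two section levels strip down to the element type int.

    #include <type_traits>

    template <typename T, unsigned Sections> struct PeelSections {
      using Peeled = typename std::conditional<std::is_pointer<T>::value,
                                               std::remove_pointer<T>,
                                               std::remove_extent<T>>::type::type;
      using type = typename PeelSections<Peeled, Sections - 1>::type;
    };
    template <typename T> struct PeelSections<T, 0> { using type = T; };

    static_assert(std::is_same<PeelSections<int[10][20], 2>::type, int>::value,
                  "two section levels peel int[10][20] down to int");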
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index d6f2ce63a0a5..4bb4b5073c4f 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -295,8 +295,11 @@ UnresolvedLookupExpr::Create(const ASTContext &C,
{
assert(Args || TemplateKWLoc.isValid());
unsigned num_args = Args ? Args->size() : 0;
- void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
- ASTTemplateKWAndArgsInfo::sizeFor(num_args));
+
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(1,
+ num_args);
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
TemplateKWLoc, NameInfo,
ADL, /*Overload*/ true, Args,
@@ -307,11 +310,11 @@ UnresolvedLookupExpr *
UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- std::size_t size = sizeof(UnresolvedLookupExpr);
- if (HasTemplateKWAndArgsInfo)
- size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
-
- void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>());
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
@@ -367,10 +370,9 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
- Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
if (Dependent) {
ExprBits.TypeDependent = true;
@@ -381,7 +383,7 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
} else if (TemplateKWLoc.isValid()) {
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
if (isTypeDependent())
@@ -432,13 +434,13 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack
= ExprBits.ContainsUnexpandedParameterPack;
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args,
- Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
} else if (TemplateKWLoc.isValid()) {
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
}
}
@@ -449,12 +451,11 @@ DependentScopeDeclRefExpr::Create(const ASTContext &C,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args) {
assert(QualifierLoc && "should be created for dependent qualifiers");
- std::size_t size = sizeof(DependentScopeDeclRefExpr);
- if (Args)
- size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size());
- else if (TemplateKWLoc.isValid())
- size += ASTTemplateKWAndArgsInfo::sizeFor(0);
- void *Mem = C.Allocate(size);
+ bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, Args ? Args->size() : 0);
+ void *Mem = C.Allocate(Size);
return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
TemplateKWLoc, NameInfo, Args);
}
@@ -463,10 +464,11 @@ DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- std::size_t size = sizeof(DependentScopeDeclRefExpr);
- if (HasTemplateKWAndArgsInfo)
- size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
- void *Mem = C.Allocate(size);
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size);
DependentScopeDeclRefExpr *E
= new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
SourceLocation(),
@@ -587,19 +589,19 @@ CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(sizeof(CXXStaticCastExpr)
- + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CXXStaticCastExpr *E =
new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(sizeof(CXXStaticCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
}
@@ -612,19 +614,19 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr)
- + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CXXDynamicCastExpr *E =
new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(sizeof(CXXDynamicCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
}
@@ -669,19 +671,19 @@ CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CXXReinterpretCastExpr *E =
new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
CXXReinterpretCastExpr *
CXXReinterpretCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
- void *Buffer = C.Allocate(sizeof(CXXReinterpretCastExpr)
- + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
}
@@ -704,18 +706,18 @@ CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
const CXXCastPath *BasePath,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
- + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CXXFunctionalCastExpr *E =
new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
- if (PathSize) E->setCastPath(*BasePath);
+ if (PathSize)
+ std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
+ E->getTrailingObjects<CXXBaseSpecifier *>());
return E;
}
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
- void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
- + PathSize * sizeof(CXXBaseSpecifier*));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
}
@@ -1070,15 +1072,15 @@ LambdaExpr::capture_range LambdaExpr::implicit_captures() const {
return capture_range(implicit_capture_begin(), implicit_capture_end());
}
-ArrayRef<VarDecl *>
-LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
+ArrayRef<VarDecl *>
+LambdaExpr::getCaptureInitIndexVars(const_capture_init_iterator Iter) const {
assert(HasArrayIndexVars && "No array index-var data?");
unsigned Index = Iter - capture_init_begin();
assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
"Capture index out-of-range");
- VarDecl **IndexVars = getArrayIndexVars();
- unsigned *IndexStarts = getArrayIndexStarts();
+ VarDecl *const *IndexVars = getArrayIndexVars();
+ const unsigned *IndexStarts = getArrayIndexStarts();
return llvm::makeArrayRef(IndexVars + IndexStarts[Index],
IndexVars + IndexStarts[Index + 1]);
}
@@ -1099,9 +1101,13 @@ TemplateParameterList *LambdaExpr::getTemplateParameterList() const {
}
CompoundStmt *LambdaExpr::getBody() const {
+ // FIXME: this mutation in getBody is bogus. It should be
+ // initialized in ASTStmtReader::VisitLambdaExpr, but for reasons I
+ // don't understand, that doesn't work.
if (!getStoredStmts()[NumCaptures])
- getStoredStmts()[NumCaptures] = getCallOperator()->getBody();
-
+ *const_cast<clang::Stmt **>(&getStoredStmts()[NumCaptures]) =
+ getCallOperator()->getBody();
+
return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
}
@@ -1191,63 +1197,40 @@ SourceLocation CXXUnresolvedConstructExpr::getLocStart() const {
return Type->getTypeLoc().getBeginLoc();
}
-CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C,
- Expr *Base, QualType BaseType,
- bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- NamedDecl *FirstQualifierFoundInScope,
- DeclarationNameInfo MemberNameInfo,
- const TemplateArgumentListInfo *TemplateArgs)
- : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
- VK_LValue, OK_Ordinary, true, true, true,
- ((Base && Base->containsUnexpandedParameterPack()) ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
- MemberNameInfo.containsUnexpandedParameterPack())),
- Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
- TemplateKWLoc.isValid()),
- OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
- FirstQualifierFoundInScope(FirstQualifierFoundInScope),
- MemberNameInfo(MemberNameInfo) {
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
+ const ASTContext &C, Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, VK_LValue,
+ OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
+ TemplateKWLoc.isValid()),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) {
if (TemplateArgs) {
bool Dependent = true;
bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack = false;
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
- Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
+ Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
} else if (TemplateKWLoc.isValid()) {
- getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
}
}
-CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C,
- Expr *Base, QualType BaseType,
- bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
- NamedDecl *FirstQualifierFoundInScope,
- DeclarationNameInfo MemberNameInfo)
- : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
- VK_LValue, OK_Ordinary, true, true, true,
- ((Base && Base->containsUnexpandedParameterPack()) ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->
- containsUnexpandedParameterPack()) ||
- MemberNameInfo.containsUnexpandedParameterPack())),
- Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasTemplateKWAndArgsInfo(false),
- OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
- FirstQualifierFoundInScope(FirstQualifierFoundInScope),
- MemberNameInfo(MemberNameInfo) { }
-
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::Create(const ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
@@ -1257,18 +1240,13 @@ CXXDependentScopeMemberExpr::Create(const ASTContext &C,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
- if (!TemplateArgs && !TemplateKWLoc.isValid())
- return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType,
- IsArrow, OperatorLoc,
- QualifierLoc,
- FirstQualifierFoundInScope,
- MemberNameInfo);
-
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
- std::size_t size = sizeof(CXXDependentScopeMemberExpr)
- + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
IsArrow, OperatorLoc,
QualifierLoc,
@@ -1281,22 +1259,18 @@ CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- if (!HasTemplateKWAndArgsInfo)
- return new (C) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
- 0, SourceLocation(),
- NestedNameSpecifierLoc(),
- nullptr, DeclarationNameInfo());
-
- std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
- ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
CXXDependentScopeMemberExpr *E
= new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
0, SourceLocation(),
NestedNameSpecifierLoc(),
SourceLocation(), nullptr,
DeclarationNameInfo(), nullptr);
- E->HasTemplateKWAndArgsInfo = true;
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -1361,38 +1335,34 @@ bool UnresolvedMemberExpr::isImplicitAccess() const {
return cast<Expr>(Base)->isImplicitCXXThis();
}
-UnresolvedMemberExpr *
-UnresolvedMemberExpr::Create(const ASTContext &C, bool HasUnresolvedUsing,
- Expr *Base, QualType BaseType, bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &MemberNameInfo,
- const TemplateArgumentListInfo *TemplateArgs,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
- std::size_t size = sizeof(UnresolvedMemberExpr);
- if (TemplateArgs)
- size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
- else if (TemplateKWLoc.isValid())
- size += ASTTemplateKWAndArgsInfo::sizeFor(0);
-
- void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
- return new (Mem) UnresolvedMemberExpr(C,
- HasUnresolvedUsing, Base, BaseType,
- IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
- MemberNameInfo, TemplateArgs, Begin, End);
+UnresolvedMemberExpr *UnresolvedMemberExpr::Create(
+ const ASTContext &C, bool HasUnresolvedUsing, Expr *Base, QualType BaseType,
+ bool IsArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, TemplateArgs ? TemplateArgs->size() : 0);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
+ return new (Mem) UnresolvedMemberExpr(
+ C, HasUnresolvedUsing, Base, BaseType, IsArrow, OperatorLoc, QualifierLoc,
+ TemplateKWLoc, MemberNameInfo, TemplateArgs, Begin, End);
}
UnresolvedMemberExpr *
UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- std::size_t size = sizeof(UnresolvedMemberExpr);
- if (HasTemplateKWAndArgsInfo)
- size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
+ std::size_t Size =
+ totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
+ void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
@@ -1428,6 +1398,25 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
return Record;
}
+SizeOfPackExpr *
+SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc,
+ NamedDecl *Pack, SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ Optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs) {
+ void *Storage = Context.Allocate(
+ sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * PartialArgs.size());
+ return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack,
+ PackLoc, RParenLoc, Length, PartialArgs);
+}
+
+SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
+ unsigned NumPartialArgs) {
+ void *Storage = Context.Allocate(
+ sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * NumPartialArgs);
+ return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
+}
+
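For reference, the expression these nodes model, sketched; the PartialArgs path covers packs that are only partially substituted mid-instantiation:

    template <typename... Ts> struct Pack {
      static constexpr unsigned Size = sizeof...(Ts); // a SizeOfPackExpr
    };
    static_assert(Pack<int, long, char>::Size == 3, "");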
SubstNonTypeTemplateParmPackExpr::
SubstNonTypeTemplateParmPackExpr(QualType T,
NonTypeTemplateParmDecl *Param,
@@ -1439,25 +1428,25 @@ SubstNonTypeTemplateParmPackExpr(QualType T,
NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
- return TemplateArgument(Arguments, NumArguments);
+ return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
}
FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
SourceLocation NameLoc,
unsigned NumParams,
- Decl * const *Params)
- : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary,
- true, true, true, true),
- ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
+ ParmVarDecl *const *Params)
+ : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, true, true,
+ true, true),
+ ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
if (Params)
std::uninitialized_copy(Params, Params + NumParams,
- reinterpret_cast<Decl**>(this+1));
+ reinterpret_cast<ParmVarDecl **>(this + 1));
}
FunctionParmPackExpr *
FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
ParmVarDecl *ParamPack, SourceLocation NameLoc,
- ArrayRef<Decl *> Params) {
+ ArrayRef<ParmVarDecl *> Params) {
return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
sizeof(ParmVarDecl*) * Params.size()))
FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 9cc612eae9b7..a47b03c0afba 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -136,6 +136,8 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCIvarRefExprClass:
case Expr::FunctionParmPackExprClass:
case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
@@ -185,6 +187,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXFoldExprClass:
case Expr::NoInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
+ case Expr::CoyieldExprClass:
return Cl::CL_PRValue;
// Next come the complicated cases.
@@ -396,6 +399,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
"Only 1-element init lists can be glvalues.");
return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
+
+ case Expr::CoawaitExprClass:
+ return ClassifyInternal(Ctx, cast<CoawaitExpr>(E)->getResumeExpr());
}
llvm_unreachable("unhandled expression kind in classification");
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index ed749cc56f1e..c4c4398c3622 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -114,7 +114,8 @@ namespace {
static
unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
ArrayRef<APValue::LValuePathEntry> Path,
- uint64_t &ArraySize, QualType &Type) {
+ uint64_t &ArraySize, QualType &Type,
+ bool &IsArray) {
unsigned MostDerivedLength = 0;
Type = Base;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
@@ -124,18 +125,22 @@ namespace {
Type = CAT->getElementType();
ArraySize = CAT->getSize().getZExtValue();
MostDerivedLength = I + 1;
+ IsArray = true;
} else if (Type->isAnyComplexType()) {
const ComplexType *CT = Type->castAs<ComplexType>();
Type = CT->getElementType();
ArraySize = 2;
MostDerivedLength = I + 1;
+ IsArray = true;
} else if (const FieldDecl *FD = getAsField(Path[I])) {
Type = FD->getType();
ArraySize = 0;
MostDerivedLength = I + 1;
+ IsArray = false;
} else {
// Path[I] describes a base class.
ArraySize = 0;
+ IsArray = false;
}
}
return MostDerivedLength;
@@ -157,12 +162,17 @@ namespace {
/// Is this a pointer one past the end of an object?
bool IsOnePastTheEnd : 1;
+ /// Indicator of whether the most-derived object is an array element.
+ bool MostDerivedIsArrayElement : 1;
+
/// The length of the path to the most-derived object of which this is a
/// subobject.
- unsigned MostDerivedPathLength : 30;
+ unsigned MostDerivedPathLength : 29;
- /// The size of the array of which the most-derived object is an element, or
- /// 0 if the most-derived object is not an array element.
+ /// The size of the array of which the most-derived object is an element.
+ /// This will always be 0 if the most-derived object is not an array
+ /// element. 0 is not an indicator of whether or not the most-derived object
+ /// is an array, however, because 0-length arrays are allowed.
uint64_t MostDerivedArraySize;
/// The type of the most derived object referred to by this address.
@@ -176,21 +186,26 @@ namespace {
SubobjectDesignator() : Invalid(true) {}
explicit SubobjectDesignator(QualType T)
- : Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
- MostDerivedArraySize(0), MostDerivedType(T) {}
+ : Invalid(false), IsOnePastTheEnd(false),
+ MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0), MostDerivedType(T) {}
SubobjectDesignator(ASTContext &Ctx, const APValue &V)
- : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
- MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0) {
if (!Invalid) {
IsOnePastTheEnd = V.isLValueOnePastTheEnd();
ArrayRef<PathEntry> VEntries = V.getLValuePath();
Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
- if (V.getLValueBase())
+ if (V.getLValueBase()) {
+ bool IsArray = false;
MostDerivedPathLength =
findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
V.getLValuePath(), MostDerivedArraySize,
- MostDerivedType);
+ MostDerivedType, IsArray);
+ MostDerivedIsArrayElement = IsArray;
+ }
}
}
@@ -204,7 +219,7 @@ namespace {
assert(!Invalid);
if (IsOnePastTheEnd)
return true;
- if (MostDerivedArraySize &&
+ if (MostDerivedIsArrayElement &&
Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
return true;
return false;
@@ -228,6 +243,7 @@ namespace {
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
+ MostDerivedIsArrayElement = true;
MostDerivedArraySize = CAT->getSize().getZExtValue();
MostDerivedPathLength = Entries.size();
}
@@ -242,6 +258,7 @@ namespace {
// If this isn't a base class, it's a new most-derived object.
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
MostDerivedType = FD->getType();
+ MostDerivedIsArrayElement = false;
MostDerivedArraySize = 0;
MostDerivedPathLength = Entries.size();
}
@@ -255,6 +272,7 @@ namespace {
// This is technically a most-derived object, though in practice this
// is unlikely to matter.
MostDerivedType = EltTy;
+ MostDerivedIsArrayElement = true;
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
@@ -262,7 +280,8 @@ namespace {
/// Add N to the address of this subobject.
void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
if (Invalid) return;
- if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
+ if (MostDerivedPathLength == Entries.size() &&
+ MostDerivedIsArrayElement) {
Entries.back().ArrayIndex += N;
if (Entries.back().ArrayIndex > MostDerivedArraySize) {
diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
@@ -454,6 +473,10 @@ namespace {
/// notes attached to it will also be stored, otherwise they will not be.
bool HasActiveDiagnostic;
+ /// \brief Have we emitted a diagnostic explaining why we couldn't constant
+ /// fold (not just why it's not strictly a constant expression)?
+ bool HasFoldFailureDiagnostic;
+
enum EvaluationMode {
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression.
@@ -492,7 +515,11 @@ namespace {
/// optimizer if we don't constant fold them here, but in an unevaluated
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
- EM_PotentialConstantExpressionUnevaluated
+ EM_PotentialConstantExpressionUnevaluated,
+
+ /// Evaluate as a constant expression. Continue evaluating if we find a
+ /// MemberExpr with a base that can't be evaluated.
+ EM_DesignatorFold,
} EvalMode;
/// Are we checking whether the expression is a potential constant
@@ -514,7 +541,7 @@ namespace {
BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
- EvalMode(Mode) {}
+ HasFoldFailureDiagnostic(false), EvalMode(Mode) {}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
@@ -574,7 +601,7 @@ namespace {
/// Diagnose that the evaluation cannot be folded.
OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
- unsigned ExtraNotes = 0) {
+ unsigned ExtraNotes = 0, bool IsCCEDiag = false) {
if (EvalStatus.Diag) {
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
@@ -587,14 +614,14 @@ namespace {
case EM_ConstantFold:
case EM_IgnoreSideEffects:
case EM_EvaluateForOverflow:
- if (!EvalStatus.HasSideEffects)
+ if (!HasFoldFailureDiagnostic)
break;
- // We've had side-effects; we want the diagnostic from them, not
- // some later problem.
+ // We've already failed to fold something. Keep that diagnostic.
case EM_ConstantExpression:
case EM_PotentialConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_PotentialConstantExpressionUnevaluated:
+ case EM_DesignatorFold:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -608,6 +635,7 @@ namespace {
CallStackNotes = 0;
HasActiveDiagnostic = true;
+ HasFoldFailureDiagnostic = !IsCCEDiag;
EvalStatus.Diag->clear();
EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
addDiag(Loc, DiagId);
@@ -621,9 +649,9 @@ namespace {
OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
- unsigned ExtraNotes = 0) {
+ unsigned ExtraNotes = 0, bool IsCCEDiag = false) {
if (EvalStatus.Diag)
- return Diag(E->getExprLoc(), DiagId, ExtraNotes);
+ return Diag(E->getExprLoc(), DiagId, ExtraNotes, IsCCEDiag);
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -643,7 +671,7 @@ namespace {
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
- return Diag(Loc, DiagId, ExtraNotes);
+ return Diag(Loc, DiagId, ExtraNotes, true);
}
/// Add a note to a prior diagnostic.
@@ -674,6 +702,7 @@ namespace {
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
+ case EM_DesignatorFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -686,6 +715,32 @@ namespace {
return keepEvaluatingAfterSideEffect();
}
+ /// Should we continue evaluation after encountering undefined behavior?
+ bool keepEvaluatingAfterUndefinedBehavior() {
+ switch (EvalMode) {
+ case EM_EvaluateForOverflow:
+ case EM_IgnoreSideEffects:
+ case EM_ConstantFold:
+ case EM_DesignatorFold:
+ return true;
+
+ case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
+ case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ return false;
+ }
+ llvm_unreachable("Missed EvalMode case");
+ }
+
+ /// Note that we hit something that was technically undefined behavior, but
+ /// that we can evaluate past (such as signed overflow or floating-point
+ /// division by zero).
+ bool noteUndefinedBehavior() {
+ EvalStatus.HasUndefinedBehavior = true;
+ return keepEvaluatingAfterUndefinedBehavior();
+ }
+
/// Should we continue evaluation as much as possible after encountering a
/// construct which can't be reduced to a value?
bool keepEvaluatingAfterFailure() {
@@ -702,10 +757,15 @@ namespace {
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
+ case EM_DesignatorFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
}
+
+ bool allowInvalidBaseExpr() const {
+ return EvalMode == EM_DesignatorFold;
+ }
};
/// Object used to treat all foldable expressions as constant expressions.
@@ -736,6 +796,21 @@ namespace {
}
};
+ /// RAII object used to treat the current evaluation as the correct pointer
+ /// offset fold for the current EvalMode.
+ struct FoldOffsetRAII {
+ EvalInfo &Info;
+ EvalInfo::EvaluationMode OldMode;
+ explicit FoldOffsetRAII(EvalInfo &Info, bool Subobject)
+ : Info(Info), OldMode(Info.EvalMode) {
+ if (!Info.checkingPotentialConstantExpression())
+ Info.EvalMode = Subobject ? EvalInfo::EM_DesignatorFold
+ : EvalInfo::EM_ConstantFold;
+ }
+
+ ~FoldOffsetRAII() { Info.EvalMode = OldMode; }
+ };
+
/// RAII object used to suppress diagnostics and side-effects from a
/// speculative evaluation.
class SpeculativeEvaluationRAII {
@@ -808,7 +883,7 @@ bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
const Expr *E, uint64_t N) {
- if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
+ if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< static_cast<int>(N) << /*array*/ 0
<< static_cast<unsigned>(MostDerivedArraySize);
@@ -917,7 +992,8 @@ namespace {
struct LValue {
APValue::LValueBase Base;
CharUnits Offset;
- unsigned CallIndex;
+ bool InvalidBase : 1;
+ unsigned CallIndex : 31;
SubobjectDesignator Designator;
const APValue::LValueBase getLValueBase() const { return Base; }
@@ -938,17 +1014,23 @@ namespace {
assert(V.isLValue());
Base = V.getLValueBase();
Offset = V.getLValueOffset();
+ InvalidBase = false;
CallIndex = V.getLValueCallIndex();
Designator = SubobjectDesignator(Ctx, V);
}
- void set(APValue::LValueBase B, unsigned I = 0) {
+ void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false) {
Base = B;
Offset = CharUnits::Zero();
+ InvalidBase = BInvalid;
CallIndex = I;
Designator = SubobjectDesignator(getType(B));
}
+ void setInvalid(APValue::LValueBase B, unsigned I = 0) {
+ set(B, I, true);
+ }
+
// Check that this LValue is not based on a null pointer. If it is, produce
// a diagnostic and mark the designator as invalid.
bool checkNullPointer(EvalInfo &Info, const Expr *E,
@@ -967,10 +1049,6 @@ namespace {
// Check this LValue refers to an object. If not, set the designator to be
// invalid and emit a diagnostic.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
- // Outside C++11, do not build a designator referring to a subobject of
- // any object: we won't use such a designator for anything.
- if (!Info.getLangOpts().CPlusPlus11)
- Designator.setInvalid();
return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
Designator.checkSubobject(Info, E, CSK);
}
@@ -1102,12 +1180,13 @@ static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info);
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
-static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
//===----------------------------------------------------------------------===//
// Misc utilities
@@ -1492,10 +1571,11 @@ static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
}
template<typename T>
-static void HandleOverflow(EvalInfo &Info, const Expr *E,
+static bool HandleOverflow(EvalInfo &Info, const Expr *E,
const T &SrcValue, QualType DestType) {
Info.CCEDiag(E, diag::note_constexpr_overflow)
<< SrcValue << DestType;
+ return Info.noteUndefinedBehavior();
}
static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
@@ -1509,7 +1589,7 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
bool ignored;
if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
& APFloat::opInvalidOp)
- HandleOverflow(Info, E, Value, DestType);
+ return HandleOverflow(Info, E, Value, DestType);
return true;
}
@@ -1521,13 +1601,13 @@ static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
APFloat::rmNearestTiesToEven, &ignored)
& APFloat::opOverflow)
- HandleOverflow(Info, E, Value, DestType);
+ return HandleOverflow(Info, E, Value, DestType);
return true;
}
static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
QualType DestType, QualType SrcType,
- APSInt &Value) {
+ const APSInt &Value) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
@@ -1544,7 +1624,7 @@ static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
if (Result.convertFromAPInt(Value, Value.isSigned(),
APFloat::rmNearestTiesToEven)
& APFloat::opOverflow)
- HandleOverflow(Info, E, Value, DestType);
+ return HandleOverflow(Info, E, Value, DestType);
return true;
}
@@ -1620,23 +1700,26 @@ static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
template<typename Operation>
-static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
- const APSInt &LHS, const APSInt &RHS,
- unsigned BitWidth, Operation Op) {
- if (LHS.isUnsigned())
- return Op(LHS, RHS);
+static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &LHS, const APSInt &RHS,
+ unsigned BitWidth, Operation Op,
+ APSInt &Result) {
+ if (LHS.isUnsigned()) {
+ Result = Op(LHS, RHS);
+ return true;
+ }
APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
- APSInt Result = Value.trunc(LHS.getBitWidth());
+ Result = Value.trunc(LHS.getBitWidth());
if (Result.extend(BitWidth) != Value) {
if (Info.checkingForOverflow())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
- diag::warn_integer_constant_overflow)
+ diag::warn_integer_constant_overflow)
<< Result.toString(10) << E->getType();
else
- HandleOverflow(Info, E, Value, E->getType());
+ return HandleOverflow(Info, E, Value, E->getType());
}
- return Result;
+ return true;
}
/// Perform the given binary integer operation.
@@ -1648,17 +1731,14 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
Info.Diag(E);
return false;
case BO_Mul:
- Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2,
- std::multiplies<APSInt>());
- return true;
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2,
+ std::multiplies<APSInt>(), Result);
case BO_Add:
- Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
- std::plus<APSInt>());
- return true;
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
+ std::plus<APSInt>(), Result);
case BO_Sub:
- Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
- std::minus<APSInt>());
- return true;
+ return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
+ std::minus<APSInt>(), Result);
case BO_And: Result = LHS & RHS; return true;
case BO_Xor: Result = LHS ^ RHS; return true;
case BO_Or: Result = LHS | RHS; return true;
@@ -1668,11 +1748,13 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
Info.Diag(E, diag::note_expr_divide_by_zero);
return false;
}
- // Check for overflow case: INT_MIN / -1 or INT_MIN % -1.
+ Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
+ // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports
+ // this operation and gives the two's complement result.
if (RHS.isNegative() && RHS.isAllOnesValue() &&
LHS.isSigned() && LHS.isMinSignedValue())
- HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
- Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
+ return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1),
+ E->getType());
return true;
case BO_Shl: {
if (Info.getLangOpts().OpenCL)
@@ -1760,8 +1842,10 @@ static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
break;
}
- if (LHS.isInfinity() || LHS.isNaN())
+ if (LHS.isInfinity() || LHS.isNaN()) {
Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
+ return Info.noteUndefinedBehavior();
+ }
return true;
}
@@ -2159,6 +2243,7 @@ enum AccessKinds {
AK_Decrement
};
+namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
@@ -2175,6 +2260,7 @@ struct CompleteObject {
explicit operator bool() const { return Value; }
};
+} // end anonymous namespace
/// Find the designated sub-object of an rvalue.
template<typename SubobjectHandler>
@@ -2488,7 +2574,7 @@ static bool AreElementsOfSameArray(QualType ObjType,
if (A.Entries.size() != B.Entries.size())
return false;
- bool IsArray = A.MostDerivedArraySize != 0;
+ bool IsArray = A.MostDerivedIsArrayElement;
if (IsArray && A.MostDerivedPathLength != A.Entries.size())
// A is a subobject of the array element.
return false;
@@ -2713,8 +2799,7 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
// Check for special cases where there is no existing APValue to look at.
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
- if (!LVal.Designator.Invalid && Base && !LVal.CallIndex &&
- !Type.isVolatileQualified()) {
+ if (Base && !LVal.CallIndex && !Type.isVolatileQualified()) {
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
// In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
// initializer until now for such expressions. Such an expression can't be
@@ -2959,7 +3044,7 @@ struct IncDecSubobjectHandler {
if (!WasNegative && Value.isNegative() &&
isOverflowingIntegerType(Info.Ctx, SubobjType)) {
APSInt ActualValue(Value, /*IsUnsigned*/true);
- HandleOverflow(Info, E, ActualValue, SubobjType);
+ return HandleOverflow(Info, E, ActualValue, SubobjType);
}
} else {
--Value;
@@ -2969,7 +3054,7 @@ struct IncDecSubobjectHandler {
unsigned BitWidth = Value.getBitWidth();
APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
ActualValue.setBit(BitWidth);
- HandleOverflow(Info, E, ActualValue, SubobjType);
+ return HandleOverflow(Info, E, ActualValue, SubobjType);
}
}
return true;
@@ -3253,12 +3338,21 @@ static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
return EvaluateAsBooleanCondition(Cond, Result, Info);
}
-static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
+/// \brief A location where the result (returned value) of evaluating a
+/// statement should be stored.
+struct StmtResult {
+ /// The APValue that should be filled in with the returned value.
+ APValue &Value;
+ /// The location containing the result, if any (used to support RVO).
+ const LValue *Slot;
+};
+
+static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const Stmt *S,
const SwitchCase *SC = nullptr);
/// Evaluate the body of a loop, and translate the result as appropriate.
-static EvalStmtResult EvaluateLoopBody(APValue &Result, EvalInfo &Info,
+static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info,
const Stmt *Body,
const SwitchCase *Case = nullptr) {
BlockScopeRAII Scope(Info);
@@ -3277,7 +3371,7 @@ static EvalStmtResult EvaluateLoopBody(APValue &Result, EvalInfo &Info,
}
/// Evaluate a switch statement.
-static EvalStmtResult EvaluateSwitch(APValue &Result, EvalInfo &Info,
+static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
const SwitchStmt *SS) {
BlockScopeRAII Scope(Info);
@@ -3334,7 +3428,7 @@ static EvalStmtResult EvaluateSwitch(APValue &Result, EvalInfo &Info,
}
// Evaluate a statement.
-static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
+static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const Stmt *S, const SwitchCase *Case) {
if (!Info.nextStep(S))
return ESR_Failed;
@@ -3440,7 +3534,10 @@ static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
case Stmt::ReturnStmtClass: {
const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
FullExpressionRAII Scope(Info);
- if (RetExpr && !Evaluate(Result, Info, RetExpr))
+ if (RetExpr &&
+ !(Result.Slot
+ ? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr)
+ : Evaluate(Result.Value, Info, RetExpr)))
return ESR_Failed;
return ESR_Returned;
}
@@ -3710,7 +3807,8 @@ static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
static bool HandleFunctionCall(SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
ArrayRef<const Expr*> Args, const Stmt *Body,
- EvalInfo &Info, APValue &Result) {
+ EvalInfo &Info, APValue &Result,
+ const LValue *ResultSlot) {
ArgVector ArgValues(Args.size());
if (!EvaluateArgs(Args, ArgValues, Info))
return false;
@@ -3745,7 +3843,8 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
return true;
}
- EvalStmtResult ESR = EvaluateStmt(Result, Info, Body);
+ StmtResult Ret = {Result, ResultSlot};
+ EvalStmtResult ESR = EvaluateStmt(Ret, Info, Body);
if (ESR == ESR_Succeeded) {
if (Callee->getReturnType()->isVoidType())
return true;
@@ -3774,6 +3873,11 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
+ // FIXME: Creating an APValue just to hold a nonexistent return value is
+ // wasteful.
+ APValue RetVal;
+ StmtResult Ret = {RetVal, nullptr};
+
// If it's a delegating constructor, just delegate.
if (Definition->isDelegatingConstructor()) {
CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
@@ -3782,7 +3886,7 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()))
return false;
}
- return EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed;
+ return EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed;
}
// For a trivial copy or move constructor, perform an APValue copy. This is
@@ -3890,7 +3994,7 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
}
return Success &&
- EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed;
+ EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed;
}
//===----------------------------------------------------------------------===//
@@ -3902,11 +4006,12 @@ template <class Derived>
class ExprEvaluatorBase
: public ConstStmtVisitor<Derived, bool> {
private:
+ Derived &getDerived() { return static_cast<Derived&>(*this); }
bool DerivedSuccess(const APValue &V, const Expr *E) {
- return static_cast<Derived*>(this)->Success(V, E);
+ return getDerived().Success(V, E);
}
bool DerivedZeroInitialization(const Expr *E) {
- return static_cast<Derived*>(this)->ZeroInitialization(E);
+ return getDerived().ZeroInitialization(E);
}
// Check whether a conditional operator with a non-constant condition is a
@@ -4087,6 +4192,14 @@ public:
}
bool VisitCallExpr(const CallExpr *E) {
+ APValue Result;
+ if (!handleCallExpr(E, Result, nullptr))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+
+ bool handleCallExpr(const CallExpr *E, APValue &Result,
+ const LValue *ResultSlot) {
const Expr *Callee = E->getCallee()->IgnoreParens();
QualType CalleeType = Callee->getType();
@@ -4161,14 +4274,13 @@ public:
const FunctionDecl *Definition = nullptr;
Stmt *Body = FD->getBody(Definition);
- APValue Result;
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
- !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body,
- Info, Result))
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, Info,
+ Result, ResultSlot))
return false;
- return DerivedSuccess(Result, E);
+ return true;
}
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
@@ -4293,7 +4405,8 @@ public:
}
APValue ReturnValue;
- EvalStmtResult ESR = EvaluateStmt(ReturnValue, Info, *BI);
+ StmtResult Result = { ReturnValue, nullptr };
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
if (ESR != ESR_Succeeded) {
// FIXME: If the statement-expression terminated due to 'return',
// 'break', or 'continue', it would be nice to propagate that to
@@ -4345,20 +4458,24 @@ public:
bool VisitMemberExpr(const MemberExpr *E) {
// Handle non-static data members.
QualType BaseTy;
+ bool EvalOK;
if (E->isArrow()) {
- if (!EvaluatePointer(E->getBase(), Result, this->Info))
- return false;
+ EvalOK = EvaluatePointer(E->getBase(), Result, this->Info);
BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
} else if (E->getBase()->isRValue()) {
assert(E->getBase()->getType()->isRecordType());
- if (!EvaluateTemporary(E->getBase(), Result, this->Info))
- return false;
+ EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
BaseTy = E->getBase()->getType();
} else {
- if (!this->Visit(E->getBase()))
- return false;
+ EvalOK = this->Visit(E->getBase());
BaseTy = E->getBase()->getType();
}
+ if (!EvalOK) {
+ if (!this->Info.allowInvalidBaseExpr())
+ return false;
+ Result.setInvalid(E);
+ return true;
+ }
const ValueDecl *MD = E->getMemberDecl();
if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
@@ -4498,12 +4615,13 @@ public:
} // end anonymous namespace
/// Evaluate an expression as an lvalue. This can be legitimately called on
-/// expressions which are not glvalues, in two cases:
+/// expressions which are not glvalues, in three cases:
/// * function designators in C,
/// * "extern void" objects, and
+/// * @selector() expressions in Objective-C
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info) {
assert(E->isGLValue() || E->getType()->isFunctionType() ||
- E->getType()->isVoidType());
+ E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
return LValueExprEvaluator(Info, Result).Visit(E);
}
@@ -4770,7 +4888,7 @@ public:
bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
{ return Success(E); }
bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E)
- { return Success(E); }
+ { return Success(E); }
bool VisitAddrLabelExpr(const AddrLabelExpr *E)
{ return Success(E); }
bool VisitCallExpr(const CallExpr *E);
@@ -4896,6 +5014,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
unsigned Size = Info.Ctx.getTypeSize(E->getType());
uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
Result.Base = (Expr*)nullptr;
+ Result.InvalidBase = false;
Result.Offset = CharUnits::fromQuantity(N);
Result.CallIndex = 0;
Result.Designator.setInvalid();
@@ -5148,6 +5267,9 @@ namespace {
}
bool ZeroInitialization(const Expr *E);
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
bool VisitCastExpr(const CastExpr *E);
bool VisitInitListExpr(const InitListExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E);
@@ -5504,7 +5626,7 @@ namespace {
VectorExprEvaluator(EvalInfo &info, APValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
- bool Success(const ArrayRef<APValue> &V, const Expr *E) {
+ bool Success(ArrayRef<APValue> V, const Expr *E) {
assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
// FIXME: remove this APValue copy.
Result = APValue(V.data(), V.size());
@@ -5533,7 +5655,7 @@ static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
return VectorExprEvaluator(Info, Result).Visit(E);
}
-bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
+bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
const VectorType *VTy = E->getType()->castAs<VectorType>();
unsigned NElts = VTy->getNumElements();
@@ -5546,13 +5668,13 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
if (SETy->isIntegerType()) {
APSInt IntResult;
if (!EvaluateInteger(SE, IntResult, Info))
- return false;
- Val = APValue(IntResult);
+ return false;
+ Val = APValue(std::move(IntResult));
} else if (SETy->isRealFloatingType()) {
- APFloat F(0.0);
- if (!EvaluateFloat(SE, F, Info))
- return false;
- Val = APValue(F);
+ APFloat FloatResult(0.0);
+ if (!EvaluateFloat(SE, FloatResult, Info))
+ return false;
+ Val = APValue(std::move(FloatResult));
} else {
return Error(E);
}
@@ -5710,6 +5832,9 @@ namespace {
return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
}
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
bool VisitInitListExpr(const InitListExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E,
@@ -5998,8 +6123,7 @@ public:
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
private:
- static QualType GetObjectType(APValue::LValueBase B);
- bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
+ bool TryEvaluateBuiltinObjectSize(const CallExpr *E, unsigned Type);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
@@ -6151,8 +6275,8 @@ static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
APValue &V = Result.Val;
if (V.getKind() == APValue::Int)
return true;
-
- return EvaluateBuiltinConstantPForLValue(V);
+ if (V.getKind() == APValue::LValue)
+ return EvaluateBuiltinConstantPForLValue(V);
} else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
return Arg->isEvaluatable(Ctx);
} else if (ArgType->isPointerType() || Arg->isGLValue()) {
@@ -6171,7 +6295,7 @@ static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
/// Retrieves the "underlying object type" of the given expression,
/// as used by __builtin_object_size.
-QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
+static QualType getObjectType(APValue::LValueBase B) {
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->getType();
@@ -6183,49 +6307,258 @@ QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
return QualType();
}
-bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
- LValue Base;
+/// A more selective version of E->IgnoreParenCasts for
+/// TryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
+/// to change the type of E.
+/// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
+///
+/// Always returns an RValue with a pointer representation.
+static const Expr *ignorePointerCastsAndParens(const Expr *E) {
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ auto *NoParens = E->IgnoreParens();
+ auto *Cast = dyn_cast<CastExpr>(NoParens);
+ if (Cast == nullptr)
+ return NoParens;
+
+ // We only conservatively allow a few kinds of casts, because this code is
+ // inherently a simple solution that seeks to support the common case.
+ auto CastKind = Cast->getCastKind();
+ if (CastKind != CK_NoOp && CastKind != CK_BitCast &&
+ CastKind != CK_AddressSpaceConversion)
+ return NoParens;
+
+ auto *SubExpr = Cast->getSubExpr();
+ if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isRValue())
+ return NoParens;
+ return ignorePointerCastsAndParens(SubExpr);
+}
+
+/// Checks to see if the given LValue's Designator is at the end of the LValue's
+/// record layout. e.g.
+/// struct { struct { int a, b; } fst, snd; } obj;
+/// obj.fst // no
+/// obj.snd // yes
+/// obj.fst.a // no
+/// obj.fst.b // no
+/// obj.snd.a // no
+/// obj.snd.b // yes
+///
+/// Please note: this function is specialized for how __builtin_object_size
+/// views "objects".
+static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
+ assert(!LVal.Designator.Invalid);
+
+ auto IsLastFieldDecl = [&Ctx](const FieldDecl *FD) {
+ if (FD->getParent()->isUnion())
+ return true;
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
+ return FD->getFieldIndex() + 1 == Layout.getFieldCount();
+ };
+
+ auto &Base = LVal.getLValueBase();
+ if (auto *ME = dyn_cast_or_null<MemberExpr>(Base.dyn_cast<const Expr *>())) {
+ if (auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (!IsLastFieldDecl(FD))
+ return false;
+ } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(ME->getMemberDecl())) {
+ for (auto *FD : IFD->chain())
+ if (!IsLastFieldDecl(cast<FieldDecl>(FD)))
+ return false;
+ }
+ }
+
+ QualType BaseType = getType(Base);
+ for (int I = 0, E = LVal.Designator.Entries.size(); I != E; ++I) {
+ if (BaseType->isArrayType()) {
+ // Because __builtin_object_size treats arrays as objects, we can ignore
+ // the index iff this is the last array in the Designator.
+ if (I + 1 == E)
+ return true;
+ auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
+ uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ if (Index + 1 != CAT->getSize())
+ return false;
+ BaseType = CAT->getElementType();
+ } else if (BaseType->isAnyComplexType()) {
+ auto *CT = BaseType->castAs<ComplexType>();
+ uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ if (Index != 1)
+ return false;
+ BaseType = CT->getElementType();
+ } else if (auto *FD = getAsField(LVal.Designator.Entries[I])) {
+ if (!IsLastFieldDecl(FD))
+ return false;
+ BaseType = FD->getType();
+ } else {
+ assert(getAsBaseClass(LVal.Designator.Entries[I]) != nullptr &&
+ "Expecting cast to a base class");
+ return false;
+ }
+ }
+ return true;
+}
+
+/// Determines whether the given LValue refers to a complete object: one with
+/// a valid, empty designator and a base that has not been marked invalid.
+static bool refersToCompleteObject(const LValue &LVal) {
+ if (LVal.Designator.Invalid || !LVal.Designator.Entries.empty())
+ return false;
+
+ if (!LVal.InvalidBase)
+ return true;
+
+ auto *E = LVal.Base.dyn_cast<const Expr *>();
+ (void)E;
+ assert(E != nullptr && isa<MemberExpr>(E));
+ return false;
+}
+
+/// Tries to evaluate __builtin_object_size for @p E. If successful, returns
+/// true and stores the result in @p Size.
+///
+/// If @p WasError is non-null, this will report whether the failure to evaluate
+/// is to be treated as an Error in IntExprEvaluator.
+static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
+ EvalInfo &Info, uint64_t &Size,
+ bool *WasError = nullptr) {
+ if (WasError != nullptr)
+ *WasError = false;
+
+ auto Error = [&](const Expr *E) {
+ if (WasError != nullptr)
+ *WasError = true;
+ return false;
+ };
+
+ auto Success = [&](uint64_t S, const Expr *E) {
+ Size = S;
+ return true;
+ };
+
+ // Determine the denoted object.
+ LValue Base;
{
// The operand of __builtin_object_size is never evaluated for side-effects.
// If there are any, but we can determine the pointed-to object anyway, then
// ignore the side-effects.
SpeculativeEvaluationRAII SpeculativeEval(Info);
- if (!EvaluatePointer(E->getArg(0), Base, Info))
+ FoldOffsetRAII Fold(Info, Type & 1);
+
+ if (E->isGLValue()) {
+ // It's possible for us to be given GLValues if we're called via
+ // Expr::tryEvaluateObjectSize.
+ APValue RVal;
+ if (!EvaluateAsRValue(Info, E, RVal))
+ return false;
+ Base.setFrom(Info.Ctx, RVal);
+ } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), Base, Info))
return false;
}
- if (!Base.getLValueBase()) {
- // It is not possible to determine which objects ptr points to at compile time,
- // __builtin_object_size should return (size_t) -1 for type 0 or 1
- // and (size_t) 0 for type 2 or 3.
- llvm::APSInt TypeIntVaue;
- const Expr *ExprType = E->getArg(1);
- if (!ExprType->EvaluateAsInt(TypeIntVaue, Info.Ctx))
- return false;
- if (TypeIntVaue == 0 || TypeIntVaue == 1)
- return Success(-1, E);
- if (TypeIntVaue == 2 || TypeIntVaue == 3)
- return Success(0, E);
+ CharUnits BaseOffset = Base.getLValueOffset();
+ // If we point to before the start of the object, there are no accessible
+ // bytes.
+ if (BaseOffset.isNegative())
+ return Success(0, E);
+
+ // In the case where we're not dealing with a subobject, we discard the
+ // subobject bit.
+ bool SubobjectOnly = (Type & 1) != 0 && !refersToCompleteObject(Base);
+
+ // If Type & 1 is 0, we need to be able to statically guarantee that the bytes
+ // exist. If we can't verify the base, then we can't do that.
+ //
+ // As a special case, we produce a valid object size for an unknown object
+ // with a known designator if Type & 1 is 1. For instance:
+ //
+ // extern struct X { char buff[32]; int a, b, c; } *p;
+ // int a = __builtin_object_size(p->buff + 4, 3); // returns 28
+ // int b = __builtin_object_size(p->buff + 4, 2); // returns 0, not 40
+ //
+ // This matches GCC's behavior.
+ if (Base.InvalidBase && !SubobjectOnly)
return Error(E);
+
+ // If we're not examining only the subobject, then we reset to a complete
+ // object designator.
+ //
+ // If Type is 1 and we've lost track of the subobject, just find the complete
+ // object instead. (If Type is 3, that's not correct behavior and we should
+ // return 0 instead.)
+ LValue End = Base;
+ if (!SubobjectOnly || (End.Designator.Invalid && Type == 1)) {
+ QualType T = getObjectType(End.getLValueBase());
+ if (T.isNull())
+ End.Designator.setInvalid();
+ else {
+ End.Designator = SubobjectDesignator(T);
+ End.Offset = CharUnits::Zero();
+ }
}
- QualType T = GetObjectType(Base.getLValueBase());
- if (T.isNull() ||
- T->isIncompleteType() ||
- T->isFunctionType() ||
- T->isVariablyModifiedType() ||
- T->isDependentType())
+ // If it is not possible to determine which object ptr points to at compile
+ // time, __builtin_object_size should return (size_t) -1 for type 0 or 1
+ // and (size_t) 0 for type 2 or 3.
+ if (End.Designator.Invalid)
+ return false;
+
+ // According to the GCC documentation, we want the size of the subobject
+ // denoted by the pointer. But that's not quite right -- what we actually
+ // want is the size of the immediately-enclosing array, if there is one.
+ int64_t AmountToAdd = 1;
+ if (End.Designator.MostDerivedIsArrayElement &&
+ End.Designator.Entries.size() == End.Designator.MostDerivedPathLength) {
+ // We got a pointer to an array. Step to its end.
+ AmountToAdd = End.Designator.MostDerivedArraySize -
+ End.Designator.Entries.back().ArrayIndex;
+ } else if (End.Designator.isOnePastTheEnd()) {
+ // We're already pointing at the end of the object.
+ AmountToAdd = 0;
+ }
+
+ QualType PointeeType = End.Designator.MostDerivedType;
+ assert(!PointeeType.isNull());
+ if (PointeeType->isIncompleteType() || PointeeType->isFunctionType())
return Error(E);
- CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
- CharUnits Offset = Base.getLValueOffset();
+ if (!HandleLValueArrayAdjustment(Info, E, End, End.Designator.MostDerivedType,
+ AmountToAdd))
+ return false;
- if (!Offset.isNegative() && Offset <= Size)
- Size -= Offset;
- else
- Size = CharUnits::Zero();
- return Success(Size, E);
+ auto EndOffset = End.getLValueOffset();
+
+ // The following is a moderately common idiom in C:
+ //
+ // struct Foo { int a; char c[1]; };
+ // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
+ // strcpy(&F->c[0], Bar);
+ //
+ // So, if we see that we're examining a 1-length (or 0-length) array at the
+ // end of a struct with an unknown base, we give up instead of breaking code
+ // that behaves this way. Note that we only do this when Type=1, because
+ // Type=3 is a lower bound, so answering conservatively is fine.
+ if (End.InvalidBase && SubobjectOnly && Type == 1 &&
+ End.Designator.Entries.size() == End.Designator.MostDerivedPathLength &&
+ End.Designator.MostDerivedIsArrayElement &&
+ End.Designator.MostDerivedArraySize < 2 &&
+ isDesignatorAtObjectEnd(Info.Ctx, End))
+ return false;
+
+ if (BaseOffset > EndOffset)
+ return Success(0, E);
+
+ return Success((EndOffset - BaseOffset).getQuantity(), E);
+}
+
+bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E,
+ unsigned Type) {
+ uint64_t Size;
+ bool WasError;
+ if (::tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size, &WasError))
+ return Success(Size, E);
+ if (WasError)
+ return Error(E);
+ return false;
}
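To make the Type semantics above concrete, a few cases with statically known objects, where the minimum and maximum answers coincide (illustrative values matching the GCC behavior the comments reference):

    char buf[10];
    struct S { char a[4], b[4]; } s;
    __builtin_object_size(buf + 3, 0);  // 7: bytes left to the end of buf
    __builtin_object_size(&s.a[1], 0);  // 7: whole-object mode measures all of s
    __builtin_object_size(&s.a[1], 1);  // 3: subobject mode measures only s.a
    __builtin_object_size(buf + 3, 2);  // 7 here, but 0 once the object is unknown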
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
@@ -6234,17 +6567,16 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return ExprEvaluatorBaseTy::VisitCallExpr(E);
case Builtin::BI__builtin_object_size: {
- if (TryEvaluateBuiltinObjectSize(E))
+ // The type was checked when we built the expression.
+ unsigned Type =
+ E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
+ assert(Type <= 3 && "unexpected type");
+
+ if (TryEvaluateBuiltinObjectSize(E, Type))
return true;
- // If evaluating the argument has side-effects, we can't determine the size
- // of the object, and so we lower it to unknown now. CodeGen relies on us to
- // handle all cases where the expression has side-effects.
- if (E->getArg(0)->HasSideEffects(Info.Ctx)) {
- if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1)
- return Success(-1ULL, E);
- return Success(0, E);
- }
+ if (E->getArg(0)->HasSideEffects(Info.Ctx))
+ return Success((Type & 2) ? 0 : -1, E);
// Expression had no side effects, but we couldn't statically determine the
// size of the referenced object.
@@ -6254,10 +6586,13 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
case EvalInfo::EM_ConstantFold:
case EvalInfo::EM_EvaluateForOverflow:
case EvalInfo::EM_IgnoreSideEffects:
+ case EvalInfo::EM_DesignatorFold:
+ // Leave it to IR generation.
return Error(E);
case EvalInfo::EM_ConstantExpressionUnevaluated:
case EvalInfo::EM_PotentialConstantExpressionUnevaluated:
- return Success(-1ULL, E);
+ // Reduce it to a constant now.
+ return Success((Type & 2) ? 0 : -1, E);
}
}
@@ -6523,9 +6858,15 @@ static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
!LV.getLValueDesignator().isOnePastTheEnd())
return false;
+ // A pointer to an incomplete type might be past-the-end if the type's size is
+ // zero. We cannot tell because the type is incomplete.
+ QualType Ty = getType(LV.getLValueBase());
+ if (Ty->isIncompleteType())
+ return true;
+
// We're a past-the-end pointer if we point to the byte after the object,
// no matter what our type or path is.
- auto Size = Ctx.getTypeSizeInChars(getType(LV.getLValueBase()));
+ auto Size = Ctx.getTypeSizeInChars(Ty);
return LV.getLValueOffset() == Size;
}
@@ -6555,7 +6896,13 @@ class DataRecursiveIntBinOpEvaluator {
EvalResult LHSResult; // meaningful only for binary operator expression.
enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
- Job() : StoredInfo(nullptr) {}
+ Job() = default;
+ Job(Job &&J)
+ : E(J.E), LHSResult(J.LHSResult), Kind(J.Kind),
+ StoredInfo(J.StoredInfo), OldEvalStatus(J.OldEvalStatus) {
+ J.StoredInfo = nullptr;
+ }
+
void startSpeculativeEval(EvalInfo &Info) {
OldEvalStatus = Info.EvalStatus;
Info.EvalStatus.Diag = nullptr;
@@ -6567,7 +6914,7 @@ class DataRecursiveIntBinOpEvaluator {
}
}
private:
- EvalInfo *StoredInfo; // non-null if status changed.
+ EvalInfo *StoredInfo = nullptr; // non-null if status changed.
Expr::EvalStatus OldEvalStatus;
};
@@ -6946,7 +7293,7 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
LValue LHSValue, RHSValue;
bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
- if (!LHSOK && Info.keepEvaluatingAfterFailure())
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
@@ -6958,21 +7305,20 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() == BO_Sub) {
// Handle &&A - &&B.
if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
- return false;
+ return Error(E);
const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
if (!LHSExpr || !RHSExpr)
- return false;
+ return Error(E);
const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
if (!LHSAddrExpr || !RHSAddrExpr)
- return false;
+ return Error(E);
// Make sure both labels come from the same function.
if (LHSAddrExpr->getLabel()->getDeclContext() !=
RHSAddrExpr->getLabel()->getDeclContext())
- return false;
- Result = APValue(LHSAddrExpr, RHSAddrExpr);
- return true;
+ return Error(E);
+ return Success(APValue(LHSAddrExpr, RHSAddrExpr), E);
}
// Inequalities and subtractions between unrelated pointers have
// unspecified or undefined behavior.
@@ -7063,8 +7409,9 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
APSInt TrueResult = (LHS - RHS) / ElemSize;
APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
- if (Result.extend(65) != TrueResult)
- HandleOverflow(Info, E, TrueResult, E->getType());
+ if (Result.extend(65) != TrueResult &&
+ !HandleOverflow(Info, E, TrueResult, E->getType()))
+ return false;
return Success(Result, E);
}
@@ -7270,9 +7617,9 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
return Error(OOE);
QualType CurrentType = OOE->getTypeSourceInfo()->getType();
for (unsigned i = 0; i != n; ++i) {
- OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i);
+ OffsetOfNode ON = OOE->getComponent(i);
switch (ON.getKind()) {
- case OffsetOfExpr::OffsetOfNode::Array: {
+ case OffsetOfNode::Array: {
const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
APSInt IdxResult;
if (!EvaluateInteger(Idx, IdxResult, Info))
@@ -7286,7 +7633,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
break;
}
- case OffsetOfExpr::OffsetOfNode::Field: {
+ case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
@@ -7301,10 +7648,10 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
break;
}
- case OffsetOfExpr::OffsetOfNode::Identifier:
+ case OffsetOfNode::Identifier:
llvm_unreachable("dependent __builtin_offsetof");
- case OffsetOfExpr::OffsetOfNode::Base: {
+ case OffsetOfNode::Base: {
CXXBaseSpecifier *BaseSpec = ON.getBase();
if (BaseSpec->isVirtual())
return Error(OOE);
@@ -7350,9 +7697,10 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
- if (Value.isSigned() && Value.isMinSignedValue())
- HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
- E->getType());
+ if (Value.isSigned() && Value.isMinSignedValue() &&
+ !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType()))
+ return false;
return Success(-Value, E);
}
case UO_Not: {
@@ -8512,6 +8860,12 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result,
HandleConversionToBool(Scratch.Val, Result);
}
+static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
+ Expr::SideEffectsKind SEK) {
+ return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
+ (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
+}
+
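The two comparisons above lean on the ordering of Expr::SideEffectsKind; with the undefined-behavior level this change introduces, the assumed layout is:

    enum SideEffectsKind {
      SE_NoSideEffects,          // strictest: reject side effects and UB
      SE_AllowUndefinedBehavior, // tolerate evaluated UB, reject other effects
      SE_AllowSideEffects        // tolerate both
    };

so SEK < SE_AllowSideEffects rejects ordinary side effects, and SEK < SE_AllowUndefinedBehavior additionally rejects evaluated undefined behavior.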
bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects) const {
if (!getType()->isIntegralOrEnumerationType())
@@ -8519,7 +8873,7 @@ bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
EvalResult ExprResult;
if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
- (!AllowSideEffects && ExprResult.HasSideEffects))
+ hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
return false;
Result = ExprResult.Val.getInt();
@@ -8551,7 +8905,9 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- EvalInfo InitInfo(Ctx, EStatus, EvalInfo::EM_ConstantFold);
+ EvalInfo InitInfo(Ctx, EStatus, VD->isConstexpr()
+ ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
InitInfo.setEvaluatingDecl(VD, Value);
LValue LVal;
@@ -8580,9 +8936,10 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
/// constant folded, but discard the result.
-bool Expr::isEvaluatable(const ASTContext &Ctx) const {
+bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
EvalResult Result;
- return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
+ return EvaluateAsRValue(Result, Ctx) &&
+ !hasUnacceptableSideEffect(Result, SEK);
}
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
@@ -8677,6 +9034,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ImaginaryLiteralClass:
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
case Expr::MemberExprClass:
case Expr::CompoundAssignOperatorClass:
case Expr::CompoundLiteralExprClass:
@@ -8695,6 +9053,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
case Expr::CXXNullPtrLiteralExprClass:
case Expr::UserDefinedLiteralClass:
case Expr::CXXThisExprClass:
@@ -8740,6 +9099,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::AtomicExprClass:
case Expr::LambdaExprClass:
case Expr::CXXFoldExprClass:
+ case Expr::CoawaitExprClass:
+ case Expr::CoyieldExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());
case Expr::InitListExprClass: {
@@ -8825,6 +9186,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case UO_PreDec:
case UO_AddrOf:
case UO_Deref:
+ case UO_Coawait:
// C99 6.6/3 allows increment and decrement within unevaluated
// subexpressions of constant expressions, but they can never be ICEs
// because an ICE cannot contain an lvalue operand.
@@ -9078,7 +9440,11 @@ bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
if (!isIntegerConstantExpr(Ctx, Loc))
return false;
- if (!EvaluateAsInt(Value, Ctx))
+ // The only possible side-effects here are due to UB discovered in the
+ // evaluation (for instance, INT_MAX + 1). In such a case, we are still
+ // required to treat the expression as an ICE, so we produce the folded
+ // value.
+ if (!EvaluateAsInt(Value, Ctx, SE_AllowSideEffects))
llvm_unreachable("ICE cannot be evaluated!");
return true;
}
@@ -9172,7 +9538,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
} else
HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
- Args, FD->getBody(), Info, Scratch);
+ Args, FD->getBody(), Info, Scratch, nullptr);
return Diags.empty();
}
@@ -9200,3 +9566,13 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
Evaluate(ResultScratch, Info, E);
return Diags.empty();
}
+
+bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
+ unsigned Type) const {
+ if (!getType()->isPointerType())
+ return false;
+
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+ return ::tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
+}
diff --git a/lib/AST/ExprObjC.cpp b/lib/AST/ExprObjC.cpp
new file mode 100644
index 000000000000..46298c7a730b
--- /dev/null
+++ b/lib/AST/ExprObjC.cpp
@@ -0,0 +1,379 @@
+//===--- ExprObjC.cpp - (ObjC) Expression AST Node Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the Expr class declared in ExprObjC.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprObjC.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, QualType T,
+ ObjCMethodDecl *Method, SourceRange SR)
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {
+ Expr **SaveElements = getElements();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Elements[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Elements[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SaveElements[I] = Elements[I];
+ }
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
+ ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl *Method,
+ SourceRange SR) {
+ void *Mem =
+ C.Allocate(sizeof(ObjCArrayLiteral) + Elements.size() * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
+ unsigned NumElements) {
+
+ void *Mem =
+ C.Allocate(sizeof(ObjCArrayLiteral) + NumElements * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
+}
+
+ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions, QualType T,
+ ObjCMethodDecl *method,
+ SourceRange SR)
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
+ DictWithObjectsMethod(method) {
+ KeyValuePair *KeyValues = getKeyValues();
+ ExpansionData *Expansions = getExpansionData();
+ for (unsigned I = 0; I < NumElements; I++) {
+ if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
+ VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (VK[I].Key->isInstantiationDependent() ||
+ VK[I].Value->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (VK[I].EllipsisLoc.isInvalid() &&
+ (VK[I].Key->containsUnexpandedParameterPack() ||
+ VK[I].Value->containsUnexpandedParameterPack()))
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ KeyValues[I].Key = VK[I].Key;
+ KeyValues[I].Value = VK[I].Value;
+ if (Expansions) {
+ Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
+ if (VK[I].NumExpansions)
+ Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
+ else
+ Expansions[I].NumExpansionsPlusOne = 0;
+ }
+ }
+}
+
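
The ExpansionData written above packs an Optional<unsigned> expansion count into a single unsigned with a +1 bias. A hypothetical decoder mirroring the constructor:

    // 0 encodes "no fixed expansion count"; any other value V encodes V - 1.
    Optional<unsigned> decodeNumExpansions(unsigned NumExpansionsPlusOne) {
      if (NumExpansionsPlusOne == 0)
        return None;
      return NumExpansionsPlusOne - 1;
    }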
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::Create(const ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions, QualType T,
+ ObjCMethodDecl *method, SourceRange SR) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * VK.size();
+
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
+ bool HasPackExpansions) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * NumElements;
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * NumElements + ExpansionsSize);
+ return new (Mem)
+ ObjCDictionaryLiteral(EmptyShell(), NumElements, HasPackExpansions);
+}
+
+QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
+ if (isClassReceiver())
+ return ctx.getObjCInterfaceType(getClassReceiver());
+
+ if (isSuperReceiver())
+ return getSuperReceiverType();
+
+ return getBase()->getType();
+}
+
+ObjCSubscriptRefExpr *
+ObjCSubscriptRefExpr::Create(const ASTContext &C, Expr *base, Expr *key,
+ QualType T, ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod, SourceLocation RB) {
+ void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
+ return new (Mem) ObjCSubscriptRefExpr(
+ base, key, T, VK_LValue, OK_ObjCSubscript, getMethod, setMethod, RB);
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc, bool IsInstanceSuper,
+ QualType SuperType, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(IsInstanceSuper ? SuperInstance : SuperClass),
+ HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), SuperLoc(SuperLoc), LBracLoc(LBracLoc),
+ RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(SuperType.getAsOpaquePtr());
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(Class), HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, Expr *Receiver,
+ Selector Sel, ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ Receiver->isTypeDependent(), Receiver->isTypeDependent(),
+ Receiver->isInstantiationDependent(),
+ Receiver->containsUnexpandedParameterPack()),
+ SelectorOrMethod(
+ reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
+ Kind(Instance), HasMethod(Method != nullptr), IsDelegateInitCall(false),
+ IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK) {
+ setNumArgs(Args.size());
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
+
+ SelLocsKind = SelLocsK;
+ if (!isImplicit()) {
+ if (SelLocsK == SelLoc_NonStandard)
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ }
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, SourceLocation SuperLoc,
+ bool IsInstanceSuper, QualType SuperType, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
+ SuperType, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, TypeSourceInfo *Receiver,
+ Selector Sel, ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem)
+ ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *
+ObjCMessageExpr::Create(const ASTContext &Context, QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc, Expr *Receiver, Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc, bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem)
+ ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel, SelLocs, SelLocsK, Method,
+ Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
+ return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBraceLoc,
+ ArrayRef<SourceLocation> SelLocs,
+ Selector Sel,
+ SelectorLocationsKind &SelLocsK) {
+ SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
+ unsigned NumStoredSelLocs =
+ (SelLocsK == SelLoc_NonStandard) ? SelLocs.size() : 0;
+ return alloc(C, Args.size(), NumStoredSelLocs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C, unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
+ NumArgs * sizeof(Expr *) +
+ NumStoredSelLocs * sizeof(SourceLocation);
+ return (ObjCMessageExpr *)C.Allocate(
+ Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
+}
+
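
The extra sizeof(void *) in the size computation is the single receiver slot, which holds an Expr *, a TypeSourceInfo *, or an opaque QualType pointer depending on the receiver kind. A sketch of the layout alloc produces:

    //  +-----------------+----------+--------+-----+------------------------+
    //  | ObjCMessageExpr | receiver | Arg #0 | ... | stored SourceLocations |
    //  +-----------------+----------+--------+-----+------------------------+
    // Selector locations are only stored in the SelLoc_NonStandard case.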
+void ObjCMessageExpr::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+SourceRange ObjCMessageExpr::getReceiverRange() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getSourceRange();
+
+ case Class:
+ return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
+
+ case SuperInstance:
+ case SuperClass:
+ return getSuperLoc();
+ }
+
+ llvm_unreachable("Invalid ReceiverKind!");
+}
+
+Selector ObjCMessageExpr::getSelector() const {
+ if (HasMethod)
+ return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
+ ->getSelector();
+ return Selector(SelectorOrMethod);
+}
+
+QualType ObjCMessageExpr::getReceiverType() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getType();
+ case Class:
+ return getClassReceiver();
+ case SuperInstance:
+ case SuperClass:
+ return getSuperType();
+ }
+
+ llvm_unreachable("unexpected receiver kind");
+}
+
+ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+ QualType T = getReceiverType();
+
+ if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+
+ if (const ObjCObjectType *Ty = T->getAs<ObjCObjectType>())
+ return Ty->getInterface();
+
+ return nullptr;
+}
+
+Stmt::child_range ObjCMessageExpr::children() {
+ Stmt **begin;
+ if (getReceiverKind() == Instance)
+ begin = reinterpret_cast<Stmt **>(this + 1);
+ else
+ begin = reinterpret_cast<Stmt **>(getArgs());
+ return child_range(begin,
+ reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
+}
+
+StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
+ switch (getBridgeKind()) {
+ case OBC_Bridge:
+ return "__bridge";
+ case OBC_BridgeTransfer:
+ return "__bridge_transfer";
+ case OBC_BridgeRetained:
+ return "__bridge_retained";
+ }
+
+ llvm_unreachable("Invalid BridgeKind!");
+}
diff --git a/lib/AST/ExternalASTSource.cpp b/lib/AST/ExternalASTSource.cpp
index 1c82c355134e..e3de8c5fefa2 100644
--- a/lib/AST/ExternalASTSource.cpp
+++ b/lib/AST/ExternalASTSource.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/Module.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -27,9 +28,19 @@ ExternalASTSource::getSourceDescriptor(unsigned ID) {
return None;
}
-ExternalASTSource::ASTSourceDescriptor
-ExternalASTSource::getSourceDescriptor(const Module &M) {
- return ASTSourceDescriptor();
+ExternalASTSource::ASTSourceDescriptor::ASTSourceDescriptor(const Module &M)
+ : Signature(M.Signature), ClangModule(&M) {
+ if (M.Directory)
+ Path = M.Directory->getName();
+ if (auto *File = M.getASTFile())
+ ASTFile = File->getName();
+}
+
+std::string ExternalASTSource::ASTSourceDescriptor::getModuleName() const {
+ if (ClangModule)
+ return ClangModule->Name;
+ else
+ return PCHModuleName;
}
void ExternalASTSource::FindFileRegionDecls(FileID File, unsigned Offset,
@@ -92,17 +103,13 @@ ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
return false;
}
-void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) {
-}
+void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) {}
-ExternalLoadResult
-ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
- SmallVectorImpl<Decl*> &Result) {
- return ELR_AlreadyLoaded;
-}
+void ExternalASTSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {}
-void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { }
+void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {}
uint32_t ExternalASTSource::incrementGeneration(ASTContext &C) {
uint32_t OldGeneration = CurrentGeneration;
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 7503cbfc9805..8a2cc0fbee42 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -149,6 +149,20 @@ public:
return nullptr;
}
+ void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) override {}
+
+ TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD) override {
+ return nullptr;
+ }
+
+ void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) override {}
+
+ DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) override {
+ return nullptr;
+ }
+
MangleNumberingContext *createMangleNumberingContext() const override {
return new ItaniumNumberingContext();
}
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index dac803e5d2ff..8018188a6b24 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -174,8 +174,6 @@ public:
void mangleStringLiteral(const StringLiteral *, raw_ostream &) override;
- void mangleCXXVTableBitSet(const CXXRecordDecl *RD, raw_ostream &) override;
-
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
if (isLambda(ND))
@@ -379,8 +377,8 @@ private:
void mangleType(const TagType*);
void mangleType(TemplateName);
- void mangleBareFunctionType(const FunctionType *T,
- bool MangleReturnType);
+ void mangleBareFunctionType(const FunctionType *T, bool MangleReturnType,
+ const FunctionDecl *FD = nullptr);
void mangleNeonVectorType(const VectorType *T);
void mangleAArch64NeonVectorType(const VectorType *T);
@@ -397,7 +395,8 @@ private:
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
- void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
+ void mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs);
void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
void mangleTemplateArgs(const TemplateArgumentList &AL);
@@ -525,7 +524,7 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
}
mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
- MangleReturnType);
+ MangleReturnType, FD);
}
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
@@ -700,8 +699,7 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
assert(numCharacters != 0);
// Allocate a buffer of the right number of characters.
- SmallVector<char, 20> buffer;
- buffer.set_size(numCharacters);
+ SmallVector<char, 20> buffer(numCharacters);
// Fill the buffer left-to-right.
for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
@@ -1020,7 +1018,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
Out << "Ut";
if (UnnamedMangle > 1)
- Out << llvm::utostr(UnnamedMangle - 2);
+ Out << UnnamedMangle - 2;
Out << '_';
break;
}
@@ -1285,7 +1283,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
Out << "Ul";
const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
getAs<FunctionProtoType>();
- mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
+ mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
+ Lambda->getLambdaStaticInvoker());
Out << "E";
// The number is omitted for the first closure type with a given
@@ -1756,6 +1755,9 @@ CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
// The conditional operator can't be overloaded, but we still handle it when
// mangling expressions.
case OO_Conditional: Out << "qu"; break;
+ // Proposal on cxx-abi-dev, 2015-10-21.
+ // ::= aw # co_await
+ case OO_Coawait: Out << "aw"; break;
case OO_None:
case NUM_OVERLOADED_OPERATORS:
@@ -1988,34 +1990,79 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
// ::= u <source-name> # vendor extended type
switch (T->getKind()) {
- case BuiltinType::Void: Out << 'v'; break;
- case BuiltinType::Bool: Out << 'b'; break;
- case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
- case BuiltinType::UChar: Out << 'h'; break;
- case BuiltinType::UShort: Out << 't'; break;
- case BuiltinType::UInt: Out << 'j'; break;
- case BuiltinType::ULong: Out << 'm'; break;
- case BuiltinType::ULongLong: Out << 'y'; break;
- case BuiltinType::UInt128: Out << 'o'; break;
- case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::Void:
+ Out << 'v';
+ break;
+ case BuiltinType::Bool:
+ Out << 'b';
+ break;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ Out << 'c';
+ break;
+ case BuiltinType::UChar:
+ Out << 'h';
+ break;
+ case BuiltinType::UShort:
+ Out << 't';
+ break;
+ case BuiltinType::UInt:
+ Out << 'j';
+ break;
+ case BuiltinType::ULong:
+ Out << 'm';
+ break;
+ case BuiltinType::ULongLong:
+ Out << 'y';
+ break;
+ case BuiltinType::UInt128:
+ Out << 'o';
+ break;
+ case BuiltinType::SChar:
+ Out << 'a';
+ break;
case BuiltinType::WChar_S:
- case BuiltinType::WChar_U: Out << 'w'; break;
- case BuiltinType::Char16: Out << "Ds"; break;
- case BuiltinType::Char32: Out << "Di"; break;
- case BuiltinType::Short: Out << 's'; break;
- case BuiltinType::Int: Out << 'i'; break;
- case BuiltinType::Long: Out << 'l'; break;
- case BuiltinType::LongLong: Out << 'x'; break;
- case BuiltinType::Int128: Out << 'n'; break;
- case BuiltinType::Half: Out << "Dh"; break;
- case BuiltinType::Float: Out << 'f'; break;
- case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::WChar_U:
+ Out << 'w';
+ break;
+ case BuiltinType::Char16:
+ Out << "Ds";
+ break;
+ case BuiltinType::Char32:
+ Out << "Di";
+ break;
+ case BuiltinType::Short:
+ Out << 's';
+ break;
+ case BuiltinType::Int:
+ Out << 'i';
+ break;
+ case BuiltinType::Long:
+ Out << 'l';
+ break;
+ case BuiltinType::LongLong:
+ Out << 'x';
+ break;
+ case BuiltinType::Int128:
+ Out << 'n';
+ break;
+ case BuiltinType::Half:
+ Out << "Dh";
+ break;
+ case BuiltinType::Float:
+ Out << 'f';
+ break;
+ case BuiltinType::Double:
+ Out << 'd';
+ break;
case BuiltinType::LongDouble:
Out << (getASTContext().getTargetInfo().useFloat128ManglingForLongDouble()
? 'g'
: 'e');
break;
- case BuiltinType::NullPtr: Out << "Dn"; break;
+ case BuiltinType::NullPtr:
+ Out << "Dn";
+ break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -2023,17 +2070,69 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
llvm_unreachable("mangling a placeholder type");
- case BuiltinType::ObjCId: Out << "11objc_object"; break;
- case BuiltinType::ObjCClass: Out << "10objc_class"; break;
- case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
- case BuiltinType::OCLImage1d: Out << "11ocl_image1d"; break;
- case BuiltinType::OCLImage1dArray: Out << "16ocl_image1darray"; break;
- case BuiltinType::OCLImage1dBuffer: Out << "17ocl_image1dbuffer"; break;
- case BuiltinType::OCLImage2d: Out << "11ocl_image2d"; break;
- case BuiltinType::OCLImage2dArray: Out << "16ocl_image2darray"; break;
- case BuiltinType::OCLImage3d: Out << "11ocl_image3d"; break;
- case BuiltinType::OCLSampler: Out << "11ocl_sampler"; break;
- case BuiltinType::OCLEvent: Out << "9ocl_event"; break;
+ case BuiltinType::ObjCId:
+ Out << "11objc_object";
+ break;
+ case BuiltinType::ObjCClass:
+ Out << "10objc_class";
+ break;
+ case BuiltinType::ObjCSel:
+ Out << "13objc_selector";
+ break;
+ case BuiltinType::OCLImage1d:
+ Out << "11ocl_image1d";
+ break;
+ case BuiltinType::OCLImage1dArray:
+ Out << "16ocl_image1darray";
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ Out << "17ocl_image1dbuffer";
+ break;
+ case BuiltinType::OCLImage2d:
+ Out << "11ocl_image2d";
+ break;
+ case BuiltinType::OCLImage2dArray:
+ Out << "16ocl_image2darray";
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ Out << "16ocl_image2ddepth";
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ Out << "21ocl_image2darraydepth";
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ Out << "15ocl_image2dmsaa";
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ Out << "20ocl_image2darraymsaa";
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ Out << "20ocl_image2dmsaadepth";
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ Out << "35ocl_image2darraymsaadepth";
+ break;
+ case BuiltinType::OCLImage3d:
+ Out << "11ocl_image3d";
+ break;
+ case BuiltinType::OCLSampler:
+ Out << "11ocl_sampler";
+ break;
+ case BuiltinType::OCLEvent:
+ Out << "9ocl_event";
+ break;
+ case BuiltinType::OCLClkEvent:
+ Out << "12ocl_clkevent";
+ break;
+ case BuiltinType::OCLQueue:
+ Out << "9ocl_queue";
+ break;
+ case BuiltinType::OCLNDRange:
+ Out << "11ocl_ndrange";
+ break;
+ case BuiltinType::OCLReserveID:
+ Out << "13ocl_reserveid";
+ break;
}
}
@@ -2056,11 +2155,26 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
Out << 'E';
}
+
void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
- llvm_unreachable("Can't mangle K&R function prototypes");
+ // Function types without prototypes can arise when mangling a function type
+ // within an overloadable function in C. We mangle these as the absence of any
+ // parameter types (not even an empty parameter list).
+ Out << 'F';
+
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+
+ FunctionTypeDepth.enterResultType();
+ mangleType(T->getReturnType());
+ FunctionTypeDepth.leaveResultType();
+
+ FunctionTypeDepth.pop(saved);
+ Out << 'E';
}
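
An unprototyped type therefore mangles as F <return-type> E with no parameter types at all. For example (a sketch; this only arises through the C overloadable extension):

    // __attribute__((overloadable)) void f(int (*fp)());
    //   f  =>  _Z1fPFiE    // P = pointer, FiE = int-returning, no params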
+
void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
- bool MangleReturnType) {
+ bool MangleReturnType,
+ const FunctionDecl *FD) {
// We should never be mangling something without a prototype.
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
@@ -2083,8 +2197,19 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
return;
}
- for (const auto &Arg : Proto->param_types())
- mangleType(Context.getASTContext().getSignatureParameterType(Arg));
+ assert(!FD || FD->getNumParams() == Proto->getNumParams());
+ for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ const auto &ParamTy = Proto->getParamType(I);
+ mangleType(Context.getASTContext().getSignatureParameterType(ParamTy));
+
+ if (FD) {
+ if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) {
+ // The attribute's type is a single digit, so the length below can be hardcoded.
+ assert(Attr->getType() <= 9 && Attr->getType() >= 0);
+ Out << "U17pass_object_size" << Attr->getType();
+ }
+ }
+ }
FunctionTypeDepth.pop(saved);
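
Putting the loop together, a sketch of the resulting mangling, read off the code above rather than from an ABI document:

    // void g(void *p __attribute__((pass_object_size(0))));
    //   g  =>  _Z1gPvU17pass_object_size0
    // 'Pv' is void *, then the vendor qualifier: 'U', the 17-character
    // source name, i.e. "pass_object_size" plus the attribute's digit.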
@@ -2325,7 +2450,7 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
EltName = mangleAArch64VectorBase(cast<BuiltinType>(EltType));
std::string TypeName =
- ("__" + EltName + "x" + llvm::utostr(T->getNumElements()) + "_t").str();
+ ("__" + EltName + "x" + Twine(T->getNumElements()) + "_t").str();
Out << TypeName.length() << TypeName;
}
@@ -2392,7 +2517,6 @@ void CXXNameMangler::mangleType(const ObjCObjectType *T) {
StringRef name = I->getName();
QualOS << name.size() << name;
}
- QualOS.flush();
Out << 'U' << QualStr.size() << QualStr;
}
@@ -2543,9 +2667,11 @@ void CXXNameMangler::mangleType(const UnaryTransformType *T) {
void CXXNameMangler::mangleType(const AutoType *T) {
QualType D = T->getDeducedType();
// <builtin-type> ::= Da # dependent auto
- if (D.isNull())
+ if (D.isNull()) {
+ assert(T->getKeyword() != AutoTypeKeyword::GNUAutoType &&
+ "shouldn't need to mangle __auto_type!");
Out << (T->isDecltypeAuto() ? "Dc" : "Da");
- else
+ } else
mangleType(D);
}
@@ -2699,7 +2825,9 @@ recurse:
case Expr::ParenListExprClass:
case Expr::LambdaExprClass:
case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::OMPArraySectionExprClass:
llvm_unreachable("unexpected statement kind");
// FIXME: invent manglings for all these.
@@ -2908,7 +3036,7 @@ recurse:
ME->isArrow(), ME->getQualifier(), nullptr,
ME->getMemberName(), Arity);
if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
break;
}
@@ -2920,7 +3048,7 @@ recurse:
ME->getFirstQualifierFoundInScope(),
ME->getMember(), Arity);
if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
break;
}
@@ -2932,7 +3060,7 @@ recurse:
// base-unresolved-name, where <template-args> are just tacked
// onto the end.
if (ULE->hasExplicitTemplateArgs())
- mangleTemplateArgs(ULE->getExplicitTemplateArgs());
+ mangleTemplateArgs(ULE->getTemplateArgs(), ULE->getNumTemplateArgs());
break;
}
@@ -3254,7 +3382,7 @@ recurse:
// base-unresolved-name, where <template-args> are just tacked
// onto the end.
if (DRE->hasExplicitTemplateArgs())
- mangleTemplateArgs(DRE->getExplicitTemplateArgs());
+ mangleTemplateArgs(DRE->getTemplateArgs(), DRE->getNumTemplateArgs());
break;
}
@@ -3350,8 +3478,17 @@ recurse:
break;
case Expr::SizeOfPackExprClass: {
+ auto *SPE = cast<SizeOfPackExpr>(E);
+ if (SPE->isPartiallySubstituted()) {
+ Out << "sP";
+ for (const auto &A : SPE->getPartialArguments())
+ mangleTemplateArg(A);
+ Out << "E";
+ break;
+ }
+
Out << "sZ";
- const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack();
+ const NamedDecl *Pack = SPE->getPack();
if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
mangleTemplateParameter(TTP->getIndex());
else if (const NonTypeTemplateParmDecl *NTTP
@@ -3394,6 +3531,18 @@ recurse:
case Expr::CXXThisExprClass:
Out << "fpT";
break;
+
+ case Expr::CoawaitExprClass:
+ // FIXME: Propose a non-vendor mangling.
+ Out << "v18co_await";
+ mangleExpression(cast<CoawaitExpr>(E)->getOperand());
+ break;
+
+ case Expr::CoyieldExprClass:
+ // FIXME: Propose a non-vendor mangling.
+ Out << "v18co_yield";
+ mangleExpression(cast<CoyieldExpr>(E)->getOperand());
+ break;
}
}
@@ -3501,12 +3650,12 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
-void CXXNameMangler::mangleTemplateArgs(
- const ASTTemplateArgumentListInfo &TemplateArgs) {
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
- for (unsigned i = 0, e = TemplateArgs.NumTemplateArgs; i != e; ++i)
- mangleTemplateArg(TemplateArgs.getTemplateArgs()[i].getArgument());
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(TemplateArgs[i].getArgument());
Out << 'E';
}
@@ -4085,21 +4234,6 @@ void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) {
mangleCXXRTTIName(Ty, Out);
}
-void ItaniumMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD,
- raw_ostream &Out) {
- if (!RD->isExternallyVisible()) {
- // This part of the identifier needs to be unique across all translation
- // units in the linked program. The scheme fails if multiple translation
- // units are compiled using the same relative source file path, or if
- // multiple translation units are built from the same source file.
- SourceManager &SM = getASTContext().getSourceManager();
- Out << "[" << SM.getFileEntryForID(SM.getMainFileID())->getName() << "]";
- }
-
- CXXNameMangler Mangler(*this, Out);
- Mangler.mangleType(QualType(RD->getTypeForDecl(), 0));
-}
-
void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) {
llvm_unreachable("Can't mangle string literals");
}
@@ -4108,4 +4242,3 @@ ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new ItaniumMangleContextImpl(Context, Diags);
}
-
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index 1a061c4f6632..014338f0490f 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -206,7 +206,6 @@ void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
mangleCXXCtor(CD, CT, Out);
- Out.flush();
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -216,7 +215,6 @@ void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
mangleCXXDtor(DD, DT, Out);
- Out.flush();
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -253,7 +251,6 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
}
}
}
- Stream.flush();
mangleFunctionBlock(*this, Buffer, BD, Out);
}
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index aba6796256a7..6ba31ccf1e37 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -70,6 +70,11 @@ class MicrosoftCXXABI : public CXXABI {
llvm::SmallDenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *>
CtorToDefaultArgExpr;
+ llvm::SmallDenseMap<TagDecl *, DeclaratorDecl *>
+ UnnamedTagDeclToDeclaratorDecl;
+ llvm::SmallDenseMap<TagDecl *, TypedefNameDecl *>
+ UnnamedTagDeclToTypedefNameDecl;
+
public:
MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
@@ -84,17 +89,7 @@ public:
}
bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
- // FIXME: Audit the corners
- if (!RD->isDynamicClass())
- return false;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // In the Microsoft ABI, classes can have one or two vtable pointers.
- CharUnits PointerSize =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- return Layout.getNonVirtualSize() == PointerSize ||
- Layout.getNonVirtualSize() == PointerSize * 2;
+ llvm_unreachable("unapplicable to the MS ABI");
}
void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
@@ -120,6 +115,34 @@ public:
RecordToCopyCtor[RD] = CD;
}
+ void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) override {
+ TD = TD->getCanonicalDecl();
+ DD = cast<TypedefNameDecl>(DD->getCanonicalDecl());
+ TypedefNameDecl *&I = UnnamedTagDeclToTypedefNameDecl[TD];
+ if (!I)
+ I = DD;
+ }
+
+ TypedefNameDecl *getTypedefNameForUnnamedTagDecl(const TagDecl *TD) override {
+ return UnnamedTagDeclToTypedefNameDecl.lookup(
+ const_cast<TagDecl *>(TD->getCanonicalDecl()));
+ }
+
+ void addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) override {
+ TD = TD->getCanonicalDecl();
+ DD = cast<DeclaratorDecl>(DD->getCanonicalDecl());
+ DeclaratorDecl *&I = UnnamedTagDeclToDeclaratorDecl[TD];
+ if (!I)
+ I = DD;
+ }
+
+ DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD) override {
+ return UnnamedTagDeclToDeclaratorDecl.lookup(
+ const_cast<TagDecl *>(TD->getCanonicalDecl()));
+ }
+
MangleNumberingContext *createMangleNumberingContext() const override {
return new MicrosoftNumberingContext();
}
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 48a8fa541a69..136a43b640f7 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -28,6 +28,7 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/JamCRC.h"
using namespace clang;
@@ -160,8 +161,6 @@ public:
void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
raw_ostream &Out) override;
void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
- void mangleCXXVTableBitSet(const CXXRecordDecl *RD,
- raw_ostream &Out) override;
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
if (isLambda(ND))
@@ -179,7 +178,9 @@ public:
// Anonymous tags are already numbered.
if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
- if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
+ if (!Tag->hasNameForLinkage() &&
+ !getASTContext().getDeclaratorForUnnamedTagDecl(Tag) &&
+ !getASTContext().getTypedefNameForUnnamedTagDecl(Tag))
return false;
}
@@ -223,6 +224,9 @@ class MicrosoftCXXNameMangler {
typedef llvm::DenseMap<void *, unsigned> ArgBackRefMap;
ArgBackRefMap TypeBackReferences;
+ typedef std::set<int> PassObjectSizeArgsSet;
+ PassObjectSizeArgsSet PassObjectSizeArgs;
+
ASTContext &getASTContext() const { return Context.getASTContext(); }
// FIXME: If we add support for __ptr32/64 qualifiers, then we should push
@@ -262,6 +266,9 @@ public:
const CXXMethodDecl *MD,
const MicrosoftVTableContext::MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
+ void mangleTagTypeKind(TagTypeKind TK);
+ void mangleArtificalTagType(TagTypeKind TK, StringRef UnqualifiedName,
+ ArrayRef<StringRef> NestedNames = None);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
void mangleFunctionType(const FunctionType *T,
@@ -289,6 +296,7 @@ private:
void mangleObjCMethodName(const ObjCMethodDecl *MD);
void mangleArgumentType(QualType T, SourceRange Range);
+ void manglePassObjectSizeArg(const PassObjectSizeAttr *POSA);
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
@@ -364,7 +372,8 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage &&
- !isa<VarTemplateSpecializationDecl>(D))
+ !isa<VarTemplateSpecializationDecl>(D) &&
+ D->getIdentifier() != nullptr)
return false;
}
@@ -390,14 +399,8 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
- else {
- // TODO: Fields? Can MSVC even mangle them?
- // Issue a diagnostic for now.
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "cannot mangle this declaration yet");
- Diags.Report(D->getLocation(), DiagID) << D->getSourceRange();
- }
+ else
+ llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
@@ -420,7 +423,7 @@ void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
// We would like to mangle all extern "C" functions using this additional
// component but this would break compatibility with MSVC's behavior.
// Instead, do this when we know that compatibility isn't important (in
- // other words, when it is an overloaded extern "C" funciton).
+ // other words, when it is an overloaded extern "C" function).
if (FD->isExternC() && FD->hasAttr<OverloadableAttr>())
Out << "$$J0";
@@ -695,8 +698,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// Function templates aren't considered for name back referencing. This
// makes sense since function templates aren't likely to occur multiple
// times in a symbol.
- // FIXME: Test alias template mangling with MSVC 2013.
- if (!isa<ClassTemplateDecl>(TD)) {
+ if (isa<FunctionTemplateDecl>(TD)) {
mangleTemplateInstantiationName(TD, *TemplateArgs);
Out << '@';
return;
@@ -721,7 +723,6 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
llvm::raw_svector_ostream Stream(TemplateMangling);
MicrosoftCXXNameMangler Extra(Context, Stream);
Extra.mangleTemplateInstantiationName(TD, *TemplateArgs);
- Stream.flush();
mangleSourceName(TemplateMangling);
return;
@@ -787,14 +788,21 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
llvm::SmallString<64> Name("<unnamed-type-");
- if (TD->hasDeclaratorForAnonDecl()) {
- // Anonymous types with no tag or typedef get the name of their
+ if (DeclaratorDecl *DD =
+ Context.getASTContext().getDeclaratorForUnnamedTagDecl(TD)) {
+ // Anonymous types without a name for linkage purposes have their
// declarator mangled in if they have one.
- Name += TD->getDeclaratorForAnonDecl()->getName();
+ Name += DD->getName();
+ } else if (TypedefNameDecl *TND =
+ Context.getASTContext().getTypedefNameForUnnamedTagDecl(
+ TD)) {
+ // Anonymous types without a name for linkage purposes have their
+ // associated typedef mangled in if they have one.
+ Name += TND->getName();
} else {
// Otherwise, number the types using a $S prefix.
Name += "$S";
- Name += llvm::utostr(Context.getAnonymousStructId(TD));
+ Name += llvm::utostr(Context.getAnonymousStructId(TD) + 1);
}
Name += ">";
mangleSourceName(Name.str());
@@ -1039,6 +1047,14 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
break;
}
+ case OO_Coawait: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this operator co_await yet");
+ Diags.Report(Loc, DiagID);
+ break;
+ }
+
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Not an overloaded operator");
@@ -1071,8 +1087,10 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
// Templates have their own context for back references.
ArgBackRefMap OuterArgsContext;
BackRefVec OuterTemplateContext;
+ PassObjectSizeArgsSet OuterPassObjectSizeArgs;
NameBackReferences.swap(OuterTemplateContext);
TypeBackReferences.swap(OuterArgsContext);
+ PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
mangleUnscopedTemplateName(TD);
mangleTemplateArgs(TD, TemplateArgs);
@@ -1080,6 +1098,7 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
// Restore the previous back reference contexts.
NameBackReferences.swap(OuterTemplateContext);
TypeBackReferences.swap(OuterArgsContext);
+ PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
}
void
@@ -1121,12 +1140,6 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
UE = dyn_cast<CXXUuidofExpr>(E);
if (UE) {
- // This CXXUuidofExpr is mangled as-if it were actually a VarDecl from
- // const __s_GUID _GUID_{lower case UUID with underscores}
- StringRef Uuid = UE->getUuidAsStringRef(Context.getASTContext());
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
-
// If we had to peek through an address-of operator, treat this like we are
// dealing with a pointer type. Otherwise, treat it like a const reference.
//
@@ -1136,7 +1149,22 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
Out << "$E?";
else
Out << "$1?";
- Out << Name << "@@3U__s_GUID@@B";
+
+ // This CXXUuidofExpr is mangled as-if it were actually a VarDecl from
+ // const __s_GUID _GUID_{lower case UUID with underscores}
+ StringRef Uuid = UE->getUuidAsStringRef(Context.getASTContext());
+ std::string Name = "_GUID_" + Uuid.lower();
+ std::replace(Name.begin(), Name.end(), '-', '_');
+
+ mangleSourceName(Name);
+ // Terminate the whole name with an '@'.
+ Out << '@';
+ // It's a global variable.
+ Out << '3';
+ // It's a struct called __s_GUID.
+ mangleArtificalTagType(TTK_Struct, "__s_GUID");
+ // It's const.
+ Out << 'B';
return;
}
@@ -1210,12 +1238,13 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
QualType T = TA.getNullPtrType();
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
- if (MPT->isMemberFunctionPointerType() && isa<ClassTemplateDecl>(TD)) {
+ if (MPT->isMemberFunctionPointerType() &&
+ !isa<FunctionTemplateDecl>(TD)) {
mangleMemberFunctionPointer(RD, nullptr);
return;
}
if (MPT->isMemberDataPointer()) {
- if (isa<ClassTemplateDecl>(TD)) {
+ if (!isa<FunctionTemplateDecl>(TD)) {
mangleMemberDataPointer(RD, nullptr);
return;
}
@@ -1455,6 +1484,27 @@ void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
}
}
+void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
+ const PassObjectSizeAttr *POSA) {
+ int Type = POSA->getType();
+
+ auto Iter = PassObjectSizeArgs.insert(Type).first;
+ void *TypePtr = (void *)&*Iter;
+ ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
+
+ if (Found == TypeBackReferences.end()) {
+ mangleArtificalTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
+ {"__clang"});
+
+ if (TypeBackReferences.size() < 10) {
+ size_t Size = TypeBackReferences.size();
+ TypeBackReferences[TypePtr] = Size;
+ }
+ } else {
+ Out << Found->second;
+ }
+}
+
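
Tracing mangleArtificalTagType (defined later in this patch), the fragment emitted for a first occurrence of type 0 would be, as a sketch:

    // W4__pass_object_size0@__clang@@
    // W4 = enum, then the unqualified name, the __clang scope, and the
    // final '@' terminator; repeat occurrences emit one back-ref digit.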
void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM) {
// Don't use the canonical types. MSVC includes things like 'const' on
@@ -1546,29 +1596,72 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
// ::= _W # wchar_t
// ::= _Z # __float80 (Digital Mars)
switch (T->getKind()) {
- case BuiltinType::Void: Out << 'X'; break;
- case BuiltinType::SChar: Out << 'C'; break;
- case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
- case BuiltinType::UChar: Out << 'E'; break;
- case BuiltinType::Short: Out << 'F'; break;
- case BuiltinType::UShort: Out << 'G'; break;
- case BuiltinType::Int: Out << 'H'; break;
- case BuiltinType::UInt: Out << 'I'; break;
- case BuiltinType::Long: Out << 'J'; break;
- case BuiltinType::ULong: Out << 'K'; break;
- case BuiltinType::Float: Out << 'M'; break;
- case BuiltinType::Double: Out << 'N'; break;
+ case BuiltinType::Void:
+ Out << 'X';
+ break;
+ case BuiltinType::SChar:
+ Out << 'C';
+ break;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ Out << 'D';
+ break;
+ case BuiltinType::UChar:
+ Out << 'E';
+ break;
+ case BuiltinType::Short:
+ Out << 'F';
+ break;
+ case BuiltinType::UShort:
+ Out << 'G';
+ break;
+ case BuiltinType::Int:
+ Out << 'H';
+ break;
+ case BuiltinType::UInt:
+ Out << 'I';
+ break;
+ case BuiltinType::Long:
+ Out << 'J';
+ break;
+ case BuiltinType::ULong:
+ Out << 'K';
+ break;
+ case BuiltinType::Float:
+ Out << 'M';
+ break;
+ case BuiltinType::Double:
+ Out << 'N';
+ break;
// TODO: Determine size and mangle accordingly
- case BuiltinType::LongDouble: Out << 'O'; break;
- case BuiltinType::LongLong: Out << "_J"; break;
- case BuiltinType::ULongLong: Out << "_K"; break;
- case BuiltinType::Int128: Out << "_L"; break;
- case BuiltinType::UInt128: Out << "_M"; break;
- case BuiltinType::Bool: Out << "_N"; break;
- case BuiltinType::Char16: Out << "_S"; break;
- case BuiltinType::Char32: Out << "_U"; break;
+ case BuiltinType::LongDouble:
+ Out << 'O';
+ break;
+ case BuiltinType::LongLong:
+ Out << "_J";
+ break;
+ case BuiltinType::ULongLong:
+ Out << "_K";
+ break;
+ case BuiltinType::Int128:
+ Out << "_L";
+ break;
+ case BuiltinType::UInt128:
+ Out << "_M";
+ break;
+ case BuiltinType::Bool:
+ Out << "_N";
+ break;
+ case BuiltinType::Char16:
+ Out << "_S";
+ break;
+ case BuiltinType::Char32:
+ Out << "_U";
+ break;
case BuiltinType::WChar_S:
- case BuiltinType::WChar_U: Out << "_W"; break;
+ case BuiltinType::WChar_U:
+ Out << "_W";
+ break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -1577,28 +1670,102 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::Dependent:
llvm_unreachable("placeholder types shouldn't get to name mangling");
- case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
- case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
- case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+ case BuiltinType::ObjCId:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_object");
+ break;
+ case BuiltinType::ObjCClass:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_class");
+ break;
+ case BuiltinType::ObjCSel:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "objc_selector");
+ break;
- case BuiltinType::OCLImage1d: Out << "PAUocl_image1d@@"; break;
- case BuiltinType::OCLImage1dArray: Out << "PAUocl_image1darray@@"; break;
- case BuiltinType::OCLImage1dBuffer: Out << "PAUocl_image1dbuffer@@"; break;
- case BuiltinType::OCLImage2d: Out << "PAUocl_image2d@@"; break;
- case BuiltinType::OCLImage2dArray: Out << "PAUocl_image2darray@@"; break;
- case BuiltinType::OCLImage3d: Out << "PAUocl_image3d@@"; break;
- case BuiltinType::OCLSampler: Out << "PAUocl_sampler@@"; break;
- case BuiltinType::OCLEvent: Out << "PAUocl_event@@"; break;
+ case BuiltinType::OCLImage1d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1d");
+ break;
+ case BuiltinType::OCLImage1dArray:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1darray");
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image1dbuffer");
+ break;
+ case BuiltinType::OCLImage2d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2d");
+ break;
+ case BuiltinType::OCLImage2dArray:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darray");
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2ddepth");
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraydepth");
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2dmsaa");
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraymsaa");
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2dmsaadepth");
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image2darraymsaadepth");
+ break;
+ case BuiltinType::OCLImage3d:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_image3d");
+ break;
+ case BuiltinType::OCLSampler:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_sampler");
+ break;
+ case BuiltinType::OCLEvent:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_event");
+ break;
+ case BuiltinType::OCLClkEvent:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_clkevent");
+ break;
+ case BuiltinType::OCLQueue:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_queue");
+ break;
+ case BuiltinType::OCLNDRange:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_ndrange");
+ break;
+ case BuiltinType::OCLReserveID:
+ Out << "PA";
+ mangleArtificalTagType(TTK_Struct, "ocl_reserveid");
+ break;
- case BuiltinType::NullPtr: Out << "$$T"; break;
+ case BuiltinType::NullPtr:
+ Out << "$$T";
+ break;
case BuiltinType::Half: {
DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this built-in %0 type yet");
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this built-in %0 type yet");
Diags.Report(Range.getBegin(), DiagID)
- << T->getName(Context.getASTContext().getPrintingPolicy())
- << Range;
+ << T->getName(Context.getASTContext().getPrintingPolicy()) << Range;
break;
}
}
@@ -1620,7 +1787,8 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, Qualifiers,
}
void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
Qualifiers, SourceRange) {
- llvm_unreachable("Can't mangle K&R function prototypes");
+ Out << "$$A6";
+ mangleFunctionType(T);
}
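
With no prototype the argument list later collapses to a lone '@', so a C parameter of type int (*)() inside an overloadable function would come out roughly as below. This is a sketch derived from this patch; MSVC has no equivalent construct to compare against:

    // int (*fp)()   =>   P6AH@Z
    // P = pointer, 6A = __cdecl function, H = int return,
    // '@' = absent parameter list, Z = throw-spec terminator.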
void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
@@ -1628,7 +1796,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
bool ForceThisQuals) {
// <function-type> ::= <this-cvr-qualifiers> <calling-convention>
// <return-type> <argument-list> <throw-spec>
- const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(T);
SourceRange Range;
if (D) Range = D->getSourceRange();
@@ -1699,12 +1867,14 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
}
Out << '@';
} else {
- QualType ResultType = Proto->getReturnType();
+ QualType ResultType = T->getReturnType();
if (const auto *AT =
dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
Out << '?';
mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
Out << '?';
+ assert(AT->getKeyword() != AutoTypeKeyword::GNUAutoType &&
+ "shouldn't need to mangle __auto_type!");
mangleSourceName(AT->isDecltypeAuto() ? "<decltype-auto>" : "<auto>");
Out << '@';
} else {
@@ -1717,12 +1887,29 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// <argument-list> ::= X # void
// ::= <type>+ @
// ::= <type>* Z # varargs
- if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
+ if (!Proto) {
+ // Function types without prototypes can arise when mangling a function type
+ // within an overloadable function in C. We mangle these as the absence of
+ // any parameter types (not even an empty parameter list).
+ Out << '@';
+ } else if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
Out << 'X';
} else {
// Happens for function pointer type arguments for example.
- for (const QualType &Arg : Proto->param_types())
- mangleArgumentType(Arg, Range);
+ for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ mangleArgumentType(Proto->getParamType(I), Range);
+ // Mangle each pass_object_size parameter as if it's a parameter of enum
+ // type passed directly after the parameter with the pass_object_size
+ // attribute. The aforementioned enum's name is __pass_object_size, and we
+ // pretend it resides in a top-level namespace called __clang.
+ //
+ // FIXME: Is there a defined extension notation for the MS ABI, or is it
+ // necessary to just cross our fingers and hope this type+namespace
+ // combination doesn't conflict with anything?
+ if (D)
+ if (const auto *P = D->getParamDecl(I)->getAttr<PassObjectSizeAttr>())
+ manglePassObjectSizeArg(P);
+ }
// <builtin-type> ::= Z # ellipsis
if (Proto->isVariadic())
Out << 'Z';
@@ -1851,16 +2038,8 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
// <struct-type> ::= U <name>
// <class-type> ::= V <name>
// <enum-type> ::= W4 <name>
-void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
- SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
-}
-void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
- SourceRange) {
- mangleType(cast<TagType>(T)->getDecl());
-}
-void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
- switch (TD->getTagKind()) {
+void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) {
+ switch (TTK) {
case TTK_Union:
Out << 'T';
break;
@@ -1875,8 +2054,33 @@ void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
Out << "W4";
break;
}
+}
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
+ SourceRange) {
+ mangleType(cast<TagType>(T)->getDecl());
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
+ SourceRange) {
+ mangleType(cast<TagType>(T)->getDecl());
+}
+void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
+ mangleTagTypeKind(TD->getTagKind());
mangleName(TD);
}
+void MicrosoftCXXNameMangler::mangleArtificalTagType(
+ TagTypeKind TK, StringRef UnqualifiedName, ArrayRef<StringRef> NestedNames) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ mangleTagTypeKind(TK);
+
+ // Always start with the unqualified name.
+ mangleSourceName(UnqualifiedName);
+
+ for (auto I = NestedNames.rbegin(), E = NestedNames.rend(); I != E; ++I)
+ mangleSourceName(*I);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
// <type> ::= <array-type>
// <array-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
@@ -2029,11 +2233,16 @@ void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
SourceRange Range) {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this complex number type yet");
- Diags.Report(Range.getBegin(), DiagID)
- << Range;
+ QualType ElementType = T->getElementType();
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("_Complex");
+ Extra.mangleType(ElementType, Range, QMM_Escape);
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
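
A sketch of what this produces for a _Complex float argument (QMM_Escape mangles float as 'M'):

    // _Complex float   =>   U?$_Complex@M@__clang@@
    // i.e. a struct ?$_Complex@M scoped in a synthetic __clang namespace.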
void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
@@ -2043,46 +2252,44 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
uint64_t Width = getASTContext().getTypeSize(T);
// Pattern match exactly the typedefs in our intrinsic headers. Anything that
// doesn't match the Intel types uses a custom mangling below.
- bool IsBuiltin = true;
+ size_t OutSizeBefore = Out.tell();
llvm::Triple::ArchType AT =
getASTContext().getTargetInfo().getTriple().getArch();
if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
- Out << "T__m64";
+ mangleArtificalTagType(TTK_Union, "__m64");
} else if (Width >= 128) {
if (ET->getKind() == BuiltinType::Float)
- Out << "T__m" << Width;
+ mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width));
else if (ET->getKind() == BuiltinType::LongLong)
- Out << "T__m" << Width << 'i';
+ mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
else if (ET->getKind() == BuiltinType::Double)
- Out << "U__m" << Width << 'd';
- else
- IsBuiltin = false;
- } else {
- IsBuiltin = false;
+ mangleArtificalTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
}
- } else {
- IsBuiltin = false;
}
+ bool IsBuiltin = Out.tell() != OutSizeBefore;
if (!IsBuiltin) {
// The MS ABI doesn't have a special mangling for vector types, so we define
// our own mangling to handle uses of __vector_size__ on user-specified
// types, and for extensions like __v4sf.
- Out << "T__clang_vec" << T->getNumElements() << '_';
- mangleType(ET, Quals, Range);
- }
- Out << "@@";
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("__vector");
+ Extra.mangleType(QualType(ET, 0), Range, QMM_Escape);
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()),
+ /*IsBoolean=*/false);
+
+ mangleArtificalTagType(TTK_Union, TemplateMangling, {"__clang"});
+ }
}
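
Sketches of the two paths, read off the code above:

    // long long, 2 elements, on x86  =>  T__m64@@  (Intel union name)
    // int __attribute__((vector_size(16)))
    //                                =>  T?$__vector@H$03@__clang@@
    // 'H' = int; $03 encodes the literal 4 (MS writes 1-10 as digit - 1).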
-void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, Qualifiers,
- SourceRange Range) {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this extended vector type yet");
- Diags.Report(Range.getBegin(), DiagID)
- << Range;
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T,
+ Qualifiers Quals, SourceRange Range) {
+ mangleType(static_cast<const VectorType *>(T), Quals, Range);
}
void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
Qualifiers, SourceRange Range) {
@@ -2096,7 +2303,7 @@ void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
SourceRange) {
// ObjC interfaces have structs underlying them.
- Out << 'U';
+ mangleTagTypeKind(TTK_Struct);
mangleName(T->getDecl());
}
@@ -2209,11 +2416,16 @@ void MicrosoftCXXNameMangler::mangleType(const AutoType *T, Qualifiers,
void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
SourceRange Range) {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this C11 atomic type yet");
- Diags.Report(Range.getBegin(), DiagID)
- << Range;
+ QualType ValueType = T->getValueType();
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("_Atomic");
+ Extra.mangleType(ValueType, Range, QMM_Escape);
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
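
The analogous sketch for an atomic type:

    // _Atomic(int)   =>   U?$_Atomic@H@__clang@@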
void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
@@ -2574,12 +2786,12 @@ void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
mangler.mangle(D);
}
-void MicrosoftMangleContextImpl::mangleReferenceTemporary(const VarDecl *VD,
- unsigned,
- raw_ostream &) {
- unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this reference temporary yet");
- getDiags().Report(VD->getLocation(), DiagID);
+void MicrosoftMangleContextImpl::mangleReferenceTemporary(
+ const VarDecl *VD, unsigned ManglingNumber, raw_ostream &Out) {
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+
+ Mangler.getStream() << "\01?$RT" << ManglingNumber << '@';
+ Mangler.mangle(VD, "");
}
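
Reference temporaries now get a deterministic name: ?$RT followed by the mangling number, an '@', and the referenced variable's own mangling (the \01 prefix tells LLVM to use the name literally, without further decoration). A hypothetical declaration that needs such a symbol:

    // The lifetime-extended temporary holding 42 must be materialized as a
    // global, so it needs the deterministic name built above.
    const int &Ref = 42;
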
void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable(
@@ -2688,28 +2900,6 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// N.B. The length is in terms of bytes, not characters.
Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
- // We will use the "Rocksoft^tm Model CRC Algorithm" to describe the
- // properties of our CRC:
- // Width : 32
- // Poly : 04C11DB7
- // Init : FFFFFFFF
- // RefIn : True
- // RefOut : True
- // XorOut : 00000000
- // Check : 340BC6D9
- uint32_t CRC = 0xFFFFFFFFU;
-
- auto UpdateCRC = [&CRC](char Byte) {
- for (unsigned i = 0; i < 8; ++i) {
- bool Bit = CRC & 0x80000000U;
- if (Byte & (1U << i))
- Bit = !Bit;
- CRC <<= 1;
- if (Bit)
- CRC ^= 0x04C11DB7U;
- }
- };
-
auto GetLittleEndianByte = [&Mangler, &SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
@@ -2725,22 +2915,19 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
};
// CRC all the bytes of the StringLiteral.
+ llvm::JamCRC JC;
for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
- UpdateCRC(GetLittleEndianByte(I));
+ JC.update(GetLittleEndianByte(I));
// The NUL terminator byte(s) were not present earlier,
// we need to manually process those bytes into the CRC.
for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
++NullTerminator)
- UpdateCRC('\x00');
-
- // The literature refers to the process of reversing the bits in the final CRC
- // output as "reflection".
- CRC = llvm::reverseBits(CRC);
+ JC.update('\x00');
// <encoded-crc>: The CRC is encoded utilizing the standard number mangling
// scheme.
- Mangler.mangleNumber(CRC);
+ Mangler.mangleNumber(JC.getCRC());
// <encoded-string>: The mangled name also contains the first 32 _characters_
// (including null-terminator bytes) of the StringLiteral.
@@ -2790,21 +2977,6 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
Mangler.getStream() << '@';
}
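
The hand-rolled loop is replaced by llvm::JamCRC, which implements the same Rocksoft-model CRC-32 that the deleted comment block described (poly 04C11DB7, init FFFFFFFF, RefIn/RefOut true, XorOut 0), so the manual llvm::reverseBits reflection step also goes away. A minimal usage sketch, assuming a hypothetical Bytes/Len buffer:

    #include "llvm/Support/JamCRC.h"

    llvm::JamCRC JC;
    JC.update(llvm::ArrayRef<char>(Bytes, Len)); // or one byte at a time,
                                                 // as the loop above does
    uint32_t CRC = JC.getCRC();                  // already reflected
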
-void MicrosoftMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD,
- raw_ostream &Out) {
- if (!RD->isExternallyVisible()) {
- // This part of the identifier needs to be unique across all translation
- // units in the linked program. The scheme fails if multiple translation
- // units are compiled using the same relative source file path, or if
- // multiple translation units are built from the same source file.
- SourceManager &SM = getASTContext().getSourceManager();
- Out << "[" << SM.getFileEntryForID(SM.getMainFileID())->getName() << "]";
- }
-
- MicrosoftCXXNameMangler mangler(*this, Out);
- mangler.mangleName(RD);
-}
-
MicrosoftMangleContext *
MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new MicrosoftMangleContextImpl(Context, Diags);
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index c9264d59aae3..c562dae63231 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -450,9 +450,19 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
case BuiltinType::Overload:
@@ -461,6 +471,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::Half:
case BuiltinType::PseudoObject:
case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
break;
}
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 97425d001de0..d2370c88b9c5 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -318,7 +318,12 @@ NestedNameSpecifier::print(raw_ostream &OS,
OS << "::";
}
-void NestedNameSpecifier::dump(const LangOptions &LO) {
+void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ print(llvm::errs(), PrintingPolicy(LO));
+}
+
+void NestedNameSpecifier::dump() const {
+ LangOptions LO;
print(llvm::errs(), PrintingPolicy(LO));
}
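
With dump() const-qualified and given a zero-argument overload, a specifier can be dumped straight from a debugger without first constructing LangOptions. Usage sketch (NNS stands for a hypothetical const NestedNameSpecifier pointer):

    NNS->dump();         // default-constructed LangOptions
    NNS->dump(LangOpts); // or pass the real options for accurate printing
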
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
new file mode 100644
index 000000000000..cd60d3727ba1
--- /dev/null
+++ b/lib/AST/OpenMPClause.cpp
@@ -0,0 +1,465 @@
+//===--- OpenMPClause.cpp - Classes for OpenMP clauses --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the OMPClause subclasses declared in OpenMPClause.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/OpenMPClause.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+OMPClause::child_range OMPClause::children() {
+ switch (getClauseKind()) {
+ default:
+ break;
+#define OPENMP_CLAUSE(Name, Class) \
+ case OMPC_##Name: \
+ return static_cast<Class *>(this)->children();
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("unknown OMPClause");
+}
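+
The children() dispatch above uses the .def-file X-macro idiom: OpenMPKinds.def lists every clause, and each OPENMP_CLAUSE(Name, Class) entry expands to one case of the switch. A sketch of a single expansion (one representative entry; OpenMPKinds.def is authoritative):

    // OPENMP_CLAUSE(private, OMPPrivateClause) expands to:
    case OMPC_private:
      return static_cast<OMPPrivateClause *>(this)->children();
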
+
+void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+OMPPrivateClause *
+OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
+ // Allocate space for private variables and initializer expressions.
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+ llvm::alignOf<Expr *>()) +
+ 2 * sizeof(Expr *) * VL.size());
+ OMPPrivateClause *Clause =
+ new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
+ return Clause;
+}
+
+OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+ llvm::alignOf<Expr *>()) +
+ 2 * sizeof(Expr *) * N);
+ return new (Mem) OMPPrivateClause(N);
+}
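+
Every OMP*Clause factory in this file uses the same trailing-storage idiom: a single allocation holds the clause object followed immediately by its expression lists. A simplified, self-contained sketch of the idiom (not Clang's real class):

    struct VarListClause {
      unsigned NumVars;
      Expr **varlist_begin() {
        // Skip the object itself, rounded up so the Expr* array that the
        // factory allocated right behind it is correctly aligned.
        return reinterpret_cast<Expr **>(
            reinterpret_cast<char *>(this) +
            llvm::RoundUpToAlignment(sizeof(VarListClause),
                                     llvm::alignOf<Expr *>()));
      }
      Expr **varlist_end() { return varlist_begin() + NumVars; }
    };
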
+
+void OMPFirstprivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+void OMPFirstprivateClause::setInits(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
+}
+
+OMPFirstprivateClause *
+OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
+ ArrayRef<Expr *> InitVL) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 3 * sizeof(Expr *) * VL.size());
+ OMPFirstprivateClause *Clause =
+ new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivateCopies(PrivateVL);
+ Clause->setInits(InitVL);
+ return Clause;
+}
+
+OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 3 * sizeof(Expr *) * N);
+ return new (Mem) OMPFirstprivateClause(N);
+}
+
+void OMPLastprivateClause::setPrivateCopies(ArrayRef<Expr *> PrivateCopies) {
+ assert(PrivateCopies.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(PrivateCopies.begin(), PrivateCopies.end(), varlist_end());
+}
+
+void OMPLastprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), getPrivateCopies().end());
+}
+
+void OMPLastprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPLastprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPLastprivateClause *OMPLastprivateClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 5 * sizeof(Expr *) * VL.size());
+ OMPLastprivateClause *Clause =
+ new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 5 * sizeof(Expr *) * N);
+ return new (Mem) OMPLastprivateClause(N);
+}
+
+OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
+ OMPSharedClause *Clause =
+ new (Mem) OMPSharedClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
+ return new (Mem) OMPSharedClause(N);
+}
+
+void OMPLinearClause::setPrivates(ArrayRef<Expr *> PL) {
+ assert(PL.size() == varlist_size() &&
+ "Number of privates is not the same as the preallocated buffer");
+ std::copy(PL.begin(), PL.end(), varlist_end());
+}
+
+void OMPLinearClause::setInits(ArrayRef<Expr *> IL) {
+ assert(IL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(IL.begin(), IL.end(), getPrivates().end());
+}
+
+void OMPLinearClause::setUpdates(ArrayRef<Expr *> UL) {
+ assert(UL.size() == varlist_size() &&
+ "Number of updates is not the same as the preallocated buffer");
+ std::copy(UL.begin(), UL.end(), getInits().end());
+}
+
+void OMPLinearClause::setFinals(ArrayRef<Expr *> FL) {
+ assert(FL.size() == varlist_size() &&
+ "Number of final updates is not the same as the preallocated buffer");
+ std::copy(FL.begin(), FL.end(), getUpdates().end());
+}
+
+OMPLinearClause *OMPLinearClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
+ SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
+ ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
+  // Allocate space for 5 lists (Vars, Privates, Inits, Updates, Finals) and
+  // 2 expressions (Step and CalcStep).
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
+ llvm::alignOf<Expr *>()) +
+ (5 * VL.size() + 2) * sizeof(Expr *));
+ OMPLinearClause *Clause = new (Mem) OMPLinearClause(
+ StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setPrivates(PL);
+ Clause->setInits(IL);
+  // Fill the update and final expressions with nulls; they are supplied
+  // later, after the directive has been constructed.
+ std::fill(Clause->getInits().end(), Clause->getInits().end() + VL.size(),
+ nullptr);
+ std::fill(Clause->getUpdates().end(), Clause->getUpdates().end() + VL.size(),
+ nullptr);
+ Clause->setStep(Step);
+ Clause->setCalcStep(CalcStep);
+ return Clause;
+}
+
+OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
+ unsigned NumVars) {
+  // Allocate space for 5 lists (Vars, Privates, Inits, Updates, Finals) and
+  // 2 expressions (Step and CalcStep).
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
+ llvm::alignOf<Expr *>()) +
+ (5 * NumVars + 2) * sizeof(Expr *));
+ return new (Mem) OMPLinearClause(NumVars);
+}
+
+OMPAlignedClause *
+OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * (VL.size() + 1));
+ OMPAlignedClause *Clause = new (Mem)
+ OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setAlignment(A);
+ return Clause;
+}
+
+OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
+ unsigned NumVars) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * (NumVars + 1));
+ return new (Mem) OMPAlignedClause(NumVars);
+}
+
+void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
+}
+
+void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPCopyinClause *OMPCopyinClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+ llvm::alignOf<Expr *>()) +
+ 4 * sizeof(Expr *) * VL.size());
+ OMPCopyinClause *Clause =
+ new (Mem) OMPCopyinClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+ llvm::alignOf<Expr *>()) +
+ 4 * sizeof(Expr *) * N);
+ return new (Mem) OMPCopyinClause(N);
+}
+
+void OMPCopyprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
+ assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
+ "not the same as the "
+ "preallocated buffer");
+ std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
+}
+
+void OMPCopyprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
+ assert(DstExprs.size() == varlist_size() && "Number of destination "
+ "expressions is not the same as "
+ "the preallocated buffer");
+ std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
+}
+
+void OMPCopyprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
+ assert(AssignmentOps.size() == varlist_size() &&
+ "Number of assignment expressions is not the same as the preallocated "
+ "buffer");
+ std::copy(AssignmentOps.begin(), AssignmentOps.end(),
+ getDestinationExprs().end());
+}
+
+OMPCopyprivateClause *OMPCopyprivateClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 4 * sizeof(Expr *) * VL.size());
+ OMPCopyprivateClause *Clause =
+ new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setSourceExprs(SrcExprs);
+ Clause->setDestinationExprs(DstExprs);
+ Clause->setAssignmentOps(AssignmentOps);
+ return Clause;
+}
+
+OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
+ llvm::alignOf<Expr *>()) +
+ 4 * sizeof(Expr *) * N);
+ return new (Mem) OMPCopyprivateClause(N);
+}
+
+void OMPReductionClause::setPrivates(ArrayRef<Expr *> Privates) {
+ assert(Privates.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(Privates.begin(), Privates.end(), varlist_end());
+}
+
+void OMPReductionClause::setLHSExprs(ArrayRef<Expr *> LHSExprs) {
+ assert(
+ LHSExprs.size() == varlist_size() &&
+ "Number of LHS expressions is not the same as the preallocated buffer");
+ std::copy(LHSExprs.begin(), LHSExprs.end(), getPrivates().end());
+}
+
+void OMPReductionClause::setRHSExprs(ArrayRef<Expr *> RHSExprs) {
+ assert(
+ RHSExprs.size() == varlist_size() &&
+ "Number of RHS expressions is not the same as the preallocated buffer");
+ std::copy(RHSExprs.begin(), RHSExprs.end(), getLHSExprs().end());
+}
+
+void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
+ assert(ReductionOps.size() == varlist_size() && "Number of reduction "
+ "expressions is not the same "
+ "as the preallocated buffer");
+ std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
+}
+
+OMPReductionClause *OMPReductionClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
+ ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
+ ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
+ llvm::alignOf<Expr *>()) +
+ 5 * sizeof(Expr *) * VL.size());
+ OMPReductionClause *Clause = new (Mem) OMPReductionClause(
+ StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
+ Clause->setVarRefs(VL);
+ Clause->setPrivates(Privates);
+ Clause->setLHSExprs(LHSExprs);
+ Clause->setRHSExprs(RHSExprs);
+ Clause->setReductionOps(ReductionOps);
+ return Clause;
+}
+
+OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
+ llvm::alignOf<Expr *>()) +
+ 5 * sizeof(Expr *) * N);
+ return new (Mem) OMPReductionClause(N);
+}
+
+OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
+ OMPFlushClause *Clause =
+ new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
+ return new (Mem) OMPFlushClause(N);
+}
+
+OMPDependClause *
+OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
+ OMPDependClause *Clause =
+ new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setDependencyKind(DepKind);
+ Clause->setDependencyLoc(DepLoc);
+ Clause->setColonLoc(ColonLoc);
+ return Clause;
+}
+
+OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
+ return new (Mem) OMPDependClause(N);
+}
+
+OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL,
+ OpenMPMapClauseKind TypeModifier,
+ OpenMPMapClauseKind Type,
+ SourceLocation TypeLoc) {
+ void *Mem = C.Allocate(
+ llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
+ OMPMapClause *Clause = new (Mem) OMPMapClause(
+ TypeModifier, Type, TypeLoc, StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ Clause->setMapTypeModifier(TypeModifier);
+ Clause->setMapType(Type);
+ Clause->setMapLoc(TypeLoc);
+ return Clause;
+}
+
+OMPMapClause *OMPMapClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(
+ llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
+ return new (Mem) OMPMapClause(N);
+}
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 24b129a5c532..8317f76b8569 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/CommentLexer.h"
#include "clang/AST/CommentParser.h"
#include "clang/AST/CommentSema.h"
+#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
using namespace clang;
@@ -62,12 +63,53 @@ std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment,
bool mergedCommentIsTrailingComment(StringRef Comment) {
return (Comment.size() > 3) && (Comment[3] == '<');
}
+
+/// Returns true if R1 and R2 both have valid locations that start on the same
+/// column.
+bool commentsStartOnSameColumn(const SourceManager &SM, const RawComment &R1,
+ const RawComment &R2) {
+ SourceLocation L1 = R1.getLocStart();
+ SourceLocation L2 = R2.getLocStart();
+ bool Invalid = false;
+ unsigned C1 = SM.getPresumedColumnNumber(L1, &Invalid);
+ if (!Invalid) {
+ unsigned C2 = SM.getPresumedColumnNumber(L2, &Invalid);
+ return !Invalid && (C1 == C2);
+ }
+ return false;
+}
} // unnamed namespace
+/// \brief Determines whether there is only whitespace in `Buffer` between `P`
+/// and the previous line.
+/// \param Buffer The buffer to search in.
+/// \param P The offset from the beginning of `Buffer` to start from.
+/// \return true if all of the characters in `Buffer` ranging from the closest
+/// line-ending character before `P` (or the beginning of `Buffer`) to `P - 1`
+/// are whitespace.
+static bool onlyWhitespaceOnLineBefore(const char *Buffer, unsigned P) {
+ // Search backwards until we see linefeed or carriage return.
+ for (unsigned I = P; I != 0; --I) {
+ char C = Buffer[I - 1];
+ if (isVerticalWhitespace(C))
+ return true;
+ if (!isHorizontalWhitespace(C))
+ return false;
+ }
+ // We hit the beginning of the buffer.
+ return true;
+}
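+
Two hypothetical calls showing the helper's contract:

    // onlyWhitespaceOnLineBefore("int x;  // doc", 8) == false:
    //   scanning back from offset 8 hits ';' before any newline.
    // onlyWhitespaceOnLineBefore("  // doc", 2) == true:
    //   only horizontal whitespace back to the start of the buffer.
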
+
+/// Returns whether `K` is an ordinary comment kind.
+static bool isOrdinaryKind(RawComment::CommentKind K) {
+ return (K == RawComment::RCK_OrdinaryBCPL) ||
+ (K == RawComment::RCK_OrdinaryC);
+}
+
RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
bool Merged, bool ParseAllComments) :
Range(SR), RawTextValid(false), BriefTextValid(false),
- IsAttached(false), IsAlmostTrailingComment(false),
+ IsAttached(false), IsTrailingComment(false), IsAlmostTrailingComment(false),
ParseAllComments(ParseAllComments) {
// Extract raw comment text, if possible.
if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
@@ -75,17 +117,34 @@ RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
return;
}
+ // Guess comment kind.
+ std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);
+
+ // Guess whether an ordinary comment is trailing.
+ if (ParseAllComments && isOrdinaryKind(K.first)) {
+ FileID BeginFileID;
+ unsigned BeginOffset;
+ std::tie(BeginFileID, BeginOffset) =
+ SourceMgr.getDecomposedLoc(Range.getBegin());
+ if (BeginOffset != 0) {
+ bool Invalid = false;
+ const char *Buffer =
+ SourceMgr.getBufferData(BeginFileID, &Invalid).data();
+ IsTrailingComment |=
+ (!Invalid && !onlyWhitespaceOnLineBefore(Buffer, BeginOffset));
+ }
+ }
+
if (!Merged) {
- // Guess comment kind.
- std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);
Kind = K.first;
- IsTrailingComment = K.second;
+ IsTrailingComment |= K.second;
IsAlmostTrailingComment = RawText.startswith("//<") ||
RawText.startswith("/*<");
} else {
Kind = RCK_Merged;
- IsTrailingComment = mergedCommentIsTrailingComment(RawText);
+ IsTrailingComment =
+ IsTrailingComment || mergedCommentIsTrailingComment(RawText);
}
}
@@ -239,9 +298,22 @@ void RawCommentList::addComment(const RawComment &RC,
const RawComment &C2 = RC;
// Merge comments only if there is only whitespace between them.
- // Can't merge trailing and non-trailing comments.
+  // Can't merge trailing and non-trailing comments unless the second is a
+  // non-trailing ordinary comment that starts in the same column, as in:
+ // int x; // documents x
+ // // more text
+ // versus:
+ // int x; // documents x
+ // int y; // documents y
+ // or:
+ // int x; // documents x
+ // // documents y
+ // int y;
// Merge comments if they are on same or consecutive lines.
- if (C1.isTrailingComment() == C2.isTrailingComment() &&
+ if ((C1.isTrailingComment() == C2.isTrailingComment() ||
+ (C1.isTrailingComment() && !C2.isTrailingComment() &&
+ isOrdinaryKind(C2.getKind()) &&
+ commentsStartOnSameColumn(SourceMgr, C1, C2))) &&
onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(),
/*MaxNewlinesAllowed=*/1)) {
SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd());
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index de7bcb826ebb..bc3c2a831c47 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -18,7 +18,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
@@ -565,7 +564,7 @@ void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> ClassSetTy;
-class RecordLayoutBuilder {
+class ItaniumRecordLayoutBuilder {
protected:
// FIXME: Remove this and make the appropriate fields public.
friend class clang::ASTContext;
@@ -656,19 +655,18 @@ protected:
/// Valid if UseExternalLayout is true.
ExternalLayout External;
- RecordLayoutBuilder(const ASTContext &Context,
- EmptySubobjectMap *EmptySubobjects)
- : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
- Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
- UseExternalLayout(false), InferAlignment(false),
- Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
- UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
- MaxFieldAlignment(CharUnits::Zero()),
- DataSize(0), NonVirtualSize(CharUnits::Zero()),
- NonVirtualAlignment(CharUnits::One()),
- PrimaryBase(nullptr), PrimaryBaseIsVirtual(false),
- HasOwnVFPtr(false),
- FirstNearlyEmptyVBase(nullptr) {}
+ ItaniumRecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
+ Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
+ UseExternalLayout(false), InferAlignment(false), Packed(false),
+ IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
+ UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
+ MaxFieldAlignment(CharUnits::Zero()), DataSize(0),
+ NonVirtualSize(CharUnits::Zero()),
+ NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr),
+ PrimaryBaseIsVirtual(false), HasOwnVFPtr(false),
+ FirstNearlyEmptyVBase(nullptr) {}
void Layout(const RecordDecl *D);
void Layout(const CXXRecordDecl *D);
@@ -782,13 +780,12 @@ protected:
void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
- RecordLayoutBuilder(const RecordLayoutBuilder &) = delete;
- void operator=(const RecordLayoutBuilder &) = delete;
+ ItaniumRecordLayoutBuilder(const ItaniumRecordLayoutBuilder &) = delete;
+ void operator=(const ItaniumRecordLayoutBuilder &) = delete;
};
} // end anonymous namespace
-void
-RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
+void ItaniumRecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
for (const auto &I : RD->bases()) {
assert(!I.getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
@@ -817,7 +814,7 @@ RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
}
/// DeterminePrimaryBase - Determine the primary base of the given class.
-void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
+void ItaniumRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
// If the class isn't dynamic, it won't have a primary base.
if (!RD->isDynamicClass())
return;
@@ -864,10 +861,8 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
assert(!PrimaryBase && "Should not get here with a primary base!");
}
-BaseSubobjectInfo *
-RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
- bool IsVirtual,
- BaseSubobjectInfo *Derived) {
+BaseSubobjectInfo *ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo(
+ const CXXRecordDecl *RD, bool IsVirtual, BaseSubobjectInfo *Derived) {
BaseSubobjectInfo *Info;
if (IsVirtual) {
@@ -943,7 +938,8 @@ RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
return Info;
}
-void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
+void ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo(
+ const CXXRecordDecl *RD) {
for (const auto &I : RD->bases()) {
bool IsVirtual = I.isVirtual();
@@ -966,8 +962,8 @@ void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
}
}
-void
-RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
+void ItaniumRecordLayoutBuilder::EnsureVTablePointerAlignment(
+ CharUnits UnpackedBaseAlign) {
CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
// The maximum field alignment overrides base align.
@@ -984,8 +980,8 @@ RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
UpdateAlignment(BaseAlign, UnpackedBaseAlign);
}
-void
-RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
+void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases(
+ const CXXRecordDecl *RD) {
// Then, determine the primary base class.
DeterminePrimaryBase(RD);
@@ -1054,7 +1050,8 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
}
}
-void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
+void ItaniumRecordLayoutBuilder::LayoutNonVirtualBase(
+ const BaseSubobjectInfo *Base) {
// Layout the base.
CharUnits Offset = LayoutBase(Base);
@@ -1065,9 +1062,8 @@ void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
AddPrimaryVirtualBaseOffsets(Base, Offset);
}
-void
-RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
- CharUnits Offset) {
+void ItaniumRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(
+ const BaseSubobjectInfo *Info, CharUnits Offset) {
// This base isn't interesting, it has no virtual bases.
if (!Info->Class->getNumVBases())
return;
@@ -1099,9 +1095,8 @@ RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
}
}
-void
-RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
- const CXXRecordDecl *MostDerivedClass) {
+void ItaniumRecordLayoutBuilder::LayoutVirtualBases(
+ const CXXRecordDecl *RD, const CXXRecordDecl *MostDerivedClass) {
const CXXRecordDecl *PrimaryBase;
bool PrimaryBaseIsVirtual;
@@ -1146,7 +1141,8 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
}
}
-void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+void ItaniumRecordLayoutBuilder::LayoutVirtualBase(
+ const BaseSubobjectInfo *Base) {
assert(!Base->Derived && "Trying to lay out a primary virtual base!");
// Layout the base.
@@ -1160,7 +1156,8 @@ void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
AddPrimaryVirtualBaseOffsets(Base, Offset);
}
-CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+CharUnits
+ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
@@ -1229,7 +1226,7 @@ CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
return Offset;
}
-void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
+void ItaniumRecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
IsUnion = RD->isUnion();
IsMsStruct = RD->isMsStruct(Context);
@@ -1277,7 +1274,7 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
}
}
-void RecordLayoutBuilder::Layout(const RecordDecl *D) {
+void ItaniumRecordLayoutBuilder::Layout(const RecordDecl *D) {
InitializeLayout(D);
LayoutFields(D);
@@ -1286,7 +1283,7 @@ void RecordLayoutBuilder::Layout(const RecordDecl *D) {
FinishLayout(D);
}
-void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
+void ItaniumRecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
InitializeLayout(RD);
// Lay out the vtable and the non-virtual bases.
@@ -1326,7 +1323,7 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
#endif
}
-void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
+void ItaniumRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
@@ -1349,7 +1346,7 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
FinishLayout(D);
}
-void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+void ItaniumRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Layout each field, for now, just sequentially, respecting alignment. In
// the future, this will need to be tweakable by targets.
bool InsertExtraPadding = D->mayInsertExtraPadding(/*EmitRemark=*/true);
@@ -1370,10 +1367,10 @@ roundUpSizeToCharAlignment(uint64_t Size,
return llvm::RoundUpToAlignment(Size, CharAlignment);
}
-void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
- uint64_t TypeSize,
- bool FieldPacked,
- const FieldDecl *D) {
+void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
+ uint64_t TypeSize,
+ bool FieldPacked,
+ const FieldDecl *D) {
assert(Context.getLangOpts().CPlusPlus &&
"Can only have wide bit-fields in C++!");
@@ -1437,7 +1434,7 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
UpdateAlignment(TypeAlign);
}
-void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
+void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
uint64_t FieldSize = D->getBitWidthValue(Context);
TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
@@ -1568,6 +1565,12 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
}
+  // But ms_struct just ignores all of that in unions, even explicit
+  // alignment attributes.
+ if (IsMsStruct && IsUnion) {
+ FieldAlign = UnpackedFieldAlign = 1;
+ }
+
// For purposes of diagnostics, we're going to simultaneously
// compute the field offsets that we would have used if we weren't
// adding any alignment padding or if the field weren't packed.
@@ -1634,9 +1637,20 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// For unions, this is just a max operation, as usual.
if (IsUnion) {
- uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
- Context);
+    // For ms_struct, allocate the entire storage unit --- unless this
+    // is a zero-width bitfield, in which case just use the width of a
+    // single char.
+ uint64_t RoundedFieldSize;
+ if (IsMsStruct) {
+ RoundedFieldSize =
+ (FieldSize ? TypeSize : Context.getTargetInfo().getCharWidth());
+
+ // Otherwise, allocate just the number of bytes required to store
+ // the bitfield.
+ } else {
+ RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize, Context);
+ }
setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
+
// For non-zero-width bitfields in ms_struct structs, allocate a new
// storage unit if necessary.
} else if (IsMsStruct && FieldSize) {
@@ -1672,8 +1686,8 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
Context.toCharUnitsFromBits(UnpackedFieldAlign));
}
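
A hedged example of the two ms_struct bit-field rules added above:

    #pragma ms_struct on
    union U {
      // Per the code above: this non-zero-width field allocates its entire
      // 64-bit storage unit, and any alignment attributes on it would be
      // ignored (its field alignment is forced to 1).
      unsigned long long b : 1;
    };
    #pragma ms_struct off
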
-void RecordLayoutBuilder::LayoutField(const FieldDecl *D,
- bool InsertExtraPadding) {
+void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ bool InsertExtraPadding) {
if (D->isBitField()) {
LayoutBitField(D);
return;
@@ -1800,7 +1814,7 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D,
UpdateAlignment(FieldAlign, UnpackedFieldAlign);
}
-void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
// In C++, records cannot be of size 0.
if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
@@ -1852,7 +1866,7 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
Diag(RD->getLocation(), diag::warn_padded_struct_size)
<< Context.getTypeDeclType(RD)
<< PadSize
- << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ << (InBits ? 1 : 0); // (byte|bit)
}
// Warn if we packed it unnecessarily. If the alignment is 1 byte don't
@@ -1864,8 +1878,8 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
}
}
-void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
- CharUnits UnpackedNewAlignment) {
+void ItaniumRecordLayoutBuilder::UpdateAlignment(
+ CharUnits NewAlignment, CharUnits UnpackedNewAlignment) {
// The alignment is not modified when using 'mac68k' alignment or when
// we have an externally-supplied layout that also provides overall alignment.
if (IsMac68kAlign || (UseExternalLayout && !InferAlignment))
@@ -1885,8 +1899,8 @@ void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
}
uint64_t
-RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
- uint64_t ComputedOffset) {
+ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset) {
uint64_t ExternalFieldOffset = External.getExternalFieldOffset(Field);
if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
@@ -1914,12 +1928,9 @@ static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) {
}
}
-void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
- uint64_t UnpaddedOffset,
- uint64_t UnpackedOffset,
- unsigned UnpackedAlign,
- bool isPacked,
- const FieldDecl *D) {
+void ItaniumRecordLayoutBuilder::CheckFieldPadding(
+ uint64_t Offset, uint64_t UnpaddedOffset, uint64_t UnpackedOffset,
+ unsigned UnpackedAlign, bool isPacked, const FieldDecl *D) {
// We don't warn about ObjC ivars; ObjC interfaces are generally not used
// for padding tricks.
if (isa<ObjCIvarDecl>(D))
@@ -1945,14 +1956,14 @@ void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
<< Context.getTypeDeclType(D->getParent())
<< PadSize
- << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
+ << (InBits ? 1 : 0) // (byte|bit)
<< D->getIdentifier();
else
Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
<< Context.getTypeDeclType(D->getParent())
<< PadSize
- << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ << (InBits ? 1 : 0); // (byte|bit)
}
// Warn if we packed it unnecessarily. If the alignment is 1 byte don't
@@ -2014,6 +2025,21 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
continue;
}
+ if (Context.getLangOpts().CUDA) {
+    // While the compiler may see the key method in this TU, during CUDA
+    // compilation we should ignore methods that are not accessible on
+    // this side of the compilation.
+ if (Context.getLangOpts().CUDAIsDevice) {
+ // In device mode ignore methods without __device__ attribute.
+ if (!MD->hasAttr<CUDADeviceAttr>())
+ continue;
+ } else {
+ // In host mode ignore __device__-only methods.
+ if (!MD->hasAttr<CUDAHostAttr>() && MD->hasAttr<CUDADeviceAttr>())
+ continue;
+ }
+ }
+
// If the key function is dllimport but the class isn't, then the class has
// no key function. The DLL that exports the key function won't export the
// vtable in this case.
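
A sketch of the effect during CUDA compilation (using the __device__ macro from the CUDA headers, i.e. __attribute__((device))):

    struct S {
      virtual void h();            // host-only: skipped as a key-function
                                   // candidate when compiling for device
      __device__ virtual void d(); // device-only: skipped when compiling
                                   // for host
    };
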
@@ -2027,8 +2053,8 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
return nullptr;
}
-DiagnosticBuilder
-RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) {
+DiagnosticBuilder ItaniumRecordLayoutBuilder::Diag(SourceLocation Loc,
+ unsigned DiagID) {
return Context.getDiagnostics().Report(Loc, DiagID);
}
@@ -2074,8 +2100,8 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
llvm_unreachable("bad tail-padding use kind");
}
-static bool isMsLayout(const RecordDecl* D) {
- return D->getASTContext().getTargetInfo().getCXXABI().isMicrosoft();
+static bool isMsLayout(const ASTContext &Context) {
+ return Context.getTargetInfo().getCXXABI().isMicrosoft();
}
// This section contains an implementation of struct layout that is, up to the
@@ -2656,13 +2682,20 @@ void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
// alignment.
CharUnits Offset = PointerInfo.Size.RoundUpToAlignment(
std::max(RequiredAlignment, Alignment));
- // Increase the size of the object and push back all fields, the vbptr and all
- // bases by the offset amount.
+ // Push back the vbptr, but increase the size of the object and push back
+ // regular fields by the offset only if not using external record layout.
+ if (HasVBPtr)
+ VBPtrOffset += Offset;
+
+ if (UseExternalLayout)
+ return;
+
Size += Offset;
+
+  // If we're using an external layout, the field offsets have already
+  // accounted for this adjustment.
for (uint64_t &FieldOffset : FieldOffsets)
FieldOffset += Context.toBits(Offset);
- if (HasVBPtr)
- VBPtrOffset += Offset;
for (BaseOffsetsMapTy::value_type &Base : Bases)
Base.second += Offset;
}
@@ -2840,32 +2873,6 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
}
}
-/// \brief Get or compute information about the layout of the specified record
-/// (struct/union/class), which indicates its size and field position
-/// information.
-const ASTRecordLayout *
-ASTContext::BuildMicrosoftASTRecordLayout(const RecordDecl *D) const {
- MicrosoftRecordLayoutBuilder Builder(*this);
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- Builder.cxxLayout(RD);
- return new (*this) ASTRecordLayout(
- *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
- Builder.HasOwnVFPtr,
- Builder.HasOwnVFPtr || Builder.PrimaryBase,
- Builder.VBPtrOffset, Builder.NonVirtualSize, Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size(), Builder.NonVirtualSize,
- Builder.Alignment, CharUnits::Zero(), Builder.PrimaryBase,
- false, Builder.SharedVBPtrBase,
- Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
- Builder.Bases, Builder.VBases);
- } else {
- Builder.layout(D);
- return new (*this) ASTRecordLayout(
- *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
- Builder.Size, Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
- }
-}
-
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
@@ -2892,54 +2899,63 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
const ASTRecordLayout *NewEntry = nullptr;
- if (isMsLayout(D)) {
- NewEntry = BuildMicrosoftASTRecordLayout(D);
- } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- EmptySubobjectMap EmptySubobjects(*this, RD);
- RecordLayoutBuilder Builder(*this, &EmptySubobjects);
- Builder.Layout(RD);
-
- // In certain situations, we are allowed to lay out objects in the
- // tail-padding of base classes. This is ABI-dependent.
- // FIXME: this should be stored in the record layout.
- bool skipTailPadding =
- mustSkipTailPadding(getTargetInfo().getCXXABI(), cast<CXXRecordDecl>(D));
-
- // FIXME: This should be done in FinalizeLayout.
- CharUnits DataSize =
- skipTailPadding ? Builder.getSize() : Builder.getDataSize();
- CharUnits NonVirtualSize =
- skipTailPadding ? DataSize : Builder.NonVirtualSize;
- NewEntry =
- new (*this) ASTRecordLayout(*this, Builder.getSize(),
- Builder.Alignment,
- /*RequiredAlignment : used by MS-ABI)*/
- Builder.Alignment,
- Builder.HasOwnVFPtr,
- RD->isDynamicClass(),
- CharUnits::fromQuantity(-1),
- DataSize,
- Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size(),
- NonVirtualSize,
- Builder.NonVirtualAlignment,
- EmptySubobjects.SizeOfLargestEmptySubobject,
- Builder.PrimaryBase,
- Builder.PrimaryBaseIsVirtual,
- nullptr, false, false,
- Builder.Bases, Builder.VBases);
+ if (isMsLayout(*this)) {
+ MicrosoftRecordLayoutBuilder Builder(*this);
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ Builder.cxxLayout(RD);
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.HasOwnVFPtr, Builder.HasOwnVFPtr || Builder.PrimaryBase,
+ Builder.VBPtrOffset, Builder.NonVirtualSize,
+ Builder.FieldOffsets.data(), Builder.FieldOffsets.size(),
+ Builder.NonVirtualSize, Builder.Alignment, CharUnits::Zero(),
+ Builder.PrimaryBase, false, Builder.SharedVBPtrBase,
+ Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
+ Builder.Bases, Builder.VBases);
+ } else {
+ Builder.layout(D);
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.Size, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
} else {
- RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
- Builder.Layout(D);
-
- NewEntry =
- new (*this) ASTRecordLayout(*this, Builder.getSize(),
- Builder.Alignment,
- /*RequiredAlignment : used by MS-ABI)*/
- Builder.Alignment,
- Builder.getSize(),
- Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size());
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+ ItaniumRecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // In certain situations, we are allowed to lay out objects in the
+ // tail-padding of base classes. This is ABI-dependent.
+ // FIXME: this should be stored in the record layout.
+ bool skipTailPadding =
+ mustSkipTailPadding(getTargetInfo().getCXXABI(), RD);
+
+ // FIXME: This should be done in FinalizeLayout.
+ CharUnits DataSize =
+ skipTailPadding ? Builder.getSize() : Builder.getDataSize();
+ CharUnits NonVirtualSize =
+ skipTailPadding ? DataSize : Builder.NonVirtualSize;
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.getSize(), Builder.Alignment,
+        /*RequiredAlignment: used by MS-ABI*/
+ Builder.Alignment, Builder.HasOwnVFPtr, RD->isDynamicClass(),
+ CharUnits::fromQuantity(-1), DataSize, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(), NonVirtualSize,
+ Builder.NonVirtualAlignment,
+ EmptySubobjects.SizeOfLargestEmptySubobject, Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual, nullptr, false, false, Builder.Bases,
+ Builder.VBases);
+ } else {
+ ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
+ Builder.Layout(D);
+
+ NewEntry = new (*this) ASTRecordLayout(
+ *this, Builder.getSize(), Builder.Alignment,
+          /*RequiredAlignment: used by MS-ABI*/
+ Builder.Alignment, Builder.getSize(), Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
}
ASTRecordLayouts[D] = NewEntry;
@@ -3049,7 +3065,7 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
return getObjCLayout(D, nullptr);
}
- RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
+ ItaniumRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
Builder.Layout(D);
const ASTRecordLayout *NewEntry =
@@ -3068,148 +3084,193 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
static void PrintOffset(raw_ostream &OS,
CharUnits Offset, unsigned IndentLevel) {
- OS << llvm::format("%4" PRId64 " | ", (int64_t)Offset.getQuantity());
+ OS << llvm::format("%10" PRId64 " | ", (int64_t)Offset.getQuantity());
+ OS.indent(IndentLevel * 2);
+}
+
+static void PrintBitFieldOffset(raw_ostream &OS, CharUnits Offset,
+ unsigned Begin, unsigned Width,
+ unsigned IndentLevel) {
+ llvm::SmallString<10> Buffer;
+ {
+ llvm::raw_svector_ostream BufferOS(Buffer);
+ BufferOS << Offset.getQuantity() << ':';
+ if (Width == 0) {
+ BufferOS << '-';
+ } else {
+ BufferOS << Begin << '-' << (Begin + Width - 1);
+ }
+ }
+
+ OS << llvm::right_justify(Buffer, 10) << " | ";
OS.indent(IndentLevel * 2);
}
static void PrintIndentNoOffset(raw_ostream &OS, unsigned IndentLevel) {
- OS << " | ";
+ OS << " | ";
OS.indent(IndentLevel * 2);
}
-static void DumpCXXRecordLayout(raw_ostream &OS,
- const CXXRecordDecl *RD, const ASTContext &C,
- CharUnits Offset,
- unsigned IndentLevel,
- const char* Description,
- bool IncludeVirtualBases) {
+static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
+ const ASTContext &C,
+ CharUnits Offset,
+ unsigned IndentLevel,
+ const char* Description,
+ bool PrintSizeInfo,
+ bool IncludeVirtualBases) {
const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
+ auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
PrintOffset(OS, Offset, IndentLevel);
- OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString();
+ OS << C.getTypeDeclType(const_cast<RecordDecl*>(RD)).getAsString();
if (Description)
OS << ' ' << Description;
- if (RD->isEmpty())
+ if (CXXRD && CXXRD->isEmpty())
OS << " (empty)";
OS << '\n';
IndentLevel++;
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- bool HasOwnVFPtr = Layout.hasOwnVFPtr();
- bool HasOwnVBPtr = Layout.hasOwnVBPtr();
-
- // Vtable pointer.
- if (RD->isDynamicClass() && !PrimaryBase && !isMsLayout(RD)) {
- PrintOffset(OS, Offset, IndentLevel);
- OS << '(' << *RD << " vtable pointer)\n";
- } else if (HasOwnVFPtr) {
- PrintOffset(OS, Offset, IndentLevel);
- // vfptr (for Microsoft C++ ABI)
- OS << '(' << *RD << " vftable pointer)\n";
- }
-
- // Collect nvbases.
- SmallVector<const CXXRecordDecl *, 4> Bases;
- for (const CXXBaseSpecifier &Base : RD->bases()) {
- assert(!Base.getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- if (!Base.isVirtual())
- Bases.push_back(Base.getType()->getAsCXXRecordDecl());
- }
+ // Dump bases.
+ if (CXXRD) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ bool HasOwnVFPtr = Layout.hasOwnVFPtr();
+ bool HasOwnVBPtr = Layout.hasOwnVBPtr();
+
+ // Vtable pointer.
+ if (CXXRD->isDynamicClass() && !PrimaryBase && !isMsLayout(C)) {
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << '(' << *RD << " vtable pointer)\n";
+ } else if (HasOwnVFPtr) {
+ PrintOffset(OS, Offset, IndentLevel);
+ // vfptr (for Microsoft C++ ABI)
+ OS << '(' << *RD << " vftable pointer)\n";
+ }
- // Sort nvbases by offset.
- std::stable_sort(Bases.begin(), Bases.end(),
- [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
- return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
- });
+ // Collect nvbases.
+ SmallVector<const CXXRecordDecl *, 4> Bases;
+ for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
+ assert(!Base.getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+ if (!Base.isVirtual())
+ Bases.push_back(Base.getType()->getAsCXXRecordDecl());
+ }
- // Dump (non-virtual) bases
- for (const CXXRecordDecl *Base : Bases) {
- CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
- Base == PrimaryBase ? "(primary base)" : "(base)",
- /*IncludeVirtualBases=*/false);
- }
+ // Sort nvbases by offset.
+ std::stable_sort(Bases.begin(), Bases.end(),
+ [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+ return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
+ });
+
+ // Dump (non-virtual) bases
+ for (const CXXRecordDecl *Base : Bases) {
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+ DumpRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
+ Base == PrimaryBase ? "(primary base)" : "(base)",
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/false);
+ }
- // vbptr (for Microsoft C++ ABI)
- if (HasOwnVBPtr) {
- PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
- OS << '(' << *RD << " vbtable pointer)\n";
+ // vbptr (for Microsoft C++ ABI)
+ if (HasOwnVBPtr) {
+ PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vbtable pointer)\n";
+ }
}
// Dump fields.
uint64_t FieldNo = 0;
- for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I, ++FieldNo) {
const FieldDecl &Field = **I;
- CharUnits FieldOffset = Offset +
- C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
-
- if (const CXXRecordDecl *D = Field.getType()->getAsCXXRecordDecl()) {
- DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel,
- Field.getName().data(),
- /*IncludeVirtualBases=*/true);
+ uint64_t LocalFieldOffsetInBits = Layout.getFieldOffset(FieldNo);
+ CharUnits FieldOffset =
+ Offset + C.toCharUnitsFromBits(LocalFieldOffsetInBits);
+
+ // Recursively dump fields of record type.
+ if (auto RT = Field.getType()->getAs<RecordType>()) {
+ DumpRecordLayout(OS, RT->getDecl(), C, FieldOffset, IndentLevel,
+ Field.getName().data(),
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/true);
continue;
}
- PrintOffset(OS, FieldOffset, IndentLevel);
+ if (Field.isBitField()) {
+ uint64_t LocalFieldByteOffsetInBits = C.toBits(FieldOffset - Offset);
+ unsigned Begin = LocalFieldOffsetInBits - LocalFieldByteOffsetInBits;
+ unsigned Width = Field.getBitWidthValue(C);
+ PrintBitFieldOffset(OS, FieldOffset, Begin, Width, IndentLevel);
+ } else {
+ PrintOffset(OS, FieldOffset, IndentLevel);
+ }
OS << Field.getType().getAsString() << ' ' << Field << '\n';
}
- if (!IncludeVirtualBases)
- return;
-
// Dump virtual bases.
- const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps =
- Layout.getVBaseOffsetsMap();
- for (const CXXBaseSpecifier &Base : RD->vbases()) {
- assert(Base.isVirtual() && "Found non-virtual class!");
- const CXXRecordDecl *VBase = Base.getType()->getAsCXXRecordDecl();
+ if (CXXRD && IncludeVirtualBases) {
+ const ASTRecordLayout::VBaseOffsetsMapTy &VtorDisps =
+ Layout.getVBaseOffsetsMap();
- CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+ for (const CXXBaseSpecifier &Base : CXXRD->vbases()) {
+ assert(Base.isVirtual() && "Found non-virtual class!");
+ const CXXRecordDecl *VBase = Base.getType()->getAsCXXRecordDecl();
- if (vtordisps.find(VBase)->second.hasVtorDisp()) {
- PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel);
- OS << "(vtordisp for vbase " << *VBase << ")\n";
- }
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+
+ if (VtorDisps.find(VBase)->second.hasVtorDisp()) {
+ PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel);
+ OS << "(vtordisp for vbase " << *VBase << ")\n";
+ }
- DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
- VBase == PrimaryBase ?
- "(primary virtual base)" : "(virtual base)",
- /*IncludeVirtualBases=*/false);
+ DumpRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
+ VBase == Layout.getPrimaryBase() ?
+ "(primary virtual base)" : "(virtual base)",
+ /*PrintSizeInfo=*/false,
+ /*IncludeVirtualBases=*/false);
+ }
}
+ if (!PrintSizeInfo) return;
+
PrintIndentNoOffset(OS, IndentLevel - 1);
OS << "[sizeof=" << Layout.getSize().getQuantity();
- if (!isMsLayout(RD))
+ if (CXXRD && !isMsLayout(C))
OS << ", dsize=" << Layout.getDataSize().getQuantity();
- OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
+ OS << ", align=" << Layout.getAlignment().getQuantity();
- PrintIndentNoOffset(OS, IndentLevel - 1);
- OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
- OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity() << "]\n";
+ if (CXXRD) {
+ OS << ",\n";
+ PrintIndentNoOffset(OS, IndentLevel - 1);
+ OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
+ OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity();
+ }
+ OS << "]\n";
}
void ASTContext::DumpRecordLayout(const RecordDecl *RD,
raw_ostream &OS,
bool Simple) const {
- const ASTRecordLayout &Info = getASTRecordLayout(RD);
+ if (!Simple) {
+ ::DumpRecordLayout(OS, RD, *this, CharUnits(), 0, nullptr,
+ /*PrintSizeInfo=*/true,
+ /*IncludeVirtualBases=*/true);
+ return;
+ }
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (!Simple)
- return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, nullptr,
- /*IncludeVirtualBases=*/true);
+ // The "simple" format is designed to be parsed by the
+ // layout-override testing code. There shouldn't be any external
+ // uses of this format --- when LLDB overrides a layout, it sets up
+ // the data structures directly --- so feel free to adjust this as
+ // you like as long as you also update the rudimentary parser for it
+ // in libFrontend.
+ const ASTRecordLayout &Info = getASTRecordLayout(RD);
OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
- if (!Simple) {
- OS << "Record: ";
- RD->dump();
- }
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
- if (!isMsLayout(RD))
+ if (!isMsLayout(*this))
OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
OS << " FieldOffsets: [";
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index e6292b495365..ca63d8486d82 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -294,14 +295,15 @@ CompoundStmt::CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
std::copy(Stmts.begin(), Stmts.end(), Body);
}
-void CompoundStmt::setStmts(const ASTContext &C, Stmt **Stmts,
- unsigned NumStmts) {
- if (this->Body)
+void CompoundStmt::setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts) {
+ if (Body)
C.Deallocate(Body);
- this->CompoundStmtBits.NumStmts = NumStmts;
+ CompoundStmtBits.NumStmts = Stmts.size();
+ assert(CompoundStmtBits.NumStmts == Stmts.size() &&
+ "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
- Body = new (C) Stmt*[NumStmts];
- memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
+ Body = new (C) Stmt*[Stmts.size()];
+ std::copy(Stmts.begin(), Stmts.end(), Body);
}
const char *LabelStmt::getName() const {
@@ -675,12 +677,6 @@ void MSAsmStmt::setInputExpr(unsigned i, Expr *E) {
Exprs[i + NumOutputs] = E;
}
-QualType CXXCatchStmt::getCaughtType() const {
- if (ExceptionDecl)
- return ExceptionDecl->getType();
- return QualType();
-}
-
//===----------------------------------------------------------------------===//
// Constructors
//===----------------------------------------------------------------------===//
@@ -724,12 +720,7 @@ MSAsmStmt::MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
}
static StringRef copyIntoContext(const ASTContext &C, StringRef str) {
- if (str.empty())
- return StringRef();
- size_t size = str.size();
- char *buffer = new (C) char[size];
- memcpy(buffer, str.data(), size);
- return StringRef(buffer, size);
+ return str.copy(C);
}
void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
@@ -740,145 +731,29 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
assert(NumAsmToks == asmtoks.size());
assert(NumClobbers == clobbers.size());
- unsigned NumExprs = exprs.size();
- assert(NumExprs == NumOutputs + NumInputs);
- assert(NumExprs == constraints.size());
+ assert(exprs.size() == NumOutputs + NumInputs);
+ assert(exprs.size() == constraints.size());
AsmStr = copyIntoContext(C, asmstr);
- Exprs = new (C) Stmt*[NumExprs];
- for (unsigned i = 0, e = NumExprs; i != e; ++i)
- Exprs[i] = exprs[i];
+ Exprs = new (C) Stmt*[exprs.size()];
+ std::copy(exprs.begin(), exprs.end(), Exprs);
- AsmToks = new (C) Token[NumAsmToks];
- for (unsigned i = 0, e = NumAsmToks; i != e; ++i)
- AsmToks[i] = asmtoks[i];
+ AsmToks = new (C) Token[asmtoks.size()];
+ std::copy(asmtoks.begin(), asmtoks.end(), AsmToks);
- Constraints = new (C) StringRef[NumExprs];
- for (unsigned i = 0, e = NumExprs; i != e; ++i) {
- Constraints[i] = copyIntoContext(C, constraints[i]);
- }
+ Constraints = new (C) StringRef[exprs.size()];
+ std::transform(constraints.begin(), constraints.end(), Constraints,
+ [&](StringRef Constraint) {
+ return copyIntoContext(C, Constraint);
+ });
Clobbers = new (C) StringRef[NumClobbers];
- for (unsigned i = 0, e = NumClobbers; i != e; ++i) {
- // FIXME: Avoid the allocation/copy if at all possible.
- Clobbers[i] = copyIntoContext(C, clobbers[i]);
- }
-}
-
-ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
- Stmt *Body, SourceLocation FCL,
- SourceLocation RPL)
-: Stmt(ObjCForCollectionStmtClass) {
- SubExprs[ELEM] = Elem;
- SubExprs[COLLECTION] = Collect;
- SubExprs[BODY] = Body;
- ForLoc = FCL;
- RParenLoc = RPL;
-}
-
-ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
- Stmt **CatchStmts, unsigned NumCatchStmts,
- Stmt *atFinallyStmt)
- : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
- NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != nullptr) {
- Stmt **Stmts = getStmts();
- Stmts[0] = atTryStmt;
- for (unsigned I = 0; I != NumCatchStmts; ++I)
- Stmts[I + 1] = CatchStmts[I];
-
- if (HasFinally)
- Stmts[NumCatchStmts + 1] = atFinallyStmt;
-}
-
-ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
- SourceLocation atTryLoc,
- Stmt *atTryStmt,
- Stmt **CatchStmts,
- unsigned NumCatchStmts,
- Stmt *atFinallyStmt) {
- unsigned Size = sizeof(ObjCAtTryStmt) +
- (1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
- return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
- atFinallyStmt);
-}
-
-ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
- unsigned NumCatchStmts,
- bool HasFinally) {
- unsigned Size = sizeof(ObjCAtTryStmt) +
- (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
- return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
-}
-
-SourceLocation ObjCAtTryStmt::getLocEnd() const {
- if (HasFinally)
- return getFinallyStmt()->getLocEnd();
- if (NumCatchStmts)
- return getCatchStmt(NumCatchStmts - 1)->getLocEnd();
- return getTryBody()->getLocEnd();
-}
-
-CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, ArrayRef<Stmt*> handlers) {
- std::size_t Size = sizeof(CXXTryStmt);
- Size += ((handlers.size() + 1) * sizeof(Stmt));
-
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
- return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
-}
-
-CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
- unsigned numHandlers) {
- std::size_t Size = sizeof(CXXTryStmt);
- Size += ((numHandlers + 1) * sizeof(Stmt));
-
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
- return new (Mem) CXXTryStmt(Empty, numHandlers);
-}
-
-CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
- ArrayRef<Stmt*> handlers)
- : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
- Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
- Stmts[0] = tryBlock;
- std::copy(handlers.begin(), handlers.end(), Stmts + 1);
-}
-
-CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
- Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
- Stmt *Body, SourceLocation FL,
- SourceLocation CL, SourceLocation RPL)
- : Stmt(CXXForRangeStmtClass), ForLoc(FL), ColonLoc(CL), RParenLoc(RPL) {
- SubExprs[RANGE] = Range;
- SubExprs[BEGINEND] = BeginEndStmt;
- SubExprs[COND] = Cond;
- SubExprs[INC] = Inc;
- SubExprs[LOOPVAR] = LoopVar;
- SubExprs[BODY] = Body;
-}
-
-Expr *CXXForRangeStmt::getRangeInit() {
- DeclStmt *RangeStmt = getRangeStmt();
- VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
- assert(RangeDecl && "for-range should have a single var decl");
- return RangeDecl->getInit();
-}
-
-const Expr *CXXForRangeStmt::getRangeInit() const {
- return const_cast<CXXForRangeStmt*>(this)->getRangeInit();
-}
-
-VarDecl *CXXForRangeStmt::getLoopVariable() {
- Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
- assert(LV && "No loop variable in CXXForRangeStmt");
- return cast<VarDecl>(LV);
-}
-
-const VarDecl *CXXForRangeStmt::getLoopVariable() const {
- return const_cast<CXXForRangeStmt*>(this)->getLoopVariable();
+ // FIXME: Avoid the allocation/copy if at all possible.
+ std::transform(clobbers.begin(), clobbers.end(), Clobbers,
+ [&](StringRef Clobber) {
+ return copyIntoContext(C, Clobber);
+ });
}
IfStmt::IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
@@ -1070,6 +945,44 @@ SEHFinallyStmt* SEHFinallyStmt::Create(const ASTContext &C, SourceLocation Loc,
return new(C)SEHFinallyStmt(Loc,Block);
}
+CapturedStmt::Capture::Capture(SourceLocation Loc, VariableCaptureKind Kind,
+ VarDecl *Var)
+ : VarAndKind(Var, Kind), Loc(Loc) {
+ switch (Kind) {
+ case VCK_This:
+ assert(!Var && "'this' capture cannot have a variable!");
+ break;
+ case VCK_ByRef:
+ assert(Var && "capturing by reference must have a variable!");
+ break;
+ case VCK_ByCopy:
+ assert(Var && "capturing by copy must have a variable!");
+ assert(
+ (Var->getType()->isScalarType() || (Var->getType()->isReferenceType() &&
+ Var->getType()
+ ->castAs<ReferenceType>()
+ ->getPointeeType()
+ ->isScalarType())) &&
+ "captures by copy are expected to have a scalar type!");
+ break;
+ case VCK_VLAType:
+ assert(!Var &&
+ "Variable-length array type capture cannot have a variable!");
+ break;
+ }
+}
+
+CapturedStmt::VariableCaptureKind
+CapturedStmt::Capture::getCaptureKind() const {
+ return VarAndKind.getInt();
+}
+
+VarDecl *CapturedStmt::Capture::getCapturedVar() const {
+ assert((capturesVariable() || capturesVariableByCopy()) &&
+ "No variable available for 'this' or VAT capture");
+ return VarAndKind.getPointer();
+}
+
CapturedStmt::Capture *CapturedStmt::getStoredCaptures() const {
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
@@ -1158,6 +1071,29 @@ Stmt::child_range CapturedStmt::children() {
return child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
}
+CapturedDecl *CapturedStmt::getCapturedDecl() {
+ return CapDeclAndKind.getPointer();
+}
+const CapturedDecl *CapturedStmt::getCapturedDecl() const {
+ return CapDeclAndKind.getPointer();
+}
+
+/// \brief Set the outlined function declaration.
+void CapturedStmt::setCapturedDecl(CapturedDecl *D) {
+ assert(D && "null CapturedDecl");
+ CapDeclAndKind.setPointer(D);
+}
+
+/// \brief Retrieve the captured region kind.
+CapturedRegionKind CapturedStmt::getCapturedRegionKind() const {
+ return CapDeclAndKind.getInt();
+}
+
+/// \brief Set the captured region kind.
+void CapturedStmt::setCapturedRegionKind(CapturedRegionKind Kind) {
+ CapDeclAndKind.setInt(Kind);
+}
+
bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
for (const auto &I : captures()) {
if (!I.capturesVariable())
@@ -1172,1095 +1108,3 @@ bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
return false;
}
-
-StmtRange OMPClause::children() {
- switch(getClauseKind()) {
- default : break;
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_ ## Name : return static_cast<Class *>(this)->children();
-#include "clang/Basic/OpenMPKinds.def"
- }
- llvm_unreachable("unknown OMPClause");
-}
-
-void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
- assert(VL.size() == varlist_size() &&
- "Number of private copies is not the same as the preallocated buffer");
- std::copy(VL.begin(), VL.end(), varlist_end());
-}
-
-OMPPrivateClause *
-OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
- ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
- // Allocate space for private variables and initializer expressions.
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
- llvm::alignOf<Expr *>()) +
- 2 * sizeof(Expr *) * VL.size());
- OMPPrivateClause *Clause =
- new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setPrivateCopies(PrivateVL);
- return Clause;
-}
-
-OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
- llvm::alignOf<Expr *>()) +
- 2 * sizeof(Expr *) * N);
- return new (Mem) OMPPrivateClause(N);
-}
-
-void OMPFirstprivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
- assert(VL.size() == varlist_size() &&
- "Number of private copies is not the same as the preallocated buffer");
- std::copy(VL.begin(), VL.end(), varlist_end());
-}
-
-void OMPFirstprivateClause::setInits(ArrayRef<Expr *> VL) {
- assert(VL.size() == varlist_size() &&
- "Number of inits is not the same as the preallocated buffer");
- std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
-}
-
-OMPFirstprivateClause *
-OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
- ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
- ArrayRef<Expr *> InitVL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
- llvm::alignOf<Expr *>()) +
- 3 * sizeof(Expr *) * VL.size());
- OMPFirstprivateClause *Clause =
- new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setPrivateCopies(PrivateVL);
- Clause->setInits(InitVL);
- return Clause;
-}
-
-OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
- llvm::alignOf<Expr *>()) +
- 3 * sizeof(Expr *) * N);
- return new (Mem) OMPFirstprivateClause(N);
-}
-
-void OMPLastprivateClause::setPrivateCopies(ArrayRef<Expr *> PrivateCopies) {
- assert(PrivateCopies.size() == varlist_size() &&
- "Number of private copies is not the same as the preallocated buffer");
- std::copy(PrivateCopies.begin(), PrivateCopies.end(), varlist_end());
-}
-
-void OMPLastprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
- assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
- "not the same as the "
- "preallocated buffer");
- std::copy(SrcExprs.begin(), SrcExprs.end(), getPrivateCopies().end());
-}
-
-void OMPLastprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
- assert(DstExprs.size() == varlist_size() && "Number of destination "
- "expressions is not the same as "
- "the preallocated buffer");
- std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
-}
-
-void OMPLastprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
- assert(AssignmentOps.size() == varlist_size() &&
- "Number of assignment expressions is not the same as the preallocated "
- "buffer");
- std::copy(AssignmentOps.begin(), AssignmentOps.end(),
- getDestinationExprs().end());
-}
-
-OMPLastprivateClause *OMPLastprivateClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
- ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * VL.size());
- OMPLastprivateClause *Clause =
- new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setSourceExprs(SrcExprs);
- Clause->setDestinationExprs(DstExprs);
- Clause->setAssignmentOps(AssignmentOps);
- return Clause;
-}
-
-OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
- llvm::alignOf<Expr *>()) +
- 5 * sizeof(Expr *) * N);
- return new (Mem) OMPLastprivateClause(N);
-}
-
-OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
- OMPSharedClause *Clause = new (Mem) OMPSharedClause(StartLoc, LParenLoc,
- EndLoc, VL.size());
- Clause->setVarRefs(VL);
- return Clause;
-}
-
-OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
- return new (Mem) OMPSharedClause(N);
-}
-
-void OMPLinearClause::setInits(ArrayRef<Expr *> IL) {
- assert(IL.size() == varlist_size() &&
- "Number of inits is not the same as the preallocated buffer");
- std::copy(IL.begin(), IL.end(), varlist_end());
-}
-
-void OMPLinearClause::setUpdates(ArrayRef<Expr *> UL) {
- assert(UL.size() == varlist_size() &&
- "Number of updates is not the same as the preallocated buffer");
- std::copy(UL.begin(), UL.end(), getInits().end());
-}
-
-void OMPLinearClause::setFinals(ArrayRef<Expr *> FL) {
- assert(FL.size() == varlist_size() &&
- "Number of final updates is not the same as the preallocated buffer");
- std::copy(FL.begin(), FL.end(), getUpdates().end());
-}
-
-OMPLinearClause *
-OMPLinearClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation ColonLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL,
- ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
- // Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
- // (Step and CalcStep).
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
- llvm::alignOf<Expr *>()) +
- (4 * VL.size() + 2) * sizeof(Expr *));
- OMPLinearClause *Clause = new (Mem)
- OMPLinearClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setInits(IL);
- // Fill update and final expressions with zeroes, they are provided later,
- // after the directive construction.
- std::fill(Clause->getInits().end(), Clause->getInits().end() + VL.size(),
- nullptr);
- std::fill(Clause->getUpdates().end(), Clause->getUpdates().end() + VL.size(),
- nullptr);
- Clause->setStep(Step);
- Clause->setCalcStep(CalcStep);
- return Clause;
-}
-
-OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
- unsigned NumVars) {
- // Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
- // (Step and CalcStep).
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
- llvm::alignOf<Expr *>()) +
- (4 * NumVars + 2) * sizeof(Expr *));
- return new (Mem) OMPLinearClause(NumVars);
-}
-
-OMPAlignedClause *
-OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation ColonLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * (VL.size() + 1));
- OMPAlignedClause *Clause = new (Mem)
- OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setAlignment(A);
- return Clause;
-}
-
-OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
- unsigned NumVars) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * (NumVars + 1));
- return new (Mem) OMPAlignedClause(NumVars);
-}
-
-void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
- assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
- "not the same as the "
- "preallocated buffer");
- std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
-}
-
-void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
- assert(DstExprs.size() == varlist_size() && "Number of destination "
- "expressions is not the same as "
- "the preallocated buffer");
- std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
-}
-
-void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
- assert(AssignmentOps.size() == varlist_size() &&
- "Number of assignment expressions is not the same as the preallocated "
- "buffer");
- std::copy(AssignmentOps.begin(), AssignmentOps.end(),
- getDestinationExprs().end());
-}
-
-OMPCopyinClause *OMPCopyinClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
- ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * VL.size());
- OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc,
- EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setSourceExprs(SrcExprs);
- Clause->setDestinationExprs(DstExprs);
- Clause->setAssignmentOps(AssignmentOps);
- return Clause;
-}
-
-OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * N);
- return new (Mem) OMPCopyinClause(N);
-}
-
-void OMPCopyprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
- assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
- "not the same as the "
- "preallocated buffer");
- std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
-}
-
-void OMPCopyprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
- assert(DstExprs.size() == varlist_size() && "Number of destination "
- "expressions is not the same as "
- "the preallocated buffer");
- std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
-}
-
-void OMPCopyprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
- assert(AssignmentOps.size() == varlist_size() &&
- "Number of assignment expressions is not the same as the preallocated "
- "buffer");
- std::copy(AssignmentOps.begin(), AssignmentOps.end(),
- getDestinationExprs().end());
-}
-
-OMPCopyprivateClause *OMPCopyprivateClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
- ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * VL.size());
- OMPCopyprivateClause *Clause =
- new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setSourceExprs(SrcExprs);
- Clause->setDestinationExprs(DstExprs);
- Clause->setAssignmentOps(AssignmentOps);
- return Clause;
-}
-
-OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * N);
- return new (Mem) OMPCopyprivateClause(N);
-}
-
-void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
- assert(Clauses.size() == getNumClauses() &&
- "Number of clauses is not the same as the preallocated buffer");
- std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
-}
-
-void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
- "Number of loop counters is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getCounters().begin());
-}
-
-void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
- "Number of counter inits is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getInits().begin());
-}
-
-void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
- "Number of counter updates is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getUpdates().begin());
-}
-
-void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
- "Number of counter finals is not the same as the collapsed number");
- std::copy(A.begin(), A.end(), getFinals().begin());
-}
-
-void OMPReductionClause::setLHSExprs(ArrayRef<Expr *> LHSExprs) {
- assert(
- LHSExprs.size() == varlist_size() &&
- "Number of LHS expressions is not the same as the preallocated buffer");
- std::copy(LHSExprs.begin(), LHSExprs.end(), varlist_end());
-}
-
-void OMPReductionClause::setRHSExprs(ArrayRef<Expr *> RHSExprs) {
- assert(
- RHSExprs.size() == varlist_size() &&
- "Number of RHS expressions is not the same as the preallocated buffer");
- std::copy(RHSExprs.begin(), RHSExprs.end(), getLHSExprs().end());
-}
-
-void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
- assert(ReductionOps.size() == varlist_size() && "Number of reduction "
- "expressions is not the same "
- "as the preallocated buffer");
- std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
-}
-
-OMPReductionClause *OMPReductionClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
- NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
- ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
- ArrayRef<Expr *> ReductionOps) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * VL.size());
- OMPReductionClause *Clause = new (Mem) OMPReductionClause(
- StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
- Clause->setVarRefs(VL);
- Clause->setLHSExprs(LHSExprs);
- Clause->setRHSExprs(RHSExprs);
- Clause->setReductionOps(ReductionOps);
- return Clause;
-}
-
-OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
- llvm::alignOf<Expr *>()) +
- 4 * sizeof(Expr *) * N);
- return new (Mem) OMPReductionClause(N);
-}
-
-OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
- OMPFlushClause *Clause =
- new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- return Clause;
-}
-
-OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
- return new (Mem) OMPFlushClause(N);
-}
-
-OMPDependClause *
-OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc,
- OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * VL.size());
- OMPDependClause *Clause =
- new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
- Clause->setDependencyKind(DepKind);
- Clause->setDependencyLoc(DepLoc);
- Clause->setColonLoc(ColonLoc);
- return Clause;
-}
-
-OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
- llvm::alignOf<Expr *>()) +
- sizeof(Expr *) * N);
- return new (Mem) OMPDependClause(N);
-}
-
-const OMPClause *
-OMPExecutableDirective::getSingleClause(OpenMPClauseKind K) const {
- auto &&I = getClausesOfKind(K);
-
- if (I) {
- auto *Clause = *I;
- assert(!++I && "There are at least 2 clauses of the specified kind");
- return Clause;
- }
- return nullptr;
-}
-
-OMPParallelDirective *OMPParallelDirective::Create(
- const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *));
- OMPParallelDirective *Dir = new (Mem) OMPParallelDirective(StartLoc, EndLoc,
- Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *));
- return new (Mem) OMPParallelDirective(NumClauses);
-}
-
-OMPSimdDirective *
-OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
- OMPSimdDirective *Dir = new (Mem)
- OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setIterationVariable(Exprs.IterationVarRef);
- Dir->setLastIteration(Exprs.LastIteration);
- Dir->setCalcLastIteration(Exprs.CalcLastIteration);
- Dir->setPreCond(Exprs.PreCond);
- Dir->setCond(Exprs.Cond);
- Dir->setInit(Exprs.Init);
- Dir->setInc(Exprs.Inc);
- Dir->setCounters(Exprs.Counters);
- Dir->setInits(Exprs.Inits);
- Dir->setUpdates(Exprs.Updates);
- Dir->setFinals(Exprs.Finals);
- return Dir;
-}
-
-OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- unsigned CollapsedNum,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
- return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
-}
-
-OMPForDirective *
-OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
- OMPForDirective *Dir =
- new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setIterationVariable(Exprs.IterationVarRef);
- Dir->setLastIteration(Exprs.LastIteration);
- Dir->setCalcLastIteration(Exprs.CalcLastIteration);
- Dir->setPreCond(Exprs.PreCond);
- Dir->setCond(Exprs.Cond);
- Dir->setInit(Exprs.Init);
- Dir->setInc(Exprs.Inc);
- Dir->setIsLastIterVariable(Exprs.IL);
- Dir->setLowerBoundVariable(Exprs.LB);
- Dir->setUpperBoundVariable(Exprs.UB);
- Dir->setStrideVariable(Exprs.ST);
- Dir->setEnsureUpperBound(Exprs.EUB);
- Dir->setNextLowerBound(Exprs.NLB);
- Dir->setNextUpperBound(Exprs.NUB);
- Dir->setCounters(Exprs.Counters);
- Dir->setInits(Exprs.Inits);
- Dir->setUpdates(Exprs.Updates);
- Dir->setFinals(Exprs.Finals);
- return Dir;
-}
-
-OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- unsigned CollapsedNum,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
- return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
-}
-
-OMPForSimdDirective *
-OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
- OMPForSimdDirective *Dir = new (Mem)
- OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setIterationVariable(Exprs.IterationVarRef);
- Dir->setLastIteration(Exprs.LastIteration);
- Dir->setCalcLastIteration(Exprs.CalcLastIteration);
- Dir->setPreCond(Exprs.PreCond);
- Dir->setCond(Exprs.Cond);
- Dir->setInit(Exprs.Init);
- Dir->setInc(Exprs.Inc);
- Dir->setIsLastIterVariable(Exprs.IL);
- Dir->setLowerBoundVariable(Exprs.LB);
- Dir->setUpperBoundVariable(Exprs.UB);
- Dir->setStrideVariable(Exprs.ST);
- Dir->setEnsureUpperBound(Exprs.EUB);
- Dir->setNextLowerBound(Exprs.NLB);
- Dir->setNextUpperBound(Exprs.NUB);
- Dir->setCounters(Exprs.Counters);
- Dir->setInits(Exprs.Inits);
- Dir->setUpdates(Exprs.Updates);
- Dir->setFinals(Exprs.Finals);
- return Dir;
-}
-
-OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- unsigned CollapsedNum,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
- return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
-}
-
-OMPSectionsDirective *OMPSectionsDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPSectionsDirective *Dir =
- new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPSectionsDirective(NumClauses);
-}
-
-OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPSectionDirective();
-}
-
-OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPSingleDirective *Dir =
- new (Mem) OMPSingleDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPSingleDirective(NumClauses);
-}
-
-OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPMasterDirective();
-}
-
-OMPCriticalDirective *OMPCriticalDirective::Create(
- const ASTContext &C, const DeclarationNameInfo &Name,
- SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPCriticalDirective *Dir =
- new (Mem) OMPCriticalDirective(Name, StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPCriticalDirective();
-}
-
-OMPParallelForDirective *OMPParallelForDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_for));
- OMPParallelForDirective *Dir = new (Mem)
- OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setIterationVariable(Exprs.IterationVarRef);
- Dir->setLastIteration(Exprs.LastIteration);
- Dir->setCalcLastIteration(Exprs.CalcLastIteration);
- Dir->setPreCond(Exprs.PreCond);
- Dir->setCond(Exprs.Cond);
- Dir->setInit(Exprs.Init);
- Dir->setInc(Exprs.Inc);
- Dir->setIsLastIterVariable(Exprs.IL);
- Dir->setLowerBoundVariable(Exprs.LB);
- Dir->setUpperBoundVariable(Exprs.UB);
- Dir->setStrideVariable(Exprs.ST);
- Dir->setEnsureUpperBound(Exprs.EUB);
- Dir->setNextLowerBound(Exprs.NLB);
- Dir->setNextUpperBound(Exprs.NUB);
- Dir->setCounters(Exprs.Counters);
- Dir->setInits(Exprs.Inits);
- Dir->setUpdates(Exprs.Updates);
- Dir->setFinals(Exprs.Finals);
- return Dir;
-}
-
-OMPParallelForDirective *
-OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
- unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) *
- numLoopChildren(CollapsedNum, OMPD_parallel_for));
- return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
-}
-
-OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * Clauses.size() +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
- OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
- StartLoc, EndLoc, CollapsedNum, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setIterationVariable(Exprs.IterationVarRef);
- Dir->setLastIteration(Exprs.LastIteration);
- Dir->setCalcLastIteration(Exprs.CalcLastIteration);
- Dir->setPreCond(Exprs.PreCond);
- Dir->setCond(Exprs.Cond);
- Dir->setInit(Exprs.Init);
- Dir->setInc(Exprs.Inc);
- Dir->setIsLastIterVariable(Exprs.IL);
- Dir->setLowerBoundVariable(Exprs.LB);
- Dir->setUpperBoundVariable(Exprs.UB);
- Dir->setStrideVariable(Exprs.ST);
- Dir->setEnsureUpperBound(Exprs.EUB);
- Dir->setNextLowerBound(Exprs.NLB);
- Dir->setNextUpperBound(Exprs.NUB);
- Dir->setCounters(Exprs.Counters);
- Dir->setInits(Exprs.Inits);
- Dir->setUpdates(Exprs.Updates);
- Dir->setFinals(Exprs.Finals);
- return Dir;
-}
-
-OMPParallelForSimdDirective *
-OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(
- Size + sizeof(OMPClause *) * NumClauses +
- sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
- return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
-}
-
-OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPParallelSectionsDirective *Dir =
- new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPParallelSectionsDirective *
-OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses, EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPParallelSectionsDirective(NumClauses);
-}
-
-OMPTaskDirective *OMPTaskDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTaskDirective *Dir =
- new (Mem) OMPTaskDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTaskDirective(NumClauses);
-}
-
-OMPTaskyieldDirective *OMPTaskyieldDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
- OMPTaskyieldDirective *Dir =
- new (Mem) OMPTaskyieldDirective(StartLoc, EndLoc);
- return Dir;
-}
-
-OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
- return new (Mem) OMPTaskyieldDirective();
-}
-
-OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
- OMPBarrierDirective *Dir = new (Mem) OMPBarrierDirective(StartLoc, EndLoc);
- return Dir;
-}
-
-OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
- return new (Mem) OMPBarrierDirective();
-}
-
-OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
- OMPTaskwaitDirective *Dir = new (Mem) OMPTaskwaitDirective(StartLoc, EndLoc);
- return Dir;
-}
-
-OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
- return new (Mem) OMPTaskwaitDirective();
-}
-
-OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPTaskgroupDirective *Dir =
- new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPTaskgroupDirective();
-}
-
-OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- OpenMPDirectiveKind CancelRegion) {
- unsigned Size = llvm::RoundUpToAlignment(
- sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size);
- OMPCancellationPointDirective *Dir =
- new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
- Dir->setCancelRegion(CancelRegion);
- return Dir;
-}
-
-OMPCancellationPointDirective *
-OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(
- sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size);
- return new (Mem) OMPCancellationPointDirective();
-}
-
-OMPCancelDirective *
-OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc,
- OpenMPDirectiveKind CancelRegion) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size);
- OMPCancelDirective *Dir = new (Mem) OMPCancelDirective(StartLoc, EndLoc);
- Dir->setCancelRegion(CancelRegion);
- return Dir;
-}
-
-OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size);
- return new (Mem) OMPCancelDirective();
-}
-
-OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
- OMPFlushDirective *Dir =
- new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- return Dir;
-}
-
-OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
- return new (Mem) OMPFlushDirective(NumClauses);
-}
-
-OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- OMPOrderedDirective *Dir = new (Mem) OMPOrderedDirective(StartLoc, EndLoc);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
- llvm::alignOf<Stmt *>());
- void *Mem = C.Allocate(Size + sizeof(Stmt *));
- return new (Mem) OMPOrderedDirective();
-}
-
-OMPAtomicDirective *OMPAtomicDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
- Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
- 5 * sizeof(Stmt *));
- OMPAtomicDirective *Dir =
- new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- Dir->setX(X);
- Dir->setV(V);
- Dir->setExpr(E);
- Dir->setUpdateExpr(UE);
- Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
- Dir->IsPostfixUpdate = IsPostfixUpdate;
- return Dir;
-}
-
-OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
- return new (Mem) OMPAtomicDirective(NumClauses);
-}
-
-OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTargetDirective *Dir =
- new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTargetDirective(NumClauses);
-}
-
-OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
- OMPTeamsDirective *Dir =
- new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
- Dir->setClauses(Clauses);
- Dir->setAssociatedStmt(AssociatedStmt);
- return Dir;
-}
-
-OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
- unsigned NumClauses,
- EmptyShell) {
- unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
- llvm::alignOf<OMPClause *>());
- void *Mem =
- C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
- return new (Mem) OMPTeamsDirective(NumClauses);
-}
-
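All of the OMP*Directive Create/CreateEmpty pairs removed above reappear in lib/AST/StmtOpenMP.cpp later in this patch; they share one trailing-storage idiom: round sizeof(Directive) up to the alignment of the trailing pointers, then place the clause array and child statements in the same allocation. A minimal self-contained sketch of that idiom, with a hypothetical Node type standing in for clang's classes and plain malloc standing in for ASTContext::Allocate:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Node {
      unsigned NumElems;
      explicit Node(unsigned N) : NumElems(N) {}

      // Trailing pointers begin at sizeof(Node) rounded up to pointer
      // alignment, mirroring llvm::RoundUpToAlignment(sizeof(X),
      // llvm::alignOf<Expr *>()) in the code above.
      static std::size_t trailingOffset() {
        return (sizeof(Node) + alignof(void *) - 1) & ~(alignof(void *) - 1);
      }
      void **trailing() {
        return reinterpret_cast<void **>(reinterpret_cast<char *>(this) +
                                         trailingOffset());
      }

      // One block holds the object plus its N trailing pointers.
      static Node *Create(unsigned N) {
        void *Mem = std::malloc(trailingOffset() + N * sizeof(void *));
        return new (Mem) Node(N);
      }
    };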
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
new file mode 100644
index 000000000000..e39a01daf96c
--- /dev/null
+++ b/lib/AST/StmtCXX.cpp
@@ -0,0 +1,86 @@
+//===--- StmtCXX.cpp - Classes for representing C++ statements ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtCXX.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtCXX.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+QualType CXXCatchStmt::getCaughtType() const {
+ if (ExceptionDecl)
+ return ExceptionDecl->getType();
+ return QualType();
+}
+
+CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
+ Stmt *tryBlock, ArrayRef<Stmt *> handlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((handlers.size() + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
+}
+
+CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(Empty, numHandlers);
+}
+
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+ ArrayRef<Stmt *> handlers)
+ : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
+ Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
+ Stmts[0] = tryBlock;
+ std::copy(handlers.begin(), handlers.end(), Stmts + 1);
+}
+
+CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
+ Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
+ Stmt *Body, SourceLocation FL,
+ SourceLocation CAL, SourceLocation CL,
+ SourceLocation RPL)
+ : Stmt(CXXForRangeStmtClass), ForLoc(FL), CoawaitLoc(CAL), ColonLoc(CL),
+ RParenLoc(RPL) {
+ SubExprs[RANGE] = Range;
+ SubExprs[BEGINEND] = BeginEndStmt;
+ SubExprs[COND] = Cond;
+ SubExprs[INC] = Inc;
+ SubExprs[LOOPVAR] = LoopVar;
+ SubExprs[BODY] = Body;
+}
+
+Expr *CXXForRangeStmt::getRangeInit() {
+ DeclStmt *RangeStmt = getRangeStmt();
+ VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
+ assert(RangeDecl && "for-range should have a single var decl");
+ return RangeDecl->getInit();
+}
+
+const Expr *CXXForRangeStmt::getRangeInit() const {
+ return const_cast<CXXForRangeStmt *>(this)->getRangeInit();
+}
+
+VarDecl *CXXForRangeStmt::getLoopVariable() {
+ Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
+ assert(LV && "No loop variable in CXXForRangeStmt");
+ return cast<VarDecl>(LV);
+}
+
+const VarDecl *CXXForRangeStmt::getLoopVariable() const {
+ return const_cast<CXXForRangeStmt *>(this)->getLoopVariable();
+}
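CXXTryStmt above keeps its children in the same block as the object: slot 0 is the try block and slots 1..NumHandlers are the handlers. Only Create and the constructor appear in this patch, so here is a hedged, self-contained mirror of the accessor side of that layout (Stmt and TryNode are stand-ins, not clang's types):

    #include <cassert>

    struct Stmt {}; // stand-in for clang::Stmt

    // Mirrors the layout set up in CXXTryStmt's constructor: the Stmt*
    // array lives directly after the object, try block first.
    struct TryNode {
      unsigned NumHandlers;
      Stmt **children() { return reinterpret_cast<Stmt **>(this + 1); }
      Stmt *getTryBlock() { return children()[0]; }
      Stmt *getHandler(unsigned I) {
        assert(I < NumHandlers && "handler index out of range");
        return children()[I + 1];
      }
    };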
diff --git a/lib/AST/StmtIterator.cpp b/lib/AST/StmtIterator.cpp
index 732756fbec9a..861d0908209d 100644
--- a/lib/AST/StmtIterator.cpp
+++ b/lib/AST/StmtIterator.cpp
@@ -42,7 +42,7 @@ void StmtIteratorBase::NextVA() {
if (inDeclGroup()) {
if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
- if (VD->Init)
+ if (VD->hasInit())
return;
NextDecl();
diff --git a/lib/AST/StmtObjC.cpp b/lib/AST/StmtObjC.cpp
new file mode 100644
index 000000000000..a77550c7605d
--- /dev/null
+++ b/lib/AST/StmtObjC.cpp
@@ -0,0 +1,73 @@
+//===--- StmtObjC.cpp - Classes for representing ObjC statements ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtObjC.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtObjC.h"
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
+ Stmt *Body, SourceLocation FCL,
+ SourceLocation RPL)
+ : Stmt(ObjCForCollectionStmtClass) {
+ SubExprs[ELEM] = Elem;
+ SubExprs[COLLECTION] = Collect;
+ SubExprs[BODY] = Body;
+ ForLoc = FCL;
+ RParenLoc = RPL;
+}
+
+ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt)
+ : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
+ NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != nullptr) {
+ Stmt **Stmts = getStmts();
+ Stmts[0] = atTryStmt;
+ for (unsigned I = 0; I != NumCatchStmts; ++I)
+ Stmts[I + 1] = CatchStmts[I];
+
+ if (HasFinally)
+ Stmts[NumCatchStmts + 1] = atFinallyStmt;
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
+ SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt) {
+ unsigned Size =
+ sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
+ atFinallyStmt);
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
+ unsigned NumCatchStmts,
+ bool HasFinally) {
+ unsigned Size =
+ sizeof(ObjCAtTryStmt) + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
+}
+
+SourceLocation ObjCAtTryStmt::getLocEnd() const {
+ if (HasFinally)
+ return getFinallyStmt()->getLocEnd();
+ if (NumCatchStmts)
+ return getCatchStmt(NumCatchStmts - 1)->getLocEnd();
+ return getTryBody()->getLocEnd();
+}
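
The slot arithmetic in both ObjCAtTryStmt factories above follows one rule: slot 0 is the @try body, slots 1..NumCatchStmts hold the @catch blocks, and @finally, when present, takes exactly one extra trailing slot; getLocEnd then walks that same layout back to front. A one-line restatement of the count (illustrative only):

unsigned objcAtTrySlots(unsigned NumCatchStmts, bool HasFinally) {
  // 1 (@try body) + catch blocks + optional @finally slot; this is the
  // (1 + NumCatchStmts + HasFinally) factor in Create and CreateEmpty.
  return 1 + NumCatchStmts + (HasFinally ? 1 : 0);
}
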
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
new file mode 100644
index 000000000000..7f923d8a8251
--- /dev/null
+++ b/lib/AST/StmtOpenMP.cpp
@@ -0,0 +1,884 @@
+//===--- StmtOpenMP.cpp - Classes for OpenMP directives -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt declared in StmtOpenMP.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtOpenMP.h"
+
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
+ assert(Clauses.size() == getNumClauses() &&
+ "Number of clauses is not the same as the preallocated buffer");
+ std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
+}
+
+void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of loop counters is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getCounters().begin());
+}
+
+void OMPLoopDirective::setPrivateCounters(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() && "Number of loop private counters "
+ "is not the same as the collapsed "
+ "number");
+ std::copy(A.begin(), A.end(), getPrivateCounters().begin());
+}
+
+void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter inits is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getInits().begin());
+}
+
+void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter updates is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getUpdates().begin());
+}
+
+void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
+ assert(A.size() == getCollapsedNumber() &&
+ "Number of counter finals is not the same as the collapsed number");
+ std::copy(A.begin(), A.end(), getFinals().begin());
+}
+
+OMPParallelDirective *OMPParallelDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPParallelDirective *Dir =
+ new (Mem) OMPParallelDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPParallelDirective(NumClauses);
+}
+
+OMPSimdDirective *
+OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
+ OMPSimdDirective *Dir = new (Mem)
+ OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
+ return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPForDirective *
+OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
+ OMPForDirective *Dir =
+ new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
+ return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
+}
+
+OMPForSimdDirective *
+OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ OMPForSimdDirective *Dir = new (Mem)
+ OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
+ return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPSectionsDirective *OMPSectionsDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPSectionsDirective *Dir =
+ new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPSectionsDirective(NumClauses);
+}
+
+OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt,
+ bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPSectionDirective();
+}
+
+OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPSingleDirective *Dir =
+ new (Mem) OMPSingleDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPSingleDirective(NumClauses);
+}
+
+OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPMasterDirective();
+}
+
+OMPCriticalDirective *OMPCriticalDirective::Create(
+ const ASTContext &C, const DeclarationNameInfo &Name,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPCriticalDirective *Dir =
+ new (Mem) OMPCriticalDirective(Name, StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPCriticalDirective(NumClauses);
+}
+
+OMPParallelForDirective *OMPParallelForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
+ OMPParallelForDirective *Dir = new (Mem)
+ OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelForDirective *
+OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_parallel_for));
+ return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
+}
+
+OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPParallelForSimdDirective *
+OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
+ return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPParallelSectionsDirective *Dir =
+ new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelSectionsDirective *
+OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPParallelSectionsDirective(NumClauses);
+}
+
+OMPTaskDirective *
+OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, bool HasCancel) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTaskDirective *Dir =
+ new (Mem) OMPTaskDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTaskDirective(NumClauses);
+}
+
+OMPTaskyieldDirective *OMPTaskyieldDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
+ OMPTaskyieldDirective *Dir =
+ new (Mem) OMPTaskyieldDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
+ return new (Mem) OMPTaskyieldDirective();
+}
+
+OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
+ OMPBarrierDirective *Dir = new (Mem) OMPBarrierDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
+ return new (Mem) OMPBarrierDirective();
+}
+
+OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
+ OMPTaskwaitDirective *Dir = new (Mem) OMPTaskwaitDirective(StartLoc, EndLoc);
+ return Dir;
+}
+
+OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
+ return new (Mem) OMPTaskwaitDirective();
+}
+
+OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ OMPTaskgroupDirective *Dir =
+ new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size + sizeof(Stmt *));
+ return new (Mem) OMPTaskgroupDirective();
+}
+
+OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind CancelRegion) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ OMPCancellationPointDirective *Dir =
+ new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
+ Dir->setCancelRegion(CancelRegion);
+ return Dir;
+}
+
+OMPCancellationPointDirective *
+OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ return new (Mem) OMPCancellationPointDirective();
+}
+
+OMPCancelDirective *
+OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ OpenMPDirectiveKind CancelRegion) {
+ unsigned Size = llvm::RoundUpToAlignment(
+ sizeof(OMPCancelDirective) + sizeof(OMPClause *) * Clauses.size(),
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ OMPCancelDirective *Dir =
+ new (Mem) OMPCancelDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setCancelRegion(CancelRegion);
+ return Dir;
+}
+
+OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective) +
+ sizeof(OMPClause *) * NumClauses,
+ llvm::alignOf<Stmt *>());
+ void *Mem = C.Allocate(Size);
+ return new (Mem) OMPCancelDirective(NumClauses);
+}
+
+OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
+ OMPFlushDirective *Dir =
+ new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
+ return new (Mem) OMPFlushDirective(NumClauses);
+}
+
+OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * Clauses.size());
+ OMPOrderedDirective *Dir =
+ new (Mem) OMPOrderedDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * NumClauses);
+ return new (Mem) OMPOrderedDirective(NumClauses);
+}
+
+OMPAtomicDirective *OMPAtomicDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
+ Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ 5 * sizeof(Stmt *));
+ OMPAtomicDirective *Dir =
+ new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setX(X);
+ Dir->setV(V);
+ Dir->setExpr(E);
+ Dir->setUpdateExpr(UE);
+ Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
+ Dir->IsPostfixUpdate = IsPostfixUpdate;
+ return Dir;
+}
+
+OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
+ return new (Mem) OMPAtomicDirective(NumClauses);
+}
+
+OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetDirective *Dir =
+ new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTargetDirective(NumClauses);
+}
+
+OMPTargetDataDirective *OMPTargetDataDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ void *Mem =
+ C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPTargetDataDirective),
+ llvm::alignOf<OMPClause *>()) +
+ sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetDataDirective *Dir =
+ new (Mem) OMPTargetDataDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetDataDirective *OMPTargetDataDirective::CreateEmpty(const ASTContext &C,
+ unsigned N,
+ EmptyShell) {
+ void *Mem =
+ C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPTargetDataDirective),
+ llvm::alignOf<OMPClause *>()) +
+ sizeof(OMPClause *) * N + sizeof(Stmt *));
+ return new (Mem) OMPTargetDataDirective(N);
+}
+
+OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTeamsDirective *Dir =
+ new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTeamsDirective(NumClauses);
+}
+
+OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
+ OMPTaskLoopDirective *Dir = new (Mem)
+ OMPTaskLoopDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPTaskLoopDirective *OMPTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
+ return new (Mem) OMPTaskLoopDirective(CollapsedNum, NumClauses);
+}
+
+OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
+ OMPTaskLoopSimdDirective *Dir = new (Mem)
+ OMPTaskLoopSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPTaskLoopSimdDirective *
+OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskLoopSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
+ return new (Mem) OMPTaskLoopSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPDistributeDirective *OMPDistributeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPDistributeDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_distribute));
+ OMPDistributeDirective *Dir = new (Mem)
+ OMPDistributeDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ return Dir;
+}
+
+OMPDistributeDirective *
+OMPDistributeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPDistributeDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_distribute));
+ return new (Mem) OMPDistributeDirective(CollapsedNum, NumClauses);
+}
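
Every Create/CreateEmpty pair in this file computes the same layout: the directive header rounded up to clause-pointer alignment, then the clause array, then the child-statement slots. A hedged sketch of that size computation, with roundUp written by hand in place of llvm::RoundUpToAlignment:

#include <cstddef>

constexpr std::size_t roundUp(std::size_t Value, std::size_t Align) {
  return (Value + Align - 1) / Align * Align;
}

// For a directive type D, the single allocation is laid out as
//   [ D header | OMPClause *[NumClauses] | Stmt *[NumChildren] ]
// where NumChildren is 1 for plain directives (the associated statement)
// and numLoopChildren(CollapsedNum, Kind) for loop directives.
template <typename D>
constexpr std::size_t directiveAllocSize(std::size_t NumClauses,
                                         std::size_t NumChildren) {
  return roundUp(sizeof(D), alignof(void *)) +
         NumClauses * sizeof(void *) + NumChildren * sizeof(void *);
}
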
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 79600773f567..e55b2fc19a1a 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CharInfo.h"
@@ -601,6 +602,8 @@ public:
void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
OS << "if(";
+ if (Node->getNameModifier() != OMPD_unknown)
+ OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
}
@@ -623,6 +626,12 @@ void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
+ OS << "simdlen(";
+ Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
OS << "collapse(";
Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
@@ -642,8 +651,18 @@ void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
}
void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
- OS << "schedule("
- << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
+ OS << "schedule(";
+ if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getFirstScheduleModifier());
+ if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << ", ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getSecondScheduleModifier());
+ }
+ OS << ": ";
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
if (Node->getChunkSize()) {
OS << ", ";
Node->getChunkSize()->printPretty(OS, nullptr, Policy);
@@ -651,8 +670,13 @@ void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
OS << ")";
}
-void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *) {
+void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
OS << "ordered";
+ if (auto *Num = Node->getNumForLoops()) {
+ OS << "(";
+ Num->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
}
void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
@@ -663,6 +687,10 @@ void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
OS << "untied";
}
+void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
+ OS << "nogroup";
+}
+
void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
OS << "mergeable";
}
@@ -683,6 +711,54 @@ void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
OS << "seq_cst";
}
+void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
+ OS << "threads";
+}
+
+void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
+
+void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
+ OS << "device(";
+ Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
+ OS << "num_teams(";
+ Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
+ OS << "thread_limit(";
+ Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
+ OS << "priority(";
+ Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
+ OS << "grainsize(";
+ Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
+ OS << "num_tasks(";
+ Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
+ OS << "hint(";
+ Node->getHint()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
template<typename T>
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
@@ -756,7 +832,13 @@ void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
if (!Node->varlist_empty()) {
OS << "linear";
+ if (Node->getModifierLoc().isValid()) {
+ OS << '('
+ << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
+ }
VisitOMPClauseList(Node, '(');
+ if (Node->getModifierLoc().isValid())
+ OS << ')';
if (Node->getStep() != nullptr) {
OS << ": ";
Node->getStep()->printPretty(OS, nullptr, Policy, 0);
@@ -801,11 +883,28 @@ void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
}
void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
+ OS << "depend(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ if (!Node->varlist_empty()) {
+ OS << " :";
+ VisitOMPClauseList(Node, ' ');
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (!Node->varlist_empty()) {
- OS << "depend(";
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getDependencyKind())
- << " :";
+ OS << "map(";
+ if (Node->getMapType() != OMPC_MAP_unknown) {
+ if (Node->getMapTypeModifier() != OMPC_MAP_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map,
+ Node->getMapTypeModifier());
+ OS << ',';
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
+ OS << ':';
+ }
VisitOMPClauseList(Node, ' ');
OS << ")";
}
@@ -881,6 +980,7 @@ void StmtPrinter::VisitOMPCriticalDirective(OMPCriticalDirective *Node) {
Node->getDirectiveName().printName(OS);
OS << ")";
}
+ OS << " ";
PrintOMPExecutableDirective(Node);
}
@@ -932,7 +1032,7 @@ void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
}
void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
- Indent() << "#pragma omp ordered";
+ Indent() << "#pragma omp ordered ";
PrintOMPExecutableDirective(Node);
}
@@ -946,6 +1046,11 @@ void StmtPrinter::VisitOMPTargetDirective(OMPTargetDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPTargetDataDirective(OMPTargetDataDirective *Node) {
+ Indent() << "#pragma omp target data ";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPTeamsDirective(OMPTeamsDirective *Node) {
Indent() << "#pragma omp teams ";
PrintOMPExecutableDirective(Node);
@@ -960,9 +1065,26 @@ void StmtPrinter::VisitOMPCancellationPointDirective(
void StmtPrinter::VisitOMPCancelDirective(OMPCancelDirective *Node) {
Indent() << "#pragma omp cancel "
- << getOpenMPDirectiveName(Node->getCancelRegion());
+ << getOpenMPDirectiveName(Node->getCancelRegion()) << " ";
PrintOMPExecutableDirective(Node);
}
+
+void StmtPrinter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *Node) {
+ Indent() << "#pragma omp taskloop ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTaskLoopSimdDirective(
+ OMPTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp taskloop simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) {
+ Indent() << "#pragma omp distribute ";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -1110,8 +1232,6 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
case BuiltinType::ULong: OS << "UL"; break;
case BuiltinType::LongLong: OS << "LL"; break;
case BuiltinType::ULongLong: OS << "ULL"; break;
- case BuiltinType::Int128: OS << "i128"; break;
- case BuiltinType::UInt128: OS << "Ui128"; break;
}
}
@@ -1185,8 +1305,8 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
OS << ", ";
bool PrintedSomething = false;
for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
- OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
- if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Array) {
+ OffsetOfNode ON = Node->getComponent(i);
+ if (ON.getKind() == OffsetOfNode::Array) {
// Array node
OS << "[";
PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex()));
@@ -1196,7 +1316,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
}
// Skip implicit base indirections.
- if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Base)
+ if (ON.getKind() == OffsetOfNode::Base)
continue;
// Field or identifier node.
@@ -1266,6 +1386,19 @@ void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
OS << "]";
}
+void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ if (Node->getLowerBound())
+ PrintExpr(Node->getLowerBound());
+ if (Node->getColonLoc().isValid()) {
+ OS << ":";
+ if (Node->getLength())
+ PrintExpr(Node->getLength());
+ }
+ OS << "]";
+}
+
void StmtPrinter::PrintCallArgs(CallExpr *Call) {
for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
@@ -1667,6 +1800,13 @@ void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) {
OS << Node->getPropertyDecl()->getDeclName();
}
+void StmtPrinter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ PrintExpr(Node->getIdx());
+ OS << "]";
+}
+
void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
switch (Node->getLiteralOperatorKind()) {
case UserDefinedLiteral::LOK_Raw:
@@ -1679,7 +1819,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
assert(Args);
if (Args->size() != 1) {
- OS << "operator \"\" " << Node->getUDSuffix()->getName();
+ OS << "operator\"\"" << Node->getUDSuffix()->getName();
TemplateSpecializationType::PrintTemplateArgumentList(
OS, Args->data(), Args->size(), Policy);
OS << "()";
@@ -1768,7 +1908,7 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
Arg != ArgEnd; ++Arg) {
- if (Arg->isDefaultArgument())
+ if ((*Arg)->isDefaultArgument())
break;
if (Arg != Node->arg_begin())
OS << ", ";
@@ -2115,6 +2255,31 @@ void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) {
OS << ")";
}
+// C++ Coroutines TS
+
+void StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ Visit(S->getBody());
+}
+
+void StmtPrinter::VisitCoreturnStmt(CoreturnStmt *S) {
+ OS << "co_return";
+ if (S->getOperand()) {
+ OS << " ";
+ Visit(S->getOperand());
+ }
+ OS << ";";
+}
+
+void StmtPrinter::VisitCoawaitExpr(CoawaitExpr *S) {
+ OS << "co_await ";
+ PrintExpr(S->getOperand());
+}
+
+void StmtPrinter::VisitCoyieldExpr(CoyieldExpr *S) {
+ OS << "co_yield ";
+ PrintExpr(S->getOperand());
+}
+
// Obj-C
void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
@@ -2129,14 +2294,11 @@ void StmtPrinter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
OS << "@[ ";
- StmtRange ch = E->children();
- if (ch.first != ch.second) {
- while (1) {
- Visit(*ch.first);
- ++ch.first;
- if (ch.first == ch.second) break;
+ ObjCArrayLiteral::child_range Ch = E->children();
+ for (auto I = Ch.begin(), E = Ch.end(); I != E; ++I) {
+ if (I != Ch.begin())
OS << ", ";
- }
+ Visit(*I);
}
OS << " ]";
}
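
The ObjCArrayLiteral rewrite above replaces the old StmtRange first/second loop with the now-standard child_range pattern; comparing the iterator against begin() decides when to emit a separator. The same shape in a self-contained form (illustrative, not Clang API):

#include <iostream>
#include <vector>

void printBracketed(const std::vector<int> &Elems) {
  std::cout << "@[ ";
  for (auto I = Elems.begin(), E = Elems.end(); I != E; ++I) {
    if (I != Elems.begin())
      std::cout << ", "; // separator before every element but the first
    std::cout << *I;
  }
  std::cout << " ]";
}
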
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index da996920c420..175a43abbf61 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
@@ -261,6 +262,7 @@ class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
/// \brief Process clauses with list of variables.
template <typename T>
void VisitOMPClauseList(T *Node);
+
public:
OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
#define OPENMP_CLAUSE(Name, Class) \
@@ -288,6 +290,11 @@ void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
Profiler->VisitStmt(C->getSafelen());
}
+void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) {
+ if (C->getSimdlen())
+ Profiler->VisitStmt(C->getSimdlen());
+}
+
void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
if (C->getNumForLoops())
Profiler->VisitStmt(C->getNumForLoops());
@@ -306,7 +313,10 @@ void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
}
}
-void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *) {}
+void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *C) {
+ if (auto *Num = C->getNumForLoops())
+ Profiler->VisitStmt(Num);
+}
void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *) {}
@@ -324,6 +334,12 @@ void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
+void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {}
+
+void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
+
+void OMPClauseProfiler::VisitOMPNogroupClause(const OMPNogroupClause *) {}
+
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
for (auto *E : Node->varlists()) {
@@ -369,6 +385,9 @@ void OMPClauseProfiler::VisitOMPReductionClause(
C->getQualifierLoc().getNestedNameSpecifier());
Profiler->VisitName(C->getNameInfo().getName());
VisitOMPClauseList(C);
+ for (auto *E : C->privates()) {
+ Profiler->VisitStmt(E);
+ }
for (auto *E : C->lhs_exprs()) {
Profiler->VisitStmt(E);
}
@@ -381,6 +400,9 @@ void OMPClauseProfiler::VisitOMPReductionClause(
}
void OMPClauseProfiler::VisitOMPLinearClause(const OMPLinearClause *C) {
VisitOMPClauseList(C);
+ for (auto *E : C->privates()) {
+ Profiler->VisitStmt(E);
+ }
for (auto *E : C->inits()) {
Profiler->VisitStmt(E);
}
@@ -428,6 +450,31 @@ void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) {
void OMPClauseProfiler::VisitOMPDependClause(const OMPDependClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPDeviceClause(const OMPDeviceClause *C) {
+ Profiler->VisitStmt(C->getDevice());
+}
+void OMPClauseProfiler::VisitOMPMapClause(const OMPMapClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
+ Profiler->VisitStmt(C->getNumTeams());
+}
+void OMPClauseProfiler::VisitOMPThreadLimitClause(
+ const OMPThreadLimitClause *C) {
+ Profiler->VisitStmt(C->getThreadLimit());
+}
+void OMPClauseProfiler::VisitOMPPriorityClause(const OMPPriorityClause *C) {
+ Profiler->VisitStmt(C->getPriority());
+}
+void OMPClauseProfiler::VisitOMPGrainsizeClause(const OMPGrainsizeClause *C) {
+ Profiler->VisitStmt(C->getGrainsize());
+}
+void OMPClauseProfiler::VisitOMPNumTasksClause(const OMPNumTasksClause *C) {
+ Profiler->VisitStmt(C->getNumTasks());
+}
+void OMPClauseProfiler::VisitOMPHintClause(const OMPHintClause *C) {
+ Profiler->VisitStmt(C->getHint());
+}
}
void
@@ -533,6 +580,10 @@ void StmtProfiler::VisitOMPTargetDirective(const OMPTargetDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPTargetDataDirective(const OMPTargetDataDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPTeamsDirective(const OMPTeamsDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -546,6 +597,20 @@ void StmtProfiler::VisitOMPCancelDirective(const OMPCancelDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPTaskLoopDirective(const OMPTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTaskLoopSimdDirective(
+ const OMPTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPDistributeDirective(
+ const OMPDistributeDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -610,22 +675,22 @@ void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
VisitType(S->getTypeSourceInfo()->getType());
unsigned n = S->getNumComponents();
for (unsigned i = 0; i < n; ++i) {
- const OffsetOfExpr::OffsetOfNode& ON = S->getComponent(i);
+ const OffsetOfNode &ON = S->getComponent(i);
ID.AddInteger(ON.getKind());
switch (ON.getKind()) {
- case OffsetOfExpr::OffsetOfNode::Array:
+ case OffsetOfNode::Array:
// Expressions handled below.
break;
- case OffsetOfExpr::OffsetOfNode::Field:
+ case OffsetOfNode::Field:
VisitDecl(ON.getField());
break;
- case OffsetOfExpr::OffsetOfNode::Identifier:
+ case OffsetOfNode::Identifier:
ID.AddPointer(ON.getFieldName());
break;
-
- case OffsetOfExpr::OffsetOfNode::Base:
+
+ case OffsetOfNode::Base:
// These nodes are implicit, and therefore don't need profiling.
break;
}
@@ -646,6 +711,10 @@ void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitCallExpr(const CallExpr *S) {
VisitExpr(S);
}
@@ -824,6 +893,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Arrow:
case OO_Call:
case OO_Conditional:
+ case OO_Coawait:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Invalid operator call kind");
@@ -985,7 +1055,6 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
BinaryOp = BO_Comma;
return Stmt::BinaryOperatorClass;
-
case OO_ArrowStar:
BinaryOp = BO_PtrMemI;
return Stmt::BinaryOperatorClass;
@@ -997,7 +1066,6 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
llvm_unreachable("Invalid overloaded operator expression");
}
-
void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
if (S->isTypeDependent()) {
// Type-dependent operator calls are profiled like their underlying
@@ -1092,6 +1160,11 @@ void StmtProfiler::VisitMSPropertyRefExpr(const MSPropertyRefExpr *S) {
VisitDecl(S->getPropertyDecl());
}
+void StmtProfiler::VisitMSPropertySubscriptExpr(
+ const MSPropertySubscriptExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isImplicit());
@@ -1169,7 +1242,6 @@ void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
VisitDecl(S->getOperatorDelete());
}
-
void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
VisitExpr(S);
VisitType(S->getAllocatedType());
@@ -1203,8 +1275,7 @@ void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
VisitName(S->getName());
ID.AddBoolean(S->hasExplicitTemplateArgs());
if (S->hasExplicitTemplateArgs())
- VisitTemplateArguments(S->getExplicitTemplateArgs().getTemplateArgs(),
- S->getExplicitTemplateArgs().NumTemplateArgs);
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void
@@ -1290,6 +1361,14 @@ void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
VisitExpr(S);
VisitDecl(S->getPack());
+ if (S->isPartiallySubstituted()) {
+ auto Args = S->getPartialArguments();
+ ID.AddInteger(Args.size());
+ for (const auto &TA : Args)
+ VisitTemplateArgument(TA);
+ } else {
+ ID.AddInteger(0);
+ }
}
void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
@@ -1323,6 +1402,22 @@ void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) {
ID.AddInteger(S->getOperator());
}
+void StmtProfiler::VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCoreturnStmt(const CoreturnStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCoawaitExpr(const CoawaitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCoyieldExpr(const CoyieldExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
VisitExpr(E);
}
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index f8b73cb8f89a..e9edb0df66df 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -53,7 +53,7 @@ static void printIntegral(const TemplateArgument &TemplArg,
}
}
- if (T->isBooleanType()) {
+ if (T->isBooleanType() && !Policy.MSVCFormatting) {
Out << (Val.getBoolValue() ? "true" : "false");
} else if (T->isCharType()) {
const char Ch = Val.getZExtValue();
@@ -88,15 +88,13 @@ TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
Integer.Type = Type.getAsOpaquePtr();
}
-TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
- const TemplateArgument *Args,
- unsigned NumArgs) {
- if (NumArgs == 0)
+TemplateArgument
+TemplateArgument::CreatePackCopy(ASTContext &Context,
+ ArrayRef<TemplateArgument> Args) {
+ if (Args.empty())
return getEmptyPack();
-
- TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
- std::copy(Args, Args + NumArgs, Storage);
- return TemplateArgument(Storage, NumArgs);
+
+ return TemplateArgument(Args.copy(Context));
}
bool TemplateArgument::isDependent() const {
@@ -522,94 +520,67 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
const TemplateArgumentListInfo &List) {
- assert(llvm::alignOf<ASTTemplateArgumentListInfo>() >=
- llvm::alignOf<TemplateArgumentLoc>());
- std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size());
+ std::size_t size = totalSizeToAlloc<TemplateArgumentLoc>(List.size());
void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
- ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
- TAI->initializeFrom(List);
- return TAI;
+ return new (Mem) ASTTemplateArgumentListInfo(List);
}
-void ASTTemplateArgumentListInfo::initializeFrom(
- const TemplateArgumentListInfo &Info) {
+ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
+ const TemplateArgumentListInfo &Info) {
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
- TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ TemplateArgumentLoc *ArgBuffer = getTrailingObjects<TemplateArgumentLoc>();
for (unsigned i = 0; i != NumTemplateArgs; ++i)
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
-void ASTTemplateArgumentListInfo::initializeFrom(
- const TemplateArgumentListInfo &Info,
- bool &Dependent,
- bool &InstantiationDependent,
- bool &ContainsUnexpandedParameterPack) {
+void ASTTemplateKWAndArgsInfo::initializeFrom(
+ SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
+ TemplateArgumentLoc *OutArgArray) {
+ this->TemplateKWLoc = TemplateKWLoc;
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
+}
+
+void ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
+ assert(TemplateKWLoc.isValid());
+ LAngleLoc = SourceLocation();
+ RAngleLoc = SourceLocation();
+ this->TemplateKWLoc = TemplateKWLoc;
+ NumTemplateArgs = 0;
+}
+
+void ASTTemplateKWAndArgsInfo::initializeFrom(
+ SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
+ TemplateArgumentLoc *OutArgArray, bool &Dependent,
+ bool &InstantiationDependent, bool &ContainsUnexpandedParameterPack) {
+ this->TemplateKWLoc = TemplateKWLoc;
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
- TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
for (unsigned i = 0; i != NumTemplateArgs; ++i) {
Dependent = Dependent || Info[i].getArgument().isDependent();
- InstantiationDependent = InstantiationDependent ||
+ InstantiationDependent = InstantiationDependent ||
Info[i].getArgument().isInstantiationDependent();
- ContainsUnexpandedParameterPack
- = ContainsUnexpandedParameterPack ||
+ ContainsUnexpandedParameterPack =
+ ContainsUnexpandedParameterPack ||
Info[i].getArgument().containsUnexpandedParameterPack();
- new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+ new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
}
}
-void ASTTemplateArgumentListInfo::copyInto(
- TemplateArgumentListInfo &Info) const {
+void ASTTemplateKWAndArgsInfo::copyInto(const TemplateArgumentLoc *ArgArray,
+ TemplateArgumentListInfo &Info) const {
Info.setLAngleLoc(LAngleLoc);
Info.setRAngleLoc(RAngleLoc);
for (unsigned I = 0; I != NumTemplateArgs; ++I)
- Info.addArgument(getTemplateArgs()[I]);
-}
-
-std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) {
- return sizeof(ASTTemplateArgumentListInfo) +
- sizeof(TemplateArgumentLoc) * NumTemplateArgs;
-}
-
-void
-ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc,
- const TemplateArgumentListInfo &Info) {
- Base::initializeFrom(Info);
- setTemplateKeywordLoc(TemplateKWLoc);
-}
-
-void
-ASTTemplateKWAndArgsInfo
-::initializeFrom(SourceLocation TemplateKWLoc,
- const TemplateArgumentListInfo &Info,
- bool &Dependent,
- bool &InstantiationDependent,
- bool &ContainsUnexpandedParameterPack) {
- Base::initializeFrom(Info, Dependent, InstantiationDependent,
- ContainsUnexpandedParameterPack);
- setTemplateKeywordLoc(TemplateKWLoc);
-}
-
-void
-ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
- // No explicit template arguments, but template keyword loc is valid.
- assert(TemplateKWLoc.isValid());
- LAngleLoc = SourceLocation();
- RAngleLoc = SourceLocation();
- NumTemplateArgs = 0;
- setTemplateKeywordLoc(TemplateKWLoc);
-}
-
-std::size_t
-ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
- // Add space for the template keyword location.
- // FIXME: There's room for this in the padding before the template args in
- // 64-bit builds.
- return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation);
+ Info.addArgument(ArgArray[I]);
}
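
The hunks above replace the hand-rolled sizeFor()/placement-new layout with llvm::TrailingObjects, which sizes a header object plus its trailing array in one totalSizeToAlloc call. A self-contained sketch of the idiom, with all names (ToyArgList, Create) invented for the example:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
#include <memory>
#include <new>

class ToyArgList final : private llvm::TrailingObjects<ToyArgList, int> {
  friend TrailingObjects;
  unsigned NumArgs;

  explicit ToyArgList(llvm::ArrayRef<int> Args) : NumArgs(Args.size()) {
    // The arguments live immediately after the header object.
    std::uninitialized_copy(Args.begin(), Args.end(),
                            getTrailingObjects<int>());
  }

public:
  static ToyArgList *Create(llvm::ArrayRef<int> Args) {
    void *Mem = ::operator new(totalSizeToAlloc<int>(Args.size()));
    return new (Mem) ToyArgList(Args);
  }

  llvm::ArrayRef<int> args() const {
    return llvm::makeArrayRef(getTrailingObjects<int>(), NumArgs);
  }
};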
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
index 77c8fd5d1e02..47e0255d52ef 100644
--- a/lib/AST/TemplateName.cpp
+++ b/lib/AST/TemplateName.cpp
@@ -24,7 +24,7 @@ using namespace llvm;
TemplateArgument
SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
- return TemplateArgument(Arguments, size());
+ return TemplateArgument(llvm::makeArrayRef(Arguments, size()));
}
void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
@@ -40,7 +40,7 @@ void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
ASTContext &Context) {
- Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
+ Profile(ID, Context, Parameter, getArgumentPack());
}
void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
@@ -51,6 +51,22 @@ void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
ArgPack.Profile(ID, Context);
}
+TemplateName::TemplateName(void *Ptr) {
+ Storage = StorageType::getFromOpaqueValue(Ptr);
+}
+
+TemplateName::TemplateName(TemplateDecl *Template) : Storage(Template) {}
+TemplateName::TemplateName(OverloadedTemplateStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(SubstTemplateTemplateParmStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
+ : Storage(Storage) {}
+TemplateName::TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) {}
+TemplateName::TemplateName(DependentTemplateName *Dep) : Storage(Dep) {}
+
+bool TemplateName::isNull() const { return Storage.isNull(); }
+
TemplateName::NameKind TemplateName::getKind() const {
if (Storage.is<TemplateDecl *>())
return Template;
@@ -81,6 +97,40 @@ TemplateDecl *TemplateName::getAsTemplateDecl() const {
return nullptr;
}
+OverloadedTemplateStorage *TemplateName::getAsOverloadedTemplate() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsOverloadedStorage();
+
+ return nullptr;
+}
+
+SubstTemplateTemplateParmStorage *
+TemplateName::getAsSubstTemplateTemplateParm() const {
+ if (UncommonTemplateNameStorage *uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return uncommon->getAsSubstTemplateTemplateParm();
+
+ return nullptr;
+}
+
+SubstTemplateTemplateParmPackStorage *
+TemplateName::getAsSubstTemplateTemplateParmPack() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsSubstTemplateTemplateParmPack();
+
+ return nullptr;
+}
+
+QualifiedTemplateName *TemplateName::getAsQualifiedTemplateName() const {
+ return Storage.dyn_cast<QualifiedTemplateName *>();
+}
+
+DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
+ return Storage.dyn_cast<DependentTemplateName *>();
+}
+
bool TemplateName::isDependent() const {
if (TemplateDecl *Template = getAsTemplateDecl()) {
if (isa<TemplateTemplateParmDecl>(Template))
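
The accessors added above are thin wrappers over a tagged pointer: TemplateName's Storage behaves like an llvm::PointerUnion, so each getAs* reduces to a dyn_cast on the union and isNull() checks the pointer directly. A reduced sketch with invented Foo/Bar payload types:

#include "llvm/ADT/PointerUnion.h"

struct Foo { int x; };
struct Bar { int y; };

struct ToyName {
  llvm::PointerUnion<Foo *, Bar *> Storage;

  bool isNull() const { return Storage.isNull(); }
  Foo *getAsFoo() const { return Storage.dyn_cast<Foo *>(); }
  Bar *getAsBar() const { return Storage.dyn_cast<Bar *>(); }
};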
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index cee5fee83913..7dd38cba229b 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -509,6 +510,28 @@ bool Type::isObjCClassOrClassKindOfType() const {
return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
}
+/// Was this type written with the special inert-in-MRC __unsafe_unretained
+/// qualifier?
+///
+/// This approximates the answer to the following question: if this
+/// translation unit were compiled in ARC, would this type be qualified
+/// with __unsafe_unretained?
+bool Type::isObjCInertUnsafeUnretainedType() const {
+ const Type *cur = this;
+ while (true) {
+ if (auto attributed = dyn_cast<AttributedType>(cur)) {
+ if (attributed->getAttrKind() ==
+ AttributedType::attr_objc_inert_unsafe_unretained)
+ return true;
+ }
+
+ // Single-step desugar until we run out of sugar.
+ QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (next.getTypePtr() == cur) return false;
+ cur = next.getTypePtr();
+ }
+}
+
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ArrayRef<QualType> typeArgs,
ArrayRef<ObjCProtocolDecl *> protocols,
@@ -836,11 +859,8 @@ public:
}
if (exceptionChanged) {
- unsigned size = sizeof(QualType) * exceptionTypes.size();
- void *mem = Ctx.Allocate(size, llvm::alignOf<QualType>());
- memcpy(mem, exceptionTypes.data(), size);
- info.ExceptionSpec.Exceptions
- = llvm::makeArrayRef((QualType *)mem, exceptionTypes.size());
+ info.ExceptionSpec.Exceptions =
+ llvm::makeArrayRef(exceptionTypes).copy(Ctx);
}
}
@@ -950,7 +970,7 @@ public:
== T->getDeducedType().getAsOpaquePtr())
return QualType(T, 0);
- return Ctx.getAutoType(deducedType, T->isDecltypeAuto(),
+ return Ctx.getAutoType(deducedType, T->getKeyword(),
T->isDependentType());
}
@@ -1158,11 +1178,8 @@ QualType QualType::substObjCTypeArgs(
}
if (exceptionChanged) {
- unsigned size = sizeof(QualType) * exceptionTypes.size();
- void *mem = ctx.Allocate(size, llvm::alignOf<QualType>());
- memcpy(mem, exceptionTypes.data(), size);
- info.ExceptionSpec.Exceptions
- = llvm::makeArrayRef((QualType *)mem, exceptionTypes.size());
+ info.ExceptionSpec.Exceptions =
+ llvm::makeArrayRef(exceptionTypes).copy(ctx);
}
}
@@ -1275,7 +1292,7 @@ Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
if (!dcTypeParams)
return None;
} else {
- // If we are in neither a class mor a category, there's no
+ // If we are in neither a class nor a category, there's no
// substitution to perform.
dcCategoryDecl = dyn_cast<ObjCCategoryDecl>(dc);
if (!dcCategoryDecl)
@@ -1905,6 +1922,28 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
case IncompleteArray:
// An array of unknown size is an incomplete type (C99 6.2.5p22).
return true;
+ case MemberPointer: {
+ // Member pointers in the MS ABI have special behavior in
+ // RequireCompleteType: they attach a MSInheritanceAttr to the CXXRecordDecl
+ // to indicate which inheritance model to use.
+ auto *MPTy = cast<MemberPointerType>(CanonicalType);
+ const Type *ClassTy = MPTy->getClass();
+ // Member pointers with dependent class types don't get special treatment.
+ if (ClassTy->isDependentType())
+ return false;
+ const CXXRecordDecl *RD = ClassTy->getAsCXXRecordDecl();
+ ASTContext &Context = RD->getASTContext();
+ // Member pointers not in the MS ABI don't get special treatment.
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft())
+ return false;
+ // The inheritance attribute might only be present on the most recent
+ // CXXRecordDecl; use that one.
+ RD = RD->getMostRecentDecl();
+ // Nothing interesting to do if the inheritance attribute is already set.
+ if (RD->hasAttr<MSInheritanceAttr>())
+ return false;
+ return true;
+ }
case ObjCObject:
return cast<ObjCObjectType>(CanonicalType)->getBaseType()
->isIncompleteType(Def);
@@ -2260,7 +2299,7 @@ bool QualType::isCXX11PODType(ASTContext &Context) const {
// a standard-layout class, and has no non-static data members of type
// non-POD struct, non-POD union (or array of such types). [...]
//
- // We don't directly query the recursive aspect as the requiremets for
+ // We don't directly query the recursive aspect as the requirements for
// both standard-layout classes and trivial classes apply recursively
// already.
}
@@ -2473,51 +2512,115 @@ const char *Type::getTypeClassName() const {
StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
switch (getKind()) {
- case Void: return "void";
- case Bool: return Policy.Bool ? "bool" : "_Bool";
- case Char_S: return "char";
- case Char_U: return "char";
- case SChar: return "signed char";
- case Short: return "short";
- case Int: return "int";
- case Long: return "long";
- case LongLong: return "long long";
- case Int128: return "__int128";
- case UChar: return "unsigned char";
- case UShort: return "unsigned short";
- case UInt: return "unsigned int";
- case ULong: return "unsigned long";
- case ULongLong: return "unsigned long long";
- case UInt128: return "unsigned __int128";
- case Half: return Policy.Half ? "half" : "__fp16";
- case Float: return "float";
- case Double: return "double";
- case LongDouble: return "long double";
+ case Void:
+ return "void";
+ case Bool:
+ return Policy.Bool ? "bool" : "_Bool";
+ case Char_S:
+ return "char";
+ case Char_U:
+ return "char";
+ case SChar:
+ return "signed char";
+ case Short:
+ return "short";
+ case Int:
+ return "int";
+ case Long:
+ return "long";
+ case LongLong:
+ return "long long";
+ case Int128:
+ return "__int128";
+ case UChar:
+ return "unsigned char";
+ case UShort:
+ return "unsigned short";
+ case UInt:
+ return "unsigned int";
+ case ULong:
+ return "unsigned long";
+ case ULongLong:
+ return "unsigned long long";
+ case UInt128:
+ return "unsigned __int128";
+ case Half:
+ return Policy.Half ? "half" : "__fp16";
+ case Float:
+ return "float";
+ case Double:
+ return "double";
+ case LongDouble:
+ return "long double";
case WChar_S:
- case WChar_U: return Policy.MSWChar ? "__wchar_t" : "wchar_t";
- case Char16: return "char16_t";
- case Char32: return "char32_t";
- case NullPtr: return "nullptr_t";
- case Overload: return "<overloaded function type>";
- case BoundMember: return "<bound member function type>";
- case PseudoObject: return "<pseudo-object type>";
- case Dependent: return "<dependent type>";
- case UnknownAny: return "<unknown type>";
- case ARCUnbridgedCast: return "<ARC unbridged cast type>";
- case BuiltinFn: return "<builtin fn type>";
- case ObjCId: return "id";
- case ObjCClass: return "Class";
- case ObjCSel: return "SEL";
- case OCLImage1d: return "image1d_t";
- case OCLImage1dArray: return "image1d_array_t";
- case OCLImage1dBuffer: return "image1d_buffer_t";
- case OCLImage2d: return "image2d_t";
- case OCLImage2dArray: return "image2d_array_t";
- case OCLImage3d: return "image3d_t";
- case OCLSampler: return "sampler_t";
- case OCLEvent: return "event_t";
+ case WChar_U:
+ return Policy.MSWChar ? "__wchar_t" : "wchar_t";
+ case Char16:
+ return "char16_t";
+ case Char32:
+ return "char32_t";
+ case NullPtr:
+ return "nullptr_t";
+ case Overload:
+ return "<overloaded function type>";
+ case BoundMember:
+ return "<bound member function type>";
+ case PseudoObject:
+ return "<pseudo-object type>";
+ case Dependent:
+ return "<dependent type>";
+ case UnknownAny:
+ return "<unknown type>";
+ case ARCUnbridgedCast:
+ return "<ARC unbridged cast type>";
+ case BuiltinFn:
+ return "<builtin fn type>";
+ case ObjCId:
+ return "id";
+ case ObjCClass:
+ return "Class";
+ case ObjCSel:
+ return "SEL";
+ case OCLImage1d:
+ return "image1d_t";
+ case OCLImage1dArray:
+ return "image1d_array_t";
+ case OCLImage1dBuffer:
+ return "image1d_buffer_t";
+ case OCLImage2d:
+ return "image2d_t";
+ case OCLImage2dArray:
+ return "image2d_array_t";
+ case OCLImage2dDepth:
+ return "image2d_depth_t";
+ case OCLImage2dArrayDepth:
+ return "image2d_array_depth_t";
+ case OCLImage2dMSAA:
+ return "image2d_msaa_t";
+ case OCLImage2dArrayMSAA:
+ return "image2d_array_msaa_t";
+ case OCLImage2dMSAADepth:
+ return "image2d_msaa_depth_t";
+ case OCLImage2dArrayMSAADepth:
+ return "image2d_array_msaa_depth_t";
+ case OCLImage3d:
+ return "image3d_t";
+ case OCLSampler:
+ return "sampler_t";
+ case OCLEvent:
+ return "event_t";
+ case OCLClkEvent:
+ return "clk_event_t";
+ case OCLQueue:
+ return "queue_t";
+ case OCLNDRange:
+ return "event_t";
+ case OCLReserveID:
+ return "reserve_id_t";
+ case OMPArraySection:
+ return "<OpenMP array section type>";
}
-
+
llvm_unreachable("Invalid builtin type.");
}
@@ -2863,6 +2966,48 @@ bool TagType::isBeingDefined() const {
return getDecl()->isBeingDefined();
}
+bool AttributedType::isQualifier() const {
+ switch (getAttrKind()) {
+ // These are type qualifiers in the traditional C sense: they annotate
+ // something about a specific value/variable of a type. (They aren't
+ // always part of the canonical type, though.)
+ case AttributedType::attr_address_space:
+ case AttributedType::attr_objc_gc:
+ case AttributedType::attr_objc_ownership:
+ case AttributedType::attr_objc_inert_unsafe_unretained:
+ case AttributedType::attr_nonnull:
+ case AttributedType::attr_nullable:
+ case AttributedType::attr_null_unspecified:
+ return true;
+
+ // These aren't qualifiers; they rewrite the modified type to be a
+ // semantically different type.
+ case AttributedType::attr_regparm:
+ case AttributedType::attr_vector_size:
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type:
+ case AttributedType::attr_pcs:
+ case AttributedType::attr_pcs_vfp:
+ case AttributedType::attr_noreturn:
+ case AttributedType::attr_cdecl:
+ case AttributedType::attr_fastcall:
+ case AttributedType::attr_stdcall:
+ case AttributedType::attr_thiscall:
+ case AttributedType::attr_pascal:
+ case AttributedType::attr_vectorcall:
+ case AttributedType::attr_inteloclbicc:
+ case AttributedType::attr_ms_abi:
+ case AttributedType::attr_sysv_abi:
+ case AttributedType::attr_ptr32:
+ case AttributedType::attr_ptr64:
+ case AttributedType::attr_sptr:
+ case AttributedType::attr_uptr:
+ case AttributedType::attr_objc_kindof:
+ return false;
+ }
+ llvm_unreachable("bad attributed type kind");
+}
+
bool AttributedType::isMSTypeSpec() const {
switch (getAttrKind()) {
default: return false;
@@ -2888,6 +3033,7 @@ bool AttributedType::isCallingConv() const {
case attr_neon_polyvector_type:
case attr_objc_gc:
case attr_objc_ownership:
+ case attr_objc_inert_unsafe_unretained:
case attr_noreturn:
case attr_nonnull:
case attr_nullable:
@@ -2930,7 +3076,7 @@ SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
}
TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
- return TemplateArgument(Arguments, NumArguments);
+ return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
}
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
@@ -3416,11 +3562,22 @@ bool Type::canHaveNullability() const {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
+ case BuiltinType::OMPArraySection:
return false;
}
@@ -3521,7 +3678,7 @@ bool Type::isObjCARCImplicitlyUnretainedType() const {
if (const ObjCObjectPointerType *opt
= dyn_cast<ObjCObjectPointerType>(canon)) {
- // Class and Class<Protocol> don't require retension.
+ // Class and Class<Protocol> don't require retention.
if (opt->getObjectType()->isObjCClass())
return true;
}
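
Two hunks in this file collapse a manual Allocate/memcpy/makeArrayRef sequence into ArrayRef::copy(Allocator). A sketch of that pattern, using BumpPtrAllocator only to keep the example self-contained:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Allocator.h"
#include <vector>

llvm::ArrayRef<int> copyIntoArena(const std::vector<int> &V,
                                  llvm::BumpPtrAllocator &Arena) {
  // copy() allocates arena storage and copies the elements in one step.
  return llvm::makeArrayRef(V).copy(Arena);
}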
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index 85bda6a06d97..d08b07b2ccd6 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -192,7 +192,7 @@ SourceLocation TypeLoc::getBeginLoc() const {
Cur = Cur.getNextTypeLoc();
continue;
default:
- if (!Cur.getLocalSourceRange().getBegin().isInvalid())
+ if (Cur.getLocalSourceRange().getBegin().isValid())
LeftMost = Cur;
Cur = Cur.getNextTypeLoc();
if (Cur.isNull())
@@ -338,10 +338,21 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
return TST_unspecified;
}
@@ -365,6 +376,27 @@ SourceLocation TypeLoc::findNullabilityLoc() const {
return SourceLocation();
}
+TypeLoc TypeLoc::findExplicitQualifierLoc() const {
+ // Qualified types.
+ if (auto qual = getAs<QualifiedTypeLoc>())
+ return qual;
+
+ TypeLoc loc = IgnoreParens();
+
+ // Attributed types.
+ if (auto attr = loc.getAs<AttributedTypeLoc>()) {
+ if (attr.isQualifier()) return attr;
+ return attr.getModifiedLoc().findExplicitQualifierLoc();
+ }
+
+ // C11 _Atomic types.
+ if (auto atomic = loc.getAs<AtomicTypeLoc>()) {
+ return atomic;
+ }
+
+ return TypeLoc();
+}
+
void ObjCObjectTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setHasBaseTypeAsWritten(true);
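
findExplicitQualifierLoc above walks inward through sugar until it hits a qualifier or runs out of wrappers. The shape of that search, reduced to an invented linked chain of wrapper nodes:

struct Wrapper {
  bool IsQualifier;
  Wrapper *Inner; // null once there is nothing left to unwrap
};

Wrapper *findQualifier(Wrapper *W) {
  for (; W; W = W->Inner)
    if (W->IsQualifier)
      return W;
  return nullptr; // analogous to returning a null TypeLoc
}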
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 0bb50c6ba815..4617e1d3803f 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -835,7 +835,11 @@ void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
if (!T->getDeducedType().isNull()) {
printBefore(T->getDeducedType(), OS);
} else {
- OS << (T->isDecltypeAuto() ? "decltype(auto)" : "auto");
+ switch (T->getKeyword()) {
+ case AutoTypeKeyword::Auto: OS << "auto"; break;
+ case AutoTypeKeyword::DecltypeAuto: OS << "decltype(auto)"; break;
+ case AutoTypeKeyword::GNUAutoType: OS << "__auto_type"; break;
+ }
spaceBeforePlaceHolder(OS);
}
}
@@ -921,12 +925,13 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
} else {
// Make an unambiguous representation for anonymous types, e.g.
// (anonymous enum at /usr/include/string.h:120:9)
-
+ OS << (Policy.MSVCFormatting ? '`' : '(');
+
if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
- OS << "(lambda";
+ OS << "lambda";
HasKindDecoration = true;
} else {
- OS << "(anonymous";
+ OS << "anonymous";
}
if (Policy.AnonymousTagLocations) {
@@ -944,8 +949,8 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
<< ':' << PLoc.getColumn();
}
}
-
- OS << ')';
+
+ OS << (Policy.MSVCFormatting ? '\'' : ')');
}
// If this is a class template specialization, print the template
@@ -1187,6 +1192,10 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
printAfter(T->getModifiedType(), OS);
+ // Don't print the inert __unsafe_unretained attribute at all.
+ if (T->getAttrKind() == AttributedType::attr_objc_inert_unsafe_unretained)
+ return;
+
// Print nullability type specifiers that occur after
if (T->getAttrKind() == AttributedType::attr_nonnull ||
T->getAttrKind() == AttributedType::attr_nullable ||
@@ -1393,6 +1402,7 @@ TemplateSpecializationType::PrintTemplateArgumentList(
unsigned NumArgs,
const PrintingPolicy &Policy,
bool SkipBrackets) {
+ const char *Comma = Policy.MSVCFormatting ? "," : ", ";
if (!SkipBrackets)
OS << '<';
@@ -1403,14 +1413,14 @@ TemplateSpecializationType::PrintTemplateArgumentList(
llvm::raw_svector_ostream ArgOS(Buf);
if (Args[Arg].getKind() == TemplateArgument::Pack) {
if (Args[Arg].pack_size() && Arg > 0)
- OS << ", ";
+ OS << Comma;
PrintTemplateArgumentList(ArgOS,
Args[Arg].pack_begin(),
Args[Arg].pack_size(),
Policy, true);
} else {
if (Arg > 0)
- OS << ", ";
+ OS << Comma;
Args[Arg].print(Policy, ArgOS);
}
StringRef ArgString = ArgOS.str();
@@ -1442,11 +1452,12 @@ PrintTemplateArgumentList(raw_ostream &OS,
const TemplateArgumentLoc *Args, unsigned NumArgs,
const PrintingPolicy &Policy) {
OS << '<';
+ const char *Comma = Policy.MSVCFormatting ? "," : ", ";
bool needSpace = false;
for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
if (Arg > 0)
- OS << ", ";
+ OS << Comma;
// Print the argument into a string.
SmallString<128> Buf;
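
Both PrintTemplateArgumentList overloads above now pick the separator from the policy: MSVC-style output writes "," with no space, everything else keeps ", ". A standalone sketch of that choice, with ToyPolicy standing in for PrintingPolicy:

#include <string>
#include <vector>

struct ToyPolicy { bool MSVCFormatting = false; };

std::string joinArgs(const std::vector<std::string> &Args,
                     const ToyPolicy &Policy) {
  const char *Comma = Policy.MSVCFormatting ? "," : ", ";
  std::string Out;
  for (size_t I = 0; I != Args.size(); ++I) {
    if (I > 0)
      Out += Comma;
    Out += Args[I];
  }
  return Out;
}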
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index ca5f0aad0013..bae018652f91 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -177,14 +177,12 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
CXXFinalOverriderMap FinalOverriders;
MostDerivedClass->getFinalOverriders(FinalOverriders);
- for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
- E = FinalOverriders.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const OverridingMethods& Methods = I->second;
-
- for (OverridingMethods::const_iterator I = Methods.begin(),
- E = Methods.end(); I != E; ++I) {
- unsigned SubobjectNumber = I->first;
+ for (const auto &Overrider : FinalOverriders) {
+ const CXXMethodDecl *MD = Overrider.first;
+ const OverridingMethods &Methods = Overrider.second;
+
+ for (const auto &M : Methods) {
+ unsigned SubobjectNumber = M.first;
assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
SubobjectNumber)) &&
"Did not find subobject offset!");
@@ -192,8 +190,8 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
SubobjectNumber)];
- assert(I->second.size() == 1 && "Final overrider is not unique!");
- const UniqueVirtualMethod &Method = I->second.front();
+ assert(M.second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = M.second.front();
const CXXRecordDecl *OverriderRD = Method.Method->getParent();
assert(SubobjectLayoutClassOffsets.count(
@@ -482,13 +480,9 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
// Force the signatures to match. We can't rely on the overrides
// list here because there isn't necessarily an inheritance
// relationship between the two methods.
- if (LT->getTypeQuals() != RT->getTypeQuals() ||
- LT->getNumParams() != RT->getNumParams())
+ if (LT->getTypeQuals() != RT->getTypeQuals())
return false;
- for (unsigned I = 0, E = LT->getNumParams(); I != E; ++I)
- if (LT->getParamType(I) != RT->getParamType(I))
- return false;
- return true;
+ return LT->getParamTypes() == RT->getParamTypes();
}
bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
@@ -515,8 +509,8 @@ bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
CharUnits OffsetOffset) {
// Check if we can reuse an offset.
- for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
- if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ for (const auto &OffsetPair : Offsets) {
+ if (MethodsCanShareVCallOffset(OffsetPair.first, MD))
return false;
}
@@ -527,9 +521,9 @@ bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
// Look for an offset.
- for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
- if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
- return Offsets[I].second;
+ for (const auto &OffsetPair : Offsets) {
+ if (MethodsCanShareVCallOffset(OffsetPair.first, MD))
+ return OffsetPair.second;
}
llvm_unreachable("Should always find a vcall offset offset!");
@@ -1097,39 +1091,30 @@ visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
- if (!Visitor.visit(OverriddenMD))
+ if (!Visitor(OverriddenMD))
continue;
visitAllOverriddenMethods(OverriddenMD, Visitor);
}
}
-namespace {
- struct OverriddenMethodsCollector {
- OverriddenMethodsSetTy *Methods;
-
- bool visit(const CXXMethodDecl *MD) {
- // Don't recurse on this method if we've already collected it.
- return Methods->insert(MD).second;
- }
- };
-}
-
/// ComputeAllOverriddenMethods - Given a method decl, will return a set of all
/// the overridden methods that the function decl overrides.
static void
ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
OverriddenMethodsSetTy& OverriddenMethods) {
- OverriddenMethodsCollector Collector = { &OverriddenMethods };
- visitAllOverriddenMethods(MD, Collector);
+ auto OverriddenMethodsCollector = [&](const CXXMethodDecl *MD) {
+ // Don't recurse on this method if we've already collected it.
+ return OverriddenMethods.insert(MD).second;
+ };
+ visitAllOverriddenMethods(MD, OverriddenMethodsCollector);
}
void ItaniumVTableBuilder::ComputeThisAdjustments() {
// Now go through the method info map and see if any of the methods need
// 'this' pointer adjustments.
- for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
- E = MethodInfoMap.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const MethodInfo &MethodInfo = I->second;
+ for (const auto &MI : MethodInfoMap) {
+ const CXXMethodDecl *MD = MI.first;
+ const MethodInfo &MethodInfo = MI.second;
// Ignore adjustments for unused function pointers.
uint64_t VTableIndex = MethodInfo.VTableIndex;
@@ -1175,10 +1160,9 @@ void ItaniumVTableBuilder::ComputeThisAdjustments() {
return;
}
- for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
- E = VTableThunks.end(); I != E; ++I) {
- const VTableComponent &Component = Components[I->first];
- const ThunkInfo &Thunk = I->second;
+ for (const auto &TI : VTableThunks) {
+ const VTableComponent &Component = Components[TI.first];
+ const ThunkInfo &Thunk = TI.second;
const CXXMethodDecl *MD;
switch (Component.getKind()) {
@@ -1237,10 +1221,9 @@ BaseOffset ItaniumVTableBuilder::ComputeThisAdjustmentBaseOffset(
// We have to go through all the paths, and see which one leads us to the
// right base subobject.
- for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
- I != E; ++I) {
- BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
-
+ for (const CXXBasePath &Path : Paths) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, Path);
+
CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
if (Offset.VirtualBase) {
@@ -1440,15 +1423,11 @@ FindNearestOverriddenMethod(const CXXMethodDecl *MD,
BasesSetVectorTy &Bases) {
OverriddenMethodsSetTy OverriddenMethods;
ComputeAllOverriddenMethods(MD, OverriddenMethods);
-
- for (int I = Bases.size(), E = 0; I != E; --I) {
- const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+ for (const CXXRecordDecl *PrimaryBase :
+ llvm::make_range(Bases.rbegin(), Bases.rend())) {
// Now check the overridden methods.
- for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
- E = OverriddenMethods.end(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
-
+ for (const CXXMethodDecl *OverriddenMD : OverriddenMethods) {
// We found our overridden method.
if (OverriddenMD->getParent() == PrimaryBase)
return OverriddenMD;
@@ -1596,10 +1575,7 @@ void ItaniumVTableBuilder::AddMethods(
if (ImplicitVirtualDtor)
NewVirtualFunctions.push_back(ImplicitVirtualDtor);
- for (NewVirtualFunctionsTy::const_iterator I = NewVirtualFunctions.begin(),
- E = NewVirtualFunctions.end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
+ for (const CXXMethodDecl *MD : NewVirtualFunctions) {
// Get the final overrider.
FinalOverriders::OverriderInfo Overrider =
Overriders.getOverrider(MD, Base.getBaseOffset());
@@ -1700,10 +1676,9 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
const CXXRecordDecl *RD = Base.getBase();
if (RD == MostDerivedClass) {
assert(MethodVTableIndices.empty());
- for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
- E = MethodInfoMap.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const MethodInfo &MI = I->second;
+ for (const auto &I : MethodInfoMap) {
+ const CXXMethodDecl *MD = I.first;
+ const MethodInfo &MI = I.second;
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)]
= MI.VTableIndex - AddressPoint;
@@ -1924,11 +1899,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// Since an address point can be shared by multiple subobjects, we use an
// STL multimap.
std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
- for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
- E = AddressPoints.end(); I != E; ++I) {
- const BaseSubobject& Base = I->first;
- uint64_t Index = I->second;
-
+ for (const auto &AP : AddressPoints) {
+ const BaseSubobject &Base = AP.first;
+ uint64_t Index = AP.second;
+
AddressPointsByIndex.insert(std::make_pair(Index, Base));
}
@@ -2077,18 +2051,16 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// We store the class names in a set to get a stable order.
std::set<std::string> ClassNames;
- for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
- AddressPointsByIndex.lower_bound(NextIndex), E =
- AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
- assert(I->second.getBaseOffset() == BaseOffset &&
+ for (const auto &I :
+ llvm::make_range(AddressPointsByIndex.equal_range(NextIndex))) {
+ assert(I.second.getBaseOffset() == BaseOffset &&
"Invalid base offset!");
- const CXXRecordDecl *RD = I->second.getBase();
+ const CXXRecordDecl *RD = I.second.getBase();
ClassNames.insert(RD->getQualifiedNameAsString());
}
-
- for (std::set<std::string>::const_iterator I = ClassNames.begin(),
- E = ClassNames.end(); I != E; ++I) {
- Out << " -- (" << *I;
+
+ for (const std::string &Name : ClassNames) {
+ Out << " -- (" << Name;
Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
}
}
@@ -2105,12 +2077,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// a stable order.
std::map<std::string, CharUnits> ClassNamesAndOffsets;
- for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
- E = VBaseOffsetOffsets.end(); I != E; ++I) {
- std::string ClassName = I->first->getQualifiedNameAsString();
- CharUnits OffsetOffset = I->second;
- ClassNamesAndOffsets.insert(
- std::make_pair(ClassName, OffsetOffset));
+ for (const auto &I : VBaseOffsetOffsets) {
+ std::string ClassName = I.first->getQualifiedNameAsString();
+ CharUnits OffsetOffset = I.second;
+ ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
}
Out << "Virtual base offset offsets for '";
@@ -2119,10 +2089,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
Out << ClassNamesAndOffsets.size();
Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
- for (std::map<std::string, CharUnits>::const_iterator I =
- ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
- I != E; ++I)
- Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
+ for (const auto &I : ClassNamesAndOffsets)
+ Out << " " << I.first << " | " << I.second.getQuantity() << '\n';
Out << "\n";
}
@@ -2130,10 +2098,9 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
if (!Thunks.empty()) {
// We store the method names in a map to get a stable order.
std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
-
- for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
- I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
+
+ for (const auto &I : Thunks) {
+ const CXXMethodDecl *MD = I.first;
std::string MethodName =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
MD);
@@ -2141,11 +2108,9 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
}
- for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
- MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
- I != E; ++I) {
- const std::string &MethodName = I->first;
- const CXXMethodDecl *MD = I->second;
+ for (const auto &I : MethodNamesAndDecls) {
+ const std::string &MethodName = I.first;
+ const CXXMethodDecl *MD = I.second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
std::sort(ThunksVector.begin(), ThunksVector.end(),
@@ -2225,10 +2190,9 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
MostDerivedClass->printQualifiedName(Out);
Out << "' (" << IndicesMap.size() << " entries).\n";
- for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
- E = IndicesMap.end(); I != E; ++I) {
- uint64_t VTableIndex = I->first;
- const std::string &MethodName = I->second;
+ for (const auto &I : IndicesMap) {
+ uint64_t VTableIndex = I.first;
+ const std::string &MethodName = I.second;
Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName
<< '\n';
@@ -2303,14 +2267,11 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
/*BaseIsVirtual=*/false,
/*OffsetInLayoutClass=*/CharUnits::Zero());
- for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
- Builder.getVBaseOffsetOffsets().begin(),
- E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ for (const auto &I : Builder.getVBaseOffsetOffsets()) {
// Insert all types.
- ClassPairTy ClassPair(RD, I->first);
-
- VirtualBaseClassOffsetOffsets.insert(
- std::make_pair(ClassPair, I->second));
+ ClassPairTy ClassPair(RD, I.first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I.second));
}
I = VirtualBaseClassOffsetOffsets.find(ClassPair);
@@ -2361,14 +2322,11 @@ ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
return;
- for (ItaniumVTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator
- I = Builder.getVBaseOffsetOffsets().begin(),
- E = Builder.getVBaseOffsetOffsets().end();
- I != E; ++I) {
+ for (const auto &I : Builder.getVBaseOffsetOffsets()) {
// Insert all types.
- ClassPairTy ClassPair(RD, I->first);
-
- VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ ClassPairTy ClassPair(RD, I.first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I.second));
}
}
@@ -2561,10 +2519,9 @@ private:
"vftable can't be empty");
assert(MethodVFTableLocations.empty());
- for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
- E = MethodInfoMap.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const MethodInfo &MI = I->second;
+ for (const auto &I : MethodInfoMap) {
+ const CXXMethodDecl *MD = I.first;
+ const MethodInfo &MI = I.second;
// Skip the methods that the MostDerivedClass didn't override
// and the entries shadowed by return adjusting thunks.
if (MD->getParent() != MostDerivedClass || MI.Shadowed)
@@ -2633,28 +2590,8 @@ public:
void dumpLayout(raw_ostream &);
};
-/// InitialOverriddenDefinitionCollector - Finds the set of least derived bases
-/// that define the given method.
-struct InitialOverriddenDefinitionCollector {
- BasesSetVectorTy Bases;
- OverriddenMethodsSetTy VisitedOverriddenMethods;
-
- bool visit(const CXXMethodDecl *OverriddenMD) {
- if (OverriddenMD->size_overridden_methods() == 0)
- Bases.insert(OverriddenMD->getParent());
- // Don't recurse on this method if we've already collected it.
- return VisitedOverriddenMethods.insert(OverriddenMD).second;
- }
-};
-
} // end namespace
-static bool BaseInSet(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path, void *BasesSet) {
- BasesSetVectorTy *Bases = (BasesSetVectorTy *)BasesSet;
- return Bases->count(Specifier->getType()->getAsCXXRecordDecl());
-}
-
// Let's study one class hierarchy as an example:
// struct A {
// virtual void f();
@@ -2711,17 +2648,33 @@ static bool BaseInSet(const CXXBaseSpecifier *Specifier,
// for the given method, relative to the beginning of the MostDerivedClass.
CharUnits
VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
- InitialOverriddenDefinitionCollector Collector;
- visitAllOverriddenMethods(Overrider.Method, Collector);
+ BasesSetVectorTy Bases;
+
+ {
+ // Find the set of least derived bases that define the given method.
+ OverriddenMethodsSetTy VisitedOverriddenMethods;
+ auto InitialOverriddenDefinitionCollector = [&](
+ const CXXMethodDecl *OverriddenMD) {
+ if (OverriddenMD->size_overridden_methods() == 0)
+ Bases.insert(OverriddenMD->getParent());
+ // Don't recurse on this method if we've already collected it.
+ return VisitedOverriddenMethods.insert(OverriddenMD).second;
+ };
+ visitAllOverriddenMethods(Overrider.Method,
+ InitialOverriddenDefinitionCollector);
+ }
// If there are no overrides then 'this' is located
// in the base that defines the method.
- if (Collector.Bases.size() == 0)
+ if (Bases.size() == 0)
return Overrider.Offset;
CXXBasePaths Paths;
- Overrider.Method->getParent()->lookupInBases(BaseInSet, &Collector.Bases,
- Paths);
+ Overrider.Method->getParent()->lookupInBases(
+ [&Bases](const CXXBaseSpecifier *Specifier, CXXBasePath &) {
+ return Bases.count(Specifier->getType()->getAsCXXRecordDecl());
+ },
+ Paths);
// This will hold the smallest this offset among overridees of MD.
// This implies that an offset of a non-virtual base will dominate an offset
@@ -2732,17 +2685,14 @@ VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
const ASTRecordLayout &OverriderRDLayout =
Context.getASTRecordLayout(Overrider.Method->getParent());
- for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
- I != E; ++I) {
- const CXXBasePath &Path = (*I);
+ for (const CXXBasePath &Path : Paths) {
CharUnits ThisOffset = Overrider.Offset;
CharUnits LastVBaseOffset;
// For each path from the overrider to the parents of the overridden
// methods, traverse the path, calculating the this offset in the most
// derived class.
- for (int J = 0, F = Path.size(); J != F; ++J) {
- const CXXBasePathElement &Element = Path[J];
+ for (const CXXBasePathElement &Element : Path) {
QualType CurTy = Element.Base->getType();
const CXXRecordDecl *PrevRD = Element.Class,
*CurRD = CurTy->getAsCXXRecordDecl();
@@ -2932,26 +2882,30 @@ static void GroupNewVirtualOverloads(
// Put the virtual methods into VirtualMethods in the proper order:
// 1) Group overloads by declaration name. New groups are added to the
// vftable in the order of their first declarations in this class
- // (including overrides and non-virtual methods).
+ // (including overrides, non-virtual methods and any other named decl that
+ // might be nested within the class).
// 2) In each group, new overloads appear in the reverse order of declaration.
typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup;
SmallVector<MethodGroup, 10> Groups;
typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy;
VisitedGroupIndicesTy VisitedGroupIndices;
- for (const auto *MD : RD->methods()) {
- MD = MD->getCanonicalDecl();
+ for (const auto *D : RD->decls()) {
+ const auto *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ continue;
VisitedGroupIndicesTy::iterator J;
bool Inserted;
std::tie(J, Inserted) = VisitedGroupIndices.insert(
- std::make_pair(MD->getDeclName(), Groups.size()));
+ std::make_pair(ND->getDeclName(), Groups.size()));
if (Inserted)
Groups.push_back(MethodGroup());
- if (MD->isVirtual())
- Groups[J->second].push_back(MD);
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isVirtual())
+ Groups[J->second].push_back(MD->getCanonicalDecl());
}
- for (unsigned I = 0, E = Groups.size(); I != E; ++I)
- VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend());
+ for (const MethodGroup &Group : Groups)
+ VirtualMethods.append(Group.rbegin(), Group.rend());
}
static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
@@ -3011,9 +2965,7 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
// sub-bases;
// - adding new slots for methods that require Return adjustment.
// We keep track of the methods visited in the sub-bases in MethodInfoMap.
- for (unsigned I = 0, E = VirtualMethods.size(); I != E; ++I) {
- const CXXMethodDecl *MD = VirtualMethods[I];
-
+ for (const CXXMethodDecl *MD : VirtualMethods) {
FinalOverriders::OverriderInfo FinalOverrider =
Overriders.getOverrider(MD, Base.getBaseOffset());
const CXXMethodDecl *FinalOverriderMD = FinalOverrider.Method;
@@ -3122,10 +3074,10 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
}
static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
- for (VPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(),
- E = Path.rend(); I != E; ++I) {
+ for (const CXXRecordDecl *Elem :
+ llvm::make_range(Path.rbegin(), Path.rend())) {
Out << "'";
- (*I)->printQualifiedName(Out);
+ Elem->printQualifiedName(Out);
Out << "' in ";
}
}
@@ -3247,21 +3199,17 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
// We store the method names in a map to get a stable order.
std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
- for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
- I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
+ for (const auto &I : Thunks) {
+ const CXXMethodDecl *MD = I.first;
std::string MethodName = PredefinedExpr::ComputeName(
PredefinedExpr::PrettyFunctionNoVirtual, MD);
MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
}
- for (std::map<std::string, const CXXMethodDecl *>::const_iterator
- I = MethodNamesAndDecls.begin(),
- E = MethodNamesAndDecls.end();
- I != E; ++I) {
- const std::string &MethodName = I->first;
- const CXXMethodDecl *MD = I->second;
+ for (const auto &MethodNameAndDecl : MethodNamesAndDecls) {
+ const std::string &MethodName = MethodNameAndDecl.first;
+ const CXXMethodDecl *MD = MethodNameAndDecl.second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
@@ -3291,9 +3239,8 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
ArrayRef<const CXXRecordDecl *> B) {
- for (ArrayRef<const CXXRecordDecl *>::iterator I = B.begin(), E = B.end();
- I != E; ++I) {
- if (A.count(*I))
+ for (const CXXRecordDecl *Decl : B) {
+ if (A.count(Decl))
return true;
}
return false;
@@ -3636,11 +3583,10 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
VFPtrLocations[RD] = VFPtrs;
MethodVFTableLocationsTy NewMethodLocations;
- for (VPtrInfoVector::iterator I = VFPtrs->begin(), E = VFPtrs->end();
- I != E; ++I) {
- VFTableBuilder Builder(*this, RD, *I);
+ for (const VPtrInfo *VFPtr : *VFPtrs) {
+ VFTableBuilder Builder(*this, RD, VFPtr);
- VFTableIdTy id(RD, (*I)->FullOffsetInMDC);
+ VFTableIdTy id(RD, VFPtr->FullOffsetInMDC);
assert(VFTableLayouts.count(id) == 0);
SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
@@ -3672,21 +3618,20 @@ void MicrosoftVTableContext::dumpMethodLocations(
std::map<MethodVFTableLocation, std::string> IndicesMap;
bool HasNonzeroOffset = false;
- for (MethodVFTableLocationsTy::const_iterator I = NewMethods.begin(),
- E = NewMethods.end(); I != E; ++I) {
- const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I->first.getDecl());
+ for (const auto &I : NewMethods) {
+ const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I.first.getDecl());
assert(MD->isVirtual());
std::string MethodName = PredefinedExpr::ComputeName(
PredefinedExpr::PrettyFunctionNoVirtual, MD);
if (isa<CXXDestructorDecl>(MD)) {
- IndicesMap[I->second] = MethodName + " [scalar deleting]";
+ IndicesMap[I.second] = MethodName + " [scalar deleting]";
} else {
- IndicesMap[I->second] = MethodName;
+ IndicesMap[I.second] = MethodName;
}
- if (!I->second.VFPtrOffset.isZero() || I->second.VBTableIndex != 0)
+ if (!I.second.VFPtrOffset.isZero() || I.second.VBTableIndex != 0)
HasNonzeroOffset = true;
}
@@ -3700,12 +3645,9 @@ void MicrosoftVTableContext::dumpMethodLocations(
CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1);
uint64_t LastVBIndex = 0;
- for (std::map<MethodVFTableLocation, std::string>::const_iterator
- I = IndicesMap.begin(),
- E = IndicesMap.end();
- I != E; ++I) {
- CharUnits VFPtrOffset = I->first.VFPtrOffset;
- uint64_t VBIndex = I->first.VBTableIndex;
+ for (const auto &I : IndicesMap) {
+ CharUnits VFPtrOffset = I.first.VFPtrOffset;
+ uint64_t VBIndex = I.first.VBTableIndex;
if (HasNonzeroOffset &&
(VFPtrOffset != LastVFPtrOffset || VBIndex != LastVBIndex)) {
assert(VBIndex > LastVBIndex || VFPtrOffset > LastVFPtrOffset);
@@ -3717,8 +3659,8 @@ void MicrosoftVTableContext::dumpMethodLocations(
LastVBIndex = VBIndex;
}
- uint64_t VTableIndex = I->first.Index;
- const std::string &MethodName = I->second;
+ uint64_t VTableIndex = I.first.Index;
+ const std::string &MethodName = I.second;
Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName << '\n';
}
Out << '\n';
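
A recurring change in this file replaces hand-written visitor structs with capturing lambdas; visitAllOverriddenMethods only requires that Visitor(MD) be callable, so any callable works. A reduced sketch of the same conversion over an invented visitAll helper:

#include <set>
#include <string>

template <typename VisitorTy>
void visitAll(const std::set<std::string> &Items, VisitorTy &Visitor) {
  for (const std::string &Item : Items)
    Visitor(Item);
}

void collect(const std::set<std::string> &Items) {
  std::set<std::string> Seen;
  // The lambda's capture replaces the collector struct's member pointer.
  auto Collector = [&](const std::string &S) {
    return Seen.insert(S).second;
  };
  visitAll(Items, Collector);
}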
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index e3b666ef42af..847398c0861c 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -197,9 +197,6 @@ public:
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
- // Disables data recursion. We intercept Traverse* methods in the RAV, which
- // are not triggered during data recursion.
- bool shouldUseDataRecursionFor(clang::Stmt *S) const { return false; }
private:
// Used for updating the depth during traversal.
@@ -487,9 +484,6 @@ public:
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
- // Disables data recursion. We intercept Traverse* methods in the RAV, which
- // are not triggered during data recursion.
- bool shouldUseDataRecursionFor(clang::Stmt *S) const { return false; }
private:
class TimeBucketRegion {
@@ -621,9 +615,6 @@ private:
if (Node.get<TranslationUnitDecl>() ==
ActiveASTContext->getTranslationUnitDecl())
return false;
- assert(Node.getMemoizationData() &&
- "Invariant broken: only nodes that support memoization may be "
- "used in the parent map.");
MatchKey Key;
Key.MatcherID = Matcher.getID();
@@ -867,7 +858,11 @@ bool MatchASTVisitor::TraverseNestedNameSpecifier(NestedNameSpecifier *NNS) {
bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
NestedNameSpecifierLoc NNS) {
+ if (!NNS)
+ return true;
+
match(NNS);
+
// We only match the nested name specifier here (as opposed to traversing it)
// because the traversal is already done in the parallel "Loc"-hierarchy.
if (NNS.hasQualifier())
@@ -913,37 +908,37 @@ MatchFinder::~MatchFinder() {}
void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.Type.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
void MatchFinder::addMatcher(const NestedNameSpecifierMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.NestedNameSpecifier.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
void MatchFinder::addMatcher(const NestedNameSpecifierLocMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.NestedNameSpecifierLoc.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
void MatchFinder::addMatcher(const TypeLocMatcher &NodeMatch,
MatchCallback *Action) {
Matchers.TypeLoc.emplace_back(NodeMatch, Action);
- Matchers.AllCallbacks.push_back(Action);
+ Matchers.AllCallbacks.insert(Action);
}
bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
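
The addMatcher overloads above switch AllCallbacks from push_back to insert, so registering the same callback under several matchers records it once; AllCallbacks has presumably become a set-like container such as an llvm::SetVector. A sketch of the behavioral difference:

#include "llvm/ADT/SetVector.h"

struct Callback {};

void registerTwice(llvm::SmallSetVector<Callback *, 4> &All, Callback *C) {
  All.insert(C);
  All.insert(C); // no-op: C is recorded exactly once, unlike push_back
}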
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 069fcba474b2..463cf0ba9df6 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -110,15 +110,15 @@ static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
DynTypedMatcher DynTypedMatcher::constructVariadic(
DynTypedMatcher::VariadicOperator Op,
+ ast_type_traits::ASTNodeKind SupportedKind,
std::vector<DynTypedMatcher> InnerMatchers) {
assert(InnerMatchers.size() > 0 && "Array must not be empty.");
assert(std::all_of(InnerMatchers.begin(), InnerMatchers.end(),
- [&InnerMatchers](const DynTypedMatcher &M) {
- return InnerMatchers[0].canConvertTo(M.SupportedKind);
- }) &&
- "SupportedKind must be convertible to a common type!");
+ [SupportedKind](const DynTypedMatcher &M) {
+ return M.canConvertTo(SupportedKind);
+ }) &&
+ "InnerMatchers must be convertible to SupportedKind!");
- auto SupportedKind = InnerMatchers[0].SupportedKind;
// We must relax the restrict kind here.
// The different operators might deal differently with a mismatch.
// Make it the same as SupportedKind, since that is the broadest type we are
diff --git a/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index 72f127185ef5..787b780c4243 100644
--- a/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -23,14 +23,14 @@ Diagnostics::ArgStream Diagnostics::pushContextFrame(ContextType Type,
Diagnostics::Context::Context(ConstructMatcherEnum, Diagnostics *Error,
StringRef MatcherName,
- const SourceRange &MatcherRange)
+ SourceRange MatcherRange)
: Error(Error) {
Error->pushContextFrame(CT_MatcherConstruct, MatcherRange) << MatcherName;
}
Diagnostics::Context::Context(MatcherArgEnum, Diagnostics *Error,
StringRef MatcherName,
- const SourceRange &MatcherRange,
+ SourceRange MatcherRange,
unsigned ArgNumber)
: Error(Error) {
Error->pushContextFrame(CT_MatcherArg, MatcherRange) << ArgNumber
@@ -63,7 +63,7 @@ Diagnostics::ArgStream &Diagnostics::ArgStream::operator<<(const Twine &Arg) {
return *this;
}
-Diagnostics::ArgStream Diagnostics::addError(const SourceRange &Range,
+Diagnostics::ArgStream Diagnostics::addError(SourceRange Range,
ErrorType Error) {
Errors.emplace_back();
ErrorContent &Last = Errors.back();
@@ -150,7 +150,7 @@ static void formatErrorString(StringRef FormatString,
}
}
-static void maybeAddLineAndColumn(const SourceRange &Range,
+static void maybeAddLineAndColumn(SourceRange Range,
llvm::raw_ostream &OS) {
if (Range.Start.Line > 0 && Range.Start.Column > 0) {
OS << Range.Start.Line << ":" << Range.Start.Column << ": ";
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index 36a6415ae82c..64d6b7814aeb 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -104,7 +104,7 @@ public:
class MatcherDescriptor {
public:
virtual ~MatcherDescriptor() {}
- virtual VariantMatcher create(const SourceRange &NameRange,
+ virtual VariantMatcher create(SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) const = 0;
@@ -162,7 +162,7 @@ class FixedArgCountMatcherDescriptor : public MatcherDescriptor {
public:
typedef VariantMatcher (*MarshallerType)(void (*Func)(),
StringRef MatcherName,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error);
@@ -180,7 +180,7 @@ public:
RetKinds(RetKinds.begin(), RetKinds.end()),
ArgKinds(ArgKinds.begin(), ArgKinds.end()) {}
- VariantMatcher create(const SourceRange &NameRange,
+ VariantMatcher create(SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) const override {
return Marshaller(Func, MatcherName, NameRange, Args, Error);
@@ -279,7 +279,7 @@ struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T> > {
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
VariantMatcher
-variadicMatcherDescriptor(StringRef MatcherName, const SourceRange &NameRange,
+variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
ArrayRef<ParserValue> Args, Diagnostics *Error) {
ArgT **InnerArgs = new ArgT *[Args.size()]();
@@ -320,7 +320,7 @@ variadicMatcherDescriptor(StringRef MatcherName, const SourceRange &NameRange,
class VariadicFuncMatcherDescriptor : public MatcherDescriptor {
public:
typedef VariantMatcher (*RunFunc)(StringRef MatcherName,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error);
@@ -334,7 +334,7 @@ public:
BuildReturnTypeVector<ResultT>::build(RetKinds);
}
- VariantMatcher create(const SourceRange &NameRange,
+ VariantMatcher create(SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) const override {
return Func(MatcherName, NameRange, Args, Error);
@@ -414,7 +414,7 @@ private:
/// \brief 0-arg marshaller function.
template <typename ReturnType>
static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) {
typedef ReturnType (*FuncType)();
@@ -425,7 +425,7 @@ static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
/// \brief 1-arg marshaller function.
template <typename ReturnType, typename ArgType1>
static VariantMatcher matcherMarshall1(void (*Func)(), StringRef MatcherName,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) {
typedef ReturnType (*FuncType)(ArgType1);
@@ -438,7 +438,7 @@ static VariantMatcher matcherMarshall1(void (*Func)(), StringRef MatcherName,
/// \brief 2-arg marshaller function.
template <typename ReturnType, typename ArgType1, typename ArgType2>
static VariantMatcher matcherMarshall2(void (*Func)(), StringRef MatcherName,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) {
typedef ReturnType (*FuncType)(ArgType1, ArgType2);
@@ -493,7 +493,7 @@ public:
~OverloadedMatcherDescriptor() override {}
- VariantMatcher create(const SourceRange &NameRange,
+ VariantMatcher create(SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) const override {
std::vector<VariantMatcher> Constructed;
@@ -567,7 +567,7 @@ public:
: MinCount(MinCount), MaxCount(MaxCount), Op(Op),
MatcherName(MatcherName) {}
- VariantMatcher create(const SourceRange &NameRange,
+ VariantMatcher create(SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) const override {
if (Args.size() < MinCount || MaxCount < Args.size()) {
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index 96a78cd9f8fa..cf9dab6dc7db 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -534,7 +534,7 @@ Parser::RegistrySema::lookupMatcherCtor(StringRef MatcherName) {
}
VariantMatcher Parser::RegistrySema::actOnMatcherExpression(
- MatcherCtor Ctor, const SourceRange &NameRange, StringRef BindID,
+ MatcherCtor Ctor, SourceRange NameRange, StringRef BindID,
ArrayRef<ParserValue> Args, Diagnostics *Error) {
if (BindID.empty()) {
return Registry::constructMatcher(Ctor, NameRange, Args, Error);
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index 72713dda03c7..5b1c5529aa47 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -15,6 +15,7 @@
#include "clang/ASTMatchers/Dynamic/Registry.h"
#include "Marshallers.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ManagedStatic.h"
@@ -106,15 +107,13 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(atomicType);
REGISTER_MATCHER(autoType);
REGISTER_MATCHER(binaryOperator);
- REGISTER_MATCHER(bindTemporaryExpr);
REGISTER_MATCHER(blockPointerType);
- REGISTER_MATCHER(boolLiteral);
+ REGISTER_MATCHER(booleanType);
REGISTER_MATCHER(breakStmt);
REGISTER_MATCHER(builtinType);
REGISTER_MATCHER(callExpr);
REGISTER_MATCHER(caseStmt);
REGISTER_MATCHER(castExpr);
- REGISTER_MATCHER(catchStmt);
REGISTER_MATCHER(characterLiteral);
REGISTER_MATCHER(classTemplateDecl);
REGISTER_MATCHER(classTemplateSpecializationDecl);
@@ -123,27 +122,46 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
- REGISTER_MATCHER(constCastExpr);
- REGISTER_MATCHER(constructExpr);
- REGISTER_MATCHER(constructorDecl);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
- REGISTER_MATCHER(conversionDecl);
REGISTER_MATCHER(cStyleCastExpr);
- REGISTER_MATCHER(ctorInitializer);
- REGISTER_MATCHER(CUDAKernelCallExpr);
+ REGISTER_MATCHER(cudaKernelCallExpr);
+ REGISTER_MATCHER(cxxBindTemporaryExpr);
+ REGISTER_MATCHER(cxxBoolLiteral);
+ REGISTER_MATCHER(cxxCatchStmt);
+ REGISTER_MATCHER(cxxConstCastExpr);
+ REGISTER_MATCHER(cxxConstructExpr);
+ REGISTER_MATCHER(cxxConstructorDecl);
+ REGISTER_MATCHER(cxxConversionDecl);
+ REGISTER_MATCHER(cxxCtorInitializer);
+ REGISTER_MATCHER(cxxDefaultArgExpr);
+ REGISTER_MATCHER(cxxDeleteExpr);
+ REGISTER_MATCHER(cxxDestructorDecl);
+ REGISTER_MATCHER(cxxDynamicCastExpr);
+ REGISTER_MATCHER(cxxForRangeStmt);
+ REGISTER_MATCHER(cxxFunctionalCastExpr);
+ REGISTER_MATCHER(cxxMemberCallExpr);
+ REGISTER_MATCHER(cxxMethodDecl);
+ REGISTER_MATCHER(cxxNewExpr);
+ REGISTER_MATCHER(cxxNullPtrLiteralExpr);
+ REGISTER_MATCHER(cxxOperatorCallExpr);
+ REGISTER_MATCHER(cxxRecordDecl);
+ REGISTER_MATCHER(cxxReinterpretCastExpr);
+ REGISTER_MATCHER(cxxStaticCastExpr);
+ REGISTER_MATCHER(cxxTemporaryObjectExpr);
+ REGISTER_MATCHER(cxxThisExpr);
+ REGISTER_MATCHER(cxxThrowExpr);
+ REGISTER_MATCHER(cxxTryStmt);
+ REGISTER_MATCHER(cxxUnresolvedConstructExpr);
+ REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
- REGISTER_MATCHER(defaultArgExpr);
REGISTER_MATCHER(defaultStmt);
- REGISTER_MATCHER(deleteExpr);
REGISTER_MATCHER(dependentSizedArrayType);
- REGISTER_MATCHER(destructorDecl);
REGISTER_MATCHER(doStmt);
- REGISTER_MATCHER(dynamicCastExpr);
REGISTER_MATCHER(eachOf);
REGISTER_MATCHER(elaboratedType);
REGISTER_MATCHER(enumConstantDecl);
@@ -160,10 +178,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(forEachDescendant);
REGISTER_MATCHER(forEachSwitchCase);
REGISTER_MATCHER(forField);
- REGISTER_MATCHER(forRangeStmt);
REGISTER_MATCHER(forStmt);
REGISTER_MATCHER(friendDecl);
- REGISTER_MATCHER(functionalCastExpr);
REGISTER_MATCHER(functionDecl);
REGISTER_MATCHER(functionTemplateDecl);
REGISTER_MATCHER(functionType);
@@ -179,12 +195,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasArgument);
REGISTER_MATCHER(hasArgumentOfType);
REGISTER_MATCHER(hasAttr);
+ REGISTER_MATCHER(hasAutomaticStorageDuration);
REGISTER_MATCHER(hasBase);
REGISTER_MATCHER(hasBody);
REGISTER_MATCHER(hasCanonicalType);
REGISTER_MATCHER(hasCaseConstant);
REGISTER_MATCHER(hasCondition);
REGISTER_MATCHER(hasConditionVariableStatement);
+ REGISTER_MATCHER(hasDecayedType);
REGISTER_MATCHER(hasDeclaration);
REGISTER_MATCHER(hasDeclContext);
REGISTER_MATCHER(hasDeducedType);
@@ -222,9 +240,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasSize);
REGISTER_MATCHER(hasSizeExpr);
REGISTER_MATCHER(hasSourceExpression);
+ REGISTER_MATCHER(hasStaticStorageDuration);
REGISTER_MATCHER(hasTargetDecl);
REGISTER_MATCHER(hasTemplateArgument);
REGISTER_MATCHER(hasThen);
+ REGISTER_MATCHER(hasThreadStorageDuration);
REGISTER_MATCHER(hasTrueExpression);
REGISTER_MATCHER(hasTypeLoc);
REGISTER_MATCHER(hasUnaryOperand);
@@ -237,17 +257,27 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(implicitCastExpr);
REGISTER_MATCHER(incompleteArrayType);
REGISTER_MATCHER(initListExpr);
+ REGISTER_MATCHER(injectedClassNameType);
REGISTER_MATCHER(innerType);
REGISTER_MATCHER(integerLiteral);
+ REGISTER_MATCHER(isAnonymous);
REGISTER_MATCHER(isArrow);
+ REGISTER_MATCHER(isBaseInitializer);
REGISTER_MATCHER(isCatchAll);
+ REGISTER_MATCHER(isClass);
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
+ REGISTER_MATCHER(isCopyConstructor);
+ REGISTER_MATCHER(isDefaultConstructor);
REGISTER_MATCHER(isDefinition);
REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isExceptionVariable);
+ REGISTER_MATCHER(isExplicit);
REGISTER_MATCHER(isExplicitTemplateSpecialization);
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
+ REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isImplicit);
REGISTER_MATCHER(isExpansionInFileMatching);
REGISTER_MATCHER(isExpansionInMainFile);
@@ -257,13 +287,20 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isIntegral);
REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isListInitialization);
+ REGISTER_MATCHER(isMemberInitializer);
+ REGISTER_MATCHER(isMoveConstructor);
+ REGISTER_MATCHER(isNoThrow);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
REGISTER_MATCHER(isPublic);
REGISTER_MATCHER(isPure);
+ REGISTER_MATCHER(isStruct);
REGISTER_MATCHER(isTemplateInstantiation);
+ REGISTER_MATCHER(isUnion);
+ REGISTER_MATCHER(isVariadic);
REGISTER_MATCHER(isVirtual);
+ REGISTER_MATCHER(isVolatileQualified);
REGISTER_MATCHER(isWritten);
REGISTER_MATCHER(labelStmt);
REGISTER_MATCHER(lambdaExpr);
@@ -272,24 +309,22 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(matchesSelector);
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
- REGISTER_MATCHER(memberCallExpr);
REGISTER_MATCHER(memberExpr);
REGISTER_MATCHER(memberPointerType);
- REGISTER_MATCHER(methodDecl);
REGISTER_MATCHER(namedDecl);
+ REGISTER_MATCHER(namespaceAliasDecl);
REGISTER_MATCHER(namespaceDecl);
REGISTER_MATCHER(namesType);
REGISTER_MATCHER(nestedNameSpecifier);
REGISTER_MATCHER(nestedNameSpecifierLoc);
- REGISTER_MATCHER(newExpr);
- REGISTER_MATCHER(nullPtrLiteralExpr);
REGISTER_MATCHER(nullStmt);
REGISTER_MATCHER(numSelectorArgs);
REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(objcInterfaceDecl);
REGISTER_MATCHER(objcMessageExpr);
+ REGISTER_MATCHER(objcObjectPointerType);
REGISTER_MATCHER(on);
REGISTER_MATCHER(onImplicitObjectArgument);
- REGISTER_MATCHER(operatorCallExpr);
REGISTER_MATCHER(parameterCountIs);
REGISTER_MATCHER(parenType);
REGISTER_MATCHER(parmVarDecl);
@@ -302,7 +337,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(refersToDeclaration);
REGISTER_MATCHER(refersToIntegralType);
REGISTER_MATCHER(refersToType);
- REGISTER_MATCHER(reinterpretCastExpr);
REGISTER_MATCHER(returns);
REGISTER_MATCHER(returnStmt);
REGISTER_MATCHER(rValueReferenceType);
@@ -311,23 +345,20 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(specifiesType);
REGISTER_MATCHER(specifiesTypeLoc);
REGISTER_MATCHER(statementCountIs);
- REGISTER_MATCHER(staticCastExpr);
REGISTER_MATCHER(staticAssertDecl);
REGISTER_MATCHER(stmt);
REGISTER_MATCHER(stringLiteral);
REGISTER_MATCHER(substNonTypeTemplateParmExpr);
+ REGISTER_MATCHER(substTemplateTypeParmType);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
REGISTER_MATCHER(templateArgument);
REGISTER_MATCHER(templateArgumentCountIs);
REGISTER_MATCHER(templateSpecializationType);
- REGISTER_MATCHER(temporaryObjectExpr);
- REGISTER_MATCHER(thisExpr);
+ REGISTER_MATCHER(templateTypeParmType);
REGISTER_MATCHER(throughUsingDecl);
- REGISTER_MATCHER(throwExpr);
REGISTER_MATCHER(to);
REGISTER_MATCHER(translationUnitDecl);
- REGISTER_MATCHER(tryStmt);
REGISTER_MATCHER(type);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefType);
@@ -336,7 +367,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(unaryOperator);
REGISTER_MATCHER(unaryTransformType);
REGISTER_MATCHER(unless);
- REGISTER_MATCHER(unresolvedConstructExpr);
+ REGISTER_MATCHER(unresolvedUsingTypenameDecl);
REGISTER_MATCHER(unresolvedUsingValueDecl);
REGISTER_MATCHER(userDefinedLiteral);
REGISTER_MATCHER(usingDecl);
@@ -350,11 +381,7 @@ RegistryMaps::RegistryMaps() {
}
RegistryMaps::~RegistryMaps() {
- for (ConstructorMap::iterator it = Constructors.begin(),
- end = Constructors.end();
- it != end; ++it) {
- delete it->second;
- }
+ llvm::DeleteContainerSeconds(Constructors);
}
static llvm::ManagedStatic<RegistryMaps> RegistryData;
@@ -425,12 +452,13 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
std::vector<MatcherCompletion> Completions;
// Search the registry for acceptable matchers.
- for (ConstructorMap::const_iterator I = RegistryData->constructors().begin(),
- E = RegistryData->constructors().end();
- I != E; ++I) {
+ for (const auto &M : RegistryData->constructors()) {
+ const auto *Matcher = M.getValue();
+ StringRef Name = M.getKey();
+
std::set<ASTNodeKind> RetKinds;
- unsigned NumArgs = I->second->isVariadic() ? 1 : I->second->getNumArgs();
- bool IsPolymorphic = I->second->isPolymorphic();
+ unsigned NumArgs = Matcher->isVariadic() ? 1 : Matcher->getNumArgs();
+ bool IsPolymorphic = Matcher->isPolymorphic();
std::vector<std::vector<ArgKind>> ArgsKinds(NumArgs);
unsigned MaxSpecificity = 0;
for (const ArgKind& Kind : AcceptedTypes) {
@@ -438,13 +466,13 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
continue;
unsigned Specificity;
ASTNodeKind LeastDerivedKind;
- if (I->second->isConvertibleTo(Kind.getMatcherKind(), &Specificity,
- &LeastDerivedKind)) {
+ if (Matcher->isConvertibleTo(Kind.getMatcherKind(), &Specificity,
+ &LeastDerivedKind)) {
if (MaxSpecificity < Specificity)
MaxSpecificity = Specificity;
RetKinds.insert(LeastDerivedKind);
for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
- I->second->getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
+ Matcher->getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
if (IsPolymorphic)
break;
}
@@ -455,9 +483,9 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
llvm::raw_string_ostream OS(Decl);
if (IsPolymorphic) {
- OS << "Matcher<T> " << I->first() << "(Matcher<T>";
+ OS << "Matcher<T> " << Name << "(Matcher<T>";
} else {
- OS << "Matcher<" << RetKinds << "> " << I->first() << "(";
+ OS << "Matcher<" << RetKinds << "> " << Name << "(";
for (const std::vector<ArgKind> &Arg : ArgsKinds) {
if (&Arg != &ArgsKinds[0])
OS << ", ";
@@ -480,11 +508,11 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
}
}
}
- if (I->second->isVariadic())
+ if (Matcher->isVariadic())
OS << "...";
OS << ")";
- std::string TypedText = I->first();
+ std::string TypedText = Name;
TypedText += "(";
if (ArgsKinds.empty())
TypedText += ")";
@@ -500,7 +528,7 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
// static
VariantMatcher Registry::constructMatcher(MatcherCtor Ctor,
- const SourceRange &NameRange,
+ SourceRange NameRange,
ArrayRef<ParserValue> Args,
Diagnostics *Error) {
return Ctor->create(NameRange, Args, Error);
@@ -508,7 +536,7 @@ VariantMatcher Registry::constructMatcher(MatcherCtor Ctor,
// static
VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor,
- const SourceRange &NameRange,
+ SourceRange NameRange,
StringRef BindID,
ArrayRef<ParserValue> Args,
Diagnostics *Error) {
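These registry entries follow the matcher renaming, so strings handed to the dynamic parser must use the new cxx-prefixed spellings. An illustrative parse (the old constructExpr(...) spelling would now fail with an unknown-matcher error):

    Diagnostics Err;
    llvm::Optional<DynTypedMatcher> M = Parser::parseMatcherExpression(
        "cxxConstructExpr(hasDeclaration("
        "cxxConstructorDecl(isCopyConstructor())))",
        &Err);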
diff --git a/lib/ASTMatchers/Dynamic/VariantValue.cpp b/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 9d8be4700581..8f3c70c1a8d8 100644
--- a/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -72,7 +72,7 @@ VariantMatcher::MatcherOps::constructVariadicOperator(
return llvm::None;
DynMatchers.push_back(*Inner);
}
- return DynTypedMatcher::constructVariadic(Op, DynMatchers);
+ return DynTypedMatcher::constructVariadic(Op, NodeKind, DynMatchers);
}
VariantMatcher::Payload::~Payload() {}
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index d7fb7e95d758..52c7f2613654 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -148,6 +148,23 @@ const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
}
}
+ auto *CXXMethod = dyn_cast<CXXMethodDecl>(D);
+ if (!CXXMethod)
+ return nullptr;
+
+ const CXXRecordDecl *parent = CXXMethod->getParent();
+ if (!parent->isLambda())
+ return nullptr;
+
+ for (const LambdaCapture &LC : parent->captures()) {
+ if (!LC.capturesVariable())
+ continue;
+
+ VarDecl *VD = LC.getCapturedVar();
+ if (VD->getName() == "self")
+ return dyn_cast<ImplicitParamDecl>(VD);
+ }
+
return nullptr;
}
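The new fallback lets getSelfDecl() answer for the call operator of a lambda that captured Objective-C's implicit self, as in this illustrative Objective-C++ method:

    - (void)scheduleWork {
      auto Work = [self] { [self doWork]; };  // captured 'self' is the
      Work();                                 // method's ImplicitParamDecl
    }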
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 7d1b23575293..09904369ba9c 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -36,10 +36,7 @@ static bool isDispatchBlock(QualType Ty) {
// returns void.
const FunctionProtoType *FT =
BPT->getPointeeType()->getAs<FunctionProtoType>();
- if (!FT || !FT->getReturnType()->isVoidType() || FT->getNumParams() != 0)
- return false;
-
- return true;
+ return FT && FT->getReturnType()->isVoidType() && FT->getNumParams() == 0;
}
namespace {
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 54d15bd232a1..ed2239f88ae5 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -39,6 +39,78 @@ static SourceLocation GetEndLoc(Decl *D) {
return D->getLocation();
}
+/// Helper for tryNormalizeBinaryOperator. Attempts to extract an IntegerLiteral
+/// or EnumConstantDecl from the given Expr. If it fails, returns nullptr.
+static const Expr *tryTransformToIntOrEnumConstant(const Expr *E) {
+ E = E->IgnoreParens();
+ if (isa<IntegerLiteral>(E))
+ return E;
+ if (auto *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ return isa<EnumConstantDecl>(DR->getDecl()) ? DR : nullptr;
+ return nullptr;
+}
+
+/// Tries to interpret a binary operator into `Decl Op Expr` form, if Expr is
+/// an integer literal or an enum constant.
+///
+/// If this fails, at least one of the returned DeclRefExpr or Expr will be
+/// null.
+static std::tuple<const DeclRefExpr *, BinaryOperatorKind, const Expr *>
+tryNormalizeBinaryOperator(const BinaryOperator *B) {
+ BinaryOperatorKind Op = B->getOpcode();
+
+ const Expr *MaybeDecl = B->getLHS();
+ const Expr *Constant = tryTransformToIntOrEnumConstant(B->getRHS());
+ // Expr looked like `0 == Foo` instead of `Foo == 0`
+ if (Constant == nullptr) {
+ // Flip the operator
+ if (Op == BO_GT)
+ Op = BO_LT;
+ else if (Op == BO_GE)
+ Op = BO_LE;
+ else if (Op == BO_LT)
+ Op = BO_GT;
+ else if (Op == BO_LE)
+ Op = BO_GE;
+
+ MaybeDecl = B->getRHS();
+ Constant = tryTransformToIntOrEnumConstant(B->getLHS());
+ }
+
+ auto *D = dyn_cast<DeclRefExpr>(MaybeDecl->IgnoreParenImpCasts());
+ return std::make_tuple(D, Op, Constant);
+}
+
+/// For an expression `x == Foo && x == Bar`, this determines whether the
+/// `Foo` and `Bar` are either of the same enumeration type, or both integer
+/// literals.
+///
+/// It's an error to pass this function arguments that are not either
+/// IntegerLiterals or DeclRefExprs (whose decls are EnumConstantDecls).
+static bool areExprTypesCompatible(const Expr *E1, const Expr *E2) {
+ // User intent isn't clear if they're mixing int literals with enum
+ // constants.
+ if (isa<IntegerLiteral>(E1) != isa<IntegerLiteral>(E2))
+ return false;
+
+ // Integer literal comparisons, regardless of literal type, are acceptable.
+ if (isa<IntegerLiteral>(E1))
+ return true;
+
+ // IntegerLiterals are handled above and only EnumConstantDecls are expected
+ // beyond this point
+ assert(isa<DeclRefExpr>(E1) && isa<DeclRefExpr>(E2));
+ auto *Decl1 = cast<DeclRefExpr>(E1)->getDecl();
+ auto *Decl2 = cast<DeclRefExpr>(E2)->getDecl();
+
+ assert(isa<EnumConstantDecl>(Decl1) && isa<EnumConstantDecl>(Decl2));
+ const DeclContext *DC1 = Decl1->getDeclContext();
+ const DeclContext *DC2 = Decl2->getDeclContext();
+
+ assert(isa<EnumDecl>(DC1) && isa<EnumDecl>(DC2));
+ return DC1 == DC2;
+}
+
class CFGBuilder;
/// The CFG builder uses a recursive algorithm to build the CFG. When
@@ -176,8 +248,8 @@ private:
public:
/// Constructs empty scope linked to previous scope in specified place.
- LocalScope(BumpVectorContext &ctx, const_iterator P)
- : ctx(ctx), Vars(ctx, 4), Prev(P) {}
+ LocalScope(BumpVectorContext ctx, const_iterator P)
+ : ctx(std::move(ctx)), Vars(this->ctx, 4), Prev(P) {}
/// Begin of scope in direction of CFG building (backwards).
const_iterator begin() const { return const_iterator(*this, Vars.size()); }
@@ -284,7 +356,7 @@ reverse_children::reverse_children(Stmt *S) {
/// Example usage:
///
/// CFGBuilder builder;
-/// CFG* cfg = builder.BuildAST(stmt1);
+/// std::unique_ptr<CFG> cfg = builder.buildCFG(decl, stmt1);
///
/// CFG construction is done via a recursive walk of an AST. We actually parse
/// the AST in reverse order so that the successor of a basic block is
@@ -388,6 +460,7 @@ private:
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
+ CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc);
CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
CFGBlock *VisitLogicalOperator(BinaryOperator *B);
std::pair<CFGBlock *, CFGBlock *> VisitLogicalOperator(BinaryOperator *B,
@@ -694,56 +767,35 @@ private:
if (!LHS->isComparisonOp() || !RHS->isComparisonOp())
return TryResult();
- BinaryOperatorKind BO1 = LHS->getOpcode();
- const DeclRefExpr *Decl1 =
- dyn_cast<DeclRefExpr>(LHS->getLHS()->IgnoreParenImpCasts());
- const IntegerLiteral *Literal1 =
- dyn_cast<IntegerLiteral>(LHS->getRHS()->IgnoreParens());
- if (!Decl1 && !Literal1) {
- if (BO1 == BO_GT)
- BO1 = BO_LT;
- else if (BO1 == BO_GE)
- BO1 = BO_LE;
- else if (BO1 == BO_LT)
- BO1 = BO_GT;
- else if (BO1 == BO_LE)
- BO1 = BO_GE;
- Decl1 = dyn_cast<DeclRefExpr>(LHS->getRHS()->IgnoreParenImpCasts());
- Literal1 = dyn_cast<IntegerLiteral>(LHS->getLHS()->IgnoreParens());
- }
+ const DeclRefExpr *Decl1;
+ const Expr *Expr1;
+ BinaryOperatorKind BO1;
+ std::tie(Decl1, BO1, Expr1) = tryNormalizeBinaryOperator(LHS);
- if (!Decl1 || !Literal1)
+ if (!Decl1 || !Expr1)
return TryResult();
- BinaryOperatorKind BO2 = RHS->getOpcode();
- const DeclRefExpr *Decl2 =
- dyn_cast<DeclRefExpr>(RHS->getLHS()->IgnoreParenImpCasts());
- const IntegerLiteral *Literal2 =
- dyn_cast<IntegerLiteral>(RHS->getRHS()->IgnoreParens());
- if (!Decl2 && !Literal2) {
- if (BO2 == BO_GT)
- BO2 = BO_LT;
- else if (BO2 == BO_GE)
- BO2 = BO_LE;
- else if (BO2 == BO_LT)
- BO2 = BO_GT;
- else if (BO2 == BO_LE)
- BO2 = BO_GE;
- Decl2 = dyn_cast<DeclRefExpr>(RHS->getRHS()->IgnoreParenImpCasts());
- Literal2 = dyn_cast<IntegerLiteral>(RHS->getLHS()->IgnoreParens());
- }
+ const DeclRefExpr *Decl2;
+ const Expr *Expr2;
+ BinaryOperatorKind BO2;
+ std::tie(Decl2, BO2, Expr2) = tryNormalizeBinaryOperator(RHS);
- if (!Decl2 || !Literal2)
+ if (!Decl2 || !Expr2)
return TryResult();
// Check that it is the same variable on both sides.
if (Decl1->getDecl() != Decl2->getDecl())
return TryResult();
+ // Make sure the user's intent is clear (e.g. they're comparing against two
+ // int literals, or two things from the same enum)
+ if (!areExprTypesCompatible(Expr1, Expr2))
+ return TryResult();
+
llvm::APSInt L1, L2;
- if (!Literal1->EvaluateAsInt(L1, *Context) ||
- !Literal2->EvaluateAsInt(L2, *Context))
+ if (!Expr1->EvaluateAsInt(L1, *Context) ||
+ !Expr2->EvaluateAsInt(L2, *Context))
return TryResult();
// Can't compare signed with unsigned or with different bit width.
@@ -773,10 +825,7 @@ private:
// * Variable x is equal to the largest literal.
// * Variable x is greater than largest literal.
bool AlwaysTrue = true, AlwaysFalse = true;
- for (unsigned int ValueIndex = 0;
- ValueIndex < sizeof(Values) / sizeof(Values[0]);
- ++ValueIndex) {
- llvm::APSInt Value = Values[ValueIndex];
+ for (llvm::APSInt Value : Values) {
TryResult Res1, Res2;
Res1 = analyzeLogicOperatorCondition(BO1, Value, L1);
Res2 = analyzeLogicOperatorCondition(BO2, Value, L2);
@@ -994,9 +1043,8 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
// For C++ constructor add initializers to CFG.
if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
- for (CXXConstructorDecl::init_const_reverse_iterator I = CD->init_rbegin(),
- E = CD->init_rend(); I != E; ++I) {
- B = addInitializer(*I);
+ for (auto *I : llvm::reverse(CD->inits())) {
+ B = addInitializer(I);
if (badCFG)
return nullptr;
}
@@ -1248,13 +1296,11 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
/// createOrReuseLocalScope - If Scope is NULL create new LocalScope. Either
/// way return valid LocalScope object.
LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
- if (!Scope) {
- llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
- Scope = alloc.Allocate<LocalScope>();
- BumpVectorContext ctx(alloc);
- new (Scope) LocalScope(ctx, ScopePos);
- }
- return Scope;
+ if (Scope)
+ return Scope;
+ llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
+ return new (alloc.Allocate<LocalScope>())
+ LocalScope(BumpVectorContext(alloc), ScopePos);
}
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
@@ -1405,7 +1451,7 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
case Stmt::BlockExprClass:
- return VisitNoRecurse(cast<Expr>(S), asc);
+ return VisitBlockExpr(cast<BlockExpr>(S), asc);
case Stmt::BreakStmtClass:
return VisitBreakStmt(cast<BreakStmt>(S));
@@ -1894,7 +1940,15 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C) {
- addLocalScopeAndDtors(C);
+ LocalScope::const_iterator scopeBeginPos = ScopePos;
+ if (BuildOpts.AddImplicitDtors) {
+ addLocalScopeForStmt(C);
+ }
+  if (!C->body_empty() && !isa<ReturnStmt>(*C->body_rbegin())) {
+    // If the body ends with a ReturnStmt, the destructors will instead be
+    // added in VisitReturnStmt.
+    addAutomaticObjDtors(ScopePos, scopeBeginPos, C);
+  }
+
CFGBlock *LastBlock = Block;
for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
@@ -2277,6 +2331,18 @@ CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
return LabelBlock;
}
+CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
+ CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+ for (const BlockDecl::Capture &CI : E->getBlockDecl()->captures()) {
+ if (Expr *CopyExpr = CI.getCopyExpr()) {
+ CFGBlock *Tmp = Visit(CopyExpr);
+ if (Tmp)
+ LastBlock = Tmp;
+ }
+ }
+ return LastBlock;
+}
+
CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
CFGBlock *LastBlock = VisitNoRecurse(E, asc);
for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
@@ -3104,11 +3170,11 @@ static bool shouldAddCase(bool &switchExclusivelyCovered,
addCase = true;
switchExclusivelyCovered = true;
}
- else if (condInt < lhsInt) {
+ else if (condInt > lhsInt) {
if (const Expr *RHS = CS->getRHS()) {
// Evaluate the RHS of the case value.
const llvm::APSInt &V2 = RHS->EvaluateKnownConstInt(Ctx);
- if (V2 <= condInt) {
+ if (V2 >= condInt) {
addCase = true;
switchExclusivelyCovered = true;
}
@@ -4128,7 +4194,8 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (const StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
const CompoundStmt *Sub = SE->getSubStmt();
- if (Sub->children()) {
+ auto Children = Sub->children();
+ if (Children.begin() != Children.end()) {
OS << "({ ... ; ";
Helper.handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
OS << " })\n";
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index fa985ee02e59..9df23923b014 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -1038,65 +1038,54 @@ bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
return true;
}
-void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
- ConsumedStateMap *StateMap,
- bool &AlreadyOwned) {
-
+void ConsumedBlockInfo::addInfo(
+ const CFGBlock *Block, ConsumedStateMap *StateMap,
+ std::unique_ptr<ConsumedStateMap> &OwnedStateMap) {
+
assert(Block && "Block pointer must not be NULL");
-
- ConsumedStateMap *Entry = StateMapsArray[Block->getBlockID()];
-
+
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+
if (Entry) {
- Entry->intersect(StateMap);
-
- } else if (AlreadyOwned) {
- StateMapsArray[Block->getBlockID()] = new ConsumedStateMap(*StateMap);
-
- } else {
- StateMapsArray[Block->getBlockID()] = StateMap;
- AlreadyOwned = true;
- }
+ Entry->intersect(*StateMap);
+ } else if (OwnedStateMap)
+ Entry = std::move(OwnedStateMap);
+ else
+ Entry = llvm::make_unique<ConsumedStateMap>(*StateMap);
}
void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
- ConsumedStateMap *StateMap) {
+ std::unique_ptr<ConsumedStateMap> StateMap) {
assert(Block && "Block pointer must not be NULL");
- ConsumedStateMap *Entry = StateMapsArray[Block->getBlockID()];
-
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+
if (Entry) {
- Entry->intersect(StateMap);
- delete StateMap;
-
+ Entry->intersect(*StateMap);
} else {
- StateMapsArray[Block->getBlockID()] = StateMap;
+ Entry = std::move(StateMap);
}
}
ConsumedStateMap* ConsumedBlockInfo::borrowInfo(const CFGBlock *Block) {
assert(Block && "Block pointer must not be NULL");
assert(StateMapsArray[Block->getBlockID()] && "Block has no block info");
-
- return StateMapsArray[Block->getBlockID()];
+
+ return StateMapsArray[Block->getBlockID()].get();
}
void ConsumedBlockInfo::discardInfo(const CFGBlock *Block) {
- unsigned int BlockID = Block->getBlockID();
- delete StateMapsArray[BlockID];
- StateMapsArray[BlockID] = nullptr;
+ StateMapsArray[Block->getBlockID()] = nullptr;
}
-ConsumedStateMap* ConsumedBlockInfo::getInfo(const CFGBlock *Block) {
+std::unique_ptr<ConsumedStateMap>
+ConsumedBlockInfo::getInfo(const CFGBlock *Block) {
assert(Block && "Block pointer must not be NULL");
-
- ConsumedStateMap *StateMap = StateMapsArray[Block->getBlockID()];
- if (isBackEdgeTarget(Block)) {
- return new ConsumedStateMap(*StateMap);
- } else {
- StateMapsArray[Block->getBlockID()] = nullptr;
- return StateMap;
- }
+
+ auto &Entry = StateMapsArray[Block->getBlockID()];
+ return isBackEdgeTarget(Block) ? llvm::make_unique<ConsumedStateMap>(*Entry)
+ : std::move(Entry);
}
bool ConsumedBlockInfo::isBackEdge(const CFGBlock *From, const CFGBlock *To) {
@@ -1166,15 +1155,15 @@ ConsumedStateMap::getState(const CXXBindTemporaryExpr *Tmp) const {
return CS_None;
}
-void ConsumedStateMap::intersect(const ConsumedStateMap *Other) {
+void ConsumedStateMap::intersect(const ConsumedStateMap &Other) {
ConsumedState LocalState;
-
- if (this->From && this->From == Other->From && !Other->Reachable) {
+
+ if (this->From && this->From == Other.From && !Other.Reachable) {
this->markUnreachable();
return;
}
-
- for (const auto &DM : Other->VarMap) {
+
+ for (const auto &DM : Other.VarMap) {
LocalState = this->getState(DM.first);
if (LocalState == CS_None)
@@ -1282,14 +1271,14 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
if (PInfo.isVarTest()) {
CurrStates->setSource(Cond);
FalseStates->setSource(Cond);
- splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates,
+ splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates.get(),
FalseStates.get());
-
+
} else if (PInfo.isBinTest()) {
CurrStates->setSource(PInfo.testSourceNode());
FalseStates->setSource(PInfo.testSourceNode());
- splitVarStateForIfBinOp(PInfo, CurrStates, FalseStates.get());
-
+ splitVarStateForIfBinOp(PInfo, CurrStates.get(), FalseStates.get());
+
} else {
return false;
}
@@ -1337,14 +1326,13 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin();
if (*SI)
- BlockInfo.addInfo(*SI, CurrStates);
+ BlockInfo.addInfo(*SI, std::move(CurrStates));
else
- delete CurrStates;
-
+ CurrStates = nullptr;
+
if (*++SI)
- BlockInfo.addInfo(*SI, FalseStates.release());
+ BlockInfo.addInfo(*SI, std::move(FalseStates));
- CurrStates = nullptr;
return true;
}
@@ -1363,10 +1351,10 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
// AC.getCFG()->viewCFG(LangOptions());
BlockInfo = ConsumedBlockInfo(CFGraph->getNumBlockIDs(), SortedGraph);
-
- CurrStates = new ConsumedStateMap();
- ConsumedStmtVisitor Visitor(AC, *this, CurrStates);
-
+
+ CurrStates = llvm::make_unique<ConsumedStateMap>();
+ ConsumedStmtVisitor Visitor(AC, *this, CurrStates.get());
+
// Add all trackable parameters to the state map.
for (const auto *PI : D->params())
Visitor.VisitParmVarDecl(PI);
@@ -1380,13 +1368,12 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
continue;
} else if (!CurrStates->isReachable()) {
- delete CurrStates;
CurrStates = nullptr;
continue;
}
-
- Visitor.reset(CurrStates);
-
+
+ Visitor.reset(CurrStates.get());
+
// Visit all of the basic block's statements.
for (const auto &B : *CurrBlock) {
switch (B.getKind()) {
@@ -1429,28 +1416,24 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
if (CurrBlock->succ_size() > 1 ||
(CurrBlock->succ_size() == 1 &&
(*CurrBlock->succ_begin())->pred_size() > 1)) {
-
- bool OwnershipTaken = false;
-
+
+ auto *RawState = CurrStates.get();
+
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
if (*SI == nullptr) continue;
if (BlockInfo.isBackEdge(CurrBlock, *SI)) {
- BlockInfo.borrowInfo(*SI)->intersectAtLoopHead(*SI, CurrBlock,
- CurrStates,
- WarningsHandler);
-
+ BlockInfo.borrowInfo(*SI)->intersectAtLoopHead(
+ *SI, CurrBlock, RawState, WarningsHandler);
+
if (BlockInfo.allBackEdgesVisited(CurrBlock, *SI))
BlockInfo.discardInfo(*SI);
} else {
- BlockInfo.addInfo(*SI, CurrStates, OwnershipTaken);
+ BlockInfo.addInfo(*SI, RawState, CurrStates);
}
}
-
- if (!OwnershipTaken)
- delete CurrStates;
CurrStates = nullptr;
}
@@ -1463,8 +1446,8 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
} // End of block iterator.
// Delete the last existing state map.
- delete CurrStates;
-
+ CurrStates = nullptr;
+
WarningsHandler.emitDiagnostics();
}
}} // end namespace clang::consumed
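The net effect in Consumed.cpp is that ConsumedStateMap ownership moves from manual new/delete into the map slots themselves. A sketch of the scheme with illustrative names:

    struct StateMap {
      void intersect(const StateMap &Other) { /* merge per-variable states */ }
    };

    std::vector<std::unique_ptr<StateMap>> Slots;

    void addInfo(unsigned ID, StateMap *Raw, std::unique_ptr<StateMap> &Owned) {
      auto &Entry = Slots[ID];
      if (Entry)
        Entry->intersect(*Raw);                    // merge into existing state
      else if (Owned)
        Entry = std::move(Owned);                  // first taker gets ownership
      else
        Entry = llvm::make_unique<StateMap>(*Raw); // later takers get copies
    }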
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index e2c6ab5d9485..b282a5bbd8d8 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -258,16 +258,15 @@ private:
typedef SmallVector<const ValueDecl*, 4> BeforeVect;
struct BeforeInfo {
- BeforeInfo() : Vect(nullptr), Visited(false) { }
- BeforeInfo(BeforeInfo &&O)
- : Vect(std::move(O.Vect)), Visited(O.Visited)
- {}
+ BeforeInfo() : Visited(0) {}
+ BeforeInfo(BeforeInfo &&O) : Vect(std::move(O.Vect)), Visited(O.Visited) {}
- std::unique_ptr<BeforeVect> Vect;
- int Visited;
+ BeforeVect Vect;
+ int Visited;
};
- typedef llvm::DenseMap<const ValueDecl*, BeforeInfo> BeforeMap;
+ typedef llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>
+ BeforeMap;
typedef llvm::DenseMap<const ValueDecl*, bool> CycleMap;
public:
@@ -276,6 +275,9 @@ public:
BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
ThreadSafetyAnalyzer& Analyzer);
+ BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
+ ThreadSafetyAnalyzer &Analyzer);
+
void checkBeforeAfter(const ValueDecl* Vd,
const FactSet& FSet,
ThreadSafetyAnalyzer& Analyzer,
@@ -787,7 +789,7 @@ static void findBlockLocations(CFG *CFGraph,
}
}
- if (!CurrBlockInfo->ExitLoc.isInvalid()) {
+ if (CurrBlockInfo->ExitLoc.isValid()) {
// This block contains at least one statement. Find the source location
// of the first statement in the block.
for (CFGBlock::const_iterator BI = CurrBlock->begin(),
@@ -965,26 +967,27 @@ public:
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
ThreadSafetyAnalyzer& Analyzer) {
// Create a new entry for Vd.
- auto& Entry = BMap.FindAndConstruct(Vd);
- BeforeInfo* Info = &Entry.second;
- BeforeVect* Bv = nullptr;
+ BeforeInfo *Info = nullptr;
+ {
+ // Keep InfoPtr in its own scope in case BMap is modified later and the
+ // reference becomes invalid.
+ std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
+ if (!InfoPtr)
+ InfoPtr.reset(new BeforeInfo());
+ Info = InfoPtr.get();
+ }
for (Attr* At : Vd->attrs()) {
switch (At->getKind()) {
case attr::AcquiredBefore: {
auto *A = cast<AcquiredBeforeAttr>(At);
- // Create a new BeforeVect for Vd if necessary.
- if (!Bv) {
- Bv = new BeforeVect;
- Info->Vect.reset(Bv);
- }
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
CapabilityExpr Cp =
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *Cpvd = Cp.valueDecl()) {
- Bv->push_back(Cpvd);
+ Info->Vect.push_back(Cpvd);
auto It = BMap.find(Cpvd);
if (It == BMap.end())
insertAttrExprs(Cpvd, Analyzer);
@@ -1001,20 +1004,8 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *ArgVd = Cp.valueDecl()) {
// Get entry for mutex listed in attribute
- BeforeInfo* ArgInfo;
- auto It = BMap.find(ArgVd);
- if (It == BMap.end())
- ArgInfo = insertAttrExprs(ArgVd, Analyzer);
- else
- ArgInfo = &It->second;
-
- // Create a new BeforeVect if necessary.
- BeforeVect* ArgBv = ArgInfo->Vect.get();
- if (!ArgBv) {
- ArgBv = new BeforeVect;
- ArgInfo->Vect.reset(ArgBv);
- }
- ArgBv->push_back(Vd);
+ BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
+ ArgInfo->Vect.push_back(Vd);
}
}
break;
@@ -1027,6 +1018,18 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
return Info;
}
+BeforeSet::BeforeInfo *
+BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
+ ThreadSafetyAnalyzer &Analyzer) {
+ auto It = BMap.find(Vd);
+ BeforeInfo *Info = nullptr;
+ if (It == BMap.end())
+ Info = insertAttrExprs(Vd, Analyzer);
+ else
+ Info = It->second.get();
+ assert(Info && "BMap contained nullptr?");
+ return Info;
+}
/// Return true if any mutexes in FSet are in the acquired_before set of Vd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
@@ -1041,12 +1044,7 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
if (!Vd)
return false;
- BeforeSet::BeforeInfo* Info;
- auto It = BMap.find(Vd);
- if (It == BMap.end())
- Info = insertAttrExprs(Vd, Analyzer);
- else
- Info = &It->second;
+ BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
if (Info->Visited == 1)
return true;
@@ -1054,13 +1052,12 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
if (Info->Visited == 2)
return false;
- BeforeVect* Bv = Info->Vect.get();
- if (!Bv)
+ if (Info->Vect.empty())
return false;
InfoVect.push_back(Info);
Info->Visited = 1;
- for (auto *Vdb : *Bv) {
+ for (auto *Vdb : Info->Vect) {
// Exclude mutexes in our immediate before set.
if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
StringRef L1 = StartVd->getName();
@@ -1926,34 +1923,42 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
}
}
-
if (ExamineArgs) {
if (FunctionDecl *FD = Exp->getDirectCallee()) {
- unsigned Fn = FD->getNumParams();
- unsigned Cn = Exp->getNumArgs();
- unsigned Skip = 0;
-
- unsigned i = 0;
- if (OperatorFun) {
- if (isa<CXXMethodDecl>(FD)) {
- // First arg in operator call is implicit self argument,
- // and doesn't appear in the FunctionDecl.
- Skip = 1;
- Cn--;
- } else {
- // Ignore the first argument of operators; it's been checked above.
- i = 1;
+
+ // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
+ // only turns off checking within the body of a function, but we also
+ // use it to turn off checking in arguments to the function. This
+ // could result in some false negatives, but the alternative is to
+ // create yet another attribute.
+ //
+ if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
+ unsigned Fn = FD->getNumParams();
+ unsigned Cn = Exp->getNumArgs();
+ unsigned Skip = 0;
+
+ unsigned i = 0;
+ if (OperatorFun) {
+ if (isa<CXXMethodDecl>(FD)) {
+ // First arg in operator call is implicit self argument,
+ // and doesn't appear in the FunctionDecl.
+ Skip = 1;
+ Cn--;
+ } else {
+ // Ignore the first argument of operators; it's been checked above.
+ i = 1;
+ }
+ }
+ // Ignore default arguments
+ unsigned n = (Fn < Cn) ? Fn : Cn;
+
+ for (; i < n; ++i) {
+ ParmVarDecl* Pvd = FD->getParamDecl(i);
+ Expr* Arg = Exp->getArg(i+Skip);
+ QualType Qt = Pvd->getType();
+ if (Qt->isReferenceType())
+ checkAccess(Arg, AK_Read, POK_PassByRef);
}
- }
- // Ignore default arguments
- unsigned n = (Fn < Cn) ? Fn : Cn;
-
- for (; i < n; ++i) {
- ParmVarDecl* Pvd = FD->getParamDecl(i);
- Expr* Arg = Exp->getArg(i+Skip);
- QualType Qt = Pvd->getType();
- if (Qt->isReferenceType())
- checkAccess(Arg, AK_Read, POK_PassByRef);
}
}
}
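Two related changes above: BeforeVect is now held by value inside BeforeInfo, and BMap holds each BeforeInfo behind a unique_ptr. The indirection matters because llvm::DenseMap stores values inline in its buckets, so any insertion may rehash and move them; the scoped-InfoPtr pattern in insertAttrExprs keeps only the pinned pointee. A sketch using the patch's names:

    llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>> BMap;

    BeforeInfo *getOrCreate(const ValueDecl *Vd) {
      auto &Slot = BMap[Vd];        // reference dies at the next mutation...
      if (!Slot)
        Slot.reset(new BeforeInfo());
      return Slot.get();            // ...but the BeforeInfo itself stays put
    }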
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index d4b1ce26d4b3..ffe95ea22a42 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafetyCommon.cpp ----------------------------------*- C++ --*-===//
+//===- ThreadSafetyCommon.cpp -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -31,6 +31,7 @@
#include <algorithm>
#include <climits>
#include <vector>
+
using namespace clang;
using namespace threadSafety;
@@ -66,7 +67,6 @@ static bool isIncompletePhi(const til::SExpr *E) {
typedef SExprBuilder::CallingContext CallingContext;
-
til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
auto It = SMap.find(S);
if (It != SMap.end())
@@ -74,7 +74,6 @@ til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
return nullptr;
}
-
til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
Walker.walk(*this);
return Scfg;
@@ -85,7 +84,6 @@ static bool isCalleeArrow(const Expr *E) {
return ME ? ME->isArrow() : false;
}
-
/// \brief Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
@@ -148,7 +146,6 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return translateAttrExpr(AttrExp, &Ctx);
}
-
/// \brief Translate a clang expression in an attribute to a til::SExpr.
// This assumes a CallingContext has already been created.
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
@@ -195,8 +192,6 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return CapabilityExpr(E, Neg);
}
-
-
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.
@@ -268,8 +263,6 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
return new (Arena) til::Undefined(S);
}
-
-
til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
CallingContext *Ctx) {
const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
@@ -290,11 +283,10 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
VD = FD->getParamDecl(I);
}
- // For non-local variables, treat it as a referenced to a named object.
+ // For non-local variables, treat it as a reference to a named object.
return new (Arena) til::LiteralPtr(VD);
}
-
til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
CallingContext *Ctx) {
// Substitute for 'this'
@@ -313,7 +305,7 @@ static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
return P->clangDecl();
if (auto *L = dyn_cast<til::LiteralPtr>(E))
return L->clangDecl();
- return 0;
+ return nullptr;
}
static bool hasCppPointerType(const til::SExpr *E) {
@@ -344,7 +336,8 @@ til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
til::SExpr *BE = translate(ME->getBase(), Ctx);
til::SExpr *E = new (Arena) til::SApply(BE);
- const ValueDecl *D = ME->getMemberDecl();
+ const ValueDecl *D =
+ cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
if (auto *VD = dyn_cast<CXXMethodDecl>(D))
D = getFirstVirtualDecl(VD);
@@ -354,7 +347,6 @@ til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
return P;
}
-
til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
CallingContext *Ctx,
const Expr *SelfE) {
@@ -380,7 +372,6 @@ til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
return new (Arena) til::Call(E, CE);
}
-
til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
const CXXMemberCallExpr *ME, CallingContext *Ctx) {
if (CapabilityExprMode) {
@@ -396,7 +387,6 @@ til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
ME->getImplicitObjectArgument());
}
-
til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
const CXXOperatorCallExpr *OCE, CallingContext *Ctx) {
if (CapabilityExprMode) {
@@ -411,7 +401,6 @@ til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
return translateCallExpr(cast<CallExpr>(OCE), Ctx);
}
-
til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
CallingContext *Ctx) {
switch (UO->getOpcode()) {
@@ -456,12 +445,12 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
case UO_Real:
case UO_Imag:
case UO_Extension:
+ case UO_Coawait:
return new (Arena) til::Undefined(UO);
}
return new (Arena) til::Undefined(UO);
}
-
til::SExpr *SExprBuilder::translateBinOp(til::TIL_BinaryOpcode Op,
const BinaryOperator *BO,
CallingContext *Ctx, bool Reverse) {
@@ -473,7 +462,6 @@ til::SExpr *SExprBuilder::translateBinOp(til::TIL_BinaryOpcode Op,
return new (Arena) til::BinaryOp(Op, E0, E1);
}
-
til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
const BinaryOperator *BO,
CallingContext *Ctx,
@@ -500,7 +488,6 @@ til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
return new (Arena) til::Store(E0, E1);
}
-
til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
CallingContext *Ctx) {
switch (BO->getOpcode()) {
@@ -546,7 +533,6 @@ til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
return new (Arena) til::Undefined(BO);
}
-
til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
CallingContext *Ctx) {
clang::CastKind K = CE->getCastKind();
@@ -580,7 +566,6 @@ til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
}
}
-
til::SExpr *
SExprBuilder::translateArraySubscriptExpr(const ArraySubscriptExpr *E,
CallingContext *Ctx) {
@@ -589,7 +574,6 @@ SExprBuilder::translateArraySubscriptExpr(const ArraySubscriptExpr *E,
return new (Arena) til::ArrayIndex(E0, E1);
}
-
til::SExpr *
SExprBuilder::translateAbstractConditionalOperator(
const AbstractConditionalOperator *CO, CallingContext *Ctx) {
@@ -599,7 +583,6 @@ SExprBuilder::translateAbstractConditionalOperator(
return new (Arena) til::IfThenElse(C, T, E);
}
-
til::SExpr *
SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
DeclGroupRef DGrp = S->getDeclGroup();
@@ -621,8 +604,6 @@ SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
return nullptr;
}
-
-
// If (E) is non-trivial, then add it to the current basic block, and
// update the statement map so that S refers to E. Returns a new variable
// that refers to E.
@@ -639,7 +620,6 @@ til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
return E;
}
-
// Returns the current value of VD, if known, and nullptr otherwise.
til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
auto It = LVarIdxMap.find(VD);
@@ -650,7 +630,6 @@ til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
return nullptr;
}
-
// if E is a til::Variable, update its clangDecl.
static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) {
if (!E)
@@ -670,7 +649,6 @@ til::SExpr *SExprBuilder::addVarDecl(const ValueDecl *VD, til::SExpr *E) {
return E;
}
-
// Updates a current variable declaration. (E.g. by assignment)
til::SExpr *SExprBuilder::updateVarDecl(const ValueDecl *VD, til::SExpr *E) {
maybeUpdateVD(E, VD);
@@ -685,7 +663,6 @@ til::SExpr *SExprBuilder::updateVarDecl(const ValueDecl *VD, til::SExpr *E) {
return E;
}
-
// Make a Phi node in the current block for the i^th variable in CurrentVarMap.
// If E != null, sets Phi[CurrentBlockInfo->ArgIndex] = E.
// If E == null, this is a backedge and will be set later.
@@ -728,7 +705,6 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
CurrentLVarMap.elem(i).second = Ph;
}
-
// Merge values from Map into the current variable map.
// This will construct Phi nodes in the current basic block as necessary.
void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
@@ -763,7 +739,6 @@ void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
}
}
-
// Merge a back edge into the current variable map.
// This will create phi nodes for all variables in the variable map.
void SExprBuilder::mergeEntryMapBackEdge() {
@@ -790,7 +765,6 @@ void SExprBuilder::mergeEntryMapBackEdge() {
}
}
-
// Update the phi nodes that were initially created for a back edge
// once the variable definitions have been computed.
// I.e., merge the current variable map into the phi nodes for Blk.
@@ -843,7 +817,6 @@ void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
}
}
-
void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
// Intialize TIL basic block and add it to the CFG.
CurrentBB = lookupBlock(B);
@@ -857,7 +830,6 @@ void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
// assert(!CurrentLVarMap.valid() && "CurrentLVarMap already initialized.");
}
-
void SExprBuilder::handlePredecessor(const CFGBlock *Pred) {
// Compute CurrentLVarMap on entry from ExitMaps of predecessors
@@ -873,12 +845,10 @@ void SExprBuilder::handlePredecessor(const CFGBlock *Pred) {
++CurrentBlockInfo->ProcessedPredecessors;
}
-
void SExprBuilder::handlePredecessorBackEdge(const CFGBlock *Pred) {
mergeEntryMapBackEdge();
}
-
void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
// The merge*() methods have created arguments.
// Push those arguments onto the basic block.
@@ -888,13 +858,11 @@ void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
CurrentBB->addArgument(A);
}
-
void SExprBuilder::handleStatement(const Stmt *S) {
til::SExpr *E = translate(S, nullptr);
addStatement(E, S);
}
-
void SExprBuilder::handleDestructorCall(const VarDecl *VD,
const CXXDestructorDecl *DD) {
til::SExpr *Sf = new (Arena) til::LiteralPtr(VD);
@@ -904,8 +872,6 @@ void SExprBuilder::handleDestructorCall(const VarDecl *VD,
addStatement(E, nullptr);
}
-
-
void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
CurrentBB->instructions().reserve(
static_cast<unsigned>(CurrentInstructions.size()), Arena);
@@ -933,18 +899,15 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
}
}
-
void SExprBuilder::handleSuccessor(const CFGBlock *Succ) {
++CurrentBlockInfo->UnprocessedSuccessors;
}
-
void SExprBuilder::handleSuccessorBackEdge(const CFGBlock *Succ) {
mergePhiNodesBackEdge(Succ);
++BBInfo[Succ->getBlockID()].ProcessedPredecessors;
}
-
void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
CurrentArguments.clear();
CurrentInstructions.clear();
@@ -953,7 +916,6 @@ void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
CurrentBlockInfo = nullptr;
}
-
void SExprBuilder::exitCFG(const CFGBlock *Last) {
for (auto *Ph : IncompleteArgs) {
if (Ph->status() == til::Phi::PH_Incomplete)
@@ -965,7 +927,6 @@ void SExprBuilder::exitCFG(const CFGBlock *Last) {
IncompleteArgs.clear();
}
-
/*
void printSCFG(CFGWalker &Walker) {
llvm::BumpPtrAllocator Bpa;
diff --git a/lib/Basic/Attributes.cpp b/lib/Basic/Attributes.cpp
index da9ac793f163..c215366fc398 100644
--- a/lib/Basic/Attributes.cpp
+++ b/lib/Basic/Attributes.cpp
@@ -4,8 +4,8 @@
using namespace clang;
int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
- const IdentifierInfo *Attr, const llvm::Triple &T,
- const LangOptions &LangOpts) {
+ const IdentifierInfo *Attr, const TargetInfo &Target,
+ const LangOptions &LangOpts) {
StringRef Name = Attr->getName();
// Normalize the attribute name, __foo__ becomes foo.
if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
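A hypothetical call site after the signature change; AttrII stands in for the attribute's IdentifierInfo, and callers now thread the whole TargetInfo through instead of just its triple:

    int Version = hasAttribute(AttrSyntax::GNU, /*Scope=*/nullptr, AttrII,
                               PP.getTargetInfo(), PP.getLangOpts());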
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index 8efcac6d7fe4..69b10c13ede1 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -15,86 +15,78 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
using namespace clang;
static const Builtin::Info BuiltinInfo[] = {
- { "not a builtin function", nullptr, nullptr, nullptr, ALL_LANGUAGES},
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LANGBUILTIN(ID, TYPE, ATTRS, BUILTIN_LANG) { #ID, TYPE, ATTRS, 0, BUILTIN_LANG },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) { #ID, TYPE, ATTRS, HEADER,\
- BUILTIN_LANG },
+ { "not a builtin function", nullptr, nullptr, nullptr, ALL_LANGUAGES,nullptr},
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LANGBUILTIN(ID, TYPE, ATTRS, LANGS) \
+ { #ID, TYPE, ATTRS, nullptr, LANGS, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, nullptr },
#include "clang/Basic/Builtins.def"
};
-const Builtin::Info &Builtin::Context::GetRecord(unsigned ID) const {
+const Builtin::Info &Builtin::Context::getRecord(unsigned ID) const {
if (ID < Builtin::FirstTSBuiltin)
return BuiltinInfo[ID];
- assert(ID - Builtin::FirstTSBuiltin < NumTSRecords && "Invalid builtin ID!");
+ assert(((ID - Builtin::FirstTSBuiltin) <
+ (TSRecords.size() + AuxTSRecords.size())) &&
+ "Invalid builtin ID!");
+ if (isAuxBuiltinID(ID))
+ return AuxTSRecords[getAuxBuiltinID(ID) - Builtin::FirstTSBuiltin];
return TSRecords[ID - Builtin::FirstTSBuiltin];
}
-Builtin::Context::Context() {
- // Get the target specific builtins from the target.
- TSRecords = nullptr;
- NumTSRecords = 0;
+void Builtin::Context::InitializeTarget(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
+ assert(TSRecords.empty() && "Already initialized target?");
+ TSRecords = Target.getTargetBuiltins();
+ if (AuxTarget)
+ AuxTSRecords = AuxTarget->getTargetBuiltins();
}
-void Builtin::Context::InitializeTarget(const TargetInfo &Target) {
- assert(NumTSRecords == 0 && "Already initialized target?");
- Target.getTargetBuiltins(TSRecords, NumTSRecords);
-}
-
-bool Builtin::Context::BuiltinIsSupported(const Builtin::Info &BuiltinInfo,
+bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
const LangOptions &LangOpts) {
bool BuiltinsUnsupported = LangOpts.NoBuiltin &&
strchr(BuiltinInfo.Attributes, 'f');
bool MathBuiltinsUnsupported =
- LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
+ LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
- bool GnuModeUnsupported = !LangOpts.GNUMode &&
- (BuiltinInfo.builtin_lang & GNU_LANG);
- bool MSModeUnsupported = !LangOpts.MicrosoftExt &&
- (BuiltinInfo.builtin_lang & MS_LANG);
- bool ObjCUnsupported = !LangOpts.ObjC1 &&
- BuiltinInfo.builtin_lang == OBJC_LANG;
+ bool GnuModeUnsupported = !LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG);
+ bool MSModeUnsupported =
+ !LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG);
+ bool ObjCUnsupported = !LangOpts.ObjC1 && BuiltinInfo.Langs == OBJC_LANG;
return !BuiltinsUnsupported && !MathBuiltinsUnsupported &&
!GnuModeUnsupported && !MSModeUnsupported && !ObjCUnsupported;
}
-/// InitializeBuiltins - Mark the identifiers for all the builtins with their
+/// initializeBuiltins - Mark the identifiers for all the builtins with their
/// appropriate builtin ID # and mark any non-portable builtin identifiers as
/// such.
-void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
+void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
const LangOptions& LangOpts) {
// Step #1: mark all target-independent builtins with their ID's.
for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
- if (BuiltinIsSupported(BuiltinInfo[i], LangOpts)) {
+ if (builtinIsSupported(BuiltinInfo[i], LangOpts)) {
Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
}
// Step #2: Register target-specific builtins.
- for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
- if (BuiltinIsSupported(TSRecords[i], LangOpts))
- Table.get(TSRecords[i].Name).setBuiltinID(i+Builtin::FirstTSBuiltin);
-}
-
-void
-Builtin::Context::GetBuiltinNames(SmallVectorImpl<const char *> &Names) {
- // Final all target-independent names
- for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
- if (!strchr(BuiltinInfo[i].Attributes, 'f'))
- Names.push_back(BuiltinInfo[i].Name);
-
- // Find target-specific names.
- for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
- if (!strchr(TSRecords[i].Attributes, 'f'))
- Names.push_back(TSRecords[i].Name);
+ for (unsigned i = 0, e = TSRecords.size(); i != e; ++i)
+ if (builtinIsSupported(TSRecords[i], LangOpts))
+ Table.get(TSRecords[i].Name).setBuiltinID(i + Builtin::FirstTSBuiltin);
+
+ // Step #3: Register target-specific builtins for AuxTarget.
+ for (unsigned i = 0, e = AuxTSRecords.size(); i != e; ++i)
+ Table.get(AuxTSRecords[i].Name)
+ .setBuiltinID(i + Builtin::FirstTSBuiltin + TSRecords.size());
}
-void Builtin::Context::ForgetBuiltin(unsigned ID, IdentifierTable &Table) {
- Table.get(GetRecord(ID).Name).setBuiltinID(0);
+void Builtin::Context::forgetBuiltin(unsigned ID, IdentifierTable &Table) {
+ Table.get(getRecord(ID).Name).setBuiltinID(0);
}
bool Builtin::Context::isLike(unsigned ID, unsigned &FormatIdx,
@@ -105,7 +97,7 @@ bool Builtin::Context::isLike(unsigned ID, unsigned &FormatIdx,
assert(::toupper(Fmt[0]) == Fmt[1] &&
"Format string is not in the form \"xX\"");
- const char *Like = ::strpbrk(GetRecord(ID).Attributes, Fmt);
+ const char *Like = ::strpbrk(getRecord(ID).Attributes, Fmt);
if (!Like)
return false;
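
With an aux target in play, a single builtin ID space is split three ways: IDs below Builtin::FirstTSBuiltin index the shared BuiltinInfo table, the next TSRecords.size() IDs index the primary target's builtins, and the rest index AuxTSRecords (e.g. the host target during CUDA device compilation). A minimal sketch of the ID helpers getRecord() leans on — their real definitions live in Builtins.h, not in this hunk, so take the bodies below as an assumption consistent with the indexing above:

    // Assumed helpers, matching the arithmetic in getRecord().
    bool isAuxBuiltinID(unsigned ID) const {
      // Aux-target IDs start right after the primary target's block.
      return ID >= Builtin::FirstTSBuiltin + TSRecords.size();
    }
    unsigned getAuxBuiltinID(unsigned ID) const {
      // Shift an aux ID down so it can be indexed like a primary-target ID.
      return ID - TSRecords.size();
    }
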
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index f89caf7b248f..7cf7305827fe 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -226,12 +226,12 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
// Update all diagnostic states that are active after the given location.
for (DiagStatePointsTy::iterator
I = Pos+1, E = DiagStatePoints.end(); I != E; ++I) {
- GetCurDiagState()->setMapping(Diag, Mapping);
+ I->State->setMapping(Diag, Mapping);
}
// If the location corresponds to an existing point, just update its state.
if (Pos->Loc == Loc) {
- GetCurDiagState()->setMapping(Diag, Mapping);
+ Pos->State->setMapping(Diag, Mapping);
return;
}
@@ -240,7 +240,7 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc));
DiagStates.push_back(*Pos->State);
DiagState *NewState = &DiagStates.back();
- GetCurDiagState()->setMapping(Diag, Mapping);
+ NewState->setMapping(Diag, Mapping);
DiagStatePoints.insert(Pos+1, DiagStatePoint(NewState,
FullSourceLoc(Loc, *SourceMgr)));
}
@@ -278,8 +278,8 @@ bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
return true;
// Perform the mapping change.
- for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i) {
- DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(GroupDiags[i]);
+ for (diag::kind Diag : GroupDiags) {
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
if (Info.getSeverity() == diag::Severity::Error ||
Info.getSeverity() == diag::Severity::Fatal)
@@ -309,8 +309,8 @@ bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
return true;
// Perform the mapping change.
- for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i) {
- DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(GroupDiags[i]);
+ for (diag::kind Diag : GroupDiags) {
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
if (Info.getSeverity() == diag::Severity::Fatal)
Info.setSeverity(diag::Severity::Error);
@@ -329,9 +329,9 @@ void DiagnosticsEngine::setSeverityForAll(diag::Flavor Flavor,
Diags->getAllDiagnostics(Flavor, AllDiags);
// Set the mapping.
- for (unsigned i = 0, e = AllDiags.size(); i != e; ++i)
- if (Diags->isBuiltinWarningOrExtension(AllDiags[i]))
- setSeverity(AllDiags[i], Map, Loc);
+ for (diag::kind Diag : AllDiags)
+ if (Diags->isBuiltinWarningOrExtension(Diag))
+ setSeverity(Diag, Map, Loc);
}
void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
@@ -945,8 +945,6 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
OutStr.append(Tree.begin(), Tree.end());
}
-StoredDiagnostic::StoredDiagnostic() { }
-
StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
StringRef Message)
: ID(ID), Level(Level), Loc(), Message(Message) { }
@@ -975,8 +973,6 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
{
}
-StoredDiagnostic::~StoredDiagnostic() { }
-
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
/// DiagnosticConsumer should be included in the number of diagnostics
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 643503b00b91..a34c7fecb53b 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -100,14 +100,10 @@ static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
#ifndef NDEBUG
static bool IsFirst = true; // So the check is only performed on first call.
if (IsFirst) {
- for (unsigned i = 1; i != StaticDiagInfoSize; ++i) {
- assert(StaticDiagInfo[i-1].DiagID != StaticDiagInfo[i].DiagID &&
- "Diag ID conflict, the enums at the start of clang::diag (in "
- "DiagnosticIDs.h) probably need to be increased");
-
- assert(StaticDiagInfo[i-1] < StaticDiagInfo[i] &&
- "Improperly sorted diag info");
- }
+ assert(std::is_sorted(std::begin(StaticDiagInfo),
+ std::end(StaticDiagInfo)) &&
+ "Diag ID conflict, the enums at the start of clang::diag (in "
+ "DiagnosticIDs.h) probably need to be increased");
IsFirst = false;
}
#endif
@@ -505,11 +501,6 @@ static const WarningOption OptionTable[] = {
#include "clang/Basic/DiagnosticGroups.inc"
#undef GET_DIAG_TABLE
};
-static const size_t OptionTableSize = llvm::array_lengthof(OptionTable);
-
-static bool WarningOptionCompare(const WarningOption &LHS, StringRef RHS) {
- return LHS.getName() < RHS;
-}
/// getWarningOptionForDiag - Return the lowest-level warning option that
/// enables the specified diagnostic. If there is no -Wfoo flag that controls
@@ -553,10 +544,12 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor,
bool
DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
SmallVectorImpl<diag::kind> &Diags) const {
- const WarningOption *Found = std::lower_bound(
- OptionTable, OptionTable + OptionTableSize, Group, WarningOptionCompare);
- if (Found == OptionTable + OptionTableSize ||
- Found->getName() != Group)
+ auto Found = std::lower_bound(std::begin(OptionTable), std::end(OptionTable),
+ Group,
+ [](const WarningOption &LHS, StringRef RHS) {
+ return LHS.getName() < RHS;
+ });
+ if (Found == std::end(OptionTable) || Found->getName() != Group)
return true; // Option not found.
return ::getDiagnosticsInGroup(Flavor, Found, Diags);
@@ -573,19 +566,18 @@ StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
StringRef Group) {
StringRef Best;
unsigned BestDistance = Group.size() + 1; // Sanity threshold.
- for (const WarningOption *i = OptionTable, *e = OptionTable + OptionTableSize;
- i != e; ++i) {
+ for (const WarningOption &O : OptionTable) {
// Don't suggest ignored warning flags.
- if (!i->Members && !i->SubGroups)
+ if (!O.Members && !O.SubGroups)
continue;
- unsigned Distance = i->getName().edit_distance(Group, true, BestDistance);
+ unsigned Distance = O.getName().edit_distance(Group, true, BestDistance);
if (Distance > BestDistance)
continue;
// Don't suggest groups that are not of this kind.
llvm::SmallVector<diag::kind, 8> Diags;
- if (::getDiagnosticsInGroup(Flavor, i, Diags) || Diags.empty())
+ if (::getDiagnosticsInGroup(Flavor, &O, Diags) || Diags.empty())
continue;
if (Distance == BestDistance) {
@@ -593,7 +585,7 @@ StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
Best = "";
} else if (Distance < BestDistance) {
// This is a better match.
- Best = i->getName();
+ Best = O.getName();
BestDistance = Distance;
}
}
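
The lookup now binary-searches OptionTable directly with std::lower_bound and an inline comparator, which is only valid because the generated table is sorted by name. A self-contained sketch of the same idiom (Option, Table, and find are illustrative, not from the patch):

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <iterator>

    struct Option { const char *Name; };
    static const Option Table[] = {{"bar"}, {"baz"}, {"foo"}}; // sorted

    const Option *find(const char *Key) {
      // Precondition mirrored by the StaticDiagInfo assert above.
      assert(std::is_sorted(std::begin(Table), std::end(Table),
                            [](const Option &L, const Option &R) {
                              return std::strcmp(L.Name, R.Name) < 0;
                            }));
      auto It = std::lower_bound(std::begin(Table), std::end(Table), Key,
                                 [](const Option &L, const char *R) {
                                   return std::strcmp(L.Name, R) < 0;
                                 });
      if (It == std::end(Table) || std::strcmp(It->Name, Key) != 0)
        return nullptr; // not found
      return It;
    }
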
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index d4927443aa9c..cb3f75c25a0b 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -22,6 +22,7 @@
#include "clang/Frontend/PCHContainerOperations.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -58,12 +59,7 @@ FileManager::FileManager(const FileSystemOptions &FSO,
this->FS = vfs::getRealFileSystem();
}
-FileManager::~FileManager() {
- for (unsigned i = 0, e = VirtualFileEntries.size(); i != e; ++i)
- delete VirtualFileEntries[i];
- for (unsigned i = 0, e = VirtualDirectoryEntries.size(); i != e; ++i)
- delete VirtualDirectoryEntries[i];
-}
+FileManager::~FileManager() = default;
void FileManager::addStatCache(std::unique_ptr<FileSystemStatCache> statCache,
bool AtBeginning) {
@@ -137,14 +133,14 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
// at the same time. Therefore, if DirName is already in the cache,
// we don't need to recurse as its ancestors must also already be in
// the cache.
- if (NamedDirEnt.second)
+ if (NamedDirEnt.second && NamedDirEnt.second != NON_EXISTENT_DIR)
return;
// Add the virtual directory to the cache.
- DirectoryEntry *UDE = new DirectoryEntry;
+ auto UDE = llvm::make_unique<DirectoryEntry>();
UDE->Name = NamedDirEnt.first().data();
- NamedDirEnt.second = UDE;
- VirtualDirectoryEntries.push_back(UDE);
+ NamedDirEnt.second = UDE.get();
+ VirtualDirectoryEntries.push_back(std::move(UDE));
// Recursively add the other ancestors.
addAncestorsAsVirtualDirs(DirName);
@@ -375,8 +371,8 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
}
if (!UFE) {
- UFE = new FileEntry();
- VirtualFileEntries.push_back(UFE);
+ VirtualFileEntries.push_back(llvm::make_unique<FileEntry>());
+ UFE = VirtualFileEntries.back().get();
NamedFileEnt.second = UFE;
}
@@ -389,16 +385,28 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
return UFE;
}
-void FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
+bool FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
StringRef pathRef(path.data(), path.size());
if (FileSystemOpts.WorkingDir.empty()
|| llvm::sys::path::is_absolute(pathRef))
- return;
+ return false;
SmallString<128> NewPath(FileSystemOpts.WorkingDir);
llvm::sys::path::append(NewPath, pathRef);
path = NewPath;
+ return true;
+}
+
+bool FileManager::makeAbsolutePath(SmallVectorImpl<char> &Path) const {
+ bool Changed = FixupRelativePath(Path);
+
+ if (!llvm::sys::path::is_absolute(StringRef(Path.data(), Path.size()))) {
+ llvm::sys::fs::make_absolute(Path);
+ Changed = true;
+ }
+
+ return Changed;
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
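
FixupRelativePath now reports whether it rewrote the path, and the new makeAbsolutePath builds on it: apply the FileManager's configured working directory first, and only fall back to the process working directory if the path is still relative. A hedged usage sketch (the FileMgr instance is assumed):

    llvm::SmallString<128> Path("include/foo.h");
    if (FileMgr.makeAbsolutePath(Path)) {
      // Path was rewritten: first against FileSystemOpts.WorkingDir if one
      // is set (FixupRelativePath), then against the process working
      // directory if it still wasn't absolute.
    }
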
@@ -501,11 +509,9 @@ void FileManager::GetUniqueIDMapping(
UIDToFiles[FE->getValue()->getUID()] = FE->getValue();
// Map virtual file entries
- for (SmallVectorImpl<FileEntry *>::const_iterator
- VFE = VirtualFileEntries.begin(), VFEEnd = VirtualFileEntries.end();
- VFE != VFEEnd; ++VFE)
- if (*VFE && *VFE != NON_EXISTENT_FILE)
- UIDToFiles[(*VFE)->getUID()] = *VFE;
+ for (const auto &VFE : VirtualFileEntries)
+ if (VFE && VFE.get() != NON_EXISTENT_FILE)
+ UIDToFiles[VFE->getUID()] = VFE.get();
}
void FileManager::modifyFileEntry(FileEntry *File,
@@ -514,37 +520,6 @@ void FileManager::modifyFileEntry(FileEntry *File,
File->ModTime = ModificationTime;
}
-/// Remove '.' path components from the given absolute path.
-/// \return \c true if any changes were made.
-// FIXME: Move this to llvm::sys::path.
-bool FileManager::removeDotPaths(SmallVectorImpl<char> &Path) {
- using namespace llvm::sys;
-
- SmallVector<StringRef, 16> ComponentStack;
- StringRef P(Path.data(), Path.size());
-
- // Skip the root path, then look for traversal in the components.
- StringRef Rel = path::relative_path(P);
- bool AnyDots = false;
- for (StringRef C : llvm::make_range(path::begin(Rel), path::end(Rel))) {
- if (C == ".") {
- AnyDots = true;
- continue;
- }
- ComponentStack.push_back(C);
- }
-
- if (!AnyDots)
- return false;
-
- SmallString<256> Buffer = path::root_path(P);
- for (StringRef C : ComponentStack)
- path::append(Buffer, C);
-
- Path.swap(Buffer);
- return true;
-}
-
StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
// FIXME: use llvm::sys::fs::canonical() when it gets implemented
llvm::DenseMap<const DirectoryEntry *, llvm::StringRef>::iterator Known
@@ -556,17 +531,20 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
#ifdef LLVM_ON_UNIX
char CanonicalNameBuf[PATH_MAX];
- if (realpath(Dir->getName(), CanonicalNameBuf)) {
- unsigned Len = strlen(CanonicalNameBuf);
- char *Mem = static_cast<char *>(CanonicalNameStorage.Allocate(Len, 1));
- memcpy(Mem, CanonicalNameBuf, Len);
- CanonicalName = StringRef(Mem, Len);
- }
+ if (realpath(Dir->getName(), CanonicalNameBuf))
+ CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
#else
SmallString<256> CanonicalNameBuf(CanonicalName);
llvm::sys::fs::make_absolute(CanonicalNameBuf);
llvm::sys::path::native(CanonicalNameBuf);
- removeDotPaths(CanonicalNameBuf);
+  // Removing '..' components is not always sound in the presence of
+  // symlinks, but we've run into paths in the wild that need it, and on
+  // Windows symlinks are significantly less prevalent, so removing '..'
+  // is pretty safe here.
+ // Ideally we'd have an equivalent of `realpath` and could implement
+ // sys::fs::canonical across all the platforms.
+ llvm::sys::path::remove_dots(CanonicalNameBuf, /* remove_dot_dot */ true);
+ CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
#endif
CanonicalDirNames.insert(std::make_pair(Dir, CanonicalName));
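
The hand-rolled removeDotPaths is gone in favor of llvm::sys::path::remove_dots, which can also fold '..' components when asked. Roughly:

    llvm::SmallString<256> P("a/./b/../c");
    llvm::sys::path::remove_dots(P, /*remove_dot_dot=*/true);
    // P == "a/c"; with remove_dot_dot=false only the '.' component is
    // dropped, leaving "a/b/../c".
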
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 7705834d91a0..67de1cb6fdaa 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -111,7 +111,8 @@ namespace {
KEYCONCEPTS = 0x10000,
KEYOBJC2 = 0x20000,
KEYZVECTOR = 0x40000,
- KEYALL = (0x7ffff & ~KEYNOMS18 &
+ KEYCOROUTINES = 0x80000,
+ KEYALL = (0xfffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
@@ -147,6 +148,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.ObjC2 && (Flags & KEYOBJC2)) return KS_Enabled;
+ if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) return KS_Future;
return KS_Disabled;
}
@@ -220,10 +222,7 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
AddKeyword("__unknown_anytype", tok::kw___unknown_anytype, KEYALL,
LangOpts, *this);
- // FIXME: __declspec isn't really a CUDA extension, however it is required for
- // supporting cuda_builtin_vars.h, which uses __declspec(property). Once that
- // has been rewritten in terms of something more generic, remove this code.
- if (LangOpts.CUDA)
+ if (LangOpts.DeclSpecKeyword)
AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this);
}
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 4314b41eb340..0b7832636943 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -28,11 +28,12 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
bool IsFramework, bool IsExplicit, unsigned VisibilityID)
: Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), Directory(),
Umbrella(), Signature(0), ASTFile(nullptr), VisibilityID(VisibilityID),
- IsMissingRequirement(false), IsAvailable(true), IsFromModuleFile(false),
- IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false),
- IsExternC(false), IsInferred(false), InferSubmodules(false),
- InferExplicitSubmodules(false), InferExportWildcard(false),
- ConfigMacrosExhaustive(false), NameVisibility(Hidden) {
+ IsMissingRequirement(false), HasIncompatibleModuleFile(false),
+ IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
+ IsExplicit(IsExplicit), IsSystem(false), IsExternC(false),
+ IsInferred(false), InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), ConfigMacrosExhaustive(false),
+ NameVisibility(Hidden) {
if (Parent) {
if (!Parent->isAvailable())
IsAvailable = false;
@@ -139,6 +140,15 @@ std::string Module::getFullModuleName() const {
return Result;
}
+bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
+ for (const Module *M = this; M; M = M->Parent) {
+ if (nameParts.empty() || M->Name != nameParts.back())
+ return false;
+ nameParts = nameParts.drop_back();
+ }
+ return nameParts.empty();
+}
+
Module::DirectoryName Module::getUmbrellaDir() const {
if (Header U = getUmbrellaHeader())
return {"", U.Entry->getDir()};
diff --git a/lib/Basic/ObjCRuntime.cpp b/lib/Basic/ObjCRuntime.cpp
index be50fc4fe24f..133c66945dde 100644
--- a/lib/Basic/ObjCRuntime.cpp
+++ b/lib/Basic/ObjCRuntime.cpp
@@ -30,6 +30,7 @@ raw_ostream &clang::operator<<(raw_ostream &out, const ObjCRuntime &value) {
case ObjCRuntime::MacOSX: out << "macosx"; break;
case ObjCRuntime::FragileMacOSX: out << "macosx-fragile"; break;
case ObjCRuntime::iOS: out << "ios"; break;
+ case ObjCRuntime::WatchOS: out << "watchos"; break;
case ObjCRuntime::GNUstep: out << "gnustep"; break;
case ObjCRuntime::GCC: out << "gcc"; break;
case ObjCRuntime::ObjFW: out << "objfw"; break;
@@ -62,6 +63,8 @@ bool ObjCRuntime::tryParse(StringRef input) {
kind = ObjCRuntime::FragileMacOSX;
} else if (runtimeName == "ios") {
kind = ObjCRuntime::iOS;
+ } else if (runtimeName == "watchos") {
+ kind = ObjCRuntime::WatchOS;
} else if (runtimeName == "gnustep") {
// If no version is specified then default to the most recent one that we
// know about.
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index b7407f60e6d1..577132dc1442 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -87,8 +87,11 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_PROC_BIND_unknown);
case OMPC_schedule:
- return llvm::StringSwitch<OpenMPScheduleClauseKind>(Str)
-#define OPENMP_SCHEDULE_KIND(Name) .Case(#Name, OMPC_SCHEDULE_##Name)
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_SCHEDULE_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_##Name))
+#define OPENMP_SCHEDULE_MODIFIER(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_SCHEDULE_unknown);
case OMPC_depend:
@@ -96,19 +99,29 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_DEPEND_unknown);
+ case OMPC_linear:
+ return llvm::StringSwitch<OpenMPLinearClauseKind>(Str)
+#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_LINEAR_unknown);
+ case OMPC_map:
+ return llvm::StringSwitch<OpenMPMapClauseKind>(Str)
+#define OPENMP_MAP_KIND(Name) .Case(#Name, OMPC_MAP_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_MAP_unknown);
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
case OMPC_shared:
case OMPC_reduction:
- case OMPC_linear:
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
@@ -122,6 +135,16 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -153,12 +176,17 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_schedule:
switch (Type) {
case OMPC_SCHEDULE_unknown:
+ case OMPC_SCHEDULE_MODIFIER_last:
return "unknown";
#define OPENMP_SCHEDULE_KIND(Name) \
- case OMPC_SCHEDULE_##Name: \
- return #Name;
+ case OMPC_SCHEDULE_##Name: \
+ return #Name;
+#define OPENMP_SCHEDULE_MODIFIER(Name) \
+ case OMPC_SCHEDULE_MODIFIER_##Name: \
+ return #Name;
#include "clang/Basic/OpenMPKinds.def"
}
+ llvm_unreachable("Invalid OpenMP 'schedule' clause type");
case OMPC_depend:
switch (Type) {
case OMPC_DEPEND_unknown:
@@ -168,20 +196,42 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
return #Name;
#include "clang/Basic/OpenMPKinds.def"
}
- llvm_unreachable("Invalid OpenMP 'schedule' clause type");
+ llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_linear:
+ switch (Type) {
+ case OMPC_LINEAR_unknown:
+ return "unknown";
+#define OPENMP_LINEAR_KIND(Name) \
+ case OMPC_LINEAR_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'linear' clause type");
+ case OMPC_map:
+ switch (Type) {
+ case OMPC_MAP_unknown:
+ return "unknown";
+#define OPENMP_MAP_KIND(Name) \
+ case OMPC_MAP_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid OpenMP 'map' clause type");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
case OMPC_shared:
case OMPC_reduction:
- case OMPC_linear:
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
@@ -195,6 +245,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -328,6 +388,16 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_target_data:
+ switch (CKind) {
+#define OPENMP_TARGET_DATA_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_teams:
switch (CKind) {
#define OPENMP_TEAMS_CLAUSE(Name) \
@@ -338,18 +408,75 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_cancel:
+ switch (CKind) {
+#define OPENMP_CANCEL_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_ordered:
+ switch (CKind) {
+#define OPENMP_ORDERED_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_taskloop:
+ switch (CKind) {
+#define OPENMP_TASKLOOP_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_taskloop_simd:
+ switch (CKind) {
+#define OPENMP_TASKLOOP_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_critical:
+ switch (CKind) {
+#define OPENMP_CRITICAL_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_distribute:
+ switch (CKind) {
+#define OPENMP_DISTRIBUTE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_unknown:
case OMPD_threadprivate:
case OMPD_section:
case OMPD_master:
- case OMPD_critical:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_ordered:
break;
}
return false;
@@ -357,8 +484,10 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_simd || DKind == OMPD_for || DKind == OMPD_for_simd ||
- DKind == OMPD_parallel_for ||
- DKind == OMPD_parallel_for_simd; // TODO add next directives.
+ DKind == OMPD_parallel_for || DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_taskloop ||
+ DKind == OMPD_taskloop_simd ||
+ DKind == OMPD_distribute; // TODO add next directives.
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -369,19 +498,32 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_parallel_sections; // TODO add next directives.
}
+bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd;
+}
+
bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_parallel || DKind == OMPD_parallel_for ||
DKind == OMPD_parallel_for_simd ||
DKind == OMPD_parallel_sections; // TODO add next directives.
}
+bool clang::isOpenMPTargetDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_target; // TODO add next directives.
+}
+
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_teams; // TODO add next directives.
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_simd || DKind == OMPD_for_simd ||
- DKind == OMPD_parallel_for_simd; // TODO add next directives.
+ DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_taskloop_simd; // TODO add next directives.
+}
+
+bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
+ return Kind == OMPD_distribute; // TODO add next directives.
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
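
Since the schedule clause now parses kinds and modifiers through one StringSwitch returning unsigned, the two enumerations must share a single numbering space, with modifiers numbered after the last kind. A sketch of the pattern with illustrative enumerators (not the real ones from OpenMPKinds.def):

    #include "llvm/ADT/StringSwitch.h"

    // Hypothetical layout: modifiers start after the last kind value, so a
    // single unsigned can carry either and callers disambiguate by range.
    enum Kind { K_static, K_dynamic, K_unknown };
    enum Modifier { M_monotonic = K_unknown + 1, M_last };

    unsigned parse(llvm::StringRef S) {
      return llvm::StringSwitch<unsigned>(S)
          .Case("static", K_static)
          .Case("dynamic", K_dynamic)
          .Case("monotonic", M_monotonic)
          .Default(K_unknown);
    }
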
diff --git a/lib/Basic/SanitizerBlacklist.cpp b/lib/Basic/SanitizerBlacklist.cpp
index 095fcd6ccaeb..de78c94bc195 100644
--- a/lib/Basic/SanitizerBlacklist.cpp
+++ b/lib/Basic/SanitizerBlacklist.cpp
@@ -40,7 +40,7 @@ bool SanitizerBlacklist::isBlacklistedFile(StringRef FileName,
bool SanitizerBlacklist::isBlacklistedLocation(SourceLocation Loc,
StringRef Category) const {
- return !Loc.isInvalid() &&
+ return Loc.isValid() &&
isBlacklistedFile(SM.getFilename(SM.getFileLoc(Loc)), Category);
}
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index c0b045331dd6..4c501616a3e8 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -279,9 +279,7 @@ void LineTableInfo::AddEntry(FileID FID,
/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
///
unsigned SourceManager::getLineTableFilenameID(StringRef Name) {
- if (!LineTable)
- LineTable = new LineTableInfo();
- return LineTable->getLineTableFilenameID(Name);
+ return getLineTable().getLineTableFilenameID(Name);
}
@@ -302,9 +300,7 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
// Remember that this file has #line directives now if it doesn't already.
const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
- if (!LineTable)
- LineTable = new LineTableInfo();
- LineTable->AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID);
+ getLineTable().AddLineNote(LocInfo.first, LocInfo.second, LineNo, FilenameID);
}
/// AddLineNote - Add a GNU line marker to the line table.
@@ -332,8 +328,7 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
// Remember that this file has #line directives now if it doesn't already.
const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
- if (!LineTable)
- LineTable = new LineTableInfo();
+ (void) getLineTable();
SrcMgr::CharacteristicKind FileKind;
if (IsExternCHeader)
@@ -366,7 +361,7 @@ LineTableInfo &SourceManager::getLineTable() {
SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr,
bool UserFilesAreVolatile)
: Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
- UserFilesAreVolatile(UserFilesAreVolatile),
+ UserFilesAreVolatile(UserFilesAreVolatile), FilesAreTransient(false),
ExternalSLocEntries(nullptr), LineTable(nullptr), NumLinearScans(0),
NumBinaryProbes(0) {
clearIDTables();
@@ -444,6 +439,7 @@ SourceManager::getOrCreateContentCache(const FileEntry *FileEnt,
}
Entry->IsSystemFile = isSystemFile;
+ Entry->IsTransient = FilesAreTransient;
return Entry;
}
@@ -484,10 +480,12 @@ std::pair<int, unsigned>
SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
unsigned TotalSize) {
assert(ExternalSLocEntries && "Don't have an external sloc source");
+ // Make sure we're not about to run out of source locations.
+ if (CurrentLoadedOffset - TotalSize < NextLocalOffset)
+ return std::make_pair(0, 0);
LoadedSLocEntryTable.resize(LoadedSLocEntryTable.size() + NumSLocEntries);
SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
CurrentLoadedOffset -= TotalSize;
- assert(CurrentLoadedOffset >= NextLocalOffset && "Out of source locations");
int ID = LoadedSLocEntryTable.size();
return std::make_pair(-ID - 1, CurrentLoadedOffset);
}
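
Local SLoc offsets grow up from zero while loaded offsets (PCH/modules) grow down from the top of the range, so the space is exhausted when the two would cross; the old assert becomes a recoverable failure signalled by the (0, 0) sentinel. A worked sketch of the check:

    // Illustrative arithmetic only; real values are SourceLocation offsets.
    bool canAllocate(unsigned NextLocalOffset, unsigned CurrentLoadedOffset,
                     unsigned TotalSize) {
      // E.g. NextLocalOffset = 900, CurrentLoadedOffset = 1000,
      // TotalSize = 150: 1000 - 150 = 850 < 900, so the regions would
      // collide and the request is refused.
      // Note the subtraction is unsigned, so this assumes
      // TotalSize <= CurrentLoadedOffset.
      return !(CurrentLoadedOffset - TotalSize < NextLocalOffset);
    }
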
@@ -676,6 +674,11 @@ void SourceManager::disableFileContentsOverride(const FileEntry *File) {
OverriddenFilesInfo->OverriddenFilesWithBuffer.erase(File);
}
+void SourceManager::setFileIsTransient(const FileEntry *File) {
+ const SrcMgr::ContentCache *CC = getOrCreateContentCache(File);
+ const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
+}
+
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
bool MyInvalid = false;
const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
@@ -995,12 +998,17 @@ SourceManager::getExpansionRange(SourceLocation Loc) const {
return Res;
}
-bool SourceManager::isMacroArgExpansion(SourceLocation Loc) const {
+bool SourceManager::isMacroArgExpansion(SourceLocation Loc,
+ SourceLocation *StartLoc) const {
if (!Loc.isMacroID()) return false;
FileID FID = getFileID(Loc);
const SrcMgr::ExpansionInfo &Expansion = getSLocEntry(FID).getExpansion();
- return Expansion.isMacroArgExpansion();
+ if (!Expansion.isMacroArgExpansion()) return false;
+
+ if (StartLoc)
+ *StartLoc = Expansion.getExpansionLocStart();
+ return true;
}
bool SourceManager::isMacroBodyExpansion(SourceLocation Loc) const {
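
The new out-parameter hands back the expansion start location from the same SLocEntry lookup, sparing callers a second query. Typical use, assuming a SourceManager SM and a location Loc:

    SourceLocation ExpansionStart;
    if (SM.isMacroArgExpansion(Loc, &ExpansionStart)) {
      // Loc spells a macro argument; ExpansionStart is where that argument
      // was expanded into the macro body.
    }
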
@@ -1394,7 +1402,7 @@ unsigned SourceManager::getPresumedLineNumber(SourceLocation Loc,
/// considered to be from a system header.
SrcMgr::CharacteristicKind
SourceManager::getFileCharacteristic(SourceLocation Loc) const {
- assert(!Loc.isInvalid() && "Can't get file characteristic of invalid loc!");
+ assert(Loc.isValid() && "Can't get file characteristic of invalid loc!");
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
bool Invalid = false;
const SLocEntry &SEntry = getSLocEntry(LocInfo.first, &Invalid);
@@ -1599,7 +1607,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
// location in the main file.
Optional<llvm::sys::fs::UniqueID> SourceFileUID;
Optional<StringRef> SourceFileName;
- if (!MainFileID.isInvalid()) {
+ if (MainFileID.isValid()) {
bool Invalid = false;
const SLocEntry &MainSLoc = getSLocEntry(MainFileID, &Invalid);
if (Invalid)
@@ -1709,7 +1717,7 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
unsigned Col) const {
// Lines are used as a one-based index into a zero-based array. This assert
// checks for possible buffer underruns.
- assert(Line != 0 && "Passed a zero-based line");
+ assert(Line && Col && "Line and column should start from 1!");
if (FID.isInvalid())
return SourceLocation();
@@ -1772,7 +1780,7 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
/// 110 -> SourceLocation()
void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
FileID FID) const {
- assert(!FID.isInvalid());
+ assert(FID.isValid());
assert(!CachePtr);
CachePtr = new MacroArgsMap();
@@ -2133,6 +2141,63 @@ void SourceManager::PrintStats() const {
<< NumBinaryProbes << " binary.\n";
}
+LLVM_DUMP_METHOD void SourceManager::dump() const {
+ llvm::raw_ostream &out = llvm::errs();
+
+ auto DumpSLocEntry = [&](int ID, const SrcMgr::SLocEntry &Entry,
+ llvm::Optional<unsigned> NextStart) {
+ out << "SLocEntry <FileID " << ID << "> " << (Entry.isFile() ? "file" : "expansion")
+ << " <SourceLocation " << Entry.getOffset() << ":";
+ if (NextStart)
+ out << *NextStart << ">\n";
+ else
+ out << "???\?>\n";
+ if (Entry.isFile()) {
+ auto &FI = Entry.getFile();
+ if (FI.NumCreatedFIDs)
+ out << " covers <FileID " << ID << ":" << int(ID + FI.NumCreatedFIDs)
+ << ">\n";
+ if (FI.getIncludeLoc().isValid())
+ out << " included from " << FI.getIncludeLoc().getOffset() << "\n";
+ if (auto *CC = FI.getContentCache()) {
+ out << " for " << (CC->OrigEntry ? CC->OrigEntry->getName() : "<none>")
+ << "\n";
+ if (CC->BufferOverridden)
+ out << " contents overridden\n";
+ if (CC->ContentsEntry != CC->OrigEntry) {
+ out << " contents from "
+ << (CC->ContentsEntry ? CC->ContentsEntry->getName() : "<none>")
+ << "\n";
+ }
+ }
+ } else {
+ auto &EI = Entry.getExpansion();
+ out << " spelling from " << EI.getSpellingLoc().getOffset() << "\n";
+ out << " macro " << (EI.isMacroArgExpansion() ? "arg" : "body")
+ << " range <" << EI.getExpansionLocStart().getOffset() << ":"
+ << EI.getExpansionLocEnd().getOffset() << ">\n";
+ }
+ };
+
+ // Dump local SLocEntries.
+  for (unsigned ID = 0, NumIDs = LocalSLocEntryTable.size(); ID != NumIDs;
+       ++ID) {
+ DumpSLocEntry(ID, LocalSLocEntryTable[ID],
+ ID == NumIDs - 1 ? NextLocalOffset
+ : LocalSLocEntryTable[ID + 1].getOffset());
+ }
+ // Dump loaded SLocEntries.
+ llvm::Optional<unsigned> NextStart;
+ for (unsigned Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
+ int ID = -(int)Index - 2;
+ if (SLocEntryLoaded[Index]) {
+ DumpSLocEntry(ID, LoadedSLocEntryTable[Index], NextStart);
+ NextStart = LoadedSLocEntryTable[Index].getOffset();
+ } else {
+ NextStart = None;
+ }
+ }
+}
+
ExternalSLocEntrySource::~ExternalSLocEntrySource() { }
/// Return the amount of memory used by memory buffers, breaking down
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index dbd2f9ae9954..1648a27d8b37 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -71,12 +71,13 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
FloatFormat = &llvm::APFloat::IEEEsingle;
DoubleFormat = &llvm::APFloat::IEEEdouble;
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
- DescriptionString = nullptr;
+ DataLayoutString = nullptr;
UserLabelPrefix = "_";
MCountName = "mcount";
RegParmMax = 0;
SSERegParmMax = 0;
HasAlignMac68kSupport = false;
+ HasBuiltinMSVaList = false;
// Default to no types using fpret.
RealTypeUsesObjCFPRet = 0;
@@ -286,9 +287,9 @@ void TargetInfo::adjust(const LangOptions &Opts) {
LongLongWidth = LongLongAlign = 128;
HalfWidth = HalfAlign = 16;
FloatWidth = FloatAlign = 32;
-
- // Embedded 32-bit targets (OpenCL EP) might have double C type
- // defined as float. Let's not override this as it might lead
+
+ // Embedded 32-bit targets (OpenCL EP) might have double C type
+ // defined as float. Let's not override this as it might lead
// to generating illegal code that uses 64bit doubles.
if (DoubleWidth != FloatWidth) {
DoubleWidth = DoubleAlign = 64;
@@ -311,6 +312,18 @@ void TargetInfo::adjust(const LangOptions &Opts) {
}
}
+bool TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeatureVec) const {
+ for (const auto &F : FeatureVec) {
+ StringRef Name = F;
+ // Apply the feature via the target.
+ bool Enabled = Name[0] == '+';
+ setFeatureEnabled(Features, Name.substr(1), Enabled);
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
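
The base-class initFeatureMap gives every target a default: walk the user's feature vector in order ('+' enables, anything else disables) and let setFeatureEnabled apply target-specific implications. Targets override it in the style PPCTargetInfo does later in this patch; a sketch of that pattern with illustrative names (MyTargetInfo, "neat-feature", and checkUserFeatures are assumptions):

    bool MyTargetInfo::initFeatureMap(
        llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
        StringRef CPU, const std::vector<std::string> &FeaturesVec) const {
      // Seed CPU-dependent defaults first.
      Features["neat-feature"] = (CPU == "fancy-cpu");
      // Vet explicit user features for conflicts (assumed helper).
      if (!checkUserFeatures(Diags, FeaturesVec))
        return false;
      // Then let the base class apply the user's vector on top.
      return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
    }
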
@@ -326,7 +339,7 @@ static StringRef removeGCCRegisterPrefix(StringRef Name) {
/// Sema.
bool TargetInfo::isValidClobber(StringRef Name) const {
return (isValidGCCRegisterName(Name) ||
- Name == "memory" || Name == "cc");
+ Name == "memory" || Name == "cc");
}
/// isValidGCCRegisterName - Returns whether the passed in string
@@ -336,56 +349,43 @@ bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
if (Name.empty())
return false;
- const char * const *Names;
- unsigned NumNames;
-
// Get rid of any register prefix.
Name = removeGCCRegisterPrefix(Name);
if (Name.empty())
- return false;
+ return false;
- getGCCRegNames(Names, NumNames);
+ ArrayRef<const char *> Names = getGCCRegNames();
// If we have a number it maps to an entry in the register name array.
if (isDigit(Name[0])) {
- int n;
+ unsigned n;
if (!Name.getAsInteger(0, n))
- return n >= 0 && (unsigned)n < NumNames;
+ return n < Names.size();
}
// Check register names.
- for (unsigned i = 0; i < NumNames; i++) {
- if (Name == Names[i])
- return true;
- }
+ if (std::find(Names.begin(), Names.end(), Name) != Names.end())
+ return true;
// Check any additional names that we have.
- const AddlRegName *AddlNames;
- unsigned NumAddlNames;
- getGCCAddlRegNames(AddlNames, NumAddlNames);
- for (unsigned i = 0; i < NumAddlNames; i++)
- for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
- if (!AddlNames[i].Names[j])
- break;
+ for (const AddlRegName &ARN : getGCCAddlRegNames())
+ for (const char *AN : ARN.Names) {
+ if (!AN)
+ break;
// Make sure the register that the additional name is for is within
// the bounds of the register names from above.
- if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
- return true;
- }
+ if (AN == Name && ARN.RegNum < Names.size())
+ return true;
+ }
// Now check aliases.
- const GCCRegAlias *Aliases;
- unsigned NumAliases;
-
- getGCCRegAliases(Aliases, NumAliases);
- for (unsigned i = 0; i < NumAliases; i++) {
- for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
- if (!Aliases[i].Aliases[j])
+ for (const GCCRegAlias &GRA : getGCCRegAliases())
+ for (const char *A : GRA.Aliases) {
+ if (!A)
break;
- if (Aliases[i].Aliases[j] == Name)
+ if (A == Name)
return true;
}
- }
return false;
}
@@ -397,48 +397,36 @@ TargetInfo::getNormalizedGCCRegisterName(StringRef Name) const {
// Get rid of any register prefix.
Name = removeGCCRegisterPrefix(Name);
- const char * const *Names;
- unsigned NumNames;
-
- getGCCRegNames(Names, NumNames);
+ ArrayRef<const char *> Names = getGCCRegNames();
// First, check if we have a number.
if (isDigit(Name[0])) {
- int n;
+ unsigned n;
if (!Name.getAsInteger(0, n)) {
- assert(n >= 0 && (unsigned)n < NumNames &&
- "Out of bounds register number!");
+ assert(n < Names.size() && "Out of bounds register number!");
return Names[n];
}
}
// Check any additional names that we have.
- const AddlRegName *AddlNames;
- unsigned NumAddlNames;
- getGCCAddlRegNames(AddlNames, NumAddlNames);
- for (unsigned i = 0; i < NumAddlNames; i++)
- for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
- if (!AddlNames[i].Names[j])
- break;
+ for (const AddlRegName &ARN : getGCCAddlRegNames())
+ for (const char *AN : ARN.Names) {
+ if (!AN)
+ break;
// Make sure the register that the additional name is for is within
// the bounds of the register names from above.
- if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
- return Name;
+ if (AN == Name && ARN.RegNum < Names.size())
+ return Name;
}
// Now check aliases.
- const GCCRegAlias *Aliases;
- unsigned NumAliases;
-
- getGCCRegAliases(Aliases, NumAliases);
- for (unsigned i = 0; i < NumAliases; i++) {
- for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
- if (!Aliases[i].Aliases[j])
+ for (const GCCRegAlias &RA : getGCCRegAliases())
+ for (const char *A : RA.Aliases) {
+ if (!A)
break;
- if (Aliases[i].Aliases[j] == Name)
- return Aliases[i].Register;
+ if (A == Name)
+ return RA.Register;
}
- }
return Name;
}
@@ -513,8 +501,7 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
}
bool TargetInfo::resolveSymbolicName(const char *&Name,
- ConstraintInfo *OutputConstraints,
- unsigned NumOutputs,
+ ArrayRef<ConstraintInfo> OutputConstraints,
unsigned &Index) const {
assert(*Name == '[' && "Symbolic name did not start with '['");
Name++;
@@ -529,16 +516,16 @@ bool TargetInfo::resolveSymbolicName(const char *&Name,
std::string SymbolicName(Start, Name - Start);
- for (Index = 0; Index != NumOutputs; ++Index)
+ for (Index = 0; Index != OutputConstraints.size(); ++Index)
if (SymbolicName == OutputConstraints[Index].getName())
return true;
return false;
}
-bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
- unsigned NumOutputs,
- ConstraintInfo &Info) const {
+bool TargetInfo::validateInputConstraint(
+ MutableArrayRef<ConstraintInfo> OutputConstraints,
+ ConstraintInfo &Info) const {
const char *Name = Info.ConstraintStr.c_str();
if (!*Name)
@@ -559,13 +546,13 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
return false;
// Check if matching constraint is out of bounds.
- if (i >= NumOutputs) return false;
+ if (i >= OutputConstraints.size()) return false;
// A number must refer to an output only operand.
if (OutputConstraints[i].isReadWrite())
return false;
- // If the constraint is already tied, it must be tied to the
+ // If the constraint is already tied, it must be tied to the
// same operand referenced to by the number.
if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
return false;
@@ -582,10 +569,10 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
break;
case '[': {
unsigned Index = 0;
- if (!resolveSymbolicName(Name, OutputConstraints, NumOutputs, Index))
+ if (!resolveSymbolicName(Name, OutputConstraints, Index))
return false;
- // If the constraint is already tied, it must be tied to the
+ // If the constraint is already tied, it must be tied to the
// same operand referenced to by the number.
if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
return false;
@@ -650,18 +637,3 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
return true;
}
-
-bool TargetCXXABI::tryParse(llvm::StringRef name) {
- const Kind unknown = static_cast<Kind>(-1);
- Kind kind = llvm::StringSwitch<Kind>(name)
- .Case("arm", GenericARM)
- .Case("ios", iOS)
- .Case("itanium", GenericItanium)
- .Case("microsoft", Microsoft)
- .Case("mips", GenericMIPS)
- .Default(unknown);
- if (kind == unknown) return false;
-
- set(kind);
- return true;
-}
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 9e44f7d9be63..893bd7c49815 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -1,4 +1,4 @@
-//===--- Targets.cpp - Implement -arch option and targets -----------------===//
+//===--- Targets.cpp - Implement target feature support -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,6 +19,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/Version.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -30,6 +31,7 @@
#include "llvm/Support/TargetParser.h"
#include <algorithm>
#include <memory>
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -82,8 +84,28 @@ public:
}
};
-} // end anonymous namespace
+// CloudABI Target
+template <typename Target>
+class CloudABITargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__CloudABI__");
+ Builder.defineMacro("__ELF__");
+
+ // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
+ Builder.defineMacro("__STDC_ISO_10646__", "201206L");
+ Builder.defineMacro("__STDC_UTF_16__");
+ Builder.defineMacro("__STDC_UTF_32__");
+ }
+
+public:
+ CloudABITargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->UserLabelPrefix = "";
+ }
+};
static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
const llvm::Triple &Triple,
@@ -97,19 +119,11 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
if (Opts.Sanitize.has(SanitizerKind::Address))
Builder.defineMacro("_FORTIFY_SOURCE", "0");
- if (!Opts.ObjCAutoRefCount) {
+ // Darwin defines __weak, __strong, and __unsafe_unretained even in C mode.
+ if (!Opts.ObjC1) {
// __weak is always defined, for use in blocks and with objc pointers.
Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
-
- // Darwin defines __strong even in C mode (just to nothing).
- if (Opts.getGC() != LangOptions::NonGC)
- Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
- else
- Builder.defineMacro("__strong", "");
-
- // __unsafe_unretained is defined to nothing in non-ARC mode. We even
- // allow this in C, since one might have block pointers in structs that
- // are used in pure C code and in Objective-C ARC.
+ Builder.defineMacro("__strong", "");
Builder.defineMacro("__unsafe_unretained", "");
}
@@ -149,8 +163,22 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Str[3] = '0' + (Rev / 10);
Str[4] = '0' + (Rev % 10);
Str[5] = '\0';
- Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
- Str);
+ if (Triple.isTvOS())
+ Builder.defineMacro("__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__", Str);
+ else
+ Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
+ Str);
+
+ } else if (Triple.isWatchOS()) {
+ assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[6];
+ Str[0] = '0' + Maj;
+ Str[1] = '0' + (Min / 10);
+ Str[2] = '0' + (Min % 10);
+ Str[3] = '0' + (Rev / 10);
+ Str[4] = '0' + (Rev % 10);
+ Str[5] = '\0';
+ Builder.defineMacro("__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__", Str);
} else if (Triple.isMacOSX()) {
// Note that the Driver allows versions which aren't representable in the
// define (because we only get a single digit for the minor and micro
@@ -184,29 +212,6 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
PlatformMinVersion = VersionTuple(Maj, Min, Rev);
}
-namespace {
-// CloudABI Target
-template <typename Target>
-class CloudABITargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- Builder.defineMacro("__CloudABI__");
- Builder.defineMacro("__ELF__");
-
- // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
- Builder.defineMacro("__STDC_ISO_10646__", "201206L");
- Builder.defineMacro("__STDC_UTF_16__");
- Builder.defineMacro("__STDC_UTF_32__");
- }
-
-public:
- CloudABITargetInfo(const llvm::Triple &Triple)
- : OSTargetInfo<Target>(Triple) {
- this->UserLabelPrefix = "";
- }
-};
-
template<typename Target>
class DarwinTargetInfo : public OSTargetInfo<Target> {
protected:
@@ -386,7 +391,7 @@ protected:
DefineStd(Builder, "linux", Opts);
Builder.defineMacro("__gnu_linux__");
Builder.defineMacro("__ELF__");
- if (Triple.getEnvironment() == llvm::Triple::Android) {
+ if (Triple.isAndroid()) {
Builder.defineMacro("__ANDROID__", "1");
unsigned Maj, Min, Rev;
Triple.getEnvironmentVersion(Maj, Min, Rev);
@@ -560,7 +565,7 @@ public:
this->IntMaxType = TargetInfo::SignedLongLong;
this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
- this->DescriptionString = "E-m:e-p:32:32-i64:64-n32:64";
+ this->DataLayoutString = "E-m:e-p:32:32-i64:64-n32:64";
}
};
@@ -645,6 +650,9 @@ protected:
Builder.defineMacro("_CPPUNWIND");
}
+ if (Opts.Bool)
+ Builder.defineMacro("__BOOL_DEFINED");
+
if (!Opts.CharIsSigned)
Builder.defineMacro("_CHAR_UNSIGNED");
@@ -719,18 +727,45 @@ public:
if (Triple.getArch() == llvm::Triple::arm) {
// Handled in ARM's setABI().
} else if (Triple.getArch() == llvm::Triple::x86) {
- this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32-S128";
+ this->DataLayoutString = "e-m:e-p:32:32-i64:64-n8:16:32-S128";
} else if (Triple.getArch() == llvm::Triple::x86_64) {
- this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32:64-S128";
+ this->DataLayoutString = "e-m:e-p:32:32-i64:64-n8:16:32:64-S128";
} else if (Triple.getArch() == llvm::Triple::mipsel) {
- // Handled on mips' setDescriptionString.
+ // Handled on mips' setDataLayoutString.
} else {
assert(Triple.getArch() == llvm::Triple::le32);
- this->DescriptionString = "e-p:32:32-i64:64";
+ this->DataLayoutString = "e-p:32:32-i64:64";
}
}
};
+// WebAssembly target
+template <typename Target>
+class WebAssemblyOSTargetInfo : public OSTargetInfo<Target> {
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const final {
+ // A common platform macro.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ // Follow g++ convention and predefine _GNU_SOURCE for C++.
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+
+ // As an optimization, group static init code together in a section.
+ const char *getStaticInitSectionSpecifier() const final {
+ return ".text.__startup";
+ }
+
+public:
+ explicit WebAssemblyOSTargetInfo(const llvm::Triple &Triple)
+ : OSTargetInfo<Target>(Triple) {
+ this->MCountName = "__mcount";
+ this->UserLabelPrefix = "";
+ this->TheCXXABI.set(TargetCXXABI::WebAssembly);
+ }
+};
+
//===----------------------------------------------------------------------===//
// Specific target implementations.
//===----------------------------------------------------------------------===//
@@ -849,10 +884,9 @@ public:
StringRef getABI() const override { return ABI; }
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin);
}
bool isCLZForZeroUndef() const override { return false; }
@@ -860,7 +894,10 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override;
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
@@ -868,10 +905,8 @@ public:
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
bool Enabled) const override;
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
@@ -1006,9 +1041,10 @@ public:
};
const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsPPC.def"
};
@@ -1016,65 +1052,27 @@ const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
/// configured set of features.
bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- for (unsigned i = 0, e = Features.size(); i !=e; ++i) {
- // Ignore disabled features.
- if (Features[i][0] == '-')
- continue;
-
- StringRef Feature = StringRef(Features[i]).substr(1);
-
- if (Feature == "vsx") {
+ for (const auto &Feature : Features) {
+ if (Feature == "+vsx") {
HasVSX = true;
- continue;
- }
-
- if (Feature == "bpermd") {
+ } else if (Feature == "+bpermd") {
HasBPERMD = true;
- continue;
- }
-
- if (Feature == "extdiv") {
+ } else if (Feature == "+extdiv") {
HasExtDiv = true;
- continue;
- }
-
- if (Feature == "power8-vector") {
+ } else if (Feature == "+power8-vector") {
HasP8Vector = true;
- continue;
- }
-
- if (Feature == "crypto") {
+ } else if (Feature == "+crypto") {
HasP8Crypto = true;
- continue;
- }
-
- if (Feature == "direct-move") {
+ } else if (Feature == "+direct-move") {
HasDirectMove = true;
- continue;
- }
-
- if (Feature == "qpx") {
+ } else if (Feature == "+qpx") {
HasQPX = true;
- continue;
- }
-
- if (Feature == "htm") {
+ } else if (Feature == "+htm") {
HasHTM = true;
- continue;
}
-
// TODO: Finish this list and add an assert that we've handled them
// all.
}
- if (!HasVSX && (HasP8Vector || HasDirectMove)) {
- if (HasP8Vector)
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector" <<
- "-mno-vsx";
- else if (HasDirectMove)
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move" <<
- "-mno-vsx";
- return false;
- }
return true;
}
@@ -1228,14 +1226,12 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__CRYPTO__");
if (HasHTM)
Builder.defineMacro("__HTM__");
- if (getTriple().getArch() == llvm::Triple::ppc64le ||
- (defs & ArchDefinePwr8) || (CPU == "pwr8")) {
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
- if (PointerWidth == 64)
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
- }
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ if (PointerWidth == 64)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
// FIXME: The following are not yet generated here by Clang, but are
// generated by GCC:
@@ -1258,7 +1254,36 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// __NO_FPRS__
}
-void PPCTargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+// Handle explicit options being passed to the compiler here: if we've
+// explicitly turned off vsx and turned on power8-vector or direct-move then
+// go ahead and error, since the user has asked for an incompatible set of
+// options.
+static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
+ const std::vector<std::string> &FeaturesVec) {
+
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "-vsx") !=
+ FeaturesVec.end()) {
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+power8-vector") !=
+ FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector"
+ << "-mno-vsx";
+ return false;
+ }
+
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+direct-move") !=
+ FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move"
+ << "-mno-vsx";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PPCTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
Features["altivec"] = llvm::StringSwitch<bool>(CPU)
.Case("7400", true)
.Case("g4", true)
@@ -1301,6 +1326,11 @@ void PPCTargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
.Case("pwr8", true)
.Case("pwr7", true)
.Default(false);
+
+ if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
+ return false;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
@@ -1317,37 +1347,29 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
-/* There is no clear way for the target to know which of the features in the
- final feature vector came from defaults and which are actually specified by
- the user. To that end, we use the fact that this function is not called on
- default features - only user specified ones. By the first time this
- function is called, the default features are populated.
- We then keep track of the features that the user specified so that we
- can ensure we do not override a user's request (only defaults).
- For example:
- -mcpu=pwr8 -mno-vsx (should disable vsx and everything that depends on it)
- -mcpu=pwr8 -mdirect-move -mno-vsx (should actually be diagnosed)
-
-NOTE: Do not call this from PPCTargetInfo::getDefaultFeatures
-*/
void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled) const {
- static llvm::StringMap<bool> ExplicitFeatures;
- ExplicitFeatures[Name] = Enabled;
-
- // At this point, -mno-vsx turns off the dependent features but we respect
- // the user's requests.
- if (!Enabled && Name == "vsx") {
- Features["direct-move"] = ExplicitFeatures["direct-move"];
- Features["power8-vector"] = ExplicitFeatures["power8-vector"];
- }
- if ((Enabled && Name == "power8-vector") ||
- (Enabled && Name == "direct-move")) {
- if (ExplicitFeatures.find("vsx") == ExplicitFeatures.end()) {
- Features["vsx"] = true;
+ // If we're enabling direct-move or power8-vector go ahead and enable vsx
+ // as well. Do the inverse if we're disabling vsx. Incompatible explicit
+ // user options are diagnosed in ppcUserFeaturesCheck.
+ if (Enabled) {
+ if (Name == "direct-move" || Name == "power8-vector") {
+ Features[Name] = Features["vsx"] = true;
+ } else {
+ Features[Name] = true;
+ }
+ } else {
+ if (Name == "vsx") {
+ Features[Name] = Features["direct-move"] = Features["power8-vector"] =
+ false;
+ } else {
+ Features[Name] = false;
}
}
- Features[Name] = Enabled;
}
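// Worked example (sketch): starting from Features["vsx"] = true,
//   setFeatureEnabled(Features, "vsx", false);
// leaves "vsx", "direct-move" and "power8-vector" all false, while a later
//   setFeatureEnabled(Features, "direct-move", true);
// switches "vsx" back on alongside "direct-move".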
const char * const PPCTargetInfo::GCCRegNames[] = {
@@ -1371,10 +1393,8 @@ const char * const PPCTargetInfo::GCCRegNames[] = {
"sfp"
};
-void PPCTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char*> PPCTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
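// The ArrayRef return replaces the old pointer/count out-parameters, so
// callers can use range-for directly (TI and KnownRegs are hypothetical):
//   for (const char *Name : TI.getGCCRegNames())
//     KnownRegs.insert(Name);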
const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
@@ -1447,16 +1467,14 @@ const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
{ { "cc" }, "cr0" },
};
-void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
}
class PPC32TargetInfo : public PPCTargetInfo {
public:
PPC32TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) {
- DescriptionString = "E-m:e-p:32:32-i64:64-n32";
+ DataLayoutString = "E-m:e-p:32:32-i64:64-n32";
switch (getTriple().getOS()) {
case llvm::Triple::Linux:
@@ -1495,10 +1513,10 @@ public:
Int64Type = SignedLong;
if ((Triple.getArch() == llvm::Triple::ppc64le)) {
- DescriptionString = "e-m:e-i64:64-n32:64";
+ DataLayoutString = "e-m:e-i64:64-n32:64";
ABI = "elfv2";
} else {
- DescriptionString = "E-m:e-i64:64-n32:64";
+ DataLayoutString = "E-m:e-i64:64-n32:64";
ABI = "elfv1";
}
@@ -1541,7 +1559,7 @@ public:
PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
LongLongAlign = 32;
SuitableAlign = 128;
- DescriptionString = "E-m:o-p:32:32-f64:32:64-n32";
+ DataLayoutString = "E-m:o-p:32:32-f64:32:64-n32";
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
@@ -1555,23 +1573,24 @@ public:
: DarwinTargetInfo<PPC64TargetInfo>(Triple) {
HasAlignMac68kSupport = true;
SuitableAlign = 128;
- DescriptionString = "E-m:o-i64:64-n32:64";
+ DataLayoutString = "E-m:o-i64:64-n32:64";
}
};
- static const unsigned NVPTXAddrSpaceMap[] = {
- 1, // opencl_global
- 3, // opencl_local
- 4, // opencl_constant
+static const unsigned NVPTXAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 4, // opencl_constant
// FIXME: generic has to be added to the target
- 0, // opencl_generic
- 1, // cuda_device
- 4, // cuda_constant
- 3, // cuda_shared
- };
- class NVPTXTargetInfo : public TargetInfo {
- static const char * const GCCRegNames[];
- static const Builtin::Info BuiltinInfo[];
+ 0, // opencl_generic
+ 1, // cuda_device
+ 4, // cuda_constant
+ 3, // cuda_shared
+};
+
+class NVPTXTargetInfo : public TargetInfo {
+ static const char *const GCCRegNames[];
+ static const Builtin::Info BuiltinInfo[];
// The GPU profiles supported by the NVPTX backend
enum GPUKind {
@@ -1583,139 +1602,133 @@ public:
GK_SM37,
} GPU;
- public:
- NVPTXTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
- BigEndian = false;
- TLSSupported = false;
- LongWidth = LongAlign = 64;
- AddrSpaceMap = &NVPTXAddrSpaceMap;
- UseAddrSpaceMapMangling = true;
- // Define available target features
- // These must be defined in sorted order!
- NoAsmVariants = true;
- // Set the default GPU to sm20
- GPU = GK_SM20;
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- Builder.defineMacro("__PTX__");
- Builder.defineMacro("__NVPTX__");
- if (Opts.CUDAIsDevice) {
- // Set __CUDA_ARCH__ for the GPU specified.
- std::string CUDAArchCode;
- switch (GPU) {
- case GK_SM20:
- CUDAArchCode = "200";
- break;
- case GK_SM21:
- CUDAArchCode = "210";
- break;
- case GK_SM30:
- CUDAArchCode = "300";
- break;
- case GK_SM35:
- CUDAArchCode = "350";
- break;
- case GK_SM37:
- CUDAArchCode = "370";
- break;
- default:
- llvm_unreachable("Unhandled target CPU");
- }
- Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
- }
- }
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::NVPTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
- }
- bool hasFeature(StringRef Feature) const override {
- return Feature == "ptx" || Feature == "nvptx";
- }
-
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- // No aliases.
- Aliases = nullptr;
- NumAliases = 0;
- }
- bool
- validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const override {
- switch (*Name) {
- default: return false;
- case 'c':
- case 'h':
- case 'r':
- case 'l':
- case 'f':
- case 'd':
- Info.setAllowsRegister();
- return true;
+public:
+ NVPTXTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ TLSSupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &NVPTXAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ // Define available target features
+ // These must be defined in sorted order!
+ NoAsmVariants = true;
+ // Set the default GPU to sm_20.
+ GPU = GK_SM20;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__PTX__");
+ Builder.defineMacro("__NVPTX__");
+ if (Opts.CUDAIsDevice) {
+ // Set __CUDA_ARCH__ for the GPU specified.
+ std::string CUDAArchCode;
+ switch (GPU) {
+ case GK_SM20:
+ CUDAArchCode = "200";
+ break;
+ case GK_SM21:
+ CUDAArchCode = "210";
+ break;
+ case GK_SM30:
+ CUDAArchCode = "300";
+ break;
+ case GK_SM35:
+ CUDAArchCode = "350";
+ break;
+ case GK_SM37:
+ CUDAArchCode = "370";
+ break;
+ default:
+ llvm_unreachable("Unhandled target CPU");
}
+ Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
}
- const char *getClobbers() const override {
- // FIXME: Is this really right?
- return "";
- }
- BuiltinVaListKind getBuiltinVaListKind() const override {
- // FIXME: implement
- return TargetInfo::CharPtrBuiltinVaList;
- }
- bool setCPU(const std::string &Name) override {
- GPU = llvm::StringSwitch<GPUKind>(Name)
- .Case("sm_20", GK_SM20)
- .Case("sm_21", GK_SM21)
- .Case("sm_30", GK_SM30)
- .Case("sm_35", GK_SM35)
- .Case("sm_37", GK_SM37)
- .Default(GK_NONE);
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::NVPTX::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "ptx" || Feature == "nvptx";
+ }
- return GPU != GK_NONE;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ // No aliases.
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+ case 'c':
+ case 'h':
+ case 'r':
+ case 'l':
+ case 'f':
+ case 'd':
+ Info.setAllowsRegister();
+ return true;
}
- };
+ }
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ // FIXME: implement
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+ bool setCPU(const std::string &Name) override {
+ GPU = llvm::StringSwitch<GPUKind>(Name)
+ .Case("sm_20", GK_SM20)
+ .Case("sm_21", GK_SM21)
+ .Case("sm_30", GK_SM30)
+ .Case("sm_35", GK_SM35)
+ .Case("sm_37", GK_SM37)
+ .Default(GK_NONE);
- const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+ return GPU != GK_NONE;
+ }
+};
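+// Putting setCPU and getTargetDefines together (a sketch; construction of
+// the target is simplified): selecting sm_35 in device mode yields
+// __CUDA_ARCH__ == 350:
+//   NVPTXTargetInfo TI(Triple);
+//   TI.setCPU("sm_35");            // GPU = GK_SM35
+//   // getTargetDefines then emits: #define __CUDA_ARCH__ 350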
+
+const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsNVPTX.def"
- };
+};
- const char * const NVPTXTargetInfo::GCCRegNames[] = {
- "r0"
- };
+const char *const NVPTXTargetInfo::GCCRegNames[] = {"r0"};
- void NVPTXTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
- }
+ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
- class NVPTX32TargetInfo : public NVPTXTargetInfo {
- public:
- NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
- PointerWidth = PointerAlign = 32;
- SizeType = TargetInfo::UnsignedInt;
- PtrDiffType = TargetInfo::SignedInt;
- IntPtrType = TargetInfo::SignedInt;
- DescriptionString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64";
- }
- };
+class NVPTX32TargetInfo : public NVPTXTargetInfo {
+public:
+ NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
+ LongWidth = LongAlign = 32;
+ PointerWidth = PointerAlign = 32;
+ SizeType = TargetInfo::UnsignedInt;
+ PtrDiffType = TargetInfo::SignedInt;
+ IntPtrType = TargetInfo::SignedInt;
+ DataLayoutString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64";
+ }
+};
- class NVPTX64TargetInfo : public NVPTXTargetInfo {
- public:
- NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
- PointerWidth = PointerAlign = 64;
- SizeType = TargetInfo::UnsignedLong;
- PtrDiffType = TargetInfo::SignedLong;
- IntPtrType = TargetInfo::SignedLong;
- DescriptionString = "e-i64:64-v16:16-v32:32-n16:32:64";
- }
- };
+class NVPTX64TargetInfo : public NVPTXTargetInfo {
+public:
+ NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = TargetInfo::SignedLong;
+ IntPtrType = TargetInfo::SignedLong;
+ DataLayoutString = "e-i64:64-v16:16-v32:32-n16:32:64";
+ }
+};
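+// Decoding the NVPTX64 layout string with standard LLVM DataLayout syntax
+// (a sketch): "e" = little-endian, "i64:64" = 64-bit integers aligned to 64
+// bits, "n16:32:64" = native integer widths. With no "p:" entry, pointers
+// take the 64-bit default, matching PointerWidth above:
+//   llvm::DataLayout DL("e-i64:64-v16:16-v32:32-n16:32:64");
+//   DL.getPointerSizeInBits(); // 64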
static const unsigned AMDGPUAddrSpaceMap[] = {
1, // opencl_global
@@ -1730,15 +1743,15 @@ static const unsigned AMDGPUAddrSpaceMap[] = {
// If you edit the description strings, make sure you update
// getPointerWidthV().
-static const char *DescriptionStringR600 =
+static const char *const DataLayoutStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
-static const char *DescriptionStringR600DoubleOps =
+static const char *const DataLayoutStringR600DoubleOps =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
-static const char *DescriptionStringSI =
+static const char *const DataLayoutStringSI =
"e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
@@ -1772,13 +1785,13 @@ public:
: TargetInfo(Triple) {
if (Triple.getArch() == llvm::Triple::amdgcn) {
- DescriptionString = DescriptionStringSI;
+ DataLayoutString = DataLayoutStringSI;
GPU = GK_SOUTHERN_ISLANDS;
hasFP64 = true;
hasFMAF = true;
hasLDEXPF = true;
} else {
- DescriptionString = DescriptionStringR600;
+ DataLayoutString = DataLayoutStringR600;
GPU = GK_R600;
hasFP64 = false;
hasFMAF = false;
@@ -1806,24 +1819,27 @@ public:
return "";
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- Aliases = nullptr;
- NumAliases = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
}
bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
- return true;
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default: break;
+ case 'v': // vgpr
+ case 's': // sgpr
+ Info.setAllowsRegister();
+ return true;
+ }
+ return false;
}
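// With 'v' and 's' accepted above, AMDGPU inline assembly can bind vector
// and scalar registers (illustrative; the mnemonic is just an example):
//   int Out, In;
//   __asm__("v_mov_b32 %0, %1" : "=v"(Out) : "v"(In));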
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
void getTargetDefines(const LangOptions &Opts,
@@ -1833,8 +1849,16 @@ public:
Builder.defineMacro("__HAS_FMAF__");
if (hasLDEXPF)
Builder.defineMacro("__HAS_LDEXPF__");
- if (hasFP64 && Opts.OpenCL) {
+ if (hasFP64 && Opts.OpenCL)
Builder.defineMacro("cl_khr_fp64");
+ if (Opts.OpenCL) {
+ if (GPU >= GK_NORTHERN_ISLANDS) {
+ Builder.defineMacro("cl_khr_byte_addressable_store");
+ Builder.defineMacro("cl_khr_global_int32_base_atomics");
+ Builder.defineMacro("cl_khr_global_int32_extended_atomics");
+ Builder.defineMacro("cl_khr_local_int32_base_atomics");
+ Builder.defineMacro("cl_khr_local_int32_extended_atomics");
+ }
}
}
@@ -1895,7 +1919,7 @@ public:
case GK_R700:
case GK_EVERGREEN:
case GK_NORTHERN_ISLANDS:
- DescriptionString = DescriptionStringR600;
+ DataLayoutString = DataLayoutStringR600;
hasFP64 = false;
hasFMAF = false;
hasLDEXPF = false;
@@ -1904,7 +1928,7 @@ public:
case GK_R700_DOUBLE_OPS:
case GK_EVERGREEN_DOUBLE_OPS:
case GK_CAYMAN:
- DescriptionString = DescriptionStringR600DoubleOps;
+ DataLayoutString = DataLayoutStringR600DoubleOps;
hasFP64 = true;
hasFMAF = true;
hasLDEXPF = false;
@@ -1912,7 +1936,7 @@ public:
case GK_SOUTHERN_ISLANDS:
case GK_SEA_ISLANDS:
case GK_VOLCANIC_ISLANDS:
- DescriptionString = DescriptionStringSI;
+ DataLayoutString = DataLayoutStringSI;
hasFP64 = true;
hasFMAF = true;
hasLDEXPF = true;
@@ -1925,7 +1949,7 @@ public:
const Builtin::Info AMDGPUTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsAMDGPU.def"
};
const char * const AMDGPUTargetInfo::GCCRegNames[] = {
@@ -1981,17 +2005,18 @@ const char * const AMDGPUTargetInfo::GCCRegNames[] = {
"vcc_lo", "vcc_hi", "flat_scr_lo", "flat_scr_hi"
};
-void AMDGPUTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
// Namespace for x86 abstract base class
const Builtin::Info BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
#include "clang/Basic/BuiltinsX86.def"
};
@@ -2016,6 +2041,14 @@ const TargetInfo::AddlRegName AddlRegNames[] = {
{ { "edi", "rdi" }, 5 },
{ { "esp", "rsp" }, 7 },
{ { "ebp", "rbp" }, 6 },
+ { { "r8d", "r8w", "r8b" }, 38 },
+ { { "r9d", "r9w", "r9b" }, 39 },
+ { { "r10d", "r10w", "r10b" }, 40 },
+ { { "r11d", "r11w", "r11b" }, 41 },
+ { { "r12d", "r12w", "r12b" }, 42 },
+ { { "r13d", "r13w", "r13b" }, 43 },
+ { { "r14d", "r14w", "r14b" }, 44 },
+ { { "r15d", "r15w", "r15b" }, 45 },
};
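// Each new entry maps the 32/16/8-bit spellings of a 64-bit register onto
// the same backend register number, so a clobber list such as the following
// (illustrative) validates just like naming "r8" itself:
//   __asm__ volatile("xorl %%r8d, %%r8d" ::: "r8d");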
// X86 target abstract base class; x86-32 and x86-64 are very close, so
@@ -2023,36 +2056,45 @@ const TargetInfo::AddlRegName AddlRegNames[] = {
class X86TargetInfo : public TargetInfo {
enum X86SSEEnum {
NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
- } SSELevel;
+ } SSELevel = NoSSE;
enum MMX3DNowEnum {
NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon
- } MMX3DNowLevel;
+ } MMX3DNowLevel = NoMMX3DNow;
enum XOPEnum {
NoXOP,
SSE4A,
FMA4,
XOP
- } XOPLevel;
-
- bool HasAES;
- bool HasPCLMUL;
- bool HasLZCNT;
- bool HasRDRND;
- bool HasFSGSBASE;
- bool HasBMI;
- bool HasBMI2;
- bool HasPOPCNT;
- bool HasRTM;
- bool HasPRFCHW;
- bool HasRDSEED;
- bool HasADX;
- bool HasTBM;
- bool HasFMA;
- bool HasF16C;
- bool HasAVX512CD, HasAVX512ER, HasAVX512PF, HasAVX512DQ, HasAVX512BW,
- HasAVX512VL;
- bool HasSHA;
- bool HasCX16;
+ } XOPLevel = NoXOP;
+
+ bool HasAES = false;
+ bool HasPCLMUL = false;
+ bool HasLZCNT = false;
+ bool HasRDRND = false;
+ bool HasFSGSBASE = false;
+ bool HasBMI = false;
+ bool HasBMI2 = false;
+ bool HasPOPCNT = false;
+ bool HasRTM = false;
+ bool HasPRFCHW = false;
+ bool HasRDSEED = false;
+ bool HasADX = false;
+ bool HasTBM = false;
+ bool HasFMA = false;
+ bool HasF16C = false;
+ bool HasAVX512CD = false;
+ bool HasAVX512ER = false;
+ bool HasAVX512PF = false;
+ bool HasAVX512DQ = false;
+ bool HasAVX512BW = false;
+ bool HasAVX512VL = false;
+ bool HasSHA = false;
+ bool HasCX16 = false;
+ bool HasFXSR = false;
+ bool HasXSAVE = false;
+ bool HasXSAVEOPT = false;
+ bool HasXSAVEC = false;
+ bool HasXSAVES = false;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@@ -2220,24 +2262,85 @@ class X86TargetInfo : public TargetInfo {
//@{
CK_Geode
//@}
- } CPU;
+ } CPU = CK_Generic;
+
+ CPUKind getCPUKind(StringRef CPU) const {
+ return llvm::StringSwitch<CPUKind>(CPU)
+ .Case("i386", CK_i386)
+ .Case("i486", CK_i486)
+ .Case("winchip-c6", CK_WinChipC6)
+ .Case("winchip2", CK_WinChip2)
+ .Case("c3", CK_C3)
+ .Case("i586", CK_i586)
+ .Case("pentium", CK_Pentium)
+ .Case("pentium-mmx", CK_PentiumMMX)
+ .Case("i686", CK_i686)
+ .Case("pentiumpro", CK_PentiumPro)
+ .Case("pentium2", CK_Pentium2)
+ .Case("pentium3", CK_Pentium3)
+ .Case("pentium3m", CK_Pentium3M)
+ .Case("pentium-m", CK_PentiumM)
+ .Case("c3-2", CK_C3_2)
+ .Case("yonah", CK_Yonah)
+ .Case("pentium4", CK_Pentium4)
+ .Case("pentium4m", CK_Pentium4M)
+ .Case("prescott", CK_Prescott)
+ .Case("nocona", CK_Nocona)
+ .Case("core2", CK_Core2)
+ .Case("penryn", CK_Penryn)
+ .Case("bonnell", CK_Bonnell)
+ .Case("atom", CK_Bonnell) // Legacy name.
+ .Case("silvermont", CK_Silvermont)
+ .Case("slm", CK_Silvermont) // Legacy name.
+ .Case("nehalem", CK_Nehalem)
+ .Case("corei7", CK_Nehalem) // Legacy name.
+ .Case("westmere", CK_Westmere)
+ .Case("sandybridge", CK_SandyBridge)
+ .Case("corei7-avx", CK_SandyBridge) // Legacy name.
+ .Case("ivybridge", CK_IvyBridge)
+ .Case("core-avx-i", CK_IvyBridge) // Legacy name.
+ .Case("haswell", CK_Haswell)
+ .Case("core-avx2", CK_Haswell) // Legacy name.
+ .Case("broadwell", CK_Broadwell)
+ .Case("skylake", CK_Skylake)
+ .Case("skx", CK_Skylake) // Legacy name.
+ .Case("knl", CK_KNL)
+ .Case("k6", CK_K6)
+ .Case("k6-2", CK_K6_2)
+ .Case("k6-3", CK_K6_3)
+ .Case("athlon", CK_Athlon)
+ .Case("athlon-tbird", CK_AthlonThunderbird)
+ .Case("athlon-4", CK_Athlon4)
+ .Case("athlon-xp", CK_AthlonXP)
+ .Case("athlon-mp", CK_AthlonMP)
+ .Case("athlon64", CK_Athlon64)
+ .Case("athlon64-sse3", CK_Athlon64SSE3)
+ .Case("athlon-fx", CK_AthlonFX)
+ .Case("k8", CK_K8)
+ .Case("k8-sse3", CK_K8SSE3)
+ .Case("opteron", CK_Opteron)
+ .Case("opteron-sse3", CK_OpteronSSE3)
+ .Case("barcelona", CK_AMDFAM10)
+ .Case("amdfam10", CK_AMDFAM10)
+ .Case("btver1", CK_BTVER1)
+ .Case("btver2", CK_BTVER2)
+ .Case("bdver1", CK_BDVER1)
+ .Case("bdver2", CK_BDVER2)
+ .Case("bdver3", CK_BDVER3)
+ .Case("bdver4", CK_BDVER4)
+ .Case("x86-64", CK_x86_64)
+ .Case("geode", CK_Geode)
+ .Default(CK_Generic);
+ }
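+ // Centralizing the mapping lets setCPU and initFeatureMap share one table;
+ // legacy spellings alias the canonical kinds, e.g. (illustrative):
+ //   getCPUKind("corei7") == CK_Nehalem
+ //   getCPUKind("atom") == CK_Bonnell
+ //   getCPUKind("unknown-cpu") == CK_Generic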
enum FPMathKind {
FP_Default,
FP_SSE,
FP_387
- } FPMath;
+ } FPMath = FP_Default;
public:
- X86TargetInfo(const llvm::Triple &Triple)
- : TargetInfo(Triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
- XOPLevel(NoXOP), HasAES(false), HasPCLMUL(false), HasLZCNT(false),
- HasRDRND(false), HasFSGSBASE(false), HasBMI(false), HasBMI2(false),
- HasPOPCNT(false), HasRTM(false), HasPRFCHW(false), HasRDSEED(false),
- HasADX(false), HasTBM(false), HasFMA(false), HasF16C(false),
- HasAVX512CD(false), HasAVX512ER(false), HasAVX512PF(false),
- HasAVX512DQ(false), HasAVX512BW(false), HasAVX512VL(false),
- HasSHA(false), HasCX16(false), CPU(CK_Generic), FPMath(FP_Default) {
+ X86TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
BigEndian = false;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
@@ -2245,30 +2348,37 @@ public:
// X87 evaluates with 80 bits "long double" precision.
return SSELevel == NoSSE ? 2 : 0;
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin);
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return llvm::makeArrayRef(GCCRegNames);
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- Aliases = nullptr;
- NumAliases = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
}
- void getGCCAddlRegNames(const AddlRegName *&Names,
- unsigned &NumNames) const override {
- Names = AddlRegNames;
- NumNames = llvm::array_lengthof(AddlRegNames);
+ ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override {
+ return llvm::makeArrayRef(AddlRegNames);
}
bool validateCpuSupports(StringRef Name) const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
+ bool validateGlobalRegisterVariable(StringRef RegName,
+ unsigned RegSize,
+ bool &HasSizeMismatch) const override {
+ // esp and ebp are the only 32-bit registers the x86 backend can currently
+ // handle.
+ if (RegName.equals("esp") || RegName.equals("ebp")) {
+ // Check that the register size is 32-bit.
+ HasSizeMismatch = RegSize != 32;
+ return true;
+ }
+
+ return false;
+ }
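+ // This backs global register variable declarations such as (sketch):
+ //   register unsigned StackPtr __asm__("esp");
+ // Other register names are rejected, and a 64-bit declaration against
+ // "esp" is reported through HasSizeMismatch.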
+
bool validateOutputSize(StringRef Constraint, unsigned Size) const override;
bool validateInputSize(StringRef Constraint, unsigned Size) const override;
@@ -2292,90 +2402,28 @@ public:
setFeatureEnabledImpl(Features, Name, Enabled);
}
// This exists purely to cut down on the number of virtual calls in
- // getDefaultFeatures which calls this repeatedly.
+ // initFeatureMap which calls this repeatedly.
static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled);
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override;
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
bool hasFeature(StringRef Feature) const override;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
StringRef getABI() const override {
if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX512F)
return "avx512";
- else if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
+ if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
return "avx";
- else if (getTriple().getArch() == llvm::Triple::x86 &&
+ if (getTriple().getArch() == llvm::Triple::x86 &&
MMX3DNowLevel == NoMMX3DNow)
return "no-mmx";
return "";
}
bool setCPU(const std::string &Name) override {
- CPU = llvm::StringSwitch<CPUKind>(Name)
- .Case("i386", CK_i386)
- .Case("i486", CK_i486)
- .Case("winchip-c6", CK_WinChipC6)
- .Case("winchip2", CK_WinChip2)
- .Case("c3", CK_C3)
- .Case("i586", CK_i586)
- .Case("pentium", CK_Pentium)
- .Case("pentium-mmx", CK_PentiumMMX)
- .Case("i686", CK_i686)
- .Case("pentiumpro", CK_PentiumPro)
- .Case("pentium2", CK_Pentium2)
- .Case("pentium3", CK_Pentium3)
- .Case("pentium3m", CK_Pentium3M)
- .Case("pentium-m", CK_PentiumM)
- .Case("c3-2", CK_C3_2)
- .Case("yonah", CK_Yonah)
- .Case("pentium4", CK_Pentium4)
- .Case("pentium4m", CK_Pentium4M)
- .Case("prescott", CK_Prescott)
- .Case("nocona", CK_Nocona)
- .Case("core2", CK_Core2)
- .Case("penryn", CK_Penryn)
- .Case("bonnell", CK_Bonnell)
- .Case("atom", CK_Bonnell) // Legacy name.
- .Case("silvermont", CK_Silvermont)
- .Case("slm", CK_Silvermont) // Legacy name.
- .Case("nehalem", CK_Nehalem)
- .Case("corei7", CK_Nehalem) // Legacy name.
- .Case("westmere", CK_Westmere)
- .Case("sandybridge", CK_SandyBridge)
- .Case("corei7-avx", CK_SandyBridge) // Legacy name.
- .Case("ivybridge", CK_IvyBridge)
- .Case("core-avx-i", CK_IvyBridge) // Legacy name.
- .Case("haswell", CK_Haswell)
- .Case("core-avx2", CK_Haswell) // Legacy name.
- .Case("broadwell", CK_Broadwell)
- .Case("skylake", CK_Skylake)
- .Case("skx", CK_Skylake) // Legacy name.
- .Case("knl", CK_KNL)
- .Case("k6", CK_K6)
- .Case("k6-2", CK_K6_2)
- .Case("k6-3", CK_K6_3)
- .Case("athlon", CK_Athlon)
- .Case("athlon-tbird", CK_AthlonThunderbird)
- .Case("athlon-4", CK_Athlon4)
- .Case("athlon-xp", CK_AthlonXP)
- .Case("athlon-mp", CK_AthlonMP)
- .Case("athlon64", CK_Athlon64)
- .Case("athlon64-sse3", CK_Athlon64SSE3)
- .Case("athlon-fx", CK_AthlonFX)
- .Case("k8", CK_K8)
- .Case("k8-sse3", CK_K8SSE3)
- .Case("opteron", CK_Opteron)
- .Case("opteron-sse3", CK_OpteronSSE3)
- .Case("barcelona", CK_AMDFAM10)
- .Case("amdfam10", CK_AMDFAM10)
- .Case("btver1", CK_BTVER1)
- .Case("btver2", CK_BTVER2)
- .Case("bdver1", CK_BDVER1)
- .Case("bdver2", CK_BDVER2)
- .Case("bdver3", CK_BDVER3)
- .Case("bdver4", CK_BDVER4)
- .Case("x86-64", CK_x86_64)
- .Case("geode", CK_Geode)
- .Default(CK_Generic);
+ CPU = getCPUKind(Name);
// Perform any per-CPU checks necessary to determine if this CPU is
// acceptable.
@@ -2486,14 +2534,15 @@ bool X86TargetInfo::setFPMath(StringRef Name) {
return false;
}
-void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+bool X86TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
// FIXME: This *really* should not be here.
-
// X86_64 always has SSE2.
if (getTriple().getArch() == llvm::Triple::x86_64)
setFeatureEnabledImpl(Features, "sse2", true);
- switch (CPU) {
+ switch (getCPUKind(CPU)) {
case CK_Generic:
case CK_i386:
case CK_i486:
@@ -2512,26 +2561,31 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_Pentium3M:
case CK_C3_2:
setFeatureEnabledImpl(Features, "sse", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
break;
case CK_PentiumM:
case CK_Pentium4:
case CK_Pentium4M:
case CK_x86_64:
setFeatureEnabledImpl(Features, "sse2", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
break;
case CK_Yonah:
case CK_Prescott:
case CK_Nocona:
setFeatureEnabledImpl(Features, "sse3", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_Core2:
case CK_Bonnell:
setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_Penryn:
setFeatureEnabledImpl(Features, "sse4.1", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_Skylake:
@@ -2540,6 +2594,8 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "avx512dq", true);
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
+ setFeatureEnabledImpl(Features, "xsavec", true);
+ setFeatureEnabledImpl(Features, "xsaves", true);
// FALLTHROUGH
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
@@ -2560,6 +2616,8 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
// FALLTHROUGH
case CK_SandyBridge:
setFeatureEnabledImpl(Features, "avx", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_Westmere:
case CK_Silvermont:
@@ -2568,6 +2626,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
// FALLTHROUGH
case CK_Nehalem:
setFeatureEnabledImpl(Features, "sse4.2", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_KNL:
@@ -2575,6 +2634,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "avx512cd", true);
setFeatureEnabledImpl(Features, "avx512er", true);
setFeatureEnabledImpl(Features, "avx512pf", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "rdseed", true);
setFeatureEnabledImpl(Features, "adx", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
@@ -2588,6 +2648,8 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
break;
case CK_K6_2:
case CK_K6_3:
@@ -2605,6 +2667,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_AthlonMP:
setFeatureEnabledImpl(Features, "sse", true);
setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
break;
case CK_K8:
case CK_Opteron:
@@ -2612,6 +2675,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_AthlonFX:
setFeatureEnabledImpl(Features, "sse2", true);
setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
break;
case CK_AMDFAM10:
setFeatureEnabledImpl(Features, "sse4a", true);
@@ -2623,6 +2687,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
case CK_Athlon64SSE3:
setFeatureEnabledImpl(Features, "sse3", true);
setFeatureEnabledImpl(Features, "3dnowa", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
break;
case CK_BTVER2:
setFeatureEnabledImpl(Features, "avx", true);
@@ -2630,6 +2695,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "f16c", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_BTVER1:
setFeatureEnabledImpl(Features, "ssse3", true);
@@ -2638,6 +2704,8 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "popcnt", true);
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
break;
case CK_BDVER4:
setFeatureEnabledImpl(Features, "avx2", true);
@@ -2645,6 +2713,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
// FALLTHROUGH
case CK_BDVER3:
setFeatureEnabledImpl(Features, "fsgsbase", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
// FALLTHROUGH
case CK_BDVER2:
setFeatureEnabledImpl(Features, "bmi", true);
@@ -2660,8 +2729,39 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
break;
}
+ if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
+ return false;
+
+ // Can't do this earlier because we need to be able to explicitly enable
+ // or disable these features and the things that they depend upon.
+
+ // Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
+ auto I = Features.find("sse4.2");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-popcnt") ==
+ FeaturesVec.end())
+ Features["popcnt"] = true;
+
+ // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
+ I = Features.find("3dnow");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-prfchw") ==
+ FeaturesVec.end())
+ Features["prfchw"] = true;
+
+ // If SSE is enabled and mmx is not explicitly disabled, enable MMX.
+ I = Features.find("sse");
+ if (I != Features.end() && I->getValue() &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "-mmx") ==
+ FeaturesVec.end())
+ Features["mmx"] = true;
+
+ return true;
}
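// Worked example (hedged; the per-CPU tables above may evolve): "nehalem"
// enables sse4.2 by default, so popcnt is switched on unless the user passed
// an explicit "-popcnt" (TI, Features and Diags are hypothetical handles):
//   TI.initFeatureMap(Features, Diags, "nehalem", {});          // popcnt on
//   TI.initFeatureMap(Features, Diags, "nehalem", {"-popcnt"}); // popcnt off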
void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
@@ -2674,6 +2774,7 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
Features["avx2"] = true;
case AVX:
Features["avx"] = true;
+ Features["xsave"] = true;
case SSE42:
Features["sse4.2"] = true;
case SSE41:
@@ -2709,7 +2810,8 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
case SSE42:
Features["sse4.2"] = false;
case AVX:
- Features["fma"] = Features["avx"] = Features["f16c"] = false;
+ Features["fma"] = Features["avx"] = Features["f16c"] = Features["xsave"] =
+ Features["xsaveopt"] = false;
setXOPLevel(Features, FMA4, false);
case AVX2:
Features["avx2"] = false;
@@ -2842,6 +2944,16 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
setSSELevel(Features, SSE42, Enabled);
else
setSSELevel(Features, SSE41, Enabled);
+ } else if (Name == "xsave") {
+ if (Enabled)
+ setSSELevel(Features, AVX, Enabled);
+ else
+ Features["xsaveopt"] = false;
+ } else if (Name == "xsaveopt" || Name == "xsavec" || Name == "xsaves") {
+ if (Enabled) {
+ Features["xsave"] = true;
+ setSSELevel(Features, AVX, Enabled);
+ }
}
}
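// Dependency sketch for the xsave family: enabling xsaves pulls in xsave and
// the AVX SSE level, while disabling xsave drops xsaveopt with it:
//   setFeatureEnabledImpl(Features, "xsaves", true);
//   // Features["xsave"] == true, Features["avx"] == true
//   setFeatureEnabledImpl(Features, "xsave", false);
//   // Features["xsaveopt"] == false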
@@ -2849,198 +2961,108 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
/// configured set of features.
bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- // Remember the maximum enabled sselevel.
- for (unsigned i = 0, e = Features.size(); i !=e; ++i) {
- // Ignore disabled features.
- if (Features[i][0] == '-')
+ for (const auto &Feature : Features) {
+ if (Feature[0] != '+')
continue;
- StringRef Feature = StringRef(Features[i]).substr(1);
-
- if (Feature == "aes") {
+ if (Feature == "+aes") {
HasAES = true;
- continue;
- }
-
- if (Feature == "pclmul") {
+ } else if (Feature == "+pclmul") {
HasPCLMUL = true;
- continue;
- }
-
- if (Feature == "lzcnt") {
+ } else if (Feature == "+lzcnt") {
HasLZCNT = true;
- continue;
- }
-
- if (Feature == "rdrnd") {
+ } else if (Feature == "+rdrnd") {
HasRDRND = true;
- continue;
- }
-
- if (Feature == "fsgsbase") {
+ } else if (Feature == "+fsgsbase") {
HasFSGSBASE = true;
- continue;
- }
-
- if (Feature == "bmi") {
+ } else if (Feature == "+bmi") {
HasBMI = true;
- continue;
- }
-
- if (Feature == "bmi2") {
+ } else if (Feature == "+bmi2") {
HasBMI2 = true;
- continue;
- }
-
- if (Feature == "popcnt") {
+ } else if (Feature == "+popcnt") {
HasPOPCNT = true;
- continue;
- }
-
- if (Feature == "rtm") {
+ } else if (Feature == "+rtm") {
HasRTM = true;
- continue;
- }
-
- if (Feature == "prfchw") {
+ } else if (Feature == "+prfchw") {
HasPRFCHW = true;
- continue;
- }
-
- if (Feature == "rdseed") {
+ } else if (Feature == "+rdseed") {
HasRDSEED = true;
- continue;
- }
-
- if (Feature == "adx") {
+ } else if (Feature == "+adx") {
HasADX = true;
- continue;
- }
-
- if (Feature == "tbm") {
+ } else if (Feature == "+tbm") {
HasTBM = true;
- continue;
- }
-
- if (Feature == "fma") {
+ } else if (Feature == "+fma") {
HasFMA = true;
- continue;
- }
-
- if (Feature == "f16c") {
+ } else if (Feature == "+f16c") {
HasF16C = true;
- continue;
- }
-
- if (Feature == "avx512cd") {
+ } else if (Feature == "+avx512cd") {
HasAVX512CD = true;
- continue;
- }
-
- if (Feature == "avx512er") {
+ } else if (Feature == "+avx512er") {
HasAVX512ER = true;
- continue;
- }
-
- if (Feature == "avx512pf") {
+ } else if (Feature == "+avx512pf") {
HasAVX512PF = true;
- continue;
- }
-
- if (Feature == "avx512dq") {
+ } else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
- continue;
- }
-
- if (Feature == "avx512bw") {
+ } else if (Feature == "+avx512bw") {
HasAVX512BW = true;
- continue;
- }
-
- if (Feature == "avx512vl") {
+ } else if (Feature == "+avx512vl") {
HasAVX512VL = true;
- continue;
- }
-
- if (Feature == "sha") {
+ } else if (Feature == "+sha") {
HasSHA = true;
- continue;
- }
-
- if (Feature == "cx16") {
+ } else if (Feature == "+cx16") {
HasCX16 = true;
- continue;
+ } else if (Feature == "+fxsr") {
+ HasFXSR = true;
+ } else if (Feature == "+xsave") {
+ HasXSAVE = true;
+ } else if (Feature == "+xsaveopt") {
+ HasXSAVEOPT = true;
+ } else if (Feature == "+xsavec") {
+ HasXSAVEC = true;
+ } else if (Feature == "+xsaves") {
+ HasXSAVES = true;
}
- assert(Features[i][0] == '+' && "Invalid target feature!");
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
- .Case("avx512f", AVX512F)
- .Case("avx2", AVX2)
- .Case("avx", AVX)
- .Case("sse4.2", SSE42)
- .Case("sse4.1", SSE41)
- .Case("ssse3", SSSE3)
- .Case("sse3", SSE3)
- .Case("sse2", SSE2)
- .Case("sse", SSE1)
+ .Case("+avx512f", AVX512F)
+ .Case("+avx2", AVX2)
+ .Case("+avx", AVX)
+ .Case("+sse4.2", SSE42)
+ .Case("+sse4.1", SSE41)
+ .Case("+ssse3", SSSE3)
+ .Case("+sse3", SSE3)
+ .Case("+sse2", SSE2)
+ .Case("+sse", SSE1)
.Default(NoSSE);
SSELevel = std::max(SSELevel, Level);
MMX3DNowEnum ThreeDNowLevel =
llvm::StringSwitch<MMX3DNowEnum>(Feature)
- .Case("3dnowa", AMD3DNowAthlon)
- .Case("3dnow", AMD3DNow)
- .Case("mmx", MMX)
+ .Case("+3dnowa", AMD3DNowAthlon)
+ .Case("+3dnow", AMD3DNow)
+ .Case("+mmx", MMX)
.Default(NoMMX3DNow);
MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel);
XOPEnum XLevel = llvm::StringSwitch<XOPEnum>(Feature)
- .Case("xop", XOP)
- .Case("fma4", FMA4)
- .Case("sse4a", SSE4A)
+ .Case("+xop", XOP)
+ .Case("+fma4", FMA4)
+ .Case("+sse4a", SSE4A)
.Default(NoXOP);
XOPLevel = std::max(XOPLevel, XLevel);
}
- // Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
- // Can't do this earlier because we need to be able to explicitly enable
- // popcnt and still disable sse4.2.
- if (!HasPOPCNT && SSELevel >= SSE42 &&
- std::find(Features.begin(), Features.end(), "-popcnt") == Features.end()){
- HasPOPCNT = true;
- Features.push_back("+popcnt");
- }
-
- // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
- if (!HasPRFCHW && MMX3DNowLevel >= AMD3DNow &&
- std::find(Features.begin(), Features.end(), "-prfchw") == Features.end()){
- HasPRFCHW = true;
- Features.push_back("+prfchw");
- }
-
// LLVM doesn't have a separate switch for fpmath, so only accept it if it
// matches the selected sse level.
- if (FPMath == FP_SSE && SSELevel < SSE1) {
- Diags.Report(diag::err_target_unsupported_fpmath) << "sse";
- return false;
- } else if (FPMath == FP_387 && SSELevel >= SSE1) {
- Diags.Report(diag::err_target_unsupported_fpmath) << "387";
+ if ((FPMath == FP_SSE && SSELevel < SSE1) ||
+ (FPMath == FP_387 && SSELevel >= SSE1)) {
+ Diags.Report(diag::err_target_unsupported_fpmath) <<
+ (FPMath == FP_SSE ? "sse" : "387");
return false;
}
- // Don't tell the backend if we're turning off mmx; it will end up disabling
- // SSE, which we don't want.
- // Additionally, if SSE is enabled and mmx is not explicitly disabled,
- // then enable MMX.
- std::vector<std::string>::iterator it;
- it = std::find(Features.begin(), Features.end(), "-mmx");
- if (it != Features.end())
- Features.erase(it);
- else if (SSELevel > NoSSE)
- MMX3DNowLevel = std::max(MMX3DNowLevel, MMX);
-
SimdDefaultAlign =
- (getABI() == "avx512") ? 512 : (getABI() == "avx") ? 256 : 128;
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
return true;
}
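// End-to-end (illustrative; TI and Diags are hypothetical handles): a
// backend feature vector sets both the level enums and the SIMD alignment
// derived from them:
//   std::vector<std::string> Feats = {"+avx", "+aes"};
//   TI.handleTargetFeatures(Feats, Diags);
//   // SSELevel == AVX, HasAES == true, SimdDefaultAlign == 256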
@@ -3290,6 +3312,17 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSHA)
Builder.defineMacro("__SHA__");
+ if (HasFXSR)
+ Builder.defineMacro("__FXSR__");
+ if (HasXSAVE)
+ Builder.defineMacro("__XSAVE__");
+ if (HasXSAVEOPT)
+ Builder.defineMacro("__XSAVEOPT__");
+ if (HasXSAVEC)
+ Builder.defineMacro("__XSAVEC__");
+ if (HasXSAVES)
+ Builder.defineMacro("__XSAVES__");
+
if (HasCX16)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
@@ -3379,6 +3412,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("fma", HasFMA)
.Case("fma4", XOPLevel >= FMA4)
.Case("fsgsbase", HasFSGSBASE)
+ .Case("fxsr", HasFXSR)
.Case("lzcnt", HasLZCNT)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
.Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
@@ -3402,6 +3436,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
.Case("xop", XOPLevel >= XOP)
+ .Case("xsave", HasXSAVE)
+ .Case("xsavec", HasXSAVEC)
+ .Case("xsaves", HasXSAVES)
+ .Case("xsaveopt", HasXSAVEOPT)
.Default(false);
}
@@ -3437,6 +3475,14 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
default: return false;
+ // Constant constraints.
+ case 'e': // 32-bit signed integer constant for use with sign-extending x86_64
+ // instructions.
+ case 'Z': // 32-bit unsigned integer constant for use with zero-extending
+ // x86_64 instructions.
+ case 's':
+ Info.setRequiresImmediate();
+ return true;
case 'I':
Info.setRequiresImmediate(0, 31);
return true;
@@ -3447,8 +3493,7 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
Info.setRequiresImmediate(-128, 127);
return true;
case 'L':
- // FIXME: properly analyze this constraint:
- // must be one of 0xff, 0xffff, or 0xffffffff
+ Info.setRequiresImmediate({ int(0xff), int(0xffff), int(0xffffffff) });
return true;
case 'M':
Info.setRequiresImmediate(0, 3);
@@ -3459,20 +3504,24 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
case 'O':
Info.setRequiresImmediate(0, 127);
return true;
- case 'Y': // first letter of a pair:
- switch (*(Name+1)) {
- default: return false;
- case '0': // First SSE register.
- case 't': // Any SSE register, when SSE2 is enabled.
- case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
- case 'm': // any MMX register, when inter-unit moves enabled.
- break; // falls through to setAllowsRegister.
- }
- case 'f': // any x87 floating point stack register.
+ // Register constraints.
+ case 'Y': // 'Y' is the first character for several 2-character constraints.
+ // Shift the pointer to the second character of the constraint.
+ Name++;
+ switch (*Name) {
+ default:
+ return false;
+ case '0': // First SSE register.
+ case 't': // Any SSE register, when SSE2 is enabled.
+ case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
+ case 'm': // Any MMX register, when inter-unit moves enabled.
+ Info.setAllowsRegister();
+ return true;
+ }
+ case 'f': // Any x87 floating point stack register.
// Constraint 'f' cannot be used for output operands.
if (Info.ConstraintStr[0] == '=')
return false;
-
Info.setAllowsRegister();
return true;
case 'a': // eax.
@@ -3482,8 +3531,8 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
case 'S': // esi.
case 'D': // edi.
case 'A': // edx:eax.
- case 't': // top of floating point stack.
- case 'u': // second from top of floating point stack.
+ case 't': // Top of floating point stack.
+ case 'u': // Second from top of floating point stack.
case 'q': // Any register accessible as [r]l: a, b, c, and d.
case 'y': // Any MMX register.
case 'x': // Any SSE register.
@@ -3493,12 +3542,9 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
// index in a base+index memory access.
Info.setAllowsRegister();
return true;
+ // Floating point constant constraints.
case 'C': // SSE floating point constant.
case 'G': // x87 floating point constant.
- case 'e': // 32-bit signed integer constant for use with zero-extending
- // x86_64 instructions.
- case 'Z': // 32-bit unsigned integer constant for use with zero-extending
- // x86_64 instructions.
return true;
}
}
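// Immediate-constraint example (sketch): 'I' accepts only compile-time
// constants in [0, 31], which suits shift counts:
//   int X = 1;
//   __asm__("shll %1, %0" : "+r"(X) : "I"(3));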
@@ -3530,8 +3576,30 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
case 'u':
return Size <= 128;
case 'x':
- // 256-bit ymm registers can be used if target supports AVX.
- return Size <= (SSELevel >= AVX ? 256U : 128U);
+ if (SSELevel >= AVX512F)
+ // 512-bit zmm registers can be used if target supports AVX512F.
+ return Size <= 512U;
+ else if (SSELevel >= AVX)
+ // 256-bit ymm registers can be used if target supports AVX.
+ return Size <= 256U;
+ return Size <= 128U;
+ case 'Y':
+ // 'Y' is the first character for several 2-character constraints.
+ switch (Constraint[1]) {
+ default: break;
+ case 'm':
+ // 'Ym' is synonymous with 'y'.
+ return Size <= 64;
+ case 'i':
+ case 't':
+ // 'Yi' and 'Yt' are synonymous with 'x' when SSE2 is enabled.
+ if (SSELevel >= AVX512F)
+ return Size <= 512U;
+ else if (SSELevel >= AVX)
+ return Size <= 256U;
+ return SSELevel >= SSE2 && Size <= 128U;
+ }
+
}
return true;
@@ -3565,7 +3633,7 @@ public:
LongDoubleWidth = 96;
LongDoubleAlign = 32;
SuitableAlign = 128;
- DescriptionString = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128";
+ DataLayoutString = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128";
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
@@ -3656,12 +3724,26 @@ public:
LongDoubleAlign = 128;
SuitableAlign = 128;
MaxVectorAlign = 256;
+ // The watchOS simulator uses the builtin bool type for Objective-C.
+ llvm::Triple T = llvm::Triple(Triple);
+ if (T.isWatchOS())
+ UseSignedCharForObjCBool = false;
SizeType = UnsignedLong;
IntPtrType = SignedLong;
- DescriptionString = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128";
+ DataLayoutString = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128";
HasAlignMac68kSupport = true;
}
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ if (!DarwinTargetInfo<X86_32TargetInfo>::handleTargetFeatures(Features,
+ Diags))
+ return false;
+ // We now know the features we have: we can decide how to align vectors.
+ MaxVectorAlign =
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ return true;
+ }
};
// x86-32 Windows target
@@ -3673,9 +3755,9 @@ public:
DoubleAlign = LongLongAlign = 64;
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
- DescriptionString = IsWinCOFF
- ? "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
- : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
+ DataLayoutString = IsWinCOFF
+ ? "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
+ : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -3701,12 +3783,11 @@ public:
Builder.defineMacro("_M_IX86", "600");
}
};
-} // end anonymous namespace
static void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
- // Mingw and cygwin define __declspec(a) to __attribute__((a)). Clang supports
- // __declspec natively under -fms-extensions, but we define a no-op __declspec
- // macro anyway for pre-processor compatibility.
+ // Mingw and cygwin define __declspec(a) to __attribute__((a)). Clang
+ // supports __declspec natively under -fms-extensions, but we define a no-op
+ // __declspec macro anyway for pre-processor compatibility.
if (Opts.MicrosoftExt)
Builder.defineMacro("__declspec", "__declspec");
else
@@ -3733,7 +3814,6 @@ static void addMinGWDefines(const LangOptions &Opts, MacroBuilder &Builder) {
addCygMingDefines(Opts, Builder);
}
-namespace {
// x86-32 MinGW target
class MinGWX86_32TargetInfo : public WindowsX86_32TargetInfo {
public:
@@ -3754,10 +3834,9 @@ class CygwinX86_32TargetInfo : public X86_32TargetInfo {
public:
CygwinX86_32TargetInfo(const llvm::Triple &Triple)
: X86_32TargetInfo(Triple) {
- TLSSupported = false;
WCharType = UnsignedShort;
DoubleAlign = LongLongAlign = 64;
- DescriptionString = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
+ DataLayoutString = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32";
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -3791,6 +3870,27 @@ public:
}
};
+// X86-32 MCU target
+class MCUX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ MCUX86_32TargetInfo(const llvm::Triple &Triple) : X86_32TargetInfo(Triple) {
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ // On MCU we support only the C calling convention.
+ return CC == CC_C ? CCCR_OK : CCCR_Warning;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__iamcu");
+ Builder.defineMacro("__iamcu__");
+ }
+};
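+// Code targeting the MCU environment can key off the macros defined above
+// (sketch):
+//   #ifdef __iamcu__
+//   /* IAMCU ABI: no x87; long double is plain 64-bit IEEE double. */
+//   #endif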
+
// RTEMS Target
template<typename Target>
class RTEMSTargetInfo : public OSTargetInfo<Target> {
@@ -3864,10 +3964,10 @@ public:
RegParmMax = 6;
// Pointers are 32-bit in x32.
- DescriptionString = IsX32 ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
- : IsWinCOFF
- ? "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
- : "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
+ DataLayoutString = IsX32 ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128"
+ : IsWinCOFF
+ ? "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+ : "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
// Use fpret only for long double.
RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
@@ -3875,6 +3975,9 @@ public:
// Use fp2ret for _Complex long double.
ComplexLongDoubleUsesFP2Ret = true;
+ // Make __builtin_ms_va_list available.
+ HasBuiltinMSVaList = true;
+
// x86-64 has atomics up to 16 bytes.
MaxAtomicPromoteWidth = 128;
MaxAtomicInlineWidth = 128;
@@ -3902,6 +4005,22 @@ public:
// for x32 we need it here explicitly
bool hasInt128Type() const override { return true; }
+
+ bool validateGlobalRegisterVariable(StringRef RegName,
+ unsigned RegSize,
+ bool &HasSizeMismatch) const override {
+ // rsp and rbp are the only 64-bit registers the x86 backend can currently
+ // handle.
+ if (RegName.equals("rsp") || RegName.equals("rbp")) {
+ // Check that the register size is 64-bit.
+ HasSizeMismatch = RegSize != 64;
+ return true;
+ }
+
+ // Check if the register is a 32-bit register the backend can handle.
+ return X86TargetInfo::validateGlobalRegisterVariable(RegName, RegSize,
+ HasSizeMismatch);
+ }
};
// x86-64 Windows target
@@ -3959,8 +4078,8 @@ public:
MacroBuilder &Builder) const override {
WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
- Builder.defineMacro("_M_X64");
- Builder.defineMacro("_M_AMD64");
+ Builder.defineMacro("_M_X64", "100");
+ Builder.defineMacro("_M_AMD64", "100");
}
};
@@ -3988,17 +4107,52 @@ public:
}
};
+// x86-64 Cygwin target
+class CygwinX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ CygwinX86_64TargetInfo(const llvm::Triple &Triple)
+ : X86_64TargetInfo(Triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ X86_64TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__x86_64__");
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN64__");
+ addCygMingDefines(Opts, Builder);
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+
+ // GCC defines this macro when it is using __gxx_personality_seh0.
+ if (!Opts.SjLjExceptions)
+ Builder.defineMacro("__SEH__");
+ }
+};
+
class DarwinX86_64TargetInfo : public DarwinTargetInfo<X86_64TargetInfo> {
public:
DarwinX86_64TargetInfo(const llvm::Triple &Triple)
: DarwinTargetInfo<X86_64TargetInfo>(Triple) {
Int64Type = SignedLongLong;
- MaxVectorAlign = 256;
// The 64-bit iOS simulator uses the builtin bool type for Objective-C.
llvm::Triple T = llvm::Triple(Triple);
if (T.isiOS())
UseSignedCharForObjCBool = false;
- DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
+ DataLayoutString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128";
+ }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override {
+ if (!DarwinTargetInfo<X86_64TargetInfo>::handleTargetFeatures(Features,
+ Diags))
+ return false;
+ // We now know the features we have: we can decide how to align vectors.
+ MaxVectorAlign =
+ hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ return true;
}
};
@@ -4045,16 +4199,23 @@ class ARMTargetInfo : public TargetInfo {
std::string ABI, CPU;
+ StringRef CPUProfile;
+ StringRef CPUAttr;
+
enum {
FP_Default,
FP_VFP,
FP_Neon
} FPMath;
+ unsigned ArchISA;
+ unsigned ArchKind = llvm::ARM::AK_ARMV4T;
+ unsigned ArchProfile;
+ unsigned ArchVersion;
+
unsigned FPU : 5;
unsigned IsAAPCS : 1;
- unsigned IsThumb : 1;
unsigned HWDiv : 2;
// Initialized via features.
@@ -4063,6 +4224,17 @@ class ARMTargetInfo : public TargetInfo {
unsigned CRC : 1;
unsigned Crypto : 1;
+ unsigned DSP : 1;
+ unsigned Unaligned : 1;
+
+ enum {
+ LDREX_B = (1 << 0), ///< byte (8-bit)
+ LDREX_H = (1 << 1), ///< half (16-bit)
+ LDREX_W = (1 << 2), ///< word (32-bit)
+ LDREX_D = (1 << 3), ///< double (64-bit)
+ };
+
+ uint32_t LDREX;
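+ // Example bitmask (a sketch; the actual initialization happens elsewhere):
+ // an ARMv7-A core supports all four exclusive-access widths:
+ //   LDREX = LDREX_B | LDREX_H | LDREX_W | LDREX_D;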
// ACLE 6.5.1 Hardware floating point
enum {
@@ -4074,37 +4246,6 @@ class ARMTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
- static bool shouldUseInlineAtomic(const llvm::Triple &T) {
- StringRef ArchName = T.getArchName();
- if (T.getArch() == llvm::Triple::arm ||
- T.getArch() == llvm::Triple::armeb) {
- StringRef VersionStr;
- if (ArchName.startswith("armv"))
- VersionStr = ArchName.substr(4, 1);
- else if (ArchName.startswith("armebv"))
- VersionStr = ArchName.substr(6, 1);
- else
- return false;
- unsigned Version;
- if (VersionStr.getAsInteger(10, Version))
- return false;
- return Version >= 6;
- }
- assert(T.getArch() == llvm::Triple::thumb ||
- T.getArch() == llvm::Triple::thumbeb);
- StringRef VersionStr;
- if (ArchName.startswith("thumbv"))
- VersionStr = ArchName.substr(6, 1);
- else if (ArchName.startswith("thumbebv"))
- VersionStr = ArchName.substr(8, 1);
- else
- return false;
- unsigned Version;
- if (VersionStr.getAsInteger(10, Version))
- return false;
- return Version >= 7;
- }
-
void setABIAAPCS() {
IsAAPCS = true;
@@ -4139,24 +4280,24 @@ class ARMTargetInfo : public TargetInfo {
// Thumb1 add sp, #imm requires the immediate value be multiple of 4,
// so set preferred for small types to 32.
if (T.isOSBinFormatMachO()) {
- DescriptionString =
+ DataLayoutString =
BigEndian ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
: "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
} else if (T.isOSWindows()) {
assert(!BigEndian && "Windows on ARM does not support big endian");
- DescriptionString = "e"
- "-m:w"
- "-p:32:32"
- "-i64:64"
- "-v128:64:128"
- "-a:0:32"
- "-n32"
- "-S64";
+ DataLayoutString = "e"
+ "-m:w"
+ "-p:32:32"
+ "-i64:64"
+ "-v128:64:128"
+ "-a:0:32"
+ "-n32"
+ "-S64";
} else if (T.isOSNaCl()) {
assert(!BigEndian && "NaCl on ARM does not support big endian");
- DescriptionString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128";
+ DataLayoutString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128";
} else {
- DescriptionString =
+ DataLayoutString =
BigEndian ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
}
@@ -4164,12 +4305,15 @@ class ARMTargetInfo : public TargetInfo {
// FIXME: Enumerated types are variable width in straight AAPCS.
}
- void setABIAPCS() {
+ void setABIAPCS(bool IsAAPCS16) {
const llvm::Triple &T = getTriple();
IsAAPCS = false;
- DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+ if (IsAAPCS16)
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+ else
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
// size_t is unsigned int on FreeBSD.
if (T.getOS() == llvm::Triple::FreeBSD)
@@ -4189,13 +4333,16 @@ class ARMTargetInfo : public TargetInfo {
/// gcc.
ZeroLengthBitfieldBoundary = 32;
- if (T.isOSBinFormatMachO())
- DescriptionString =
+ if (T.isOSBinFormatMachO() && IsAAPCS16) {
+ assert(!BigEndian && "AAPCS16 does not support big-endian");
+ DataLayoutString = "e-m:o-p:32:32-i64:64-a:0:32-n32-S128";
+ } else if (T.isOSBinFormatMachO())
+ DataLayoutString =
BigEndian
? "E-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
else
- DescriptionString =
+ DataLayoutString =
BigEndian
? "E-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
: "e-m:e-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32";
@@ -4203,10 +4350,104 @@ class ARMTargetInfo : public TargetInfo {
// FIXME: Override "preferred align" for double and long long.
}
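+  // Derive and cache the TargetParser view of the triple's architecture.
+  // setCPU() re-runs the (unsigned Kind) overload when an explicit CPU is
+  // given.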
+ void setArchInfo() {
+ StringRef ArchName = getTriple().getArchName();
+
+ ArchISA = llvm::ARM::parseArchISA(ArchName);
+ CPU = llvm::ARM::getDefaultCPU(ArchName);
+ unsigned AK = llvm::ARM::parseArch(ArchName);
+ if (AK != llvm::ARM::AK_INVALID)
+ ArchKind = AK;
+ setArchInfo(ArchKind);
+ }
+
+ void setArchInfo(unsigned Kind) {
+ StringRef SubArch;
+
+    // Cache the TargetParser info.
+ ArchKind = Kind;
+ SubArch = llvm::ARM::getSubArch(ArchKind);
+ ArchProfile = llvm::ARM::parseArchProfile(SubArch);
+ ArchVersion = llvm::ARM::parseArchVersion(SubArch);
+
+    // Cache the CPU-related strings.
+ CPUAttr = getCPUAttr();
+ CPUProfile = getCPUProfile();
+ }
+
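+  // Pick the atomic widths from the cached arch info: v6+ ARM and v7+ Thumb
+  // get inline atomics, and M-profile cores are capped at 32 bits.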
+ void setAtomic() {
+    // If the triple does not specify a sub-arch, we are not using inline
+    // atomics.
+ bool ShouldUseInlineAtomic =
+ (ArchISA == llvm::ARM::IK_ARM && ArchVersion >= 6) ||
+ (ArchISA == llvm::ARM::IK_THUMB && ArchVersion >= 7);
+    // Cortex-M does not support 8-byte atomics, while general Thumb-2 does.
+ if (ArchProfile == llvm::ARM::PK_M) {
+ MaxAtomicPromoteWidth = 32;
+ if (ShouldUseInlineAtomic)
+ MaxAtomicInlineWidth = 32;
+    } else {
+ MaxAtomicPromoteWidth = 64;
+ if (ShouldUseInlineAtomic)
+ MaxAtomicInlineWidth = 64;
+ }
+ }
+
+ bool isThumb() const {
+ return (ArchISA == llvm::ARM::IK_THUMB);
+ }
+
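+  // Thumb is available on the pre-v6 'T' variants (e.g. ARMv4T) and on all
+  // architectures from v6 onwards.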
+ bool supportsThumb() const {
+ return CPUAttr.count('T') || ArchVersion >= 6;
+ }
+
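+  // Thumb-2 first appeared in ARMv6T2 and is mandatory from ARMv7 onwards.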
+ bool supportsThumb2() const {
+ return CPUAttr.equals("6T2") || ArchVersion >= 7;
+ }
+
+ StringRef getCPUAttr() const {
+ // For most sub-arches, the build attribute CPU name is enough.
+ // For Cortex variants, it's slightly different.
+    switch (ArchKind) {
+ default:
+ return llvm::ARM::getCPUAttr(ArchKind);
+ case llvm::ARM::AK_ARMV6M:
+ return "6M";
+ case llvm::ARM::AK_ARMV7S:
+ return "7S";
+ case llvm::ARM::AK_ARMV7A:
+ return "7A";
+ case llvm::ARM::AK_ARMV7R:
+ return "7R";
+ case llvm::ARM::AK_ARMV7M:
+ return "7M";
+ case llvm::ARM::AK_ARMV7EM:
+ return "7EM";
+ case llvm::ARM::AK_ARMV8A:
+ return "8A";
+ case llvm::ARM::AK_ARMV8_1A:
+ return "8_1A";
+ }
+ }
+
+ StringRef getCPUProfile() const {
+    switch (ArchProfile) {
+ case llvm::ARM::PK_A:
+ return "A";
+ case llvm::ARM::PK_R:
+ return "R";
+ case llvm::ARM::PK_M:
+ return "M";
+ default:
+ return "";
+ }
+ }
+
public:
ARMTargetInfo(const llvm::Triple &Triple, bool IsBigEndian)
- : TargetInfo(Triple), CPU("arm1136j-s"), FPMath(FP_Default),
- IsAAPCS(true), HW_FP(0) {
+ : TargetInfo(Triple), FPMath(FP_Default),
+ IsAAPCS(true), LDREX(0), HW_FP(0) {
BigEndian = IsBigEndian;
switch (getTriple().getOS()) {
@@ -4218,13 +4459,13 @@ public:
break;
}
+ // Cache arch related info.
+ setArchInfo();
+
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
- // FIXME: Should we just treat this as a feature?
- IsThumb = getTriple().getArchName().startswith("thumb");
-
// FIXME: This duplicates code from the driver that sets the -target-abi
// option - this code is used if -target-abi isn't passed and should
// be unified in some way.
@@ -4235,6 +4476,8 @@ public:
Triple.getOS() == llvm::Triple::UnknownOS ||
StringRef(CPU).startswith("cortex-m")) {
setABI("aapcs");
+ } else if (Triple.isWatchOS()) {
+ setABI("aapcs16");
} else {
setABI("apcs-gnu");
}
@@ -4254,8 +4497,8 @@ public:
setABI("aapcs");
break;
case llvm::Triple::GNU:
- setABI("apcs-gnu");
- break;
+ setABI("apcs-gnu");
+ break;
default:
if (Triple.getOS() == llvm::Triple::NetBSD)
setABI("apcs-gnu");
@@ -4269,9 +4512,7 @@ public:
TheCXXABI.set(TargetCXXABI::GenericARM);
// ARM has atomics up to 8 bytes
- MaxAtomicPromoteWidth = 64;
- if (shouldUseInlineAtomic(getTriple()))
- MaxAtomicInlineWidth = 64;
+ setAtomic();
// Do force alignment of members that follow zero length bitfields. If
// the alignment of the zero-length bitfield is greater than the member
@@ -4289,8 +4530,8 @@ public:
//
// FIXME: We need support for -meabi... we could just mangle it into the
// name.
- if (Name == "apcs-gnu") {
- setABIAPCS();
+ if (Name == "apcs-gnu" || Name == "aapcs16") {
+ setABIAPCS(Name == "aapcs16");
return true;
}
if (Name == "aapcs" || Name == "aapcs-vfp" || Name == "aapcs-linux") {
@@ -4301,43 +4542,27 @@ public:
}
// FIXME: This should be based on Arch attributes, not CPU names.
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
- StringRef ArchName = getTriple().getArchName();
- unsigned ArchKind = llvm::ARMTargetParser::parseArch(ArchName);
- bool IsV8 = (ArchKind == llvm::ARM::AK_ARMV8A ||
- ArchKind == llvm::ARM::AK_ARMV8_1A);
-
- if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
- Features["vfp2"] = true;
- else if (CPU == "cortex-a8" || CPU == "cortex-a9") {
- Features["vfp3"] = true;
- Features["neon"] = true;
- }
- else if (CPU == "cortex-a5") {
- Features["vfp4"] = true;
- Features["neon"] = true;
- } else if (CPU == "swift" || CPU == "cortex-a7" ||
- CPU == "cortex-a12" || CPU == "cortex-a15" ||
- CPU == "cortex-a17" || CPU == "krait") {
- Features["vfp4"] = true;
- Features["neon"] = true;
- Features["hwdiv"] = true;
- Features["hwdiv-arm"] = true;
- } else if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" ||
- CPU == "cortex-a72") {
- Features["fp-armv8"] = true;
- Features["neon"] = true;
- Features["hwdiv"] = true;
- Features["hwdiv-arm"] = true;
- Features["crc"] = true;
- Features["crypto"] = true;
- } else if (CPU == "cortex-r5" || CPU == "cortex-r7" || IsV8) {
- Features["hwdiv"] = true;
- Features["hwdiv-arm"] = true;
- } else if (CPU == "cortex-m3" || CPU == "cortex-m4" || CPU == "cortex-m7" ||
- CPU == "sc300" || CPU == "cortex-r4" || CPU == "cortex-r4f") {
- Features["hwdiv"] = true;
- }
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
+
+    std::vector<const char *> TargetFeatures;
+ unsigned Arch = llvm::ARM::parseArch(getTriple().getArchName());
+
+    // Get the default FPU features.
+ unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
+ llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
+
+    // Get the default extension features.
+ unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
+ llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
+
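+    // TargetParser reports features as "+name"/"-name" strings; only the
+    // enabled ("+name") ones are folded into the feature map here.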
+ for (const char *Feature : TargetFeatures)
+ if (Feature[0] == '+')
+        Features[Feature + 1] = true;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -4345,6 +4570,8 @@ public:
FPU = 0;
CRC = 0;
Crypto = 0;
+ DSP = 0;
+ Unaligned = 1;
SoftFloat = SoftFloatABI = false;
HWDiv = 0;
@@ -4379,12 +4606,37 @@ public:
CRC = 1;
} else if (Feature == "+crypto") {
Crypto = 1;
+ } else if (Feature == "+dsp") {
+ DSP = 1;
} else if (Feature == "+fp-only-sp") {
- HW_FP_remove |= HW_FP_DP | HW_FP_HP;
+ HW_FP_remove |= HW_FP_DP;
+ } else if (Feature == "+strict-align") {
+ Unaligned = 0;
+ } else if (Feature == "+fp16") {
+ HW_FP |= HW_FP_HP;
}
}
HW_FP &= ~HW_FP_remove;
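+    // Set the ACLE LDREX width mask from the architecture: e.g. v7-M supports
+    // byte/half/word (0x7), while v7-A/R and v8 support all four widths (0xF).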
+ switch (ArchVersion) {
+ case 6:
+ if (ArchProfile == llvm::ARM::PK_M)
+ LDREX = 0;
+ else if (ArchKind == llvm::ARM::AK_ARMV6K)
+        LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+      else
+        LDREX = LDREX_W;
+      break;
+    case 7:
+      if (ArchProfile == llvm::ARM::PK_M)
+        LDREX = LDREX_W | LDREX_H | LDREX_B;
+      else
+        LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+      break;
+    case 8:
+      LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ }
+
if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
Diags.Report(diag::err_target_unsupported_fpmath) << "neon";
return false;
@@ -4407,107 +4659,28 @@ public:
bool hasFeature(StringRef Feature) const override {
return llvm::StringSwitch<bool>(Feature)
.Case("arm", true)
+ .Case("aarch32", true)
.Case("softfloat", SoftFloat)
- .Case("thumb", IsThumb)
+ .Case("thumb", isThumb())
.Case("neon", (FPU & NeonFPU) && !SoftFloat)
.Case("hwdiv", HWDiv & HWDivThumb)
.Case("hwdiv-arm", HWDiv & HWDivARM)
.Default(false);
}
- const char *getCPUDefineSuffix(StringRef Name) const {
- if(Name == "generic") {
- auto subarch = getTriple().getSubArch();
- switch (subarch) {
- case llvm::Triple::SubArchType::ARMSubArch_v8_1a:
- return "8_1A";
- default:
- break;
- }
- }
-
- unsigned ArchKind = llvm::ARMTargetParser::parseCPUArch(Name);
- if (ArchKind == llvm::ARM::AK_INVALID)
- return "";
-
- // For most sub-arches, the build attribute CPU name is enough.
- // For Cortex variants, it's slightly different.
- switch(ArchKind) {
- default:
- return llvm::ARMTargetParser::getCPUAttr(ArchKind);
- case llvm::ARM::AK_ARMV6M:
- case llvm::ARM::AK_ARMV6SM:
- return "6M";
- case llvm::ARM::AK_ARMV7:
- case llvm::ARM::AK_ARMV7A:
- case llvm::ARM::AK_ARMV7S:
- return "7A";
- case llvm::ARM::AK_ARMV7R:
- return "7R";
- case llvm::ARM::AK_ARMV7M:
- return "7M";
- case llvm::ARM::AK_ARMV7EM:
- return "7EM";
- case llvm::ARM::AK_ARMV8A:
- return "8A";
- case llvm::ARM::AK_ARMV8_1A:
- return "8_1A";
- }
- }
- const char *getCPUProfile(StringRef Name) const {
- if(Name == "generic") {
- auto subarch = getTriple().getSubArch();
- switch (subarch) {
- case llvm::Triple::SubArchType::ARMSubArch_v8_1a:
- return "A";
- default:
- break;
- }
- }
-
- unsigned CPUArch = llvm::ARMTargetParser::parseCPUArch(Name);
- if (CPUArch == llvm::ARM::AK_INVALID)
- return "";
- StringRef ArchName = llvm::ARMTargetParser::getArchName(CPUArch);
- switch(llvm::ARMTargetParser::parseArchProfile(ArchName)) {
- case llvm::ARM::PK_A:
- return "A";
- case llvm::ARM::PK_R:
- return "R";
- case llvm::ARM::PK_M:
- return "M";
- default:
- return "";
- }
- }
bool setCPU(const std::string &Name) override {
- if (!getCPUDefineSuffix(Name))
- return false;
-
- // Cortex M does not support 8 byte atomics, while general Thumb2 does.
- StringRef Profile = getCPUProfile(Name);
- if (Profile == "M" && MaxAtomicInlineWidth) {
- MaxAtomicPromoteWidth = 32;
- MaxAtomicInlineWidth = 32;
- }
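+    // "generic" keeps the arch info already derived from the triple; any
+    // other CPU name re-derives it (and the atomic widths) from that CPU.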
+ if (Name != "generic")
+ setArchInfo(llvm::ARM::parseCPUArch(Name));
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ return false;
+ setAtomic();
CPU = Name;
return true;
}
+
bool setFPMath(StringRef Name) override;
- bool supportsThumb(StringRef ArchName, StringRef CPUArch,
- unsigned CPUArchVer) const {
- return CPUArchVer >= 7 || (CPUArch.find('T') != StringRef::npos) ||
- (CPUArch.find('M') != StringRef::npos);
- }
- bool supportsThumb2(StringRef ArchName, StringRef CPUArch,
- unsigned CPUArchVer) const {
- // We check both CPUArchVer and ArchName because when only triple is
- // specified, the default CPU is arm1136j-s.
- return ArchName.endswith("v6t2") || ArchName.endswith("v7") ||
- ArchName.endswith("v8.1a") ||
- ArchName.endswith("v8") || CPUArch == "6T2" || CPUArchVer >= 7;
- }
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
// Target identification.
@@ -4517,21 +4690,29 @@ public:
// Target properties.
Builder.defineMacro("__REGISTER_PREFIX__", "");
- StringRef CPUArch = getCPUDefineSuffix(CPU);
- unsigned int CPUArchVer;
- if (CPUArch.substr(0, 1).getAsInteger<unsigned int>(10, CPUArchVer))
- llvm_unreachable("Invalid char for architecture version number");
- Builder.defineMacro("__ARM_ARCH_" + CPUArch + "__");
+ // Unfortunately, __ARM_ARCH_7K__ is now more of an ABI descriptor. The CPU
+ // happens to be Cortex-A7 though, so it should still get __ARM_ARCH_7A__.
+ if (getTriple().isWatchOS())
+ Builder.defineMacro("__ARM_ARCH_7K__", "2");
- // ACLE 6.4.1 ARM/Thumb instruction set architecture
- StringRef CPUProfile = getCPUProfile(CPU);
- StringRef ArchName = getTriple().getArchName();
+ if (!CPUAttr.empty())
+ Builder.defineMacro("__ARM_ARCH_" + CPUAttr + "__");
+ // ACLE 6.4.1 ARM/Thumb instruction set architecture
// __ARM_ARCH is defined as an integer value indicating the current ARM ISA
- Builder.defineMacro("__ARM_ARCH", CPUArch.substr(0, 1));
- if (CPUArch[0] >= '8') {
- Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN");
- Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING");
+ Builder.defineMacro("__ARM_ARCH", Twine(ArchVersion));
+
+ if (ArchVersion >= 8) {
+ // ACLE 6.5.7 Crypto Extension
+ if (Crypto)
+ Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+ // ACLE 6.5.8 CRC32 Extension
+ if (CRC)
+ Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+ // ACLE 6.5.10 Numeric Maximum and Minimum
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
+ // ACLE 6.5.9 Directed Rounding
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
}
// __ARM_ARCH_ISA_ARM is defined to 1 if the core supports the ARM ISA. It
@@ -4543,9 +4724,9 @@ public:
    // __ARM_ARCH_ISA_THUMB is defined to 1 if the core supports the original
    // Thumb ISA (including v6-M). It is set to 2 if the core supports the
    // Thumb-2 ISA as found in the v6T2 architecture and all v7 architectures.
- if (supportsThumb2(ArchName, CPUArch, CPUArchVer))
+ if (supportsThumb2())
Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "2");
- else if (supportsThumb(ArchName, CPUArch, CPUArchVer))
+ else if (supportsThumb())
Builder.defineMacro("__ARM_ARCH_ISA_THUMB", "1");
// __ARM_32BIT_STATE is defined to 1 if code is being generated for a 32-bit
@@ -4558,6 +4739,20 @@ public:
if (!CPUProfile.empty())
Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
+ // ACLE 6.4.3 Unaligned access supported in hardware
+ if (Unaligned)
+ Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
+
+ // ACLE 6.4.4 LDREX/STREX
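+    // (e.g. a v7-A core gets __ARM_FEATURE_LDREX 0xF, a v7-M core 0x7)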
+ if (LDREX)
+ Builder.defineMacro("__ARM_FEATURE_LDREX", "0x" + llvm::utohexstr(LDREX));
+
+ // ACLE 6.4.5 CLZ
+ if (ArchVersion == 5 ||
+ (ArchVersion == 6 && CPUProfile != "M") ||
+ ArchVersion > 6)
+ Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
+
// ACLE 6.5.1 Hardware Floating Point
if (HW_FP)
Builder.defineMacro("__ARM_FP", "0x" + llvm::utohexstr(HW_FP));
@@ -4565,12 +4760,20 @@ public:
// ACLE predefines.
Builder.defineMacro("__ARM_ACLE", "200");
+ // FP16 support (we currently only support IEEE format).
+ Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
+ Builder.defineMacro("__ARM_FP16_ARGS", "1");
+
+ // ACLE 6.5.3 Fused multiply-accumulate (FMA)
+ if (ArchVersion >= 7 && (CPUProfile != "M" || CPUAttr == "7EM"))
+ Builder.defineMacro("__ARM_FEATURE_FMA", "1");
+
// Subtarget options.
// FIXME: It's more complicated than this and we don't really support
// interworking.
// Windows on ARM does not "support" interworking
- if (5 <= CPUArchVer && CPUArchVer <= 8 && !getTriple().isOSWindows())
+ if (5 <= ArchVersion && ArchVersion <= 8 && !getTriple().isOSWindows())
Builder.defineMacro("__THUMB_INTERWORK__");
if (ABI == "aapcs" || ABI == "aapcs-linux" || ABI == "aapcs-vfp") {
@@ -4590,14 +4793,23 @@ public:
if (CPU == "xscale")
Builder.defineMacro("__XSCALE__");
- if (IsThumb) {
+ if (isThumb()) {
Builder.defineMacro("__THUMBEL__");
Builder.defineMacro("__thumb__");
- if (supportsThumb2(ArchName, CPUArch, CPUArchVer))
+ if (supportsThumb2())
Builder.defineMacro("__thumb2__");
}
- if (((HWDiv & HWDivThumb) && IsThumb) || ((HWDiv & HWDivARM) && !IsThumb))
+
+ // ACLE 6.4.9 32-bit SIMD instructions
+ if (ArchVersion >= 6 && (CPUProfile != "M" || CPUAttr == "7EM"))
+ Builder.defineMacro("__ARM_FEATURE_SIMD32", "1");
+
+ // ACLE 6.4.10 Hardware Integer Divide
+ if (((HWDiv & HWDivThumb) && isThumb()) ||
+ ((HWDiv & HWDivARM) && !isThumb())) {
+ Builder.defineMacro("__ARM_FEATURE_IDIV", "1");
Builder.defineMacro("__ARM_ARCH_EXT_IDIV__", "1");
+ }
// Note, this is always on in gcc, even though it doesn't make sense.
Builder.defineMacro("__APCS_32__");
@@ -4616,9 +4828,13 @@ public:
// the VFP define, hence the soft float and arch check. This is subtly
// different from gcc, we follow the intent which was that it should be set
// when Neon instructions are actually available.
- if ((FPU & NeonFPU) && !SoftFloat && CPUArchVer >= 7) {
- Builder.defineMacro("__ARM_NEON");
+ if ((FPU & NeonFPU) && !SoftFloat && ArchVersion >= 7) {
+ Builder.defineMacro("__ARM_NEON", "1");
Builder.defineMacro("__ARM_NEON__");
+      // Current AArch32 NEON implementations do not support double-precision
+      // floating-point even when it is present in VFP.
+ Builder.defineMacro("__ARM_NEON_FP",
+ "0x" + llvm::utohexstr(HW_FP & ~HW_FP_DP));
}
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
@@ -4627,39 +4843,49 @@ public:
Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
Opts.ShortEnums ? "1" : "4");
- if (CRC)
- Builder.defineMacro("__ARM_FEATURE_CRC32");
-
- if (Crypto)
- Builder.defineMacro("__ARM_FEATURE_CRYPTO");
-
- if (CPUArchVer >= 6 && CPUArch != "6M") {
+ if (ArchVersion >= 6 && CPUAttr != "6M") {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
- bool is5EOrAbove = (CPUArchVer >= 6 ||
- (CPUArchVer == 5 &&
- CPUArch.find('E') != StringRef::npos));
- bool is32Bit = (!IsThumb || supportsThumb2(ArchName, CPUArch, CPUArchVer));
- if (is5EOrAbove && is32Bit && (CPUProfile != "M" || CPUArch == "7EM"))
- Builder.defineMacro("__ARM_FEATURE_DSP");
+ // ACLE 6.4.7 DSP instructions
+    if (DSP)
+      Builder.defineMacro("__ARM_FEATURE_DSP", "1");
+
+ // ACLE 6.4.8 Saturation instructions
+ bool SAT = false;
+    if ((ArchVersion == 6 && CPUProfile != "M") || ArchVersion > 6) {
+ Builder.defineMacro("__ARM_FEATURE_SAT", "1");
+ SAT = true;
+ }
+
+ // ACLE 6.4.6 Q (saturation) flag
+ if (DSP || SAT)
+ Builder.defineMacro("__ARM_FEATURE_QBIT", "1");
+
+ if (Opts.UnsafeFPMath)
+ Builder.defineMacro("__ARM_FP_FAST", "1");
+
+ if (ArchKind == llvm::ARM::AK_ARMV8_1A)
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+            clang::ARM::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool isCLZForZeroUndef() const override { return false; }
BuiltinVaListKind getBuiltinVaListKind() const override {
- return IsAAPCS ? AAPCSABIBuiltinVaList : TargetInfo::VoidPtrBuiltinVaList;
+ return IsAAPCS
+ ? AAPCSABIBuiltinVaList
+ : (getTriple().isWatchOS() ? TargetInfo::CharPtrBuiltinVaList
+ : TargetInfo::VoidPtrBuiltinVaList);
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
@@ -4754,6 +4980,10 @@ public:
if (RegNo == 1) return 1;
return -1;
}
+
+ bool hasSjLjLowering() const override {
+ return true;
+ }
};
bool ARMTargetInfo::setFPMath(StringRef Name) {
@@ -4790,10 +5020,8 @@ const char * const ARMTargetInfo::GCCRegNames[] = {
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
};
-void ARMTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char *> ARMTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
@@ -4817,22 +5045,23 @@ const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
// don't want to substitute one of these for a different-sized one.
};
-void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ArrayRef<TargetInfo::GCCRegAlias> ARMTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
}
const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsNEON.def"
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) { #ID, TYPE, ATTRS, 0, LANG },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
+ { #ID, TYPE, ATTRS, nullptr, LANG, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsARM.def"
};
@@ -4891,6 +5120,19 @@ public:
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
}
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
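+    // x86 conventions are common in shared Windows headers; ignore them
+    // silently on ARM rather than warning as for other unsupported CCs.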
+ switch (CC) {
+ case CC_X86StdCall:
+ case CC_X86ThisCall:
+ case CC_X86FastCall:
+ case CC_X86VectorCall:
+ return CCCR_Ignore;
+ case CC_C:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
};
// Windows ARM + Itanium C++ ABI Target
@@ -4925,6 +5167,45 @@ public:
}
};
+// ARM MinGW target
+class MinGWARMTargetInfo : public WindowsARMTargetInfo {
+public:
+ MinGWARMTargetInfo(const llvm::Triple &Triple)
+ : WindowsARMTargetInfo(Triple) {
+ TheCXXABI.set(TargetCXXABI::GenericARM);
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsARMTargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ Builder.defineMacro("_ARM_");
+ addMinGWDefines(Opts, Builder);
+ }
+};
+
+// ARM Cygwin target
+class CygwinARMTargetInfo : public ARMleTargetInfo {
+public:
+ CygwinARMTargetInfo(const llvm::Triple &Triple) : ARMleTargetInfo(Triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ DataLayoutString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ ARMleTargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_ARM_");
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN32__");
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+};
+
class DarwinARMTargetInfo :
public DarwinTargetInfo<ARMleTargetInfo> {
protected:
@@ -4942,13 +5223,23 @@ public:
// ARMleTargetInfo.
MaxAtomicInlineWidth = 64;
- // Darwin on iOS uses a variant of the ARM C++ ABI.
- TheCXXABI.set(TargetCXXABI::iOS);
+ if (Triple.isWatchOS()) {
+      // Darwin on watchOS uses a variant of the ARM C++ ABI.
+ TheCXXABI.set(TargetCXXABI::WatchOS);
+
+ // The 32-bit ABI is silent on what ptrdiff_t should be, but given that
+ // size_t is long, it's a bit weird for it to be int.
+ PtrDiffType = SignedLong;
+
+ // BOOL should be a real boolean on the new ABI
+ UseSignedCharForObjCBool = false;
+ } else
+ TheCXXABI.set(TargetCXXABI::iOS);
}
};
class AArch64TargetInfo : public TargetInfo {
- virtual void setDescriptionString() = 0;
+ virtual void setDataLayoutString() = 0;
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char *const GCCRegNames[];
@@ -4960,6 +5251,8 @@ class AArch64TargetInfo : public TargetInfo {
unsigned FPU;
unsigned CRC;
unsigned Crypto;
+ unsigned Unaligned;
+ unsigned V8_1A;
static const Builtin::Info BuiltinInfo[];
@@ -4998,7 +5291,7 @@ public:
// contributes to the alignment of the containing aggregate in the same way
// a plain (non bit-field) member of that type would, without exception for
// zero-sized or anonymous bit-fields."
- UseBitFieldTypeAlignment = true;
+ assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
UseZeroLengthBitfieldAlignment = true;
// AArch64 targets default to using the ARM C++ ABI.
@@ -5017,7 +5310,7 @@ public:
bool setCPU(const std::string &Name) override {
bool CPUKnown = llvm::StringSwitch<bool>(Name)
.Case("generic", true)
- .Cases("cortex-a53", "cortex-a57", "cortex-a72", true)
+ .Cases("cortex-a53", "cortex-a57", "cortex-a72", "cortex-a35", true)
.Case("cyclone", true)
.Default(false);
return CPUKnown;
@@ -5037,33 +5330,30 @@ public:
Builder.defineMacro("__ARM_ARCH", "8");
Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
- Builder.defineMacro("__ARM_64BIT_STATE");
- Builder.defineMacro("__ARM_PCS_AAPCS64");
- Builder.defineMacro("__ARM_ARCH_ISA_A64");
+ Builder.defineMacro("__ARM_64BIT_STATE", "1");
+ Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
+ Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
- Builder.defineMacro("__ARM_FEATURE_UNALIGNED");
- Builder.defineMacro("__ARM_FEATURE_CLZ");
- Builder.defineMacro("__ARM_FEATURE_FMA");
- Builder.defineMacro("__ARM_FEATURE_DIV");
- Builder.defineMacro("__ARM_FEATURE_IDIV"); // As specified in ACLE
+ Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
+ Builder.defineMacro("__ARM_FEATURE_FMA", "1");
+ Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
+ Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
- Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN");
- Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING");
+ Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
+ Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
// 0xe implies support for half, single and double precision operations.
- Builder.defineMacro("__ARM_FP", "0xe");
+ Builder.defineMacro("__ARM_FP", "0xE");
// PCS specifies this for SysV variants, which is all we support. Other ABIs
// may choose __ARM_FP16_FORMAT_ALTERNATIVE.
- Builder.defineMacro("__ARM_FP16_FORMAT_IEEE");
+ Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
+ Builder.defineMacro("__ARM_FP16_ARGS", "1");
- if (Opts.FastMath || Opts.FiniteMathOnly)
- Builder.defineMacro("__ARM_FP_FAST");
-
- if (Opts.C99 && !Opts.Freestanding)
- Builder.defineMacro("__ARM_FP_FENV_ROUNDING");
+ if (Opts.UnsafeFPMath)
+ Builder.defineMacro("__ARM_FP_FAST", "1");
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T", Opts.ShortWChar ? "2" : "4");
@@ -5071,16 +5361,22 @@ public:
Opts.ShortEnums ? "1" : "4");
if (FPU == NeonMode) {
- Builder.defineMacro("__ARM_NEON");
+ Builder.defineMacro("__ARM_NEON", "1");
// 64-bit NEON supports half, single and double precision operations.
- Builder.defineMacro("__ARM_NEON_FP", "0xe");
+ Builder.defineMacro("__ARM_NEON_FP", "0xE");
}
if (CRC)
- Builder.defineMacro("__ARM_FEATURE_CRC32");
+ Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
if (Crypto)
- Builder.defineMacro("__ARM_FEATURE_CRYPTO");
+ Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+
+ if (Unaligned)
+ Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
+
+ if (V8_1A)
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
@@ -5089,15 +5385,15 @@ public:
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool hasFeature(StringRef Feature) const override {
return Feature == "aarch64" ||
Feature == "arm64" ||
+ Feature == "arm" ||
(Feature == "neon" && FPU == NeonMode);
}
@@ -5106,16 +5402,23 @@ public:
FPU = FPUMode;
CRC = 0;
Crypto = 0;
- for (unsigned i = 0, e = Features.size(); i != e; ++i) {
- if (Features[i] == "+neon")
+ Unaligned = 1;
+ V8_1A = 0;
+
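+    // Fold the backend feature strings into the cached TargetInfo flags.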
+ for (const auto &Feature : Features) {
+ if (Feature == "+neon")
FPU = NeonMode;
- if (Features[i] == "+crc")
+ if (Feature == "+crc")
CRC = 1;
- if (Features[i] == "+crypto")
+ if (Feature == "+crypto")
Crypto = 1;
+ if (Feature == "+strict-align")
+ Unaligned = 0;
+ if (Feature == "+v8.1a")
+ V8_1A = 1;
}
- setDescriptionString();
+ setDataLayoutString();
return true;
}
@@ -5126,10 +5429,8 @@ public:
return TargetInfo::AArch64ABIBuiltinVaList;
}
- void getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
@@ -5239,10 +5540,8 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
"v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
};
-void AArch64TargetInfo::getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
@@ -5254,28 +5553,26 @@ const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
// don't want to substitute one of these for a different-sized one.
};
-void AArch64TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
}
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsAArch64.def"
};
class AArch64leTargetInfo : public AArch64TargetInfo {
- void setDescriptionString() override {
+ void setDataLayoutString() override {
if (getTriple().isOSBinFormatMachO())
- DescriptionString = "e-m:o-i64:64-i128:128-n32:64-S128";
+ DataLayoutString = "e-m:o-i64:64-i128:128-n32:64-S128";
else
- DescriptionString = "e-m:e-i64:64-i128:128-n32:64-S128";
+ DataLayoutString = "e-m:e-i64:64-i128:128-n32:64-S128";
}
public:
@@ -5291,9 +5588,9 @@ public:
};
class AArch64beTargetInfo : public AArch64TargetInfo {
- void setDescriptionString() override {
+ void setDataLayoutString() override {
assert(!getTriple().isOSBinFormatMachO());
- DescriptionString = "E-m:e-i64:64-i128:128-n32:64-S128";
+ DataLayoutString = "E-m:e-i64:64-i128:128-n32:64-S128";
}
public:
@@ -5347,20 +5644,32 @@ class HexagonTargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
std::string CPU;
+ bool HasHVX, HasHVXDouble;
+
public:
HexagonTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
BigEndian = false;
- DescriptionString = "e-m:e-p:32:32-i1:32-i64:64-a:0-n32";
+ DataLayoutString = "e-m:e-p:32:32:32-"
+ "i64:64:64-i32:32:32-i16:16:16-i1:8:8-"
+ "f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-n16:32";
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
// {} in inline assembly are packet specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
+
+ LargeArrayMinWidth = 64;
+ LargeArrayAlign = 64;
+ UseBitFieldTypeAlignment = true;
+ ZeroLengthBitfieldBoundary = 32;
+ HasHVX = HasHVXDouble = false;
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+            clang::Hexagon::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool validateAsmConstraint(const char *&Name,
@@ -5371,17 +5680,28 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
+ bool isCLZForZeroUndef() const override { return false; }
+
bool hasFeature(StringRef Feature) const override {
- return Feature == "hexagon";
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("hexagon", true)
+ .Case("hvx", HasHVX)
+ .Case("hvx-double", HasHVXDouble)
+ .Default(false);
}
+ bool initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU, const std::vector<std::string> &FeaturesVec)
+ const override;
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
const char *getClobbers() const override {
return "";
}
@@ -5390,71 +5710,77 @@ public:
return llvm::StringSwitch<const char*>(Name)
.Case("hexagonv4", "4")
.Case("hexagonv5", "5")
+ .Case("hexagonv55", "55")
+ .Case("hexagonv60", "60")
.Default(nullptr);
}
bool setCPU(const std::string &Name) override {
if (!getHexagonCPUSuffix(Name))
return false;
-
CPU = Name;
return true;
}
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ return RegNo < 2 ? RegNo : -1;
+ }
};
void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- Builder.defineMacro("qdsp6");
- Builder.defineMacro("__qdsp6", "1");
+ MacroBuilder &Builder) const {
Builder.defineMacro("__qdsp6__", "1");
-
- Builder.defineMacro("hexagon");
- Builder.defineMacro("__hexagon", "1");
Builder.defineMacro("__hexagon__", "1");
- if(CPU == "hexagonv1") {
- Builder.defineMacro("__HEXAGON_V1__");
- Builder.defineMacro("__HEXAGON_ARCH__", "1");
- if(Opts.HexagonQdsp6Compat) {
- Builder.defineMacro("__QDSP6_V1__");
- Builder.defineMacro("__QDSP6_ARCH__", "1");
- }
- }
- else if(CPU == "hexagonv2") {
- Builder.defineMacro("__HEXAGON_V2__");
- Builder.defineMacro("__HEXAGON_ARCH__", "2");
- if(Opts.HexagonQdsp6Compat) {
- Builder.defineMacro("__QDSP6_V2__");
- Builder.defineMacro("__QDSP6_ARCH__", "2");
- }
- }
- else if(CPU == "hexagonv3") {
- Builder.defineMacro("__HEXAGON_V3__");
- Builder.defineMacro("__HEXAGON_ARCH__", "3");
- if(Opts.HexagonQdsp6Compat) {
- Builder.defineMacro("__QDSP6_V3__");
- Builder.defineMacro("__QDSP6_ARCH__", "3");
- }
- }
- else if(CPU == "hexagonv4") {
+ if (CPU == "hexagonv4") {
Builder.defineMacro("__HEXAGON_V4__");
Builder.defineMacro("__HEXAGON_ARCH__", "4");
- if(Opts.HexagonQdsp6Compat) {
+ if (Opts.HexagonQdsp6Compat) {
Builder.defineMacro("__QDSP6_V4__");
Builder.defineMacro("__QDSP6_ARCH__", "4");
}
- }
- else if(CPU == "hexagonv5") {
+ } else if (CPU == "hexagonv5") {
Builder.defineMacro("__HEXAGON_V5__");
Builder.defineMacro("__HEXAGON_ARCH__", "5");
    if (Opts.HexagonQdsp6Compat) {
Builder.defineMacro("__QDSP6_V5__");
Builder.defineMacro("__QDSP6_ARCH__", "5");
}
+ } else if (CPU == "hexagonv60") {
+ Builder.defineMacro("__HEXAGON_V60__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "60");
+ Builder.defineMacro("__QDSP6_V60__");
+ Builder.defineMacro("__QDSP6_ARCH__", "60");
}
}
-const char * const HexagonTargetInfo::GCCRegNames[] = {
+bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
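+  // hvx-double implies hvx; disabling hvx also disables hvx-double.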
+ for (auto &F : Features) {
+ if (F == "+hvx")
+ HasHVX = true;
+ else if (F == "-hvx")
+ HasHVX = HasHVXDouble = false;
+ else if (F == "+hvx-double")
+ HasHVX = HasHVXDouble = true;
+ else if (F == "-hvx-double")
+ HasHVXDouble = false;
+ }
+ return true;
+}
+
+bool HexagonTargetInfo::initFeatureMap(llvm::StringMap<bool> &Features,
+ DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ // Default for v60: -hvx, -hvx-double.
+ Features["hvx"] = false;
+ Features["hvx-double"] = false;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
+const char *const HexagonTargetInfo::GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
@@ -5463,30 +5789,26 @@ const char * const HexagonTargetInfo::GCCRegNames[] = {
"sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp"
};
-void HexagonTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char*> HexagonTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
-
const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
{ { "sp" }, "r29" },
{ { "fp" }, "r30" },
{ { "lr" }, "r31" },
- };
+};
-void HexagonTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ArrayRef<TargetInfo::GCCRegAlias> HexagonTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
}
const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsHexagon.def"
};
@@ -5526,17 +5848,15 @@ public:
.Default(false);
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
// FIXME: Implement!
+ return None;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
// FIXME: Implement!
@@ -5556,6 +5876,80 @@ public:
// FIXME: Implement!
return "";
}
+
+  // No SPARC v7 for now; the backend doesn't support it anyway.
+ enum CPUKind {
+ CK_GENERIC,
+ CK_V8,
+ CK_SUPERSPARC,
+ CK_SPARCLITE,
+ CK_F934,
+ CK_HYPERSPARC,
+ CK_SPARCLITE86X,
+ CK_SPARCLET,
+ CK_TSC701,
+ CK_V9,
+ CK_ULTRASPARC,
+ CK_ULTRASPARC3,
+ CK_NIAGARA,
+ CK_NIAGARA2,
+ CK_NIAGARA3,
+ CK_NIAGARA4
+ } CPU = CK_GENERIC;
+
+ enum CPUGeneration {
+ CG_V8,
+ CG_V9,
+ };
+
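+  // Map each CPU to its architecture generation; SparcV8TargetInfo uses this
+  // to decide between the __sparcv8 and __sparcv9 families of defines.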
+ CPUGeneration getCPUGeneration(CPUKind Kind) const {
+ switch (Kind) {
+ case CK_GENERIC:
+ case CK_V8:
+ case CK_SUPERSPARC:
+ case CK_SPARCLITE:
+ case CK_F934:
+ case CK_HYPERSPARC:
+ case CK_SPARCLITE86X:
+ case CK_SPARCLET:
+ case CK_TSC701:
+ return CG_V8;
+ case CK_V9:
+ case CK_ULTRASPARC:
+ case CK_ULTRASPARC3:
+ case CK_NIAGARA:
+ case CK_NIAGARA2:
+ case CK_NIAGARA3:
+ case CK_NIAGARA4:
+ return CG_V9;
+ }
+ llvm_unreachable("Unexpected CPU kind");
+ }
+
+ CPUKind getCPUKind(StringRef Name) const {
+ return llvm::StringSwitch<CPUKind>(Name)
+ .Case("v8", CK_V8)
+ .Case("supersparc", CK_SUPERSPARC)
+ .Case("sparclite", CK_SPARCLITE)
+ .Case("f934", CK_F934)
+ .Case("hypersparc", CK_HYPERSPARC)
+ .Case("sparclite86x", CK_SPARCLITE86X)
+ .Case("sparclet", CK_SPARCLET)
+ .Case("tsc701", CK_TSC701)
+ .Case("v9", CK_V9)
+ .Case("ultrasparc", CK_ULTRASPARC)
+ .Case("ultrasparc3", CK_ULTRASPARC3)
+ .Case("niagara", CK_NIAGARA)
+ .Case("niagara2", CK_NIAGARA2)
+ .Case("niagara3", CK_NIAGARA3)
+ .Case("niagara4", CK_NIAGARA4)
+ .Default(CK_GENERIC);
+ }
+
+ bool setCPU(const std::string &Name) override {
+ CPU = getCPUKind(Name);
+ return CPU != CK_GENERIC;
+ }
};
const char * const SparcTargetInfo::GCCRegNames[] = {
@@ -5565,10 +5959,8 @@ const char * const SparcTargetInfo::GCCRegNames[] = {
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
};
-void SparcTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char *> SparcTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = {
@@ -5606,33 +5998,48 @@ const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = {
{ { "i7" }, "r31" },
};
-void SparcTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ArrayRef<TargetInfo::GCCRegAlias> SparcTargetInfo::getGCCRegAliases() const {
+ return llvm::makeArrayRef(GCCRegAliases);
}
// SPARC v8 is the 32-bit mode selected by Triple::sparc.
class SparcV8TargetInfo : public SparcTargetInfo {
public:
SparcV8TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
- DescriptionString = "E-m:e-p:32:32-i64:64-f128:64-n32-S64";
- // NetBSD uses long (same as llvm default); everyone else uses int.
- if (getTriple().getOS() == llvm::Triple::NetBSD) {
- SizeType = UnsignedLong;
- IntPtrType = SignedLong;
- PtrDiffType = SignedLong;
- } else {
+ DataLayoutString = "E-m:e-p:32:32-i64:64-f128:64-n32-S64";
+ // NetBSD / OpenBSD use long (same as llvm default); everyone else uses int.
+ switch (getTriple().getOS()) {
+ default:
SizeType = UnsignedInt;
IntPtrType = SignedInt;
PtrDiffType = SignedInt;
+ break;
+ case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ break;
}
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
SparcTargetInfo::getTargetDefines(Opts, Builder);
- Builder.defineMacro("__sparcv8");
+ switch (getCPUGeneration(CPU)) {
+ case CG_V8:
+ Builder.defineMacro("__sparcv8");
+ if (getTriple().getOS() != llvm::Triple::Solaris)
+ Builder.defineMacro("__sparcv8__");
+ break;
+ case CG_V9:
+ Builder.defineMacro("__sparcv9");
+ if (getTriple().getOS() != llvm::Triple::Solaris) {
+ Builder.defineMacro("__sparcv9__");
+ Builder.defineMacro("__sparc_v9__");
+ }
+ break;
+ }
}
};
@@ -5640,7 +6047,7 @@ public:
class SparcV8elTargetInfo : public SparcV8TargetInfo {
public:
SparcV8elTargetInfo(const llvm::Triple &Triple) : SparcV8TargetInfo(Triple) {
- DescriptionString = "e-m:e-p:32:32-i64:64-f128:64-n32-S64";
+ DataLayoutString = "e-m:e-p:32:32-i64:64-f128:64-n32-S64";
BigEndian = false;
}
};
@@ -5650,7 +6057,7 @@ class SparcV9TargetInfo : public SparcTargetInfo {
public:
SparcV9TargetInfo(const llvm::Triple &Triple) : SparcTargetInfo(Triple) {
// FIXME: Support Sparc quad-precision long double?
- DescriptionString = "E-m:e-i64:64-n32:64-S128";
+ DataLayoutString = "E-m:e-i64:64-n32:64-S128";
// This is an LP64 platform.
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
@@ -5683,19 +6090,9 @@ public:
}
bool setCPU(const std::string &Name) override {
- bool CPUKnown = llvm::StringSwitch<bool>(Name)
- .Case("v9", true)
- .Case("ultrasparc", true)
- .Case("ultrasparc3", true)
- .Case("niagara", true)
- .Case("niagara2", true)
- .Case("niagara3", true)
- .Case("niagara4", true)
- .Default(false);
-
- // No need to store the CPU yet. There aren't any CPU-specific
- // macros to define.
- return CPUKnown;
+ if (!SparcTargetInfo::setCPU(Name))
+ return false;
+ return getCPUGeneration(CPU) == CG_V9;
}
};
@@ -5708,7 +6105,8 @@ class SystemZTargetInfo : public TargetInfo {
public:
SystemZTargetInfo(const llvm::Triple &Triple)
- : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false), HasVector(false) {
+ : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false),
+ HasVector(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
TLSSupported = true;
@@ -5720,7 +6118,7 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEquad;
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
- DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64";
+ DataLayoutString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64";
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
void getTargetDefines(const LangOptions &Opts,
@@ -5734,19 +6132,15 @@ public:
if (Opts.ZVector)
Builder.defineMacro("__VEC__", "10301");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::SystemZ::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+            clang::SystemZ::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
- void getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
// No aliases.
- Aliases = nullptr;
- NumAliases = 0;
+ return None;
}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -5768,29 +6162,33 @@ public:
return CPUKnown;
}
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
if (CPU == "zEC12")
Features["transactional-execution"] = true;
if (CPU == "z13") {
Features["transactional-execution"] = true;
Features["vector"] = true;
}
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override {
HasTransactionalExecution = false;
- for (unsigned i = 0, e = Features.size(); i != e; ++i) {
- if (Features[i] == "+transactional-execution")
+ for (const auto &Feature : Features) {
+ if (Feature == "+transactional-execution")
HasTransactionalExecution = true;
- if (Features[i] == "+vector")
+ else if (Feature == "+vector")
HasVector = true;
}
// If we use the vector ABI, vector types are 64-bit aligned.
if (HasVector) {
MaxVectorAlign = 64;
- DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
- "-v128:64-a:8:16-n32:64";
+ DataLayoutString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
+ "-v128:64-a:8:16-n32:64";
}
return true;
}
@@ -5816,7 +6214,7 @@ public:
const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsSystemZ.def"
};
@@ -5827,10 +6225,8 @@ const char *const SystemZTargetInfo::GCCRegNames[] = {
"f8", "f10", "f12", "f14", "f9", "f11", "f13", "f15"
};
-void SystemZTargetInfo::getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ArrayRef<const char *> SystemZTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
}
bool SystemZTargetInfo::
@@ -5862,157 +6258,147 @@ validateAsmConstraint(const char *&Name,
}
}
- class MSP430TargetInfo : public TargetInfo {
- static const char * const GCCRegNames[];
- public:
- MSP430TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
- BigEndian = false;
- TLSSupported = false;
- IntWidth = 16; IntAlign = 16;
- LongWidth = 32; LongLongWidth = 64;
- LongAlign = LongLongAlign = 16;
- PointerWidth = 16; PointerAlign = 16;
- SuitableAlign = 16;
- SizeType = UnsignedInt;
- IntMaxType = SignedLongLong;
- IntPtrType = SignedInt;
- PtrDiffType = SignedInt;
- SigAtomicType = SignedLong;
- DescriptionString = "e-m:e-p:16:16-i32:16:32-a:16-n8:16";
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- Builder.defineMacro("MSP430");
- Builder.defineMacro("__MSP430__");
- // FIXME: defines for different 'flavours' of MCU
- }
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- // FIXME: Implement.
- Records = nullptr;
- NumRecords = 0;
- }
- bool hasFeature(StringRef Feature) const override {
- return Feature == "msp430";
- }
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- // No aliases.
- Aliases = nullptr;
- NumAliases = 0;
- }
- bool
- validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
- // FIXME: implement
- switch (*Name) {
- case 'K': // the constant 1
- case 'L': // constant -1^20 .. 1^19
- case 'M': // constant 1-4:
- return true;
- }
- // No target constraints for now.
- return false;
- }
- const char *getClobbers() const override {
- // FIXME: Is this really right?
- return "";
+class MSP430TargetInfo : public TargetInfo {
+ static const char *const GCCRegNames[];
+
+public:
+ MSP430TargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ BigEndian = false;
+ TLSSupported = false;
+ IntWidth = 16;
+ IntAlign = 16;
+ LongWidth = 32;
+ LongLongWidth = 64;
+ LongAlign = LongLongAlign = 16;
+ PointerWidth = 16;
+ PointerAlign = 16;
+ SuitableAlign = 16;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLongLong;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ SigAtomicType = SignedLong;
+ DataLayoutString = "e-m:e-p:16:16-i32:16:32-a:16-n8:16";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("MSP430");
+ Builder.defineMacro("__MSP430__");
+ // FIXME: defines for different 'flavours' of MCU
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ // FIXME: Implement.
+ return None;
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "msp430";
+ }
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ // No aliases.
+ return None;
+ }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ // FIXME: implement
+ switch (*Name) {
+ case 'K': // the constant 1
+ case 'L': // constant -1^20 .. 1^19
+ case 'M': // constant 1-4:
+ return true;
}
- BuiltinVaListKind getBuiltinVaListKind() const override {
- // FIXME: implement
- return TargetInfo::CharPtrBuiltinVaList;
- }
- };
+ // No target constraints for now.
+ return false;
+ }
+ const char *getClobbers() const override {
+ // FIXME: Is this really right?
+ return "";
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ // FIXME: implement
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
- const char * const MSP430TargetInfo::GCCRegNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
- };
+const char *const MSP430TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
+
+ArrayRef<const char *> MSP430TargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+// LLVM and Clang cannot be used directly to output native binaries for the
+// TCE target, but they are used to compile C code to LLVM bitcode with the
+// correct type and alignment information.
+//
+// TCE uses the LLVM bitcode as input and uses it to generate a customized
+// target processor and program binary. The TCE co-design environment is
+// publicly available at http://tce.cs.tut.fi
+
+static const unsigned TCEOpenCLAddrSpaceMap[] = {
+ 3, // opencl_global
+ 4, // opencl_local
+ 5, // opencl_constant
+ // FIXME: generic has to be added to the target
+ 0, // opencl_generic
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0 // cuda_shared
+};
- void MSP430TargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+class TCETargetInfo : public TargetInfo {
+public:
+ TCETargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ TLSSupported = false;
+ IntWidth = 32;
+ LongWidth = LongLongWidth = 32;
+ PointerWidth = 32;
+ IntAlign = 32;
+ LongAlign = LongLongAlign = 32;
+ PointerAlign = 32;
+ SuitableAlign = 32;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 32;
+ DoubleAlign = 32;
+ LongDoubleWidth = 32;
+ LongDoubleAlign = 32;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEsingle;
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle;
+ DataLayoutString = "E-p:32:32-i8:8:32-i16:16:32-i64:32"
+ "-f64:32-v64:32-v128:32-a:0:32-n32";
+ AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
}
- // LLVM and Clang cannot be used directly to output native binaries for
- // target, but is used to compile C code to llvm bitcode with correct
- // type and alignment information.
- //
- // TCE uses the llvm bitcode as input and uses it for generating customized
- // target processor and program binary. TCE co-design environment is
- // publicly available in http://tce.cs.tut.fi
-
- static const unsigned TCEOpenCLAddrSpaceMap[] = {
- 3, // opencl_global
- 4, // opencl_local
- 5, // opencl_constant
- // FIXME: generic has to be added to the target
- 0, // opencl_generic
- 0, // cuda_device
- 0, // cuda_constant
- 0 // cuda_shared
- };
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "tce", Opts);
+ Builder.defineMacro("__TCE__");
+ Builder.defineMacro("__TCE_V1__");
+ }
+ bool hasFeature(StringRef Feature) const override { return Feature == "tce"; }
- class TCETargetInfo : public TargetInfo{
- public:
- TCETargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
- TLSSupported = false;
- IntWidth = 32;
- LongWidth = LongLongWidth = 32;
- PointerWidth = 32;
- IntAlign = 32;
- LongAlign = LongLongAlign = 32;
- PointerAlign = 32;
- SuitableAlign = 32;
- SizeType = UnsignedInt;
- IntMaxType = SignedLong;
- IntPtrType = SignedInt;
- PtrDiffType = SignedInt;
- FloatWidth = 32;
- FloatAlign = 32;
- DoubleWidth = 32;
- DoubleAlign = 32;
- LongDoubleWidth = 32;
- LongDoubleAlign = 32;
- FloatFormat = &llvm::APFloat::IEEEsingle;
- DoubleFormat = &llvm::APFloat::IEEEsingle;
- LongDoubleFormat = &llvm::APFloat::IEEEsingle;
- DescriptionString = "E-p:32:32-i8:8:32-i16:16:32-i64:32"
- "-f64:32-v64:32-v128:32-a:0:32-n32";
- AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
- UseAddrSpaceMapMangling = true;
- }
-
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- DefineStd(Builder, "tce", Opts);
- Builder.defineMacro("__TCE__");
- Builder.defineMacro("__TCE_V1__");
- }
- bool hasFeature(StringRef Feature) const override {
- return Feature == "tce";
- }
-
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {}
- const char *getClobbers() const override {
- return "";
- }
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::VoidPtrBuiltinVaList;
- }
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {}
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override{
- return true;
- }
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {}
- };
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ const char *getClobbers() const override { return ""; }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+};
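
Aside — a minimal caller-side sketch of the ArrayRef migration this hunk performs (not part of the patch; assumes only llvm/ADT/ArrayRef.h from an LLVM checkout). Hooks that used to fill (Names, NumNames) out-parameters now return a single self-describing llvm::ArrayRef:

    #include "llvm/ADT/ArrayRef.h"
    #include <cstdio>

    // New style: one value carrying its own length, instead of a
    // pointer/count pair the caller must keep in sync.
    static void printRegNames(llvm::ArrayRef<const char *> Names) {
      for (const char *Name : Names)
        std::printf("%s\n", Name);
    }

    int main() {
      static const char *const GCCRegNames[] = {"r0", "r1", "sp", "lr"};
      printRegNames(llvm::makeArrayRef(GCCRegNames)); // length deduced from the array
      printRegNames(llvm::None); // empty set; no nullptr/0 pair needed
      return 0;
    }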
class BPFTargetInfo : public TargetInfo {
public:
@@ -6026,10 +6412,10 @@ public:
RegParmMax = 5;
if (Triple.getArch() == llvm::Triple::bpfeb) {
BigEndian = true;
- DescriptionString = "E-m:e-p:64:64-i64:64-n32:64-S128";
+ DataLayoutString = "E-m:e-p:64:64-i64:64-n32:64-S128";
} else {
BigEndian = false;
- DescriptionString = "e-m:e-p:64:64-i64:64-n32:64-S128";
+ DataLayoutString = "e-m:e-p:64:64-i64:64-n32:64-S128";
}
MaxAtomicPromoteWidth = 64;
MaxAtomicInlineWidth = 64;
@@ -6044,32 +6430,27 @@ public:
return Feature == "bpf";
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {}
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
const char *getClobbers() const override {
return "";
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {
- Names = nullptr;
- NumNames = 0;
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return None;
}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
return true;
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- Aliases = nullptr;
- NumAliases = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
}
};
class MipsTargetInfoBase : public TargetInfo {
- virtual void setDescriptionString() = 0;
+ virtual void setDataLayoutString() = 0;
static const Builtin::Info BuiltinInfo[];
std::string CPU;
@@ -6132,14 +6513,19 @@ public:
.Case("mips64r5", true)
.Case("mips64r6", true)
.Case("octeon", true)
+ .Case("p5600", true)
.Default(false);
}
const std::string& getCPU() const { return CPU; }
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
if (CPU == "octeon")
Features["mips64r2"] = Features["cnmips"] = true;
else
Features[CPU] = true;
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
void getTargetDefines(const LangOptions &Opts,
@@ -6199,12 +6585,17 @@ public:
Builder.defineMacro("_MIPS_ARCH", "\"" + CPU + "\"");
Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper());
+
+ // These shouldn't be defined for MIPS-I, but there's no need to check
+ // for that, since MIPS-I isn't supported.
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool hasFeature(StringRef Feature) const override {
return llvm::StringSwitch<bool>(Feature)
@@ -6215,8 +6606,7 @@ public:
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {
+ ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
// CPU register names
// Must match second column of GCCRegAliases
@@ -6241,11 +6631,9 @@ public:
"$msair", "$msacsr", "$msaaccess", "$msasave", "$msamodify",
"$msarequest", "$msamap", "$msaunmap"
};
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ return llvm::makeArrayRef(GCCRegNames);
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override = 0;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
@@ -6331,33 +6719,32 @@ public:
DspRev = NoDSP;
HasFP64 = isFP64Default();
- for (std::vector<std::string>::iterator it = Features.begin(),
- ie = Features.end(); it != ie; ++it) {
- if (*it == "+single-float")
+ for (const auto &Feature : Features) {
+ if (Feature == "+single-float")
IsSingleFloat = true;
- else if (*it == "+soft-float")
+ else if (Feature == "+soft-float")
FloatABI = SoftFloat;
- else if (*it == "+mips16")
+ else if (Feature == "+mips16")
IsMips16 = true;
- else if (*it == "+micromips")
+ else if (Feature == "+micromips")
IsMicromips = true;
- else if (*it == "+dsp")
+ else if (Feature == "+dsp")
DspRev = std::max(DspRev, DSP1);
- else if (*it == "+dspr2")
+ else if (Feature == "+dspr2")
DspRev = std::max(DspRev, DSP2);
- else if (*it == "+msa")
+ else if (Feature == "+msa")
HasMSA = true;
- else if (*it == "+fp64")
+ else if (Feature == "+fp64")
HasFP64 = true;
- else if (*it == "-fp64")
+ else if (Feature == "-fp64")
HasFP64 = false;
- else if (*it == "+nan2008")
+ else if (Feature == "+nan2008")
IsNan2008 = true;
- else if (*it == "-nan2008")
+ else if (Feature == "-nan2008")
IsNan2008 = false;
}
- setDescriptionString();
+ setDataLayoutString();
return true;
}
@@ -6372,9 +6759,10 @@ public:
};
const Builtin::Info MipsTargetInfoBase::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsMips.def"
};
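
Aside — the BUILTIN/LIBBUILTIN expansions above gain a sixth, nullptr-initialized field. A sketch of the record shape being populated; the field names are an assumption modeled on clang's Builtin::Info of this period, where the new slot carries required target features:

    enum LanguageID { ALL_LANGUAGES }; // stand-in for clang's enum
    struct Info {
      const char *Name;       // #ID
      const char *Type;       // type-encoding string
      const char *Attributes; // attribute-encoding string
      const char *HeaderName; // nullptr unless LIBBUILTIN names a header
      LanguageID Langs;
      const char *Features;   // new slot: required target features, or nullptr
    };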
@@ -6424,8 +6812,7 @@ public:
else
llvm_unreachable("Invalid ABI for Mips32.");
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
{ { "at" }, "$1" },
{ { "v0" }, "$2" },
@@ -6459,14 +6846,13 @@ public:
{ { "fp","$fp" }, "$30" },
{ { "ra" }, "$31" }
};
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ return llvm::makeArrayRef(GCCRegAliases);
}
};
class Mips32EBTargetInfo : public Mips32TargetInfoBase {
- void setDescriptionString() override {
- DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
+ void setDataLayoutString() override {
+ DataLayoutString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
}
public:
@@ -6482,8 +6868,8 @@ public:
};
class Mips32ELTargetInfo : public Mips32TargetInfoBase {
- void setDescriptionString() override {
- DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
+ void setDataLayoutString() override {
+ DataLayoutString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64";
}
public:
@@ -6579,9 +6965,10 @@ public:
}
else
llvm_unreachable("Invalid ABI for Mips64.");
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
{ { "at" }, "$1" },
{ { "v0" }, "$2" },
@@ -6615,19 +7002,18 @@ public:
{ { "fp","$fp" }, "$30" },
{ { "ra" }, "$31" }
};
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ return llvm::makeArrayRef(GCCRegAliases);
}
bool hasInt128Type() const override { return true; }
};
class Mips64EBTargetInfo : public Mips64TargetInfoBase {
- void setDescriptionString() override {
+ void setDataLayoutString() override {
if (ABI == "n32")
- DescriptionString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ DataLayoutString = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
else
- DescriptionString = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ DataLayoutString = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
}
@@ -6643,11 +7029,11 @@ public:
};
class Mips64ELTargetInfo : public Mips64TargetInfoBase {
- void setDescriptionString() override {
+ void setDataLayoutString() override {
if (ABI == "n32")
- DescriptionString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ DataLayoutString = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128";
else
- DescriptionString = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
+ DataLayoutString = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128";
}
public:
Mips64ELTargetInfo(const llvm::Triple &Triple)
@@ -6683,8 +7069,6 @@ public:
this->RegParmMax = 0; // Disallow regparm
}
- void getDefaultFeatures(llvm::StringMap<bool> &Features) const override {
- }
void getArchDefines(const LangOptions &Opts, MacroBuilder &Builder) const {
Builder.defineMacro("__le32__");
Builder.defineMacro("__pnacl__");
@@ -6696,16 +7080,12 @@ public:
bool hasFeature(StringRef Feature) const override {
return Feature == "pnacl";
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::PNaClABIBuiltinVaList;
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override;
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
return false;
@@ -6716,24 +7096,19 @@ public:
}
};
-void PNaClTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = nullptr;
- NumNames = 0;
+ArrayRef<const char *> PNaClTargetInfo::getGCCRegNames() const {
+ return None;
}
-void PNaClTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- Aliases = nullptr;
- NumAliases = 0;
+ArrayRef<TargetInfo::GCCRegAlias> PNaClTargetInfo::getGCCRegAliases() const {
+ return None;
}
// We attempt to use PNaCl (le32) frontend and Mips32EL backend.
class NaClMips32ELTargetInfo : public Mips32ELTargetInfo {
public:
NaClMips32ELTargetInfo(const llvm::Triple &Triple) :
- Mips32ELTargetInfo(Triple) {
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
+ Mips32ELTargetInfo(Triple) {
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -6750,8 +7125,7 @@ public:
NoAsmVariants = true;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
- DescriptionString =
- "e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128";
+ DataLayoutString = "e-m:e-v128:32-v16:16-v32:32-v96:32-n8:16:32:64-S128";
}
void getTargetDefines(const LangOptions &Opts,
@@ -6760,24 +7134,19 @@ public:
defineCPUMacros(Builder, "le64", /*Tuning=*/false);
Builder.defineMacro("__ELF__");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::Le64::LastTSBuiltin - Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::Le64::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::PNaClABIBuiltinVaList;
}
const char *getClobbers() const override { return ""; }
- void getGCCRegNames(const char *const *&Names,
- unsigned &NumNames) const override {
- Names = nullptr;
- NumNames = 0;
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return None;
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- Aliases = nullptr;
- NumAliases = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
@@ -6786,107 +7155,246 @@ public:
bool hasProtectedVisibility() const override { return false; }
};
-} // end anonymous namespace.
+
+class WebAssemblyTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+ enum SIMDEnum {
+ NoSIMD,
+ SIMD128,
+ } SIMDLevel;
+
+public:
+ explicit WebAssemblyTargetInfo(const llvm::Triple &T)
+ : TargetInfo(T), SIMDLevel(NoSIMD) {
+ BigEndian = false;
+ NoAsmVariants = true;
+ SuitableAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
+ SimdDefaultAlign = 128;
+ SigAtomicType = SignedLong;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
+ if (SIMDLevel >= SIMD128)
+ Builder.defineMacro("__wasm_simd128__");
+ }
+
+private:
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
+ if (CPU == "bleeding-edge")
+ Features["simd128"] = true;
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+ bool hasFeature(StringRef Feature) const final {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("simd128", SIMDLevel >= SIMD128)
+ .Default(false);
+ }
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) final {
+ for (const auto &Feature : Features) {
+ if (Feature == "+simd128") {
+ SIMDLevel = std::max(SIMDLevel, SIMD128);
+ continue;
+ }
+ if (Feature == "-simd128") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
+ continue;
+ }
+
+ Diags.Report(diag::err_opt_not_valid_with_opt) << Feature
+ << "-target-feature";
+ return false;
+ }
+ return true;
+ }
+ bool setCPU(const std::string &Name) final {
+ return llvm::StringSwitch<bool>(Name)
+ .Case("mvp", true)
+ .Case("bleeding-edge", true)
+ .Case("generic", true)
+ .Default(false);
+ }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const final {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::WebAssembly::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const final {
+ return VoidPtrBuiltinVaList;
+ }
+ ArrayRef<const char *> getGCCRegNames() const final {
+ return None;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const final {
+ return None;
+ }
+ bool
+ validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const final {
+ return false;
+ }
+ const char *getClobbers() const final { return ""; }
+ bool isCLZForZeroUndef() const final { return false; }
+ bool hasInt128Type() const final { return true; }
+ IntType getIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // WebAssembly prefers long long for explicitly 64-bit integers.
+ return BitWidth == 64 ? (IsSigned ? SignedLongLong : UnsignedLongLong)
+ : TargetInfo::getIntTypeByWidth(BitWidth, IsSigned);
+ }
+ IntType getLeastIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // WebAssembly uses long long for int_least64_t and int_fast64_t.
+ return BitWidth == 64
+ ? (IsSigned ? SignedLongLong : UnsignedLongLong)
+ : TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
+ }
+};
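
Aside — a standalone sketch (not part of the patch) of the std::max/std::min clamp idiom handleTargetFeatures uses above, which makes "+simd128"/"-simd128" compose correctly regardless of the level already set:

    #include <algorithm>
    #include <cstdio>

    enum SIMDEnum { NoSIMD, SIMD128 };

    int main() {
      SIMDEnum Level = NoSIMD;
      Level = std::max(Level, SIMD128);               // "+simd128": raise the level
      Level = std::min(Level, SIMDEnum(SIMD128 - 1)); // "-simd128": drop below SIMD128
      std::printf("%d\n", Level);                     // prints 0 (NoSIMD)
      return 0;
    }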
+
+const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#include "clang/Basic/BuiltinsWebAssembly.def"
+};
+
+class WebAssembly32TargetInfo : public WebAssemblyTargetInfo {
+public:
+ explicit WebAssembly32TargetInfo(const llvm::Triple &T)
+ : WebAssemblyTargetInfo(T) {
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ DataLayoutString = "e-p:32:32-i64:64-n32:64-S128";
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WebAssemblyTargetInfo::getTargetDefines(Opts, Builder);
+ defineCPUMacros(Builder, "wasm32", /*Tuning=*/false);
+ }
+};
+
+class WebAssembly64TargetInfo : public WebAssemblyTargetInfo {
+public:
+ explicit WebAssembly64TargetInfo(const llvm::Triple &T)
+ : WebAssemblyTargetInfo(T) {
+ LongAlign = LongWidth = 64;
+ PointerAlign = PointerWidth = 64;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ DataLayoutString = "e-p:64:64-i64:64-n32:64-S128";
+ }
+
+protected:
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WebAssemblyTargetInfo::getTargetDefines(Opts, Builder);
+ defineCPUMacros(Builder, "wasm64", /*Tuning=*/false);
+ }
+};
const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsLe64.def"
};
-namespace {
- static const unsigned SPIRAddrSpaceMap[] = {
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 4, // opencl_generic
- 0, // cuda_device
- 0, // cuda_constant
- 0 // cuda_shared
- };
- class SPIRTargetInfo : public TargetInfo {
- public:
- SPIRTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
- assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
- "SPIR target must use unknown OS");
- assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
- "SPIR target must use unknown environment type");
- BigEndian = false;
- TLSSupported = false;
- LongWidth = LongAlign = 64;
- AddrSpaceMap = &SPIRAddrSpaceMap;
- UseAddrSpaceMapMangling = true;
- // Define available target features
- // These must be defined in sorted order!
- NoAsmVariants = true;
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- DefineStd(Builder, "SPIR", Opts);
- }
- bool hasFeature(StringRef Feature) const override {
- return Feature == "spir";
- }
-
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {}
- const char *getClobbers() const override {
- return "";
- }
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {}
- bool
- validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
- return true;
- }
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {}
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::VoidPtrBuiltinVaList;
- }
+static const unsigned SPIRAddrSpaceMap[] = {
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0 // cuda_shared
+};
+class SPIRTargetInfo : public TargetInfo {
+public:
+ SPIRTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {
+ assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
+ "SPIR target must use unknown OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "SPIR target must use unknown environment type");
+ BigEndian = false;
+ TLSSupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &SPIRAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ // Define available target features
+ // These must be defined in sorted order!
+ NoAsmVariants = true;
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR", Opts);
+ }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "spir";
+ }
- CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
- return (CC == CC_SpirFunction ||
- CC == CC_SpirKernel) ? CCCR_OK : CCCR_Warning;
- }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ const char *getClobbers() const override { return ""; }
+ ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
- CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
- return CC_SpirFunction;
- }
- };
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ return (CC == CC_SpirFunction || CC == CC_SpirKernel) ? CCCR_OK
+ : CCCR_Warning;
+ }
+ CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ return CC_SpirFunction;
+ }
+};
- class SPIR32TargetInfo : public SPIRTargetInfo {
- public:
- SPIR32TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
- PointerWidth = PointerAlign = 32;
- SizeType = TargetInfo::UnsignedInt;
- PtrDiffType = IntPtrType = TargetInfo::SignedInt;
- DescriptionString
- = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024";
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- DefineStd(Builder, "SPIR32", Opts);
- }
- };
+class SPIR32TargetInfo : public SPIRTargetInfo {
+public:
+ SPIR32TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 32;
+ SizeType = TargetInfo::UnsignedInt;
+ PtrDiffType = IntPtrType = TargetInfo::SignedInt;
+ DataLayoutString = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR32", Opts);
+ }
+};
- class SPIR64TargetInfo : public SPIRTargetInfo {
- public:
- SPIR64TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
- PointerWidth = PointerAlign = 64;
- SizeType = TargetInfo::UnsignedLong;
- PtrDiffType = IntPtrType = TargetInfo::SignedLong;
- DescriptionString = "e-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024";
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override {
- DefineStd(Builder, "SPIR64", Opts);
- }
- };
+class SPIR64TargetInfo : public SPIRTargetInfo {
+public:
+ SPIR64TargetInfo(const llvm::Triple &Triple) : SPIRTargetInfo(Triple) {
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = IntPtrType = TargetInfo::SignedLong;
+ DataLayoutString = "e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "SPIR64", Opts);
+ }
+};
class XCoreTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
@@ -6903,17 +7411,16 @@ public:
WCharType = UnsignedChar;
WIntType = UnsignedInt;
UseZeroLengthBitfieldAlignment = true;
- DescriptionString = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32"
- "-f64:32-a:0:32-n32";
+ DataLayoutString = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32"
+ "-f64:32-a:0:32-n32";
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
Builder.defineMacro("__XS1B__");
}
- void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const override {
- Records = BuiltinInfo;
- NumRecords = clang::XCore::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::XCore::LastTSBuiltin-Builtin::FirstTSBuiltin);
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
@@ -6921,19 +7428,15 @@ public:
const char *getClobbers() const override {
return "";
}
- void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const override {
+ ArrayRef<const char *> getGCCRegNames() const override {
static const char * const GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "cp", "dp", "sp", "lr"
};
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
+ return llvm::makeArrayRef(GCCRegNames);
}
- void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const override {
- Aliases = nullptr;
- NumAliases = 0;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
@@ -6946,14 +7449,13 @@ public:
};
const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+ { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#include "clang/Basic/BuiltinsXCore.def"
};
-} // end anonymous namespace.
-namespace {
// x86_32 Android target
class AndroidX86_32TargetInfo : public LinuxTargetInfo<X86_32TargetInfo> {
public:
@@ -6964,9 +7466,7 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEdouble;
}
};
-} // end anonymous namespace
-namespace {
// x86_64 Android target
class AndroidX86_64TargetInfo : public LinuxTargetInfo<X86_64TargetInfo> {
public:
@@ -6981,7 +7481,6 @@ public:
};
} // end anonymous namespace
-
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
@@ -7004,6 +7503,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new DarwinAArch64TargetInfo(Triple);
switch (os) {
+ case llvm::Triple::CloudABI:
+ return new CloudABITargetInfo<AArch64leTargetInfo>(Triple);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple);
case llvm::Triple::Linux:
@@ -7048,6 +7549,10 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new NaClTargetInfo<ARMleTargetInfo>(Triple);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
+ case llvm::Triple::Cygnus:
+ return new CygwinARMTargetInfo(Triple);
+ case llvm::Triple::GNU:
+ return new MinGWARMTargetInfo(Triple);
case llvm::Triple::Itanium:
return new ItaniumWindowsARMleTargetInfo(Triple);
case llvm::Triple::MSVC:
@@ -7322,6 +7827,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new RTEMSX86_32TargetInfo(Triple);
case llvm::Triple::NaCl:
return new NaClTargetInfo<X86_32TargetInfo>(Triple);
+ case llvm::Triple::ELFIAMCU:
+ return new MCUX86_32TargetInfo(Triple);
default:
return new X86_32TargetInfo(Triple);
}
@@ -7357,6 +7864,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return new SolarisTargetInfo<X86_64TargetInfo>(Triple);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
+ case llvm::Triple::Cygnus:
+ return new CygwinX86_64TargetInfo(Triple);
case llvm::Triple::GNU:
return new MinGWX86_64TargetInfo(Triple);
case llvm::Triple::MSVC:
@@ -7384,11 +7893,19 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) {
return nullptr;
return new SPIR64TargetInfo(Triple);
}
+ case llvm::Triple::wasm32:
+ if (!(Triple == llvm::Triple("wasm32-unknown-unknown")))
+ return nullptr;
+ return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple);
+ case llvm::Triple::wasm64:
+ if (!(Triple == llvm::Triple("wasm64-unknown-unknown")))
+ return nullptr;
+ return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple);
}
}
/// CreateTargetInfo - Return the target info object for the specified target
-/// triple.
+/// options.
TargetInfo *
TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
const std::shared_ptr<TargetOptions> &Opts) {
@@ -7423,25 +7940,15 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
llvm::StringMap<bool> Features;
- Target->getDefaultFeatures(Features);
-
- // Apply the user specified deltas.
- for (unsigned I = 0, N = Opts->FeaturesAsWritten.size();
- I < N; ++I) {
- const char *Name = Opts->FeaturesAsWritten[I].c_str();
- // Apply the feature via the target.
- bool Enabled = Name[0] == '+';
- Target->setFeatureEnabled(Features, Name + 1, Enabled);
- }
+ if (!Target->initFeatureMap(Features, Diags, Opts->CPU,
+ Opts->FeaturesAsWritten))
+ return nullptr;
// Add the features to the compile options.
- //
- // FIXME: If we are completely confident that we have the right set, we only
- // need to pass the minuses.
Opts->Features.clear();
- for (llvm::StringMap<bool>::const_iterator it = Features.begin(),
- ie = Features.end(); it != ie; ++it)
- Opts->Features.push_back((it->second ? "+" : "-") + it->first().str());
+ for (const auto &F : Features)
+ Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
+
if (!Target->handleTargetFeatures(Opts->Features, Diags))
return nullptr;
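
Aside — a standalone sketch (not part of the patch) of the new feature flow in CreateTargetInfo shown above: initFeatureMap seeds the CPU defaults and folds in the user's written deltas, and the resulting map is flattened back into "+feat"/"-feat" strings:

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      llvm::StringMap<bool> Features;
      Features["mips64r2"] = true; // default seeded from the CPU name

      // Apply the user's deltas; the last write wins.
      std::vector<std::string> Written = {"+msa", "-fp64"};
      for (const std::string &F : Written)
        Features[llvm::StringRef(F).substr(1)] = (F[0] == '+');

      // Flatten the map back into the option list, as the new code does.
      for (const auto &F : Features)
        std::printf("%s%s\n", F.getValue() ? "+" : "-", F.getKey().str().c_str());
      return 0;
    }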
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 892897fc9cda..a1a67c2bc144 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_371/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
index a36102cf0f5a..cf5a8d681eac 100644
--- a/lib/Basic/VirtualFileSystem.cpp
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Basic/FileManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -19,9 +20,17 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/Config/llvm-config.h"
#include <atomic>
#include <memory>
+// For chdir.
+#ifdef LLVM_ON_WIN32
+# include <direct.h>
+#else
+# include <unistd.h>
+#endif
+
using namespace clang;
using namespace clang::vfs;
using namespace llvm;
@@ -35,12 +44,24 @@ Status::Status(const file_status &Status)
User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
Type(Status.type()), Perms(Status.permissions()), IsVFSMapped(false) {}
-Status::Status(StringRef Name, StringRef ExternalName, UniqueID UID,
- sys::TimeValue MTime, uint32_t User, uint32_t Group,
- uint64_t Size, file_type Type, perms Perms)
+Status::Status(StringRef Name, UniqueID UID, sys::TimeValue MTime,
+ uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
+ perms Perms)
: Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
Type(Type), Perms(Perms), IsVFSMapped(false) {}
+Status Status::copyWithNewName(const Status &In, StringRef NewName) {
+ return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
+ In.getUser(), In.getGroup(), In.getSize(), In.getType(),
+ In.getPermissions());
+}
+
+Status Status::copyWithNewName(const file_status &In, StringRef NewName) {
+ return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
+ In.getUser(), In.getGroup(), In.getSize(), In.type(),
+ In.permissions());
+}
+
bool Status::equivalent(const Status &Other) const {
return getUniqueID() == Other.getUniqueID();
}
@@ -77,6 +98,19 @@ FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
return (*F)->getBuffer(Name, FileSize, RequiresNullTerminator, IsVolatile);
}
+std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
+ auto WorkingDir = getCurrentWorkingDirectory();
+ if (!WorkingDir)
+ return WorkingDir.getError();
+
+ return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
+}
+
+bool FileSystem::exists(const Twine &Path) {
+ auto Status = status(Path);
+ return Status && Status->exists();
+}
+
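Aside — a minimal usage sketch (not part of the patch) for the new FileSystem::exists convenience helper added above, run against the real file system:

    #include "clang/Basic/VirtualFileSystem.h"
    #include <cstdio>

    int main() {
      llvm::IntrusiveRefCntPtr<clang::vfs::FileSystem> FS =
          clang::vfs::getRealFileSystem();
      // exists() is just a status() probe that swallows the error code.
      std::printf("/tmp exists: %d\n", FS->exists("/tmp") ? 1 : 0);
      return 0;
    }
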
//===-----------------------------------------------------------------------===/
// RealFileSystem implementation
//===-----------------------------------------------------------------------===/
@@ -87,19 +121,20 @@ class RealFile : public File {
int FD;
Status S;
friend class RealFileSystem;
- RealFile(int FD) : FD(FD) {
+ RealFile(int FD, StringRef NewName)
+ : FD(FD), S(NewName, {}, {}, {}, {}, {},
+ llvm::sys::fs::file_type::status_error, {}) {
assert(FD >= 0 && "Invalid or inactive file descriptor");
}
public:
~RealFile() override;
ErrorOr<Status> status() override;
- ErrorOr<std::unique_ptr<MemoryBuffer>>
- getBuffer(const Twine &Name, int64_t FileSize = -1,
- bool RequiresNullTerminator = true,
- bool IsVolatile = false) override;
+ ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name,
+ int64_t FileSize,
+ bool RequiresNullTerminator,
+ bool IsVolatile) override;
std::error_code close() override;
- void setName(StringRef Name) override;
};
} // end anonymous namespace
RealFile::~RealFile() { close(); }
@@ -110,9 +145,7 @@ ErrorOr<Status> RealFile::status() {
file_status RealStatus;
if (std::error_code EC = sys::fs::status(FD, RealStatus))
return EC;
- Status NewS(RealStatus);
- NewS.setName(S.getName());
- S = std::move(NewS);
+ S = Status::copyWithNewName(RealStatus, S.getName());
}
return S;
}
@@ -142,10 +175,6 @@ std::error_code RealFile::close() {
return std::error_code();
}
-void RealFile::setName(StringRef Name) {
- S.setName(Name);
-}
-
namespace {
/// \brief The file system according to your operating system.
class RealFileSystem : public FileSystem {
@@ -153,6 +182,9 @@ public:
ErrorOr<Status> status(const Twine &Path) override;
ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
+
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
};
} // end anonymous namespace
@@ -160,9 +192,7 @@ ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
sys::fs::file_status RealStatus;
if (std::error_code EC = sys::fs::status(Path, RealStatus))
return EC;
- Status Result(RealStatus);
- Result.setName(Path.str());
- return Result;
+ return Status::copyWithNewName(RealStatus, Path.str());
}
ErrorOr<std::unique_ptr<File>>
@@ -170,9 +200,29 @@ RealFileSystem::openFileForRead(const Twine &Name) {
int FD;
if (std::error_code EC = sys::fs::openFileForRead(Name, FD))
return EC;
- std::unique_ptr<File> Result(new RealFile(FD));
- Result->setName(Name.str());
- return std::move(Result);
+ return std::unique_ptr<File>(new RealFile(FD, Name.str()));
+}
+
+llvm::ErrorOr<std::string> RealFileSystem::getCurrentWorkingDirectory() const {
+ SmallString<256> Dir;
+ if (std::error_code EC = llvm::sys::fs::current_path(Dir))
+ return EC;
+ return Dir.str().str();
+}
+
+std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
+ // FIXME: chdir is thread hostile; on the other hand, creating the same
+ // behavior as chdir is complex: chdir resolves the path once, thus
+ // guaranteeing that all subsequent relative path operations work
+ // on the same path the original chdir resulted in. This makes a
+ // difference for example on network filesystems, where symlinks might be
+ // switched during runtime of the tool. Fixing this depends on having a
+ // file system abstraction that allows openat() style interactions.
+ SmallString<256> Storage;
+ StringRef Dir = Path.toNullTerminatedStringRef(Storage);
+ if (int Err = ::chdir(Dir.data()))
+ return std::error_code(Err, std::generic_category());
+ return std::error_code();
}
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
@@ -190,10 +240,8 @@ public:
if (!EC && Iter != llvm::sys::fs::directory_iterator()) {
llvm::sys::fs::file_status S;
EC = Iter->status(S);
- if (!EC) {
- CurrentEntry = Status(S);
- CurrentEntry.setName(Iter->path());
- }
+ if (!EC)
+ CurrentEntry = Status::copyWithNewName(S, Iter->path());
}
}
@@ -207,8 +255,7 @@ public:
} else {
llvm::sys::fs::file_status S;
EC = Iter->status(S);
- CurrentEntry = Status(S);
- CurrentEntry.setName(Iter->path());
+ CurrentEntry = Status::copyWithNewName(S, Iter->path());
}
return EC;
}
@@ -224,11 +271,14 @@ directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
// OverlayFileSystem implementation
//===-----------------------------------------------------------------------===/
OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
- pushOverlay(BaseFS);
+ FSList.push_back(BaseFS);
}
void OverlayFileSystem::pushOverlay(IntrusiveRefCntPtr<FileSystem> FS) {
FSList.push_back(FS);
+ // Synchronize added file systems by duplicating the working directory from
+ // the first one in the list.
+ FS->setCurrentWorkingDirectory(getCurrentWorkingDirectory().get());
}
ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
@@ -252,6 +302,19 @@ OverlayFileSystem::openFileForRead(const llvm::Twine &Path) {
return make_error_code(llvm::errc::no_such_file_or_directory);
}
+llvm::ErrorOr<std::string>
+OverlayFileSystem::getCurrentWorkingDirectory() const {
+ // All file systems are synchronized, so just take the first working directory.
+ return FSList.front()->getCurrentWorkingDirectory();
+}
+std::error_code
+OverlayFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
+ for (auto &FS : FSList)
+ if (std::error_code EC = FS->setCurrentWorkingDirectory(Path))
+ return EC;
+ return std::error_code();
+}
+
clang::vfs::detail::DirIterImpl::~DirIterImpl() { }
namespace {
@@ -320,8 +383,286 @@ directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
std::make_shared<OverlayFSDirIterImpl>(Dir, *this, EC));
}
+namespace clang {
+namespace vfs {
+namespace detail {
+
+enum InMemoryNodeKind { IME_File, IME_Directory };
+
+/// The in-memory file system is a tree of nodes. Every node is either a
+/// file or a directory.
+class InMemoryNode {
+ Status Stat;
+ InMemoryNodeKind Kind;
+
+public:
+ InMemoryNode(Status Stat, InMemoryNodeKind Kind)
+ : Stat(std::move(Stat)), Kind(Kind) {}
+ virtual ~InMemoryNode() {}
+ const Status &getStatus() const { return Stat; }
+ InMemoryNodeKind getKind() const { return Kind; }
+ virtual std::string toString(unsigned Indent) const = 0;
+};
+
+namespace {
+class InMemoryFile : public InMemoryNode {
+ std::unique_ptr<llvm::MemoryBuffer> Buffer;
+
+public:
+ InMemoryFile(Status Stat, std::unique_ptr<llvm::MemoryBuffer> Buffer)
+ : InMemoryNode(std::move(Stat), IME_File), Buffer(std::move(Buffer)) {}
+
+ llvm::MemoryBuffer *getBuffer() { return Buffer.get(); }
+ std::string toString(unsigned Indent) const override {
+ return (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
+ }
+ static bool classof(const InMemoryNode *N) {
+ return N->getKind() == IME_File;
+ }
+};
+
+/// Adapt an InMemoryFile to the VFS File interface.
+class InMemoryFileAdaptor : public File {
+ InMemoryFile &Node;
+
+public:
+ explicit InMemoryFileAdaptor(InMemoryFile &Node) : Node(Node) {}
+
+ llvm::ErrorOr<Status> status() override { return Node.getStatus(); }
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatile) override {
+ llvm::MemoryBuffer *Buf = Node.getBuffer();
+ return llvm::MemoryBuffer::getMemBuffer(
+ Buf->getBuffer(), Buf->getBufferIdentifier(), RequiresNullTerminator);
+ }
+ std::error_code close() override { return std::error_code(); }
+};
+} // end anonymous namespace
+
+class InMemoryDirectory : public InMemoryNode {
+ std::map<std::string, std::unique_ptr<InMemoryNode>> Entries;
+
+public:
+ InMemoryDirectory(Status Stat)
+ : InMemoryNode(std::move(Stat), IME_Directory) {}
+ InMemoryNode *getChild(StringRef Name) {
+ auto I = Entries.find(Name);
+ if (I != Entries.end())
+ return I->second.get();
+ return nullptr;
+ }
+ InMemoryNode *addChild(StringRef Name, std::unique_ptr<InMemoryNode> Child) {
+ return Entries.insert(make_pair(Name, std::move(Child)))
+ .first->second.get();
+ }
+
+ typedef decltype(Entries)::const_iterator const_iterator;
+ const_iterator begin() const { return Entries.begin(); }
+ const_iterator end() const { return Entries.end(); }
+
+ std::string toString(unsigned Indent) const override {
+ std::string Result =
+ (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
+ for (const auto &Entry : Entries) {
+ Result += Entry.second->toString(Indent + 2);
+ }
+ return Result;
+ }
+ static bool classof(const InMemoryNode *N) {
+ return N->getKind() == IME_Directory;
+ }
+};
+}
+
+InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
+ : Root(new detail::InMemoryDirectory(
+ Status("", getNextVirtualUniqueID(), llvm::sys::TimeValue::MinTime(),
+ 0, 0, 0, llvm::sys::fs::file_type::directory_file,
+ llvm::sys::fs::perms::all_all))),
+ UseNormalizedPaths(UseNormalizedPaths) {}
+
+InMemoryFileSystem::~InMemoryFileSystem() {}
+
+std::string InMemoryFileSystem::toString() const {
+ return Root->toString(/*Indent=*/0);
+}
+
+bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ SmallString<128> Path;
+ P.toVector(Path);
+
+ // Fix up relative paths. This just prepends the current working directory.
+ std::error_code EC = makeAbsolute(Path);
+ assert(!EC);
+ (void)EC;
+
+ if (useNormalizedPaths())
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
+
+ if (Path.empty())
+ return false;
+
+ detail::InMemoryDirectory *Dir = Root.get();
+ auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
+ while (true) {
+ StringRef Name = *I;
+ detail::InMemoryNode *Node = Dir->getChild(Name);
+ ++I;
+ if (!Node) {
+ if (I == E) {
+ // End of the path, create a new file.
+ // FIXME: expose the status details in the interface.
+ Status Stat(P.str(), getNextVirtualUniqueID(),
+ llvm::sys::TimeValue(ModificationTime, 0), 0, 0,
+ Buffer->getBufferSize(),
+ llvm::sys::fs::file_type::regular_file,
+ llvm::sys::fs::all_all);
+ Dir->addChild(Name, llvm::make_unique<detail::InMemoryFile>(
+ std::move(Stat), std::move(Buffer)));
+ return true;
+ }
+
+ // Create a new directory. Use the path up to here.
+ // FIXME: expose the status details in the interface.
+ Status Stat(
+ StringRef(Path.str().begin(), Name.end() - Path.str().begin()),
+ getNextVirtualUniqueID(), llvm::sys::TimeValue(ModificationTime, 0),
+ 0, 0, Buffer->getBufferSize(),
+ llvm::sys::fs::file_type::directory_file, llvm::sys::fs::all_all);
+ Dir = cast<detail::InMemoryDirectory>(Dir->addChild(
+ Name, llvm::make_unique<detail::InMemoryDirectory>(std::move(Stat))));
+ continue;
+ }
+
+ if (auto *NewDir = dyn_cast<detail::InMemoryDirectory>(Node)) {
+ Dir = NewDir;
+ } else {
+ assert(isa<detail::InMemoryFile>(Node) &&
+ "Must be either file or directory!");
+
+ // Trying to insert a directory in place of a file.
+ if (I != E)
+ return false;
+
+ // Return false only if the new file is different from the existing one.
+ return cast<detail::InMemoryFile>(Node)->getBuffer()->getBuffer() ==
+ Buffer->getBuffer();
+ }
+ }
+}
+
+bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
+ llvm::MemoryBuffer *Buffer) {
+ return addFile(P, ModificationTime,
+ llvm::MemoryBuffer::getMemBuffer(
+ Buffer->getBuffer(), Buffer->getBufferIdentifier()));
+}
+
+static ErrorOr<detail::InMemoryNode *>
+lookupInMemoryNode(const InMemoryFileSystem &FS, detail::InMemoryDirectory *Dir,
+ const Twine &P) {
+ SmallString<128> Path;
+ P.toVector(Path);
+
+ // Fix up relative paths. This just prepends the current working directory.
+ std::error_code EC = FS.makeAbsolute(Path);
+ assert(!EC);
+ (void)EC;
+
+ if (FS.useNormalizedPaths())
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
+
+ if (Path.empty())
+ return Dir;
+
+ auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
+ while (true) {
+ detail::InMemoryNode *Node = Dir->getChild(*I);
+ ++I;
+ if (!Node)
+ return errc::no_such_file_or_directory;
+
+ // Return the file if it's at the end of the path.
+ if (auto File = dyn_cast<detail::InMemoryFile>(Node)) {
+ if (I == E)
+ return File;
+ return errc::no_such_file_or_directory;
+ }
+
+ // Traverse directories.
+ Dir = cast<detail::InMemoryDirectory>(Node);
+ if (I == E)
+ return Dir;
+ }
+}
+
+llvm::ErrorOr<Status> InMemoryFileSystem::status(const Twine &Path) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Path);
+ if (Node)
+ return (*Node)->getStatus();
+ return Node.getError();
+}
+
+llvm::ErrorOr<std::unique_ptr<File>>
+InMemoryFileSystem::openFileForRead(const Twine &Path) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Path);
+ if (!Node)
+ return Node.getError();
+
+ // When we have a file, provide a heap-allocated wrapper for the memory
+ // buffer to match File's ownership semantics.
+ if (auto *F = dyn_cast<detail::InMemoryFile>(*Node))
+ return std::unique_ptr<File>(new detail::InMemoryFileAdaptor(*F));
+
+ // FIXME: errc::not_a_file?
+ return make_error_code(llvm::errc::invalid_argument);
+}
+
+namespace {
+/// Adaptor from InMemoryDir::iterator to directory_iterator.
+class InMemoryDirIterator : public clang::vfs::detail::DirIterImpl {
+ detail::InMemoryDirectory::const_iterator I;
+ detail::InMemoryDirectory::const_iterator E;
+
+public:
+ InMemoryDirIterator() {}
+ explicit InMemoryDirIterator(detail::InMemoryDirectory &Dir)
+ : I(Dir.begin()), E(Dir.end()) {
+ if (I != E)
+ CurrentEntry = I->second->getStatus();
+ }
+
+ std::error_code increment() override {
+ ++I;
+ // When we're at the end, make CurrentEntry invalid; DirIterImpl will do
+ // the rest.
+ CurrentEntry = I != E ? I->second->getStatus() : Status();
+ return std::error_code();
+ }
+};
+} // end anonymous namespace
+
+directory_iterator InMemoryFileSystem::dir_begin(const Twine &Dir,
+ std::error_code &EC) {
+ auto Node = lookupInMemoryNode(*this, Root.get(), Dir);
+ if (!Node) {
+ EC = Node.getError();
+ return directory_iterator(std::make_shared<InMemoryDirIterator>());
+ }
+
+ if (auto *DirNode = dyn_cast<detail::InMemoryDirectory>(*Node))
+ return directory_iterator(std::make_shared<InMemoryDirIterator>(*DirNode));
+
+ EC = make_error_code(llvm::errc::not_a_directory);
+ return directory_iterator(std::make_shared<InMemoryDirIterator>());
+}
+}
+}
+
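Aside — a hypothetical usage sketch for the InMemoryFileSystem added above (not part of the patch; assumes the matching declarations in clang/Basic/VirtualFileSystem.h, and uses an absolute path so no working-directory setup is needed):

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <cstdio>

    int main() {
      clang::vfs::InMemoryFileSystem FS(/*UseNormalizedPaths=*/true);
      FS.addFile("/src/hello.c", /*ModificationTime=*/0,
                 llvm::MemoryBuffer::getMemBuffer("int main() {}\n"));
      if (auto S = FS.status("/src/hello.c"))
        std::printf("size = %u\n", unsigned(S->getSize()));
      return 0;
    }
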
//===-----------------------------------------------------------------------===/
-// VFSFromYAML implementation
+// RedirectingFileSystem implementation
//===-----------------------------------------------------------------------===/
namespace {
@@ -343,23 +684,24 @@ public:
EntryKind getKind() const { return Kind; }
};
-class DirectoryEntry : public Entry {
- std::vector<Entry *> Contents;
+class RedirectingDirectoryEntry : public Entry {
+ std::vector<std::unique_ptr<Entry>> Contents;
Status S;
public:
- ~DirectoryEntry() override;
- DirectoryEntry(StringRef Name, std::vector<Entry *> Contents, Status S)
+ RedirectingDirectoryEntry(StringRef Name,
+ std::vector<std::unique_ptr<Entry>> Contents,
+ Status S)
: Entry(EK_Directory, Name), Contents(std::move(Contents)),
S(std::move(S)) {}
Status getStatus() { return S; }
- typedef std::vector<Entry *>::iterator iterator;
+ typedef decltype(Contents)::iterator iterator;
iterator contents_begin() { return Contents.begin(); }
iterator contents_end() { return Contents.end(); }
static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
};
-class FileEntry : public Entry {
+class RedirectingFileEntry : public Entry {
public:
enum NameKind {
NK_NotSet,
@@ -370,7 +712,8 @@ private:
std::string ExternalContentsPath;
NameKind UseName;
public:
- FileEntry(StringRef Name, StringRef ExternalContentsPath, NameKind UseName)
+ RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
+ NameKind UseName)
: Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
UseName(UseName) {}
StringRef getExternalContentsPath() const { return ExternalContentsPath; }
@@ -382,16 +725,18 @@ public:
static bool classof(const Entry *E) { return E->getKind() == EK_File; }
};
-class VFSFromYAML;
+class RedirectingFileSystem;
class VFSFromYamlDirIterImpl : public clang::vfs::detail::DirIterImpl {
std::string Dir;
- VFSFromYAML &FS;
- DirectoryEntry::iterator Current, End;
+ RedirectingFileSystem &FS;
+ RedirectingDirectoryEntry::iterator Current, End;
+
public:
- VFSFromYamlDirIterImpl(const Twine &Path, VFSFromYAML &FS,
- DirectoryEntry::iterator Begin,
- DirectoryEntry::iterator End, std::error_code &EC);
+ VFSFromYamlDirIterImpl(const Twine &Path, RedirectingFileSystem &FS,
+ RedirectingDirectoryEntry::iterator Begin,
+ RedirectingDirectoryEntry::iterator End,
+ std::error_code &EC);
std::error_code increment() override;
};
@@ -448,8 +793,9 @@ public:
/// In both cases, the 'name' field may contain multiple path components (e.g.
/// /path/to/file). However, any directory that contains more than one child
/// must be uniquely represented by a directory entry.
-class VFSFromYAML : public vfs::FileSystem {
- std::vector<Entry *> Roots; ///< The root(s) of the virtual file system.
+class RedirectingFileSystem : public vfs::FileSystem {
+ /// The root(s) of the virtual file system.
+ std::vector<std::unique_ptr<Entry>> Roots;
/// \brief The file system to use for external references.
IntrusiveRefCntPtr<FileSystem> ExternalFS;
@@ -466,10 +812,10 @@ class VFSFromYAML : public vfs::FileSystem {
bool UseExternalNames;
/// @}
- friend class VFSFromYAMLParser;
+ friend class RedirectingFileSystemParser;
private:
- VFSFromYAML(IntrusiveRefCntPtr<FileSystem> ExternalFS)
+ RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
: ExternalFS(ExternalFS), CaseSensitive(true), UseExternalNames(true) {}
/// \brief Looks up \p Path in \c Roots.
@@ -484,18 +830,23 @@ private:
ErrorOr<Status> status(const Twine &Path, Entry *E);
public:
- ~VFSFromYAML() override;
-
/// \brief Parses \p Buffer, which is expected to be in YAML format and
/// returns a virtual file system representing its contents.
- static VFSFromYAML *create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS);
+ static RedirectingFileSystem *
+ create(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler, void *DiagContext,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS);
ErrorOr<Status> status(const Twine &Path) override;
ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
+ llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
+ return ExternalFS->getCurrentWorkingDirectory();
+ }
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
+ return ExternalFS->setCurrentWorkingDirectory(Path);
+ }
+
directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override{
ErrorOr<Entry *> E = lookupPath(Dir);
if (!E) {
@@ -513,14 +864,14 @@ public:
return directory_iterator();
}
- DirectoryEntry *D = cast<DirectoryEntry>(*E);
+ auto *D = cast<RedirectingDirectoryEntry>(*E);
return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(Dir,
*this, D->contents_begin(), D->contents_end(), EC));
}
};
/// \brief A helper class to hold the common YAML parsing state.
-class VFSFromYAMLParser {
+class RedirectingFileSystemParser {
yaml::Stream &Stream;
void error(yaml::Node *N, const Twine &Msg) {
@@ -596,7 +947,7 @@ class VFSFromYAMLParser {
return true;
}
- Entry *parseEntry(yaml::Node *N) {
+ std::unique_ptr<Entry> parseEntry(yaml::Node *N) {
yaml::MappingNode *M = dyn_cast<yaml::MappingNode>(N);
if (!M) {
error(N, "expected mapping node for file or directory entry");
@@ -611,14 +962,13 @@ class VFSFromYAMLParser {
KeyStatusPair("use-external-name", false),
};
- DenseMap<StringRef, KeyStatus> Keys(
- &Fields[0], Fields + sizeof(Fields)/sizeof(Fields[0]));
+ DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
bool HasContents = false; // external or otherwise
- std::vector<Entry *> EntryArrayContents;
+ std::vector<std::unique_ptr<Entry>> EntryArrayContents;
std::string ExternalContentsPath;
std::string Name;
- FileEntry::NameKind UseExternalName = FileEntry::NK_NotSet;
+ auto UseExternalName = RedirectingFileEntry::NK_NotSet;
EntryKind Kind;
for (yaml::MappingNode::iterator I = M->begin(), E = M->end(); I != E;
@@ -667,8 +1017,8 @@ class VFSFromYAMLParser {
for (yaml::SequenceNode::iterator I = Contents->begin(),
E = Contents->end();
I != E; ++I) {
- if (Entry *E = parseEntry(&*I))
- EntryArrayContents.push_back(E);
+ if (std::unique_ptr<Entry> E = parseEntry(&*I))
+ EntryArrayContents.push_back(std::move(E));
else
return nullptr;
}
@@ -686,7 +1036,8 @@ class VFSFromYAMLParser {
bool Val;
if (!parseScalarBool(I->getValue(), Val))
return nullptr;
- UseExternalName = Val ? FileEntry::NK_External : FileEntry::NK_Virtual;
+ UseExternalName = Val ? RedirectingFileEntry::NK_External
+ : RedirectingFileEntry::NK_Virtual;
} else {
llvm_unreachable("key missing from Keys");
}
@@ -704,7 +1055,8 @@ class VFSFromYAMLParser {
return nullptr;
// check invalid configuration
- if (Kind == EK_Directory && UseExternalName != FileEntry::NK_NotSet) {
+ if (Kind == EK_Directory &&
+ UseExternalName != RedirectingFileEntry::NK_NotSet) {
error(N, "'use-external-name' is not supported for directories");
return nullptr;
}
@@ -718,16 +1070,17 @@ class VFSFromYAMLParser {
// Get the last component
StringRef LastComponent = sys::path::filename(Trimmed);
- Entry *Result = nullptr;
+ std::unique_ptr<Entry> Result;
switch (Kind) {
case EK_File:
- Result = new FileEntry(LastComponent, std::move(ExternalContentsPath),
- UseExternalName);
+ Result = llvm::make_unique<RedirectingFileEntry>(
+ LastComponent, std::move(ExternalContentsPath), UseExternalName);
break;
case EK_Directory:
- Result = new DirectoryEntry(LastComponent, std::move(EntryArrayContents),
- Status("", "", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0,
- 0, file_type::directory_file, sys::fs::all_all));
+ Result = llvm::make_unique<RedirectingDirectoryEntry>(
+ LastComponent, std::move(EntryArrayContents),
+ Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
+ file_type::directory_file, sys::fs::all_all));
break;
}
@@ -739,18 +1092,21 @@ class VFSFromYAMLParser {
for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
E = sys::path::rend(Parent);
I != E; ++I) {
- Result = new DirectoryEntry(*I, llvm::makeArrayRef(Result),
- Status("", "", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0,
- 0, file_type::directory_file, sys::fs::all_all));
+ std::vector<std::unique_ptr<Entry>> Entries;
+ Entries.push_back(std::move(Result));
+ Result = llvm::make_unique<RedirectingDirectoryEntry>(
+ *I, std::move(Entries),
+ Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
+ file_type::directory_file, sys::fs::all_all));
}
return Result;
}
public:
- VFSFromYAMLParser(yaml::Stream &S) : Stream(S) {}
+ RedirectingFileSystemParser(yaml::Stream &S) : Stream(S) {}
// false on error
- bool parse(yaml::Node *Root, VFSFromYAML *FS) {
+ bool parse(yaml::Node *Root, RedirectingFileSystem *FS) {
yaml::MappingNode *Top = dyn_cast<yaml::MappingNode>(Root);
if (!Top) {
error(Root, "expected mapping node");
@@ -764,8 +1120,7 @@ public:
KeyStatusPair("roots", true),
};
- DenseMap<StringRef, KeyStatus> Keys(
- &Fields[0], Fields + sizeof(Fields)/sizeof(Fields[0]));
+ DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
// Parse configuration and 'roots'
for (yaml::MappingNode::iterator I = Top->begin(), E = Top->end(); I != E;
@@ -787,8 +1142,8 @@ public:
for (yaml::SequenceNode::iterator I = Roots->begin(), E = Roots->end();
I != E; ++I) {
- if (Entry *E = parseEntry(&*I))
- FS->Roots.push_back(E);
+ if (std::unique_ptr<Entry> E = parseEntry(&*I))
+ FS->Roots.push_back(std::move(E));
else
return false;
}
@@ -831,15 +1186,11 @@ public:
};
} // end of anonymous namespace
-Entry::~Entry() {}
-DirectoryEntry::~DirectoryEntry() { llvm::DeleteContainerPointers(Contents); }
+Entry::~Entry() = default;
-VFSFromYAML::~VFSFromYAML() { llvm::DeleteContainerPointers(Roots); }
-
-VFSFromYAML *VFSFromYAML::create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+RedirectingFileSystem *RedirectingFileSystem::create(
+ std::unique_ptr<MemoryBuffer> Buffer, SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS) {
SourceMgr SM;
yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
@@ -852,21 +1203,22 @@ VFSFromYAML *VFSFromYAML::create(std::unique_ptr<MemoryBuffer> Buffer,
return nullptr;
}
- VFSFromYAMLParser P(Stream);
+ RedirectingFileSystemParser P(Stream);
- std::unique_ptr<VFSFromYAML> FS(new VFSFromYAML(ExternalFS));
+ std::unique_ptr<RedirectingFileSystem> FS(
+ new RedirectingFileSystem(ExternalFS));
if (!P.parse(Root, FS.get()))
return nullptr;
return FS.release();
}
-ErrorOr<Entry *> VFSFromYAML::lookupPath(const Twine &Path_) {
+ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
SmallString<256> Path;
Path_.toVector(Path);
// Handle relative paths
- if (std::error_code EC = sys::fs::make_absolute(Path))
+ if (std::error_code EC = makeAbsolute(Path))
return EC;
if (Path.empty())
@@ -874,18 +1226,17 @@ ErrorOr<Entry *> VFSFromYAML::lookupPath(const Twine &Path_) {
sys::path::const_iterator Start = sys::path::begin(Path);
sys::path::const_iterator End = sys::path::end(Path);
- for (std::vector<Entry *>::iterator I = Roots.begin(), E = Roots.end();
- I != E; ++I) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, *I);
+ for (const std::unique_ptr<Entry> &Root : Roots) {
+ ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
return Result;
}
return make_error_code(llvm::errc::no_such_file_or_directory);
}
-ErrorOr<Entry *> VFSFromYAML::lookupPath(sys::path::const_iterator Start,
- sys::path::const_iterator End,
- Entry *From) {
+ErrorOr<Entry *>
+RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
+ sys::path::const_iterator End, Entry *From) {
if (Start->equals("."))
++Start;
@@ -902,52 +1253,78 @@ ErrorOr<Entry *> VFSFromYAML::lookupPath(sys::path::const_iterator Start,
return From;
}
- DirectoryEntry *DE = dyn_cast<DirectoryEntry>(From);
+ auto *DE = dyn_cast<RedirectingDirectoryEntry>(From);
if (!DE)
return make_error_code(llvm::errc::not_a_directory);
- for (DirectoryEntry::iterator I = DE->contents_begin(),
- E = DE->contents_end();
- I != E; ++I) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, *I);
+ for (const std::unique_ptr<Entry> &DirEntry :
+ llvm::make_range(DE->contents_begin(), DE->contents_end())) {
+ ErrorOr<Entry *> Result = lookupPath(Start, End, DirEntry.get());
if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
return Result;
}
return make_error_code(llvm::errc::no_such_file_or_directory);
}
-ErrorOr<Status> VFSFromYAML::status(const Twine &Path, Entry *E) {
+static Status getRedirectedFileStatus(const Twine &Path, bool UseExternalNames,
+ Status ExternalStatus) {
+ Status S = ExternalStatus;
+ if (!UseExternalNames)
+ S = Status::copyWithNewName(S, Path.str());
+ S.IsVFSMapped = true;
+ return S;
+}
+
+ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path, Entry *E) {
assert(E != nullptr);
- std::string PathStr(Path.str());
- if (FileEntry *F = dyn_cast<FileEntry>(E)) {
+ if (auto *F = dyn_cast<RedirectingFileEntry>(E)) {
ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
assert(!S || S->getName() == F->getExternalContentsPath());
- if (S && !F->useExternalName(UseExternalNames))
- S->setName(PathStr);
if (S)
- S->IsVFSMapped = true;
+ return getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
+ *S);
return S;
} else { // directory
- DirectoryEntry *DE = cast<DirectoryEntry>(E);
- Status S = DE->getStatus();
- S.setName(PathStr);
- return S;
+ auto *DE = cast<RedirectingDirectoryEntry>(E);
+ return Status::copyWithNewName(DE->getStatus(), Path.str());
}
}
-ErrorOr<Status> VFSFromYAML::status(const Twine &Path) {
+ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
ErrorOr<Entry *> Result = lookupPath(Path);
if (!Result)
return Result.getError();
return status(Path, *Result);
}
-ErrorOr<std::unique_ptr<File>> VFSFromYAML::openFileForRead(const Twine &Path) {
+namespace {
+/// Provide a file wrapper with an overridden status.
+class FileWithFixedStatus : public File {
+ std::unique_ptr<File> InnerFile;
+ Status S;
+
+public:
+ FileWithFixedStatus(std::unique_ptr<File> InnerFile, Status S)
+ : InnerFile(std::move(InnerFile)), S(S) {}
+
+ ErrorOr<Status> status() override { return S; }
+ ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+ getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
+ bool IsVolatile) override {
+ return InnerFile->getBuffer(Name, FileSize, RequiresNullTerminator,
+ IsVolatile);
+ }
+ std::error_code close() override { return InnerFile->close(); }
+};
+} // end anonymous namespace
+
+ErrorOr<std::unique_ptr<File>>
+RedirectingFileSystem::openFileForRead(const Twine &Path) {
ErrorOr<Entry *> E = lookupPath(Path);
if (!E)
return E.getError();
- FileEntry *F = dyn_cast<FileEntry>(*E);
+ auto *F = dyn_cast<RedirectingFileEntry>(*E);
if (!F) // FIXME: errc::not_a_file?
return make_error_code(llvm::errc::invalid_argument);
@@ -955,18 +1332,23 @@ ErrorOr<std::unique_ptr<File>> VFSFromYAML::openFileForRead(const Twine &Path) {
if (!Result)
return Result;
- if (!F->useExternalName(UseExternalNames))
- (*Result)->setName(Path.str());
+ auto ExternalStatus = (*Result)->status();
+ if (!ExternalStatus)
+ return ExternalStatus.getError();
- return Result;
+ // FIXME: Update the status with the name and VFSMapped.
+ Status S = getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
+ *ExternalStatus);
+ return std::unique_ptr<File>(
+ llvm::make_unique<FileWithFixedStatus>(std::move(*Result), S));
}
IntrusiveRefCntPtr<FileSystem>
vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
SourceMgr::DiagHandlerTy DiagHandler, void *DiagContext,
IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- return VFSFromYAML::create(std::move(Buffer), DiagHandler, DiagContext,
- ExternalFS);
+ return RedirectingFileSystem::create(std::move(Buffer), DiagHandler,
+ DiagContext, ExternalFS);
}
UniqueID vfs::getNextVirtualUniqueID() {
@@ -1111,11 +1493,10 @@ void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
JSONWriter(OS).write(Mappings, IsCaseSensitive);
}
-VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(const Twine &_Path,
- VFSFromYAML &FS,
- DirectoryEntry::iterator Begin,
- DirectoryEntry::iterator End,
- std::error_code &EC)
+VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
+ const Twine &_Path, RedirectingFileSystem &FS,
+ RedirectingDirectoryEntry::iterator Begin,
+ RedirectingDirectoryEntry::iterator End, std::error_code &EC)
: Dir(_Path.str()), FS(FS), Current(Begin), End(End) {
if (Current != End) {
SmallString<128> PathStr(Dir);
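
[Editor's note — usage sketch, not part of this commit] The VirtualFileSystem.cpp hunks above rename VFSFromYAML to RedirectingFileSystem and move Entry ownership to std::unique_ptr, while vfs::getVFSFromYAML() stays the stable public entry point. A minimal sketch of reaching the new class through that entry point; the overlay YAML string and function name are invented for illustration:

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/MemoryBuffer.h"

    llvm::IntrusiveRefCntPtr<clang::vfs::FileSystem> makeOverlay() {
      // Map the virtual path /vdir/a.h onto the on-disk file /real/a.h.
      const char *Overlay =
          "{ 'version': 0, 'roots': [\n"
          "  { 'name': '/vdir', 'type': 'directory', 'contents': [\n"
          "    { 'name': 'a.h', 'type': 'file',\n"
          "      'external-contents': '/real/a.h' } ] } ] }";
      return clang::vfs::getVFSFromYAML(
          llvm::MemoryBuffer::getMemBuffer(Overlay),
          /*DiagHandler=*/nullptr, /*DiagContext=*/nullptr,
          clang::vfs::getRealFileSystem()); // returns nullptr on parse error
    }

Calling status("/vdir/a.h") on the result reports the external file's status, renamed back to the virtual path unless 'use-external-name' says otherwise, via the getRedirectedFileStatus helper introduced above.
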
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index cc8652e169d8..a65f27085616 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -25,6 +25,8 @@ namespace clang {
class TargetInfo;
namespace CodeGen {
+ class ABIArgInfo;
+ class Address;
class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
@@ -79,8 +81,15 @@ namespace clang {
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
// abstract this out.
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGen::CodeGenFunction &CGF) const = 0;
+ virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const = 0;
+
+  /// Emit the target-dependent code to load a value of
+  /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
+ virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const;
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
@@ -92,6 +101,15 @@ namespace clang {
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
+ /// A convenience method to return an indirect ABIArgInfo with an
+ /// expected alignment equal to the ABI alignment of the given type.
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirect(QualType Ty, bool ByRef = true,
+ bool Realign = false,
+ llvm::Type *Padding = nullptr) const;
+
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
};
} // end namespace clang
diff --git a/lib/CodeGen/Address.h b/lib/CodeGen/Address.h
new file mode 100644
index 000000000000..9d145fa26b5f
--- /dev/null
+++ b/lib/CodeGen/Address.h
@@ -0,0 +1,126 @@
+//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a simple wrapper for a pair of a pointer and an
+// alignment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+
+#include "llvm/IR/Constants.h"
+#include "clang/AST/CharUnits.h"
+
+namespace clang {
+namespace CodeGen {
+
+/// An aligned address.
+class Address {
+ llvm::Value *Pointer;
+ CharUnits Alignment;
+public:
+ Address(llvm::Value *pointer, CharUnits alignment)
+ : Pointer(pointer), Alignment(alignment) {
+ assert((!alignment.isZero() || pointer == nullptr) &&
+ "creating valid address with invalid alignment");
+ }
+
+ static Address invalid() { return Address(nullptr, CharUnits()); }
+ bool isValid() const { return Pointer != nullptr; }
+
+ llvm::Value *getPointer() const {
+ assert(isValid());
+ return Pointer;
+ }
+
+ /// Return the type of the pointer value.
+ llvm::PointerType *getType() const {
+ return llvm::cast<llvm::PointerType>(getPointer()->getType());
+ }
+
+ /// Return the type of the values stored in this address.
+ ///
+ /// When IR pointer types lose their element type, we should simply
+ /// store it in Address instead for the convenience of writing code.
+ llvm::Type *getElementType() const {
+ return getType()->getElementType();
+ }
+
+ /// Return the address space that this address resides in.
+ unsigned getAddressSpace() const {
+ return getType()->getAddressSpace();
+ }
+
+ /// Return the IR name of the pointer value.
+ llvm::StringRef getName() const {
+ return getPointer()->getName();
+ }
+
+ /// Return the alignment of this pointer.
+ CharUnits getAlignment() const {
+ assert(isValid());
+ return Alignment;
+ }
+};
+
+/// A specialization of Address that requires the address to be an
+/// LLVM Constant.
+class ConstantAddress : public Address {
+public:
+ ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
+ : Address(pointer, alignment) {}
+
+ static ConstantAddress invalid() {
+ return ConstantAddress(nullptr, CharUnits());
+ }
+
+ llvm::Constant *getPointer() const {
+ return llvm::cast<llvm::Constant>(Address::getPointer());
+ }
+
+ ConstantAddress getBitCast(llvm::Type *ty) const {
+ return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
+ getAlignment());
+ }
+
+ ConstantAddress getElementBitCast(llvm::Type *ty) const {
+ return getBitCast(ty->getPointerTo(getAddressSpace()));
+ }
+
+ static bool isaImpl(Address addr) {
+ return llvm::isa<llvm::Constant>(addr.getPointer());
+ }
+ static ConstantAddress castImpl(Address addr) {
+ return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
+ addr.getAlignment());
+ }
+};
+
+}
+}
+
+namespace llvm {
+ // Present a minimal LLVM-like casting interface.
+ template <class U> inline U cast(clang::CodeGen::Address addr) {
+ return U::castImpl(addr);
+ }
+ template <class U> inline bool isa(clang::CodeGen::Address addr) {
+ return U::isaImpl(addr);
+ }
+}
+
+namespace clang {
+ // Make our custom isa and cast available in namespace clang, to mirror
+ // what we do for LLVM's versions in Basic/LLVM.h.
+ using llvm::isa;
+ using llvm::cast;
+}
+
+#endif
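
[Editor's note — usage sketch, not part of this commit] Address.h is the pivot of this commit's CodeGen changes: interfaces such as ABIInfo::EmitVAArg above stop passing a raw llvm::Value* plus a separately tracked alignment and instead pass the pair as one object. A sketch of the interface; the values and names are invented:

    #include "Address.h"                 // the header added above
    #include "llvm/IR/GlobalVariable.h"

    using namespace clang;
    using namespace clang::CodeGen;

    void sketch(llvm::GlobalVariable *GV, llvm::Value *Local) {
      // Pair a pointer with the alignment CodeGen has established for it.
      Address A(Local, CharUnits::fromQuantity(8));
      (void)A.getPointer();     // the raw llvm::Value*
      (void)A.getAlignment();   // CharUnits::fromQuantity(8)
      (void)A.getElementType(); // pointee type, while pointers still carry one

      // Constants flow through the ConstantAddress subclass, reached via the
      // minimal isa/cast interface declared at the bottom of the header.
      Address G(GV, CharUnits::fromQuantity(4));
      if (isa<ConstantAddress>(G)) {
        ConstantAddress C = cast<ConstantAddress>(G);
        (void)C.getPointer();   // now typed as llvm::Constant*
      }
    }
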
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index afcb9e5c5055..82297e7ee417 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -14,6 +14,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
@@ -21,6 +22,7 @@
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/FunctionInfo.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
@@ -52,6 +54,7 @@ class EmitAssemblyHelper {
const clang::TargetOptions &TargetOpts;
const LangOptions &LangOpts;
Module *TheModule;
+ std::unique_ptr<FunctionInfoIndex> FunctionIndex;
Timer CodeGenerationTime;
@@ -112,15 +115,14 @@ private:
bool AddEmitPasses(BackendAction Action, raw_pwrite_stream &OS);
public:
- EmitAssemblyHelper(DiagnosticsEngine &_Diags,
- const CodeGenOptions &CGOpts,
+ EmitAssemblyHelper(DiagnosticsEngine &_Diags, const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
- const LangOptions &LOpts,
- Module *M)
- : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
- TheModule(M), CodeGenerationTime("Code Generation Time"),
- CodeGenPasses(nullptr), PerModulePasses(nullptr),
- PerFunctionPasses(nullptr) {}
+ const LangOptions &LOpts, Module *M,
+ std::unique_ptr<FunctionInfoIndex> Index)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
+ TheModule(M), FunctionIndex(std::move(Index)),
+ CodeGenerationTime("Code Generation Time"), CodeGenPasses(nullptr),
+ PerModulePasses(nullptr), PerFunctionPasses(nullptr) {}
~EmitAssemblyHelper() {
delete CodeGenPasses;
@@ -166,14 +168,6 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
-static void addSampleProfileLoaderPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createSampleProfileLoaderPass(CGOpts.SampleProfileFile));
-}
-
static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createAddDiscriminatorsPass());
@@ -201,14 +195,20 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
- PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/false));
- PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false));
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
+ PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/false, Recover));
+ PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false, Recover));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
- PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/true));
- PM.add(createAddressSanitizerModulePass(/*CompileKernel*/true));
+ PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/true,
+ /*Recover*/true));
+ PM.add(createAddressSanitizerModulePass(/*CompileKernel*/true,
+ /*Recover*/true));
}
static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
@@ -272,6 +272,9 @@ static void addSymbolRewriterPass(const CodeGenOptions &Opts,
}
void EmitAssemblyHelper::CreatePasses() {
+ if (CodeGenOpts.DisableLLVMPasses)
+ return;
+
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
@@ -283,6 +286,29 @@ void EmitAssemblyHelper::CreatePasses() {
}
PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
+
+ // Figure out TargetLibraryInfo.
+ Triple TargetTriple(TheModule->getTargetTriple());
+ PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts);
+
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining:
+ break;
+ case CodeGenOptions::NormalInlining: {
+ PMBuilder.Inliner =
+ createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ // Respect always_inline.
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
+ break;
+ }
+
PMBuilder.OptLevel = OptLevel;
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB;
@@ -295,13 +321,20 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
+ legacy::PassManager *MPM = getPerModulePasses();
+
+ // If we are performing a ThinLTO importing compile, invoke the LTO
+ // pipeline and pass down the in-memory function index.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty()) {
+ assert(FunctionIndex && "Expected non-empty function index");
+ PMBuilder.FunctionIndex = FunctionIndex.get();
+ PMBuilder.populateLTOPassManager(*MPM);
+ return;
+ }
+
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
addAddDiscriminatorsPass);
- if (!CodeGenOpts.SampleProfileFile.empty())
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addSampleProfileLoaderPass);
-
// In ObjC ARC mode, add the main ARC optimization passes.
if (LangOpts.ObjCAutoRefCount) {
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
@@ -363,27 +396,6 @@ void EmitAssemblyHelper::CreatePasses() {
addDataFlowSanitizerPass);
}
- // Figure out TargetLibraryInfo.
- Triple TargetTriple(TheModule->getTargetTriple());
- PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts);
-
- switch (Inlining) {
- case CodeGenOptions::NoInlining: break;
- case CodeGenOptions::NormalInlining: {
- PMBuilder.Inliner =
- createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
- break;
- }
- case CodeGenOptions::OnlyAlwaysInlining:
- // Respect always_inline.
- if (OptLevel == 0)
- // Do not insert lifetime intrinsics at -O0.
- PMBuilder.Inliner = createAlwaysInlinerPass(false);
- else
- PMBuilder.Inliner = createAlwaysInlinerPass();
- break;
- }
-
// Set up the per-function pass manager.
legacy::FunctionPassManager *FPM = getPerFunctionPasses();
if (CodeGenOpts.VerifyModule)
@@ -391,7 +403,6 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.populateFunctionPassManager(*FPM);
// Set up the per-module pass manager.
- legacy::PassManager *MPM = getPerModulePasses();
if (!CodeGenOpts.RewriteMapFiles.empty())
addSymbolRewriterPass(CodeGenOpts, MPM);
@@ -420,6 +431,9 @@ void EmitAssemblyHelper::CreatePasses() {
MPM->add(createInstrProfilingPass(Options));
}
+ if (!CodeGenOpts.SampleProfileFile.empty())
+ MPM->add(createSampleProfileLoaderPass(CodeGenOpts.SampleProfileFile));
+
PMBuilder.populateModulePassManager(*MPM);
}
@@ -455,20 +469,16 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
BackendArgs.push_back("-limit-float-precision");
BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
}
- for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
- BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
+ for (const std::string &BackendOption : CodeGenOpts.BackendOptions)
+ BackendArgs.push_back(BackendOption.c_str());
BackendArgs.push_back(nullptr);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
BackendArgs.data());
- std::string FeaturesStr;
- if (!TargetOpts.Features.empty()) {
- SubtargetFeatures Features;
- for (const std::string &Feature : TargetOpts.Features)
- Features.AddFeature(Feature);
- FeaturesStr = Features.getString();
- }
+ std::string FeaturesStr =
+ llvm::join(TargetOpts.Features.begin(), TargetOpts.Features.end(), ",");
+ // Keep this synced with the equivalent code in tools/driver/cc1as_main.cpp.
llvm::Reloc::Model RM = llvm::Reloc::Default;
if (CodeGenOpts.RelocationModel == "static") {
RM = llvm::Reloc::Static;
@@ -497,24 +507,16 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
.Case("posix", llvm::ThreadModel::POSIX)
.Case("single", llvm::ThreadModel::Single);
- if (CodeGenOpts.DisableIntegratedAS)
- Options.DisableIntegratedAS = true;
-
- if (CodeGenOpts.CompressDebugSections)
- Options.CompressDebugSections = true;
-
- if (CodeGenOpts.UseInitArray)
- Options.UseInitArray = true;
-
// Set float ABI type.
- if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
- Options.FloatABIType = llvm::FloatABI::Soft;
- else if (CodeGenOpts.FloatABI == "hard")
- Options.FloatABIType = llvm::FloatABI::Hard;
- else {
- assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
- Options.FloatABIType = llvm::FloatABI::Default;
- }
+ assert((CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp" ||
+ CodeGenOpts.FloatABI == "hard" || CodeGenOpts.FloatABI.empty()) &&
+ "Invalid Floating Point ABI!");
+ Options.FloatABIType =
+ llvm::StringSwitch<llvm::FloatABI::ABIType>(CodeGenOpts.FloatABI)
+ .Case("soft", llvm::FloatABI::Soft)
+ .Case("softfp", llvm::FloatABI::Soft)
+ .Case("hard", llvm::FloatABI::Hard)
+ .Default(llvm::FloatABI::Default);
// Set FP fusion mode.
switch (CodeGenOpts.getFPContractMode()) {
@@ -529,6 +531,17 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
break;
}
+ Options.UseInitArray = CodeGenOpts.UseInitArray;
+ Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
+ Options.CompressDebugSections = CodeGenOpts.CompressDebugSections;
+
+ // Set EABI version.
+ Options.EABIVersion = llvm::StringSwitch<llvm::EABI>(CodeGenOpts.EABIVersion)
+ .Case("4", llvm::EABI::EABI4)
+ .Case("5", llvm::EABI::EABI5)
+ .Case("gnu", llvm::EABI::GNU)
+ .Default(llvm::EABI::Default);
+
Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
@@ -539,11 +552,27 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.FunctionSections = CodeGenOpts.FunctionSections;
Options.DataSections = CodeGenOpts.DataSections;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
+ Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
+ switch (CodeGenOpts.getDebuggerTuning()) {
+ case CodeGenOptions::DebuggerKindGDB:
+ Options.DebuggerTuning = llvm::DebuggerKind::GDB;
+ break;
+ case CodeGenOptions::DebuggerKindLLDB:
+ Options.DebuggerTuning = llvm::DebuggerKind::LLDB;
+ break;
+ case CodeGenOptions::DebuggerKindSCE:
+ Options.DebuggerTuning = llvm::DebuggerKind::SCE;
+ break;
+ default:
+ break;
+ }
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
+ Options.MCOptions.MCIncrementalLinkerCompatible =
+ CodeGenOpts.IncrementalLinkerCompatible;
Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
Options.MCOptions.ABIName = TargetOpts.ABI;
@@ -605,7 +634,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
if (UsesCodeGen && !TM)
return;
if (TM)
- TheModule->setDataLayout(*TM->getDataLayout());
+ TheModule->setDataLayout(TM->createDataLayout());
CreatePasses();
switch (Action) {
@@ -613,8 +642,8 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
case Backend_EmitBC:
- getPerModulePasses()->add(
- createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists));
+ getPerModulePasses()->add(createBitcodeWriterPass(
+ *OS, CodeGenOpts.EmitLLVMUseLists, CodeGenOpts.EmitFunctionSummary));
break;
case Backend_EmitLL:
@@ -659,16 +688,17 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const clang::TargetOptions &TOpts,
const LangOptions &LOpts, StringRef TDesc,
Module *M, BackendAction Action,
- raw_pwrite_stream *OS) {
- EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
+ raw_pwrite_stream *OS,
+ std::unique_ptr<FunctionInfoIndex> Index) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M,
+ std::move(Index));
AsmHelper.EmitAssembly(Action, OS);
// If an optional clang TargetInfo description string was passed in, use it to
// verify the LLVM TargetMachine's DataLayout.
if (AsmHelper.TM && !TDesc.empty()) {
- std::string DLDesc =
- AsmHelper.TM->getDataLayout()->getStringRepresentation();
+ std::string DLDesc = M->getDataLayout().getStringRepresentation();
if (DLDesc != TDesc) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "backend data layout '%0' does not match "
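
[Editor's note — illustrative sketch, not part of this commit] Several BackendUtil.cpp hunks above replace if/else ladders with llvm::StringSwitch (the FloatABI and EABIVersion mappings). The pattern, restated standalone; the function name is invented:

    #include "llvm/ADT/StringSwitch.h"
    #include "llvm/Target/TargetOptions.h"

    llvm::FloatABI::ABIType lowerFloatABI(llvm::StringRef Name) {
      // The first matching .Case wins; .Default covers the empty/unknown
      // string, which the new assert above has already constrained.
      return llvm::StringSwitch<llvm::FloatABI::ABIType>(Name)
          .Case("soft", llvm::FloatABI::Soft)
          .Case("softfp", llvm::FloatABI::Soft)
          .Case("hard", llvm::FloatABI::Hard)
          .Default(llvm::FloatABI::Default);
    }
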
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 9839617c0e41..24de30b0b862 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -80,7 +80,7 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.RoundUpToAlignment(lvalue.getAlignment()));
- auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
+ auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
@@ -94,8 +94,9 @@ namespace {
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
- LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
- lvalue.getAlignment());
+ LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
+ BFI, lvalue.getType(),
+ lvalue.getAlignmentSource());
LVal.setTBAAInfo(lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
if (AtomicTy.isNull()) {
@@ -118,10 +119,8 @@ namespace {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
- lvalue.getType(), lvalue.getExtVectorAddr()
- ->getType()
- ->getPointerElementType()
- ->getVectorNumElements());
+ lvalue.getType(), lvalue.getExtVectorAddress()
+ .getElementType()->getVectorNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
@@ -139,15 +138,22 @@ namespace {
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
const LValue &getAtomicLValue() const { return LVal; }
- llvm::Value *getAtomicAddress() const {
+ llvm::Value *getAtomicPointer() const {
if (LVal.isSimple())
- return LVal.getAddress();
+ return LVal.getPointer();
else if (LVal.isBitField())
- return LVal.getBitFieldAddr();
+ return LVal.getBitFieldPointer();
else if (LVal.isVectorElt())
- return LVal.getVectorAddr();
+ return LVal.getVectorPointer();
assert(LVal.isExtVectorElt());
- return LVal.getExtVectorAddr();
+ return LVal.getExtVectorPointer();
+ }
+ Address getAtomicAddress() const {
+ return Address(getAtomicPointer(), getAtomicAlignment());
+ }
+
+ Address getAtomicAddressAsAtomicIntPointer() const {
+ return emitCastToAtomicIntPointer(getAtomicAddress());
}
/// Is the atomic size larger than the underlying value type?
@@ -167,13 +173,18 @@ namespace {
return CGF.CGM.getSize(size);
}
- /// Cast the given pointer to an integer pointer suitable for
- /// atomic operations.
- llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
+ /// Cast the given pointer to an integer pointer suitable for atomic
+  /// operations.
+ Address emitCastToAtomicIntPointer(Address Addr) const;
+
+ /// If Addr is compatible with the iN that will be used for an atomic
+ /// operation, bitcast it. Otherwise, create a temporary that is suitable
+ /// and copy the value across.
+ Address convertToAtomicIntPointer(Address Addr) const;
/// Turn an atomic-layout object into an r-value.
- RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
- SourceLocation loc, bool AsValue) const;
+ RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
+ SourceLocation loc, bool AsValue) const;
/// \brief Converts a rvalue to integer value.
llvm::Value *convertRValueToInt(RValue RVal) const;
@@ -188,12 +199,12 @@ namespace {
/// Project an l-value down to the value field.
LValue projectValue() const {
assert(LVal.isSimple());
- llvm::Value *addr = getAtomicAddress();
+ Address addr = getAtomicAddress();
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
- return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
- CGF.getContext(), LVal.getTBAAInfo());
+ return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
+ LVal.getAlignmentSource(), LVal.getTBAAInfo());
}
/// \brief Emits atomic load.
@@ -228,18 +239,18 @@ namespace {
bool IsVolatile);
/// Materialize an atomic r-value in atomic-layout memory.
- llvm::Value *materializeRValue(RValue rvalue) const;
+ Address materializeRValue(RValue rvalue) const;
/// \brief Translates LLVM atomic ordering to GNU atomic ordering for
/// libcalls.
static AtomicExpr::AtomicOrderingKind
translateAtomicOrdering(const llvm::AtomicOrdering AO);
+ /// \brief Creates temp alloca for intermediate operations on atomic value.
+ Address CreateTempAlloca() const;
private:
bool requiresMemSetZero(llvm::Type *type) const;
- /// \brief Creates temp alloca for intermediate operations on atomic value.
- llvm::Value *CreateTempAlloca() const;
/// \brief Emits atomic load as a libcall.
void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
@@ -294,16 +305,16 @@ AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
llvm_unreachable("Unhandled AtomicOrdering");
}
-llvm::Value *AtomicInfo::CreateTempAlloca() const {
- auto *TempAlloca = CGF.CreateMemTemp(
+Address AtomicInfo::CreateTempAlloca() const {
+ Address TempAlloca = CGF.CreateMemTemp(
(LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
: AtomicTy,
+ getAtomicAlignment(),
"atomic-temp");
- TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
// Cast to pointer to value type for bitfields.
if (LVal.isBitField())
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TempAlloca, getAtomicAddress()->getType());
+ TempAlloca, getAtomicAddress().getType());
return TempAlloca;
}
@@ -351,7 +362,7 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- llvm::Value *addr = LVal.getAddress();
+ llvm::Value *addr = LVal.getPointer();
if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
return false;
@@ -363,19 +374,17 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- llvm::Value *Dest, llvm::Value *Ptr,
- llvm::Value *Val1, llvm::Value *Val2,
- uint64_t Size, unsigned Align,
+ Address Dest, Address Ptr,
+ Address Val1, Address Val2,
+ uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
- llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
- Expected->setAlignment(Align);
- llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
- Desired->setAlignment(Align);
+ llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
+ llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Ptr, Expected, Desired, SuccessOrder, FailureOrder);
+ Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
Pair->setVolatile(E->isVolatile());
Pair->setWeak(IsWeak);
@@ -400,26 +409,24 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
- StoreExpected->setAlignment(Align);
+ CGF.Builder.CreateStore(Old, Val1);
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
CGF.Builder.SetInsertPoint(ContinueBB);
// Update the memory at Dest with Cmp's value.
CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
- return;
}
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, llvm::Value *Dest,
- llvm::Value *Ptr, llvm::Value *Val1,
- llvm::Value *Val2,
+ bool IsWeak, Address Dest,
+ Address Ptr, Address Val1,
+ Address Val2,
llvm::Value *FailureOrderVal,
- uint64_t Size, unsigned Align,
+ uint64_t Size,
llvm::AtomicOrdering SuccessOrder) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
@@ -440,7 +447,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
FailureOrder =
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
}
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
SuccessOrder, FailureOrder);
return;
}
@@ -465,13 +472,13 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// doesn't fold to a constant for the ordering.
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::Monotonic);
+ Size, SuccessOrder, llvm::Monotonic);
CGF.Builder.CreateBr(ContBB);
if (AcquireBB) {
CGF.Builder.SetInsertPoint(AcquireBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::Acquire);
+ Size, SuccessOrder, llvm::Acquire);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
AcquireBB);
@@ -481,7 +488,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
if (SeqCstBB) {
CGF.Builder.SetInsertPoint(SeqCstBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
+ Size, SuccessOrder, llvm::SequentiallyConsistent);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
SeqCstBB);
@@ -490,11 +497,10 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
CGF.Builder.SetInsertPoint(ContBB);
}
-static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
- llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
+ Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, unsigned Align,
- llvm::AtomicOrdering Order) {
+ uint64_t Size, llvm::AtomicOrdering Order) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
@@ -504,17 +510,17 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Align, Order);
+ Val1, Val2, FailureOrder, Size, Order);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -528,12 +534,12 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -545,22 +551,17 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
- Load->setAlignment(Size);
Load->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
- StoreDest->setAlignment(Align);
+ CGF.Builder.CreateStore(Load, Dest);
return;
}
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n: {
- assert(!Dest && "Store does not return a value");
- llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
- LoadVal1->setAlignment(Align);
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
Store->setAtomic(Order);
- Store->setAlignment(Size);
Store->setVolatile(E->isVolatile());
return;
}
@@ -612,17 +613,16 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
break;
case AtomicExpr::AO__atomic_nand_fetch:
- PostOp = llvm::Instruction::And;
- // Fall through.
+ PostOp = llvm::Instruction::And; // the NOT is special cased below
+ // Fall through.
case AtomicExpr::AO__atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
}
- llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
- LoadVal1->setAlignment(Align);
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::AtomicRMWInst *RMWI =
- CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+ CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
// For __atomic_*_fetch operations, perform the operation again to
@@ -632,15 +632,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
Result = CGF.Builder.CreateNot(Result);
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
- StoreDest->setAlignment(Align);
+ CGF.Builder.CreateStore(Result, Dest);
}
// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
-static llvm::Value *
+static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
- llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
return DeclPtr;
@@ -652,14 +651,15 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
// Load value and pass it to the function directly.
- unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
ValTy =
CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
SizeInBits)->getPointerTo();
- Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
- Align, CGF.getContext().getPointerType(ValTy),
+ Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
+ Val = CGF.EmitLoadOfScalar(Ptr, false,
+ CGF.getContext().getPointerType(ValTy),
Loc);
// Coerce the value into an appropriately sized integer type.
Args.add(RValue::get(Val), ValTy);
@@ -670,27 +670,27 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
}
}
-RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
QualType MemTy = AtomicTy;
if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
MemTy = AT->getValueType();
- CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+ CharUnits sizeChars, alignChars;
+ std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
- CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
- unsigned Align = alignChars.getQuantity();
- unsigned MaxInlineWidthInBits =
- getTarget().getMaxAtomicInlineWidth();
- bool UseLibcall = (Size != Align ||
+ unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
+ bool UseLibcall = (sizeChars != alignChars ||
getContext().toBits(sizeChars) > MaxInlineWidthInBits);
- llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
- *Val2 = nullptr;
- llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
+ llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
+
+ Address Val1 = Address::invalid();
+ Address Val2 = Address::invalid();
+ Address Dest = Address::invalid();
+ Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
- assert(!Dest && "Init does not return a value");
- LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
+ LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
EmitAtomicInit(E->getVal1(), lvalue);
return RValue::get(nullptr);
}
@@ -699,32 +699,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
- llvm_unreachable("Already handled!");
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__atomic_load_n:
break;
case AtomicExpr::AO__atomic_load:
- Dest = EmitScalarExpr(E->getVal1());
+ Dest = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_store:
- Val1 = EmitScalarExpr(E->getVal1());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_exchange:
- Val1 = EmitScalarExpr(E->getVal1());
- Dest = EmitScalarExpr(E->getVal2());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
+ Dest = EmitPointerWithAlignment(E->getVal2());
break;
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange:
- Val1 = EmitScalarExpr(E->getVal1());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
- Val2 = EmitScalarExpr(E->getVal2());
+ Val2 = EmitPointerWithAlignment(E->getVal2());
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
@@ -744,8 +744,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
CharUnits PointeeIncAmt =
getContext().getTypeSizeInChars(MemTy->getPointeeType());
Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
+ Val1 = Temp;
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
// Fall through.
@@ -774,31 +775,63 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType RValTy = E->getType().getUnqualifiedType();
- auto GetDest = [&] {
- if (!RValTy->isVoidType() && !Dest) {
- Dest = CreateMemTemp(RValTy, ".atomicdst");
- }
- return Dest;
- };
+  // The inlined atomics only operate on iN types, where N is a power of 2. We
+ // need to make sure (via temporaries if necessary) that all incoming values
+ // are compatible.
+ LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
+ AtomicInfo Atomics(*this, AtomicVal);
+
+ Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
+ if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
+ if (Dest.isValid())
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ else if (E->isCmpXChg())
+ Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
+ else if (!RValTy->isVoidType())
+ Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
bool UseOptimizedLibcall = false;
switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled above with EmitAtomicInit!");
+
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
// For these, only library calls for certain sizes exist.
UseOptimizedLibcall = true;
break;
- default:
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
// Only use optimized library calls for sizes for which they exist.
if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
UseOptimizedLibcall = true;
@@ -812,14 +845,19 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().getSizeType());
}
// Atomic address is the first or second parameter
- Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
+ getContext().VoidPtrTy);
std::string LibCallName;
QualType LoweredMemTy =
MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
QualType RetTy;
bool HaveRetTy = false;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
  // There is only one libcall for compare and exchange, because there is no
// optimisation benefit possible from a libcall version of a weak compare
// and exchange.
@@ -834,9 +872,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
- E->getExprLoc(), sizeChars);
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
+ getContext().VoidPtrTy);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -847,8 +886,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -858,8 +897,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -868,42 +907,71 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
break;
+ // T __atomic_add_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_add_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
+ // T __atomic_and_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_and_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
+ // T __atomic_or_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_or_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
+ // T __atomic_sub_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_sub_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
+ // T __atomic_xor_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_xor_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
+ break;
+ // T __atomic_nand_fetch_N(T *mem, T val, int order)
+ // T __atomic_fetch_nand_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And; // the NOT is special cased below
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ LibCallName = "__atomic_fetch_nand";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
- default: return EmitUnsupportedRValue(E, "atomic library call");
}
// Optimized functions have the size in their name.
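
[Editor's note — illustrative sketch, not part of this commit] Per the GCC atomic library convention cited earlier in this function, the optimized-libcall path appends the byte size to the base name (__atomic_fetch_add becomes __atomic_fetch_add_4 for a 4-byte operand). A sketch of that suffixing; the helper is invented:

    #include "llvm/ADT/StringExtras.h"
    #include <cstdint>
    #include <string>

    std::string optimizedName(std::string Base, uint64_t Size) {
      // Size must be 1, 2, 4, 8, or 16 for the sized variants to exist.
      return Base + "_" + llvm::utostr(Size); // e.g. "__atomic_fetch_add_4"
    }
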
@@ -919,30 +987,46 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
+ getContext().VoidPtrTy);
}
}
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
+  // PostOp is only needed for the atomic_*_fetch operations, and
+  // thus is only needed by, and implemented in, the
+  // UseOptimizedLibcall codepath.
+ assert(UseOptimizedLibcall || !PostOp);
+
RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
// The value is returned directly from the libcall.
- if (HaveRetTy && !RetTy->isVoidType())
+ if (E->isCmpXChg())
return Res;
- // The value is returned via an explicit out param.
- if (RetTy->isVoidType())
- return RValue::get(nullptr);
- // The value is returned directly for optimized libcalls but the caller is
- // expected an out-param.
- if (UseOptimizedLibcall) {
+
+  // The value is returned directly for optimized libcalls, but the expr
+  // provided an out-param, so store the result there.
+ if (UseOptimizedLibcall && Res.getScalarVal()) {
llvm::Value *ResVal = Res.getScalarVal();
- llvm::StoreInst *StoreDest = Builder.CreateStore(
+ if (PostOp) {
+ llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
+ ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
+ }
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ ResVal = Builder.CreateNot(ResVal);
+
+ Builder.CreateStore(
ResVal,
- Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
- StoreDest->setAlignment(Align);
+ Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
}
- return convertTempToRValue(Dest, RValTy, E->getExprLoc());
+
+ if (RValTy->isVoidType())
+ return RValue::get(nullptr);
+
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -952,45 +1036,35 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
- llvm::Type *ITy =
- llvm::IntegerType::get(getLLVMContext(), Size * 8);
- llvm::Value *OrigDest = GetDest();
- Ptr = Builder.CreateBitCast(
- Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
- if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
- if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
- if (Dest && !E->isCmpXChg())
- Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
-
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
case AtomicExpr::AO_ABI_memory_order_relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Monotonic);
+ Size, llvm::Monotonic);
break;
case AtomicExpr::AO_ABI_memory_order_consume:
case AtomicExpr::AO_ABI_memory_order_acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Acquire);
+ Size, llvm::Acquire);
break;
case AtomicExpr::AO_ABI_memory_order_release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Release);
+ Size, llvm::Release);
break;
case AtomicExpr::AO_ABI_memory_order_acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::AcquireRelease);
+ Size, llvm::AcquireRelease);
break;
case AtomicExpr::AO_ABI_memory_order_seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::SequentiallyConsistent);
+ Size, llvm::SequentiallyConsistent);
break;
default: // invalid order
// We should not ever get here normally, but it's hard to
@@ -999,7 +1073,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
+
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -1028,12 +1105,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Monotonic);
+ Size, llvm::Monotonic);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Acquire);
+ Size, llvm::Acquire);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
AcquireBB);
@@ -1043,7 +1120,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Release);
+ Size, llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
ReleaseBB);
@@ -1051,14 +1128,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::AcquireRelease);
+ Size, llvm::AcquireRelease);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::SequentiallyConsistent);
+ Size, llvm::SequentiallyConsistent);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
SeqCstBB);
@@ -1067,47 +1144,65 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
Builder.SetInsertPoint(ContBB);
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
+
+ assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
+ return convertTempToRValue(
+ Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ RValTy, E->getExprLoc());
}
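// Illustrative sketch (not part of the patch): the control flow the "long
// case" above emits when the memory order is not a compile-time constant.
// A switch over the runtime order value branches to one basic block per
// ordering that is legal for this operation, all rejoining at ContBB;
// cases that are not added (acquire stores, release loads, invalid
// orders) fall through to the monotonic default. Stub callback stands in
// for EmitAtomicOp.
enum {
  order_relaxed, order_consume, order_acquire,
  order_release, order_acq_rel, order_seq_cst
};

void dispatchAtomicOrder(int order, bool is_load, bool is_store,
                         void (*op)(int ordering)) {
  if ((order == order_consume || order == order_acquire) && !is_store)
    op(order_acquire);                          // AcquireBB
  else if (order == order_release && !is_load)
    op(order_release);                          // ReleaseBB
  else if (order == order_acq_rel && !is_load && !is_store)
    op(order_acq_rel);                          // AcqRelBB
  else if (order == order_seq_cst)
    op(order_seq_cst);                          // SeqCstBB
  else
    op(order_relaxed);                          // MonotonicBB (switch default)
}                                               // ContBB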
-llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
+Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
unsigned addrspace =
- cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
-RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
- AggValueSlot resultSlot,
- SourceLocation loc, bool AsValue) const {
+Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
+ llvm::Type *Ty = Addr.getElementType();
+ uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if (SourceSizeInBits != AtomicSizeInBits) {
+ Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemCpy(Tmp, Addr,
+ std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
+ Addr = Tmp;
+ }
+
+ return emitCastToAtomicIntPointer(Addr);
+}
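// Illustrative sketch (not part of the patch): what convertToAtomicIntPointer
// above does when the source value is narrower than the padded atomic width.
// The bytes are first copied into a fresh temporary of the full atomic size,
// which is then reinterpreted as the iN integer type. Hypothetical helper
// name; the sketch assumes the padded width fits in 64 bits.
#include <cstdint>
#include <cstring>

uint64_t widenToAtomicInt(const void *src, size_t srcSize,
                          size_t atomicSize) {
  uint64_t tmp = 0;                 // stands in for CreateTempAlloca()
  std::memcpy(&tmp, src, srcSize < atomicSize ? srcSize : atomicSize);
  return tmp;                       // load through emitCastToAtomicIntPointer
}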
+
+RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
+ AggValueSlot resultSlot,
+ SourceLocation loc,
+ bool asValue) const {
if (LVal.isSimple()) {
if (EvaluationKind == TEK_Aggregate)
return resultSlot.asRValue();
// Drill into the padding structure if we have one.
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
// Otherwise, just convert the temporary to an r-value using the
// normal conversion routine.
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
- if (!AsValue)
+ if (!asValue)
// Get RValue from temp memory as atomic for non-simple lvalues
- return RValue::get(
- CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
+ return RValue::get(CGF.Builder.CreateLoad(addr));
if (LVal.isBitField())
- return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
- addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
+ return CGF.EmitLoadOfBitfieldLValue(
+ LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
+ LVal.getAlignmentSource()));
if (LVal.isVectorElt())
- return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
- LVal.getType(),
- LVal.getAlignment()),
- loc);
+ return CGF.EmitLoadOfLValue(
+ LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
+ LVal.getAlignmentSource()), loc);
assert(LVal.isExtVectorElt());
return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
- addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
+ addr, LVal.getExtVectorElts(), LVal.getType(),
+ LVal.getAlignmentSource()));
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
@@ -1123,7 +1218,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
- : getAtomicAddress()->getType()->getPointerElementType();
+ : getAtomicAddress().getType()->getPointerElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
@@ -1135,25 +1230,22 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
// Create a temporary. This needs to be big enough to hold the
// atomic integer.
- llvm::Value *Temp;
+ Address Temp = Address::invalid();
bool TempIsVolatile = false;
- CharUnits TempAlignment;
if (AsValue && getEvaluationKind() == TEK_Aggregate) {
assert(!ResultSlot.isIgnored());
- Temp = ResultSlot.getAddr();
- TempAlignment = getValueAlignment();
+ Temp = ResultSlot.getAddress();
TempIsVolatile = ResultSlot.isVolatile();
} else {
Temp = CreateTempAlloca();
- TempAlignment = getAtomicAlignment();
}
// Slam the integer into the temporary.
- llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
- CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
+ Address CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateStore(IntVal, CastTemp)
->setVolatile(TempIsVolatile);
- return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
+ return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
@@ -1161,7 +1253,7 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
// void __atomic_load(size_t size, void *mem, void *return, int order);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
CGF.getContext().VoidPtrTy);
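// Illustrative sketch (not part of the patch): the contract of the generic
// libcall whose arguments are assembled above, per the signature quoted in
// the comment:
//   void __atomic_load(size_t size, void *mem, void *return, int order);
// A toy stand-in with the same contract — copy `size` bytes out of `mem`
// as one atomic action. Real libatomic implementations use lock-free fast
// paths for small sizes and hash the address to a lock shard for large
// objects; the single global mutex here is purely for illustration.
#include <cstddef>
#include <cstring>
#include <mutex>

static std::mutex g_atomic_lock;  // toy; not how libatomic shards its locks

void generic_atomic_load(size_t size, void *mem, void *ret, int /*order*/) {
  std::lock_guard<std::mutex> guard(g_atomic_lock);
  std::memcpy(ret, mem, size);
}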
@@ -1174,16 +1266,15 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile) {
// Okay, we're doing this natively.
- llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
Load->setAtomic(AO);
// Other decoration.
- Load->setAlignment(getAtomicAlignment().getQuantity());
if (IsVolatile)
Load->setVolatile(true);
if (LVal.getTBAAInfo())
- CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
+ CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
return Load;
}
@@ -1191,11 +1282,12 @@ llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
+ if (!CGM.getCodeGenOpts().MSVolatile) return false;
AtomicInfo AI(*this, LV);
bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
// An atomic is inline if we don't need to use a libcall.
bool AtomicIsInline = !AI.shouldUseLibcall();
- return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+ return IsVolatile && AtomicIsInline;
}
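// Illustrative sketch (not part of the patch): the source-level effect the
// check above gates under MSVC-compatible volatile semantics (/volatile:ms,
// Clang's -fms-volatile). A volatile access to an object that is
// inline-atomic (no libcall needed) is emitted as an atomic access; the
// exact orderings follow MSVC's documented acquire/release behavior, not
// anything shown in this sketch.
volatile long g_flag;

void publish() { g_flag = 1; }      // emitted as an atomic store
long consume() { return g_flag; }   // emitted as an atomic load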
/// A type is a candidate for having its loads and stores be made atomic if
@@ -1227,18 +1319,18 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
bool IsVolatile) {
// Check whether we should use a library call.
if (shouldUseLibcall()) {
- llvm::Value *TempAddr;
+ Address TempAddr = Address::invalid();
if (LVal.isSimple() && !ResultSlot.isIgnored()) {
assert(getEvaluationKind() == TEK_Aggregate);
- TempAddr = ResultSlot.getAddr();
+ TempAddr = ResultSlot.getAddress();
} else
TempAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
// Okay, turn that back into the original value or whole atomic (for
// non-simple lvalues) type.
- return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
+ return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
}
// Okay, we're doing this natively.
@@ -1246,7 +1338,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
// If we're ignoring an aggregate return, don't do anything.
if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
- return RValue::getAggregate(nullptr, false);
+ return RValue::getAggregate(Address::invalid(), false);
// Okay, turn that back into the original value or atomic (for non-simple
// lvalues) type.
@@ -1272,11 +1364,10 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
// any padding. Just do an aggregate copy of that type.
if (rvalue.isAggregate()) {
CGF.EmitAggregateCopy(getAtomicAddress(),
- rvalue.getAggregateAddr(),
+ rvalue.getAggregateAddress(),
getAtomicType(),
(rvalue.isVolatileQualified()
- || LVal.isVolatileQualified()),
- LVal.getAlignment());
+ || LVal.isVolatileQualified()));
return;
}
@@ -1299,15 +1390,14 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
-llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
+Address AtomicInfo::materializeRValue(RValue rvalue) const {
// Aggregate r-values are already in memory, and EmitAtomicStore
// requires them to be values of the atomic type.
if (rvalue.isAggregate())
- return rvalue.getAggregateAddr();
+ return rvalue.getAggregateAddress();
// Otherwise, make a temporary and materialize into it.
- LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
- getAtomicAlignment());
+ LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
return TempLV.getAddress();
@@ -1332,20 +1422,20 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
}
// Otherwise, we need to go through memory.
// Put the r-value in memory.
- llvm::Value *Addr = materializeRValue(RVal);
+ Address Addr = materializeRValue(RVal);
// Cast the temporary to the atomic int type and pull a value out.
Addr = emitCastToAtomicIntPointer(Addr);
- return CGF.Builder.CreateAlignedLoad(Addr,
- getAtomicAlignment().getQuantity());
+ return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
// Do the atomic store.
- auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
- auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
+ ExpectedVal, DesiredVal,
Success, Failure);
// Other decoration.
Inst->setVolatile(LVal.isVolatileQualified());
@@ -1366,7 +1456,7 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
// void *desired, int success, int failure);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
CGF.getContext().VoidPtrTy);
@@ -1394,13 +1484,14 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
// Check whether we should use a library call.
if (shouldUseLibcall()) {
// Produce a source address.
- auto *ExpectedAddr = materializeRValue(Expected);
- auto *DesiredAddr = materializeRValue(Desired);
- auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
+ Address ExpectedAddr = materializeRValue(Expected);
+ Address DesiredAddr = materializeRValue(Desired);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
Success, Failure);
return std::make_pair(
- convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false),
+ convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
Res);
}
@@ -1419,42 +1510,41 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
- llvm::Value *DesiredAddr) {
- llvm::Value *Ptr = nullptr;
- LValue UpdateLVal;
+ Address DesiredAddr) {
RValue UpRVal;
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
if (AtomicLVal.isSimple()) {
UpRVal = OldRVal;
- DesiredLVal =
- LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
- AtomicLVal.getAlignment(), CGF.CGM.getContext());
+ DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
} else {
// Build new lvalue for temp address
- Ptr = Atomics.materializeRValue(OldRVal);
+ Address Ptr = Atomics.materializeRValue(OldRVal);
+ LValue UpdateLVal;
if (AtomicLVal.isBitField()) {
UpdateLVal =
LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else if (AtomicLVal.isVectorElt()) {
UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
DesiredLVal = LValue::MakeVectorElt(
DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
} else {
assert(AtomicLVal.isExtVectorElt());
UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
}
UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
@@ -1476,26 +1566,26 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
- llvm::Value *ExpectedAddr = CreateTempAlloca();
+ Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
- auto *DesiredAddr = CreateTempAlloca();
+ Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- auto *OldVal = CGF.Builder.CreateAlignedLoad(
- ExpectedAddr, getAtomicAlignment().getQuantity());
- CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
- auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false);
+ auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
+ AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
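// Illustrative sketch (not part of the patch): the loop that
// EmitAtomicUpdateLibcall builds above, written out against the
// compare-exchange contract. Load the current value into "expected",
// compute "desired" from it, and retry while the CAS fails; on failure the
// fresh value is written back into "expected" (exactly what the
// __atomic_compare_exchange libcall does to ExpectedAddr), so the loop
// needs no explicit reload. std::atomic stand-in, not libatomic.
#include <atomic>

template <class T, class UpdateOp>
void atomicUpdate(std::atomic<T> &obj, UpdateOp update) {
  T expected = obj.load();                  // EmitAtomicLoadLibcall
  T desired;
  do {                                      // atomic_cont
    desired = update(expected);             // EmitAtomicUpdateValue
  } while (!obj.compare_exchange_strong(expected, desired));
}                                           // atomic_exit

// e.g. atomicUpdate(counter, [](int v) { return v * 2; });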
@@ -1515,19 +1605,16 @@ void AtomicInfo::EmitAtomicUpdateOp(
llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
- auto *NewAtomicAddr = CreateTempAlloca();
- auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
- auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
- NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
// Try to write new value using cmpxchg operation
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
@@ -1536,23 +1623,25 @@ void AtomicInfo::EmitAtomicUpdateOp(
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
- RValue UpdateRVal, llvm::Value *DesiredAddr) {
+ RValue UpdateRVal, Address DesiredAddr) {
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
// Build new lvalue for temp address
if (AtomicLVal.isBitField()) {
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else if (AtomicLVal.isVectorElt()) {
DesiredLVal =
LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else {
assert(AtomicLVal.isExtVectorElt());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
}
DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
// Store new value in the corresponding memory area
@@ -1564,24 +1653,23 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
RValue UpdateRVal, bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
- llvm::Value *ExpectedAddr = CreateTempAlloca();
+ Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
- auto *DesiredAddr = CreateTempAlloca();
+ Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- auto *OldVal = CGF.Builder.CreateAlignedLoad(
- ExpectedAddr, getAtomicAlignment().getQuantity());
- CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -1600,17 +1688,14 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
- auto *NewAtomicAddr = CreateTempAlloca();
- auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
- auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
- NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
// Try to write new value using cmpxchg operation
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
@@ -1661,8 +1746,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
- rvalue.getAggregateAddr()->getType()->getPointerElementType()
- == dest.getAddress()->getType()->getPointerElementType());
+ rvalue.getAggregateAddress().getElementType()
+ == dest.getAddress().getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
@@ -1677,15 +1762,16 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// Check whether we should use a library call.
if (atomics.shouldUseLibcall()) {
// Produce a source address.
- llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
+ Address srcAddr = atomics.materializeRValue(rvalue);
// void __atomic_store(size_t size, void *mem, void *val, int order)
CallArgList args;
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
+ args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
args.add(RValue::get(llvm::ConstantInt::get(
IntTy, AtomicInfo::translateAtomicOrdering(AO))),
getContext().IntTy);
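// Illustrative sketch (not part of the patch): the libcall store path
// above, end to end. The r-value is first spilled to a temporary
// (materializeRValue) so its address can be passed as void*, matching the
// argument order in the quoted signature:
//   void __atomic_store(size_t size, void *mem, void *val, int order)
#include <cstddef>

void generic_atomic_store(size_t, void *, void *, int); // toy, cf. the load sketch

template <class T>
void atomicStoreByLibcall(T &dest, T value, int order) {
  T tmp = value;                            // materializeRValue: into memory
  generic_atomic_store(sizeof(T), &dest, &tmp, order);
}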
@@ -1697,10 +1783,10 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
- llvm::Value *addr =
+ Address addr =
atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
intValue = Builder.CreateIntCast(
- intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
+ intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
// Initializations don't need to be atomic.
@@ -1708,11 +1794,10 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
store->setAtomic(AO);
// Other decoration.
- store->setAlignment(dest.getAlignment().getQuantity());
if (IsVolatile)
store->setVolatile(true);
if (dest.getTBAAInfo())
- CGM.DecorateInstruction(store, dest.getTBAAInfo());
+ CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
return;
}
@@ -1729,11 +1814,11 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!Expected.isAggregate() ||
- Expected.getAggregateAddr()->getType()->getPointerElementType() ==
- Obj.getAddress()->getType()->getPointerElementType());
+ Expected.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
assert(!Desired.isAggregate() ||
- Desired.getAggregateAddr()->getType()->getPointerElementType() ==
- Obj.getAddress()->getType()->getPointerElementType());
+ Desired.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 3fd344c389a5..ba2941e9df4a 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -1,4 +1,4 @@
-//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===//
+//===--- CGBlocks.cpp - Emit LLVM Code for declarations ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -30,7 +30,7 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- StructureType(nullptr), Block(block),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block),
DominatingIP(nullptr) {
// Skip asm prefix, if any. 'name' is usually taken directly from
@@ -40,7 +40,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
}
// Anchor the vtable to this translation unit.
-CodeGenModule::ByrefHelpers::~ByrefHelpers() {}
+BlockByrefHelpers::~BlockByrefHelpers() {}
/// Build the given block as a global block.
static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
@@ -78,7 +78,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
ASTContext &C = CGM.getContext();
llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
- llvm::Type *i8p = NULL;
+ llvm::Type *i8p = nullptr;
if (CGM.getLangOpts().OpenCL)
i8p =
llvm::Type::getInt8PtrTy(
@@ -111,7 +111,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
std::string typeAtEncoding =
CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
elements.push_back(llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
+ CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
if (C.getLangOpts().ObjC1) {
@@ -203,46 +203,36 @@ namespace {
Capture(capture), Type(type) {}
/// Tell the block info that this chunk has the given field index.
- void setIndex(CGBlockInfo &info, unsigned index) {
- if (!Capture)
+ void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
+ if (!Capture) {
info.CXXThisIndex = index;
- else
- info.Captures[Capture->getVariable()]
- = CGBlockInfo::Capture::makeIndex(index);
+ info.CXXThisOffset = offset;
+ } else {
+ info.Captures.insert({Capture->getVariable(),
+ CGBlockInfo::Capture::makeIndex(index, offset)});
+ }
}
};
/// Order by 1) all __strong together 2) next, all byref together 3) next,
/// all __weak together. Preserve descending alignment in all situations.
bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
- CharUnits LeftValue, RightValue;
- bool LeftByref = left.Capture ? left.Capture->isByRef() : false;
- bool RightByref = right.Capture ? right.Capture->isByRef() : false;
-
- if (left.Lifetime == Qualifiers::OCL_Strong &&
- left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(64);
- else if (LeftByref && left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(32);
- else if (left.Lifetime == Qualifiers::OCL_Weak &&
- left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(16);
- else
- LeftValue = left.Alignment;
- if (right.Lifetime == Qualifiers::OCL_Strong &&
- right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(64);
- else if (RightByref && right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(32);
- else if (right.Lifetime == Qualifiers::OCL_Weak &&
- right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(16);
- else
- RightValue = right.Alignment;
-
- return LeftValue > RightValue;
+ if (left.Alignment != right.Alignment)
+ return left.Alignment > right.Alignment;
+
+ auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
+ if (chunk.Capture && chunk.Capture->isByRef())
+ return 1;
+ if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ return 0;
+ if (chunk.Lifetime == Qualifiers::OCL_Weak)
+ return 2;
+ return 3;
+ };
+
+ return getPrefOrder(left) < getPrefOrder(right);
}
-}
+} // end anonymous namespace
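// Illustrative sketch (not part of the patch): the simplified ordering
// above as a standalone comparator. Primary key is descending alignment;
// within one alignment class the preference order is __strong, then
// byref, then __weak, then everything else. (In the original, the byref
// check precedes the lifetime checks because a byref capture may also
// carry a lifetime qualifier.)
#include <algorithm>
#include <vector>

enum Pref { Strong = 0, Byref = 1, Weak = 2, Other = 3 };
struct Chunk { unsigned alignment; Pref pref; };

void layoutOrder(std::vector<Chunk> &chunks) {
  std::stable_sort(chunks.begin(), chunks.end(),
                   [](const Chunk &l, const Chunk &r) {
                     if (l.alignment != r.alignment)
                       return l.alignment > r.alignment;
                     return l.pref < r.pref;
                   });
}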
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
@@ -302,31 +292,20 @@ static CharUnits getLowBit(CharUnits v) {
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
SmallVectorImpl<llvm::Type*> &elementTypes) {
- ASTContext &C = CGM.getContext();
-
- // The header is basically a 'struct { void *; int; int; void *; void *; }'.
- CharUnits ptrSize, ptrAlign, intSize, intAlign;
- std::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
- std::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
-
- // Are there crazy embedded platforms where this isn't true?
- assert(intSize <= ptrSize && "layout assumptions horribly violated");
+ // The header is basically 'struct { void *; int; int; void *; void *; }'.
+ // Assert that that struct is packed.
+ assert(CGM.getIntSize() <= CGM.getPointerSize());
+ assert(CGM.getIntAlign() <= CGM.getPointerAlign());
+ assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign()));
- CharUnits headerSize = ptrSize;
- if (2 * intSize < ptrAlign) headerSize += ptrSize;
- else headerSize += 2 * intSize;
- headerSize += 2 * ptrSize;
-
- info.BlockAlign = ptrAlign;
- info.BlockSize = headerSize;
+ info.BlockAlign = CGM.getPointerAlign();
+ info.BlockSize = 3 * CGM.getPointerSize() + 2 * CGM.getIntSize();
assert(elementTypes.empty());
- llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
- llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
- elementTypes.push_back(i8p);
- elementTypes.push_back(intTy);
- elementTypes.push_back(intTy);
- elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.VoidPtrTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.VoidPtrTy);
elementTypes.push_back(CGM.getBlockDescriptorType());
assert(elementTypes.size() == BlockHeaderSize);
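// Illustrative sketch (not part of the patch): the header whose layout is
// asserted above, written as the equivalent C++ struct. Field order
// matches the elementTypes pushed here, and BlockSize comes out to
// 3 * pointer-size + 2 * int-size because the asserts guarantee the five
// fields pack with no padding.
struct BlockLiteralHeader {
  void *isa;          // CGM.VoidPtrTy
  int flags;          // CGM.IntTy
  int reserved;       // CGM.IntTy
  void *invoke;       // CGM.VoidPtrTy (the invoke function pointer)
  void *descriptor;   // CGM.getBlockDescriptorType()
};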
@@ -365,6 +344,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
"Can't capture 'this' outside a method");
QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C);
+ // Theoretically, this could be in a different address space, so
+ // don't assume standard pointer size/align.
llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(thisType);
@@ -384,15 +365,12 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.NeedsCopyDispose = true;
// Just use void* instead of a pointer to the byref type.
- QualType byRefPtrTy = C.VoidPtrTy;
-
- llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
- std::pair<CharUnits,CharUnits> tinfo
- = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
- maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+ CharUnits align = CGM.getPointerAlign();
+ maxFieldAlign = std::max(maxFieldAlign, align);
- layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
- Qualifiers::OCL_None, &CI, llvmType));
+ layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
+ Qualifiers::OCL_None, &CI,
+ CGM.VoidPtrTy));
continue;
}
@@ -421,9 +399,15 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// Block pointers require copy/dispose. So do Objective-C pointers.
} else if (variable->getType()->isObjCRetainableType()) {
- info.NeedsCopyDispose = true;
- // used for mrr below.
- lifetime = Qualifiers::OCL_Strong;
+ // But honor the inert __unsafe_unretained qualifier, which doesn't
+ // actually make it into the type system.
+ if (variable->getType()->isObjCInertUnsafeUnretainedType()) {
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ info.NeedsCopyDispose = true;
+ // used for mrr below.
+ lifetime = Qualifiers::OCL_Strong;
+ }
// So do types that require non-trivial copy construction.
} else if (CI.hasCopyExpr()) {
@@ -504,18 +488,13 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
for (; li != le; ++li) {
assert(endAlign >= li->Alignment);
- li->setIndex(info, elementTypes.size());
+ li->setIndex(info, elementTypes.size(), blockSize);
elementTypes.push_back(li->Type);
blockSize += li->Size;
endAlign = getLowBit(blockSize);
// ...until we get to the alignment of the maximum field.
if (endAlign >= maxFieldAlign) {
- if (li == first) {
- // No user field was appended. So, a gap was added.
- // Save total gap size for use in block layout bit map.
- info.BlockHeaderForcedGapSize = li->Size;
- }
break;
}
}
@@ -532,6 +511,12 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign);
CharUnits padding = newBlockSize - blockSize;
+ // If we haven't yet added any fields, remember that there was an
+ // initial gap; this needs to go into the block layout bit map.
+ if (blockSize == info.BlockHeaderForcedGapOffset) {
+ info.BlockHeaderForcedGapSize = padding;
+ }
+
elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
padding.getQuantity()));
blockSize = newBlockSize;
@@ -556,7 +541,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
endAlign = getLowBit(blockSize);
}
assert(endAlign >= li->Alignment);
- li->setIndex(info, elementTypes.size());
+ li->setIndex(info, elementTypes.size(), blockSize);
elementTypes.push_back(li->Type);
blockSize += li->Size;
endAlign = getLowBit(blockSize);
@@ -586,9 +571,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
if (blockInfo.CanBeGlobal) return;
// Make the allocation for the block.
- blockInfo.Address =
- CGF.CreateTempAlloca(blockInfo.StructureType, "block");
- blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+ blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
+ blockInfo.BlockAlign, "block");
// If there are cleanups to emit, enter them (but inactive).
if (!blockInfo.NeedsCopyDispose) return;
@@ -621,12 +605,13 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
}
// GEP down to the address.
- llvm::Value *addr = CGF.Builder.CreateStructGEP(
- blockInfo.StructureType, blockInfo.Address, capture.getIndex());
+ Address addr = CGF.Builder.CreateStructGEP(blockInfo.LocalAddress,
+ capture.getIndex(),
+ capture.getOffset());
// We can use that GEP as the dominating IP.
if (!blockInfo.DominatingIP)
- blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
CleanupKind cleanupKind = InactiveNormalCleanup;
bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
@@ -721,9 +706,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- llvm::Type *blockTy = blockInfo.StructureType;
- llvm::AllocaInst *blockAddr = blockInfo.Address;
- assert(blockAddr && "block has no address!");
+ Address blockAddr = blockInfo.LocalAddress;
+ assert(blockAddr.isValid() && "block has no address!");
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
@@ -732,27 +716,44 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
- // Initialize the block literal.
- Builder.CreateStore(
- isa, Builder.CreateStructGEP(blockTy, blockAddr, 0, "block.isa"));
- Builder.CreateStore(
- llvm::ConstantInt::get(IntTy, flags.getBitMask()),
- Builder.CreateStructGEP(blockTy, blockAddr, 1, "block.flags"));
- Builder.CreateStore(
- llvm::ConstantInt::get(IntTy, 0),
- Builder.CreateStructGEP(blockTy, blockAddr, 2, "block.reserved"));
- Builder.CreateStore(
- blockFn, Builder.CreateStructGEP(blockTy, blockAddr, 3, "block.invoke"));
- Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockTy, blockAddr, 4,
- "block.descriptor"));
+ auto projectField =
+ [&](unsigned index, CharUnits offset, const Twine &name) -> Address {
+ return Builder.CreateStructGEP(blockAddr, index, offset, name);
+ };
+ auto storeField =
+ [&](llvm::Value *value, unsigned index, CharUnits offset,
+ const Twine &name) {
+ Builder.CreateStore(value, projectField(index, offset, name));
+ };
+
+ // Initialize the block header.
+ {
+ // We assume all the header fields are densely packed.
+ unsigned index = 0;
+ CharUnits offset;
+ auto addHeaderField =
+ [&](llvm::Value *value, CharUnits size, const Twine &name) {
+ storeField(value, index, offset, name);
+ offset += size;
+ index++;
+ };
+
+ addHeaderField(isa, getPointerSize(), "block.isa");
+ addHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "block.flags");
+ addHeaderField(llvm::ConstantInt::get(IntTy, 0),
+ getIntSize(), "block.reserved");
+ addHeaderField(blockFn, getPointerSize(), "block.invoke");
+ addHeaderField(descriptor, getPointerSize(), "block.descriptor");
+ }
// Finally, capture all the values into the block.
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// First, 'this'.
if (blockDecl->capturesCXXThis()) {
- llvm::Value *addr = Builder.CreateStructGEP(
- blockTy, blockAddr, blockInfo.CXXThisIndex, "block.captured-this.addr");
+ Address addr = projectField(blockInfo.CXXThisIndex, blockInfo.CXXThisOffset,
+ "block.captured-this.addr");
Builder.CreateStore(LoadCXXThis(), addr);
}
@@ -765,35 +766,37 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (capture.isConstant()) continue;
QualType type = variable->getType();
- CharUnits align = getContext().getDeclAlign(variable);
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
- llvm::Value *blockField = Builder.CreateStructGEP(
- blockTy, blockAddr, capture.getIndex(), "block.captured");
+ Address blockField =
+ projectField(capture.getIndex(), capture.getOffset(), "block.captured");
// Compute the address of the thing we're going to move into the
// block literal.
- llvm::Value *src;
+ Address src = Address::invalid();
if (BlockInfo && CI.isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
BlockInfo->getCapture(variable);
// This is a [[type]]*, except that a byref entry will just be an i8**.
- src = Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(),
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
+ enclosingCapture.getOffset(),
"block.capture.addr");
} else if (blockDecl->isConversionFromLambda()) {
// The lambda capture in a lambda's conversion-to-block-pointer is
// special; we'll simply emit it directly.
- src = nullptr;
+ src = Address::invalid();
} else {
// Just look it up in the locals map, which will give us back a
// [[type]]*. If that doesn't work, do the more elaborate DRE
// emission.
- src = LocalDeclMap.lookup(variable);
- if (!src) {
+ auto it = LocalDeclMap.find(variable);
+ if (it != LocalDeclMap.end()) {
+ src = it->second;
+ } else {
DeclRefExpr declRef(
const_cast<VarDecl *>(variable),
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type,
@@ -808,14 +811,14 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// live a shorter life than the stack byref anyway.
if (CI.isByRef()) {
// Get a void* that points to the byref struct.
+ llvm::Value *byrefPointer;
if (CI.isNested())
- src = Builder.CreateAlignedLoad(src, align.getQuantity(),
- "byref.capture");
+ byrefPointer = Builder.CreateLoad(src, "byref.capture");
else
- src = Builder.CreateBitCast(src, VoidPtrTy);
+ byrefPointer = Builder.CreateBitCast(src.getPointer(), VoidPtrTy);
// Write that void* into the capture field.
- Builder.CreateAlignedStore(src, blockField, align.getQuantity());
+ Builder.CreateStore(byrefPointer, blockField);
// If we have a copy constructor, evaluate that into the block field.
} else if (const Expr *copyExpr = CI.getCopyExpr()) {
@@ -823,7 +826,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If we have a lambda conversion, emit the expression
// directly into the block instead.
AggValueSlot Slot =
- AggValueSlot::forAddr(blockField, align, Qualifiers(),
+ AggValueSlot::forAddr(blockField, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -834,9 +837,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If it's a reference variable, copy the reference into the block field.
} else if (type->isReferenceType()) {
- llvm::Value *ref =
- Builder.CreateAlignedLoad(src, align.getQuantity(), "ref.val");
- Builder.CreateAlignedStore(ref, blockField, align.getQuantity());
+ llvm::Value *ref = Builder.CreateLoad(src, "ref.val");
+ Builder.CreateStore(ref, blockField);
// If this is an ARC __strong block-pointer variable, don't do a
// block copy.
@@ -848,13 +850,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
} else if (type.getObjCLifetime() == Qualifiers::OCL_Strong &&
type->isBlockPointerType()) {
// Load the block and do a simple retain.
- LValue srcLV = MakeAddrLValue(src, type, align);
- llvm::Value *value = EmitLoadOfScalar(srcLV, SourceLocation());
+ llvm::Value *value = Builder.CreateLoad(src, "block.captured_block");
value = EmitARCRetainNonBlock(value);
// Do a primitive store to the block field.
- LValue destLV = MakeAddrLValue(blockField, type, align);
- EmitStoreOfScalar(value, destLV, /*init*/ true);
+ Builder.CreateStore(value, blockField);
// Otherwise, fake up a POD copy into the block field.
} else {
@@ -876,7 +876,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// attributed to a reasonable location - otherwise it may be attributed to
// locations of subexpressions in the initialization.
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
- MakeAddrLValue(blockField, type, align),
+ MakeAddrLValue(blockField, type, AlignmentSource::Decl),
/*captured by init*/ false);
}
@@ -891,7 +891,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Cast to the converted block-pointer type, which happens (somewhat
// unfortunately) to be a pointer to function type.
llvm::Value *result =
- Builder.CreateBitCast(blockAddr,
+ Builder.CreateBitCast(blockAddr.getPointer(),
ConvertType(blockInfo.getBlockExpr()->getType()));
return result;
@@ -949,7 +949,6 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
return GenericBlockLiteralType;
}
-
RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
const BlockPointerType *BPT =
@@ -966,8 +965,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
- llvm::Value *FuncPtr = Builder.CreateStructGEP(
- CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
+ llvm::Value *FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
@@ -978,11 +977,10 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
QualType FnType = BPT->getPointeeType();
// And the rest of the arguments.
- EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
- E->arg_begin(), E->arg_end());
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- llvm::Value *Func = Builder.CreateLoad(FuncPtr);
+ llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -998,41 +996,35 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
-llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
- bool isByRef) {
+Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
assert(BlockInfo && "evaluating block ref without block information?");
const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
// Handle constant captures.
- if (capture.isConstant()) return LocalDeclMap[variable];
+ if (capture.isConstant()) return LocalDeclMap.find(variable)->second;
- llvm::Value *addr =
- Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(),
- capture.getIndex(), "block.capture.addr");
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ capture.getOffset(), "block.capture.addr");
if (isByRef) {
// addr should be a void** right now. Load, then cast the result
// to byref*.
- addr = Builder.CreateLoad(addr);
- auto *byrefType = BuildByRefType(variable);
- llvm::PointerType *byrefPointerType = llvm::PointerType::get(byrefType, 0);
- addr = Builder.CreateBitCast(addr, byrefPointerType,
- "byref.addr");
-
- // Follow the forwarding pointer.
- addr = Builder.CreateStructGEP(byrefType, addr, 1, "byref.forwarding");
- addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
-
- // Cast back to byref* and GEP over to the actual object.
- addr = Builder.CreateBitCast(addr, byrefPointerType);
- addr = Builder.CreateStructGEP(byrefType, addr,
- getByRefValueLLVMField(variable).second,
- variable->getNameAsString());
+ auto &byrefInfo = getBlockByrefInfo(variable);
+ addr = Address(Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+
+ auto byrefPointerType = llvm::PointerType::get(byrefInfo.Type, 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr");
+
+ addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
+ variable->getName());
}
- if (variable->getType()->isReferenceType())
- addr = Builder.CreateLoad(addr, "ref.tmp");
+ if (auto refType = variable->getType()->getAs<ReferenceType>()) {
+ addr = EmitLoadOfReference(addr, refType);
+ }
return addr;
}
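// Illustrative sketch (not part of the patch): the layout behind the byref
// path above, per the blocks ABI. The capture slot holds a pointer to a
// __block variable's byref struct; its `forwarding` field points at itself
// until the variable is moved to the heap, after which the stack copy
// forwards to the heap copy. (When the flags request copy/dispose helpers,
// two helper pointers precede the variable; omitted here.) Sketch of the
// emitted access for a captured `__block int x`:
struct Block_byref_x {
  void *isa;
  Block_byref_x *forwarding;
  int flags;
  int size;
  int x;                              // the variable itself
};

int *addrOfByrefCapture(void *captureSlot) {
  auto *byref = *static_cast<Block_byref_x **>(captureSlot); // load the void**
  byref = byref->forwarding;          // follow the forwarding pointer
  return &byref->x;                   // address the actual object
}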
@@ -1049,7 +1041,7 @@ CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
// Using that metadata, generate the actual block function.
llvm::Constant *blockFn;
{
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ CodeGenFunction::DeclMapTy LocalDeclMap;
blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
blockInfo,
LocalDeclMap,
@@ -1103,6 +1095,44 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
+void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
+ unsigned argNum,
+ llvm::Value *arg) {
+ assert(BlockInfo && "not emitting prologue of block invocation function?!");
+
+ llvm::Value *localAddr = nullptr;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot to let the debug info survive the RA.
+ Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
+ Builder.CreateStore(arg, alloc);
+ localAddr = Builder.CreateLoad(alloc);
+ }
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, arg, argNum,
+ localAddr, Builder);
+ }
+ }
+
+ SourceLocation StartLoc = BlockInfo->getBlockExpr()->getBody()->getLocStart();
+ ApplyDebugLocation Scope(*this, StartLoc);
+
+ // Instead of messing around with LocalDeclMap, just set the value
+ // directly as BlockPointer.
+ BlockPointer = Builder.CreateBitCast(arg,
+ BlockInfo->StructureType->getPointerTo(),
+ "block");
+}
+
+Address CodeGenFunction::LoadBlockStruct() {
+ assert(BlockInfo && "not in a block invocation function!");
+ assert(BlockPointer && "no block pointer set!");
+ return Address(BlockPointer, BlockInfo->BlockAlign);
+}
+
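// Illustrative sketch (not part of the patch): what setBlockContextParameter
// and LoadBlockStruct above are servicing. Every block invoke function
// receives the block literal as an implicit leading parameter, and captures
// are reached through it. For a hypothetical
//   int (^blk)(int) = ^(int x) { return x + captured; };
// the invoke function is morally:
struct BlockLiteralWithInt {
  void *isa;
  int flags;
  int reserved;
  int (*invoke)(BlockLiteralWithInt *, int);
  void *descriptor;
  int captured;                       // laid out by computeBlockInfo
};

int blkInvoke(BlockLiteralWithInt *block, int x) {
  // BlockPointer = bitcast of the incoming literal argument
  return x + block->captured;
}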
llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &blockInfo,
@@ -1122,7 +1152,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
const auto *var = dyn_cast<VarDecl>(i->first);
if (var && !var->hasLocalStorage())
- LocalDeclMap[var] = i->second;
+ setAddrOfLocalVar(var, i->second);
}
// Begin building the function declaration.
@@ -1163,35 +1193,28 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
blockInfo.getBlockExpr()->getBody()->getLocStart());
// Okay. Undo some of what StartFunction did.
-
- // Pull the 'self' reference out of the local decl map.
- llvm::Value *blockAddr = LocalDeclMap[&selfDecl];
- LocalDeclMap.erase(&selfDecl);
- BlockPointer = Builder.CreateBitCast(blockAddr,
- blockInfo.StructureType->getPointerTo(),
- "block");
+
// At -O0 we generate an explicit alloca for the BlockPointer, so the RA
// won't delete the dbg.declare intrinsics for captured variables.
llvm::Value *BlockPointerDbgLoc = BlockPointer;
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
// Allocate a stack slot for it, so we can point the debugger to it
- llvm::AllocaInst *Alloca = CreateTempAlloca(BlockPointer->getType(),
- "block.addr");
- unsigned Align = getContext().getDeclAlign(&selfDecl).getQuantity();
- Alloca->setAlignment(Align);
+ Address Alloca = CreateTempAlloca(BlockPointer->getType(),
+ getPointerAlign(),
+ "block.addr");
// Set the DebugLocation to empty, so the store is recognized as a
// frame setup instruction by llvm::DwarfDebug::beginFunction().
auto NL = ApplyDebugLocation::CreateEmpty(*this);
- Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
- BlockPointerDbgLoc = Alloca;
+ Builder.CreateStore(BlockPointer, Alloca);
+ BlockPointerDbgLoc = Alloca.getPointer();
}
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
if (blockDecl->capturesCXXThis()) {
- llvm::Value *addr =
- Builder.CreateStructGEP(blockInfo.StructureType, BlockPointer,
- blockInfo.CXXThisIndex, "block.captured-this");
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), blockInfo.CXXThisIndex,
+ blockInfo.CXXThisOffset, "block.captured-this");
CXXThisValue = Builder.CreateLoad(addr, "this");
}
@@ -1201,15 +1224,13 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (!capture.isConstant()) continue;
- unsigned align = getContext().getDeclAlign(variable).getQuantity();
-
- llvm::AllocaInst *alloca =
- CreateMemTemp(variable->getType(), "block.captured-const");
- alloca->setAlignment(align);
+ CharUnits align = getContext().getDeclAlign(variable);
+ Address alloca =
+ CreateMemTemp(variable->getType(), align, "block.captured-const");
- Builder.CreateAlignedStore(capture.getConstant(), alloca, align);
+ Builder.CreateStore(capture.getConstant(), alloca);
- LocalDeclMap[variable] = alloca;
+ setAddrOfLocalVar(variable, alloca);
}
// Save a spot to insert the debug information for all the DeclRefExprs.
@@ -1220,7 +1241,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
if (IsLambdaConversionToBlock)
EmitLambdaBlockInvokeBody();
else {
- PGO.assignRegionCounters(blockDecl, fn);
+ PGO.assignRegionCounters(GlobalDecl(blockDecl), fn);
incrementProfileCounter(blockDecl->getBody());
EmitStmt(blockDecl->getBody());
}
@@ -1243,15 +1264,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
>= CodeGenOptions::LimitedDebugInfo) {
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
- DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ auto addr = LocalDeclMap.find(variable)->second;
+ DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
Builder);
continue;
}
- DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointerDbgLoc,
- Builder, blockInfo,
- entry_ptr == entry->end()
- ? nullptr : entry_ptr);
+ DI->EmitDeclareOfBlockDeclRefVariable(
+ variable, BlockPointerDbgLoc, Builder, blockInfo,
+ entry_ptr == entry->end() ? nullptr : &*entry_ptr);
}
}
// Recover location if it was changed in the above loop.
@@ -1288,7 +1309,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
}
*/
-
/// Generate the copy-helper function for a block closure object:
/// static void block_copy_helper(block_t *dst, block_t *src);
/// The runtime will have previously initialized 'dst' by doing a
@@ -1330,18 +1350,21 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
nullptr, SC_Static,
false,
false);
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
- src = Builder.CreateLoad(src);
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
src = Builder.CreateBitCast(src, structPtrTy, "block.source");
- llvm::Value *dst = GetAddrOfLocalVar(&dstDecl);
- dst = Builder.CreateLoad(dst);
+ Address dst = GetAddrOfLocalVar(&dstDecl);
+ dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -1375,40 +1398,38 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures:
- if (getLangOpts().ObjCAutoRefCount) {
- Qualifiers qs = type.getQualifiers();
-
- // We need to register __weak direct captures with the runtime.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
- useARCWeakCopy = true;
-
- // We need to retain the copied value for __strong direct captures.
- } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
- // If it's a block pointer, we have to copy the block and
- // assign that to the destination pointer, so we might as
- // well use _Block_object_assign. Otherwise we can avoid that.
- if (!isBlockPointer)
- useARCStrongCopy = true;
-
- // Otherwise the memcpy is fine.
- } else {
- continue;
- }
+ Qualifiers qs = type.getQualifiers();
+
+ // We need to register __weak direct captures with the runtime.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakCopy = true;
+
+ // We need to retain the copied value for __strong direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // If it's a block pointer, we have to copy the block and
+ // assign that to the destination pointer, so we might as
+ // well use _Block_object_assign. Otherwise we can avoid that.
+ if (!isBlockPointer)
+ useARCStrongCopy = true;
// Non-ARC captures of retainable pointers are strong and
// therefore require a call to _Block_object_assign.
- } else {
+ } else if (!qs.getObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
// fall through
+
+ // Otherwise the memcpy is fine.
+ } else {
+ continue;
}
+
+ // For all other types, the memcpy is fine.
} else {
continue;
}
unsigned index = capture.getIndex();
- llvm::Value *srcField =
- Builder.CreateStructGEP(blockInfo.StructureType, src, index);
- llvm::Value *dstField =
- Builder.CreateStructGEP(blockInfo.StructureType, dst, index);
+ Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
+ Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
// If there's an explicit copy expression, we do that.
if (copyExpr) {
@@ -1435,11 +1456,12 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// We don't need this anymore, so kill it. It's not quite
// worth the annoyance to avoid creating it in the first place.
- cast<llvm::Instruction>(dstField)->eraseFromParent();
+ cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
}
} else {
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ llvm::Value *dstAddr =
+ Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
llvm::Value *args[] = {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
@@ -1502,6 +1524,9 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(), II, C.VoidTy,
nullptr, SC_Static,
false, false);
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
// Create a scope with an artificial location for the body of this function.
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
@@ -1509,8 +1534,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
- src = Builder.CreateLoad(src);
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
src = Builder.CreateBitCast(src, structPtrTy, "block");
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -1544,29 +1569,31 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures.
- if (getLangOpts().ObjCAutoRefCount) {
- Qualifiers qs = type.getQualifiers();
+ Qualifiers qs = type.getQualifiers();
- // Don't generate special dispose logic for a captured object
- // unless it's __strong or __weak.
- if (!qs.hasStrongOrWeakObjCLifetime())
- continue;
+ // Use objc_storeStrong for __strong direct captures; the
+ // dynamic tools really like it when we do this.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ useARCStrongDestroy = true;
+
+ // Support __weak direct captures.
+ } else if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ useARCWeakDestroy = true;
- // Support __weak direct captures.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
- useARCWeakDestroy = true;
+ // Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ } else if (!qs.hasObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
+ // fall through
- // Tools really want us to use objc_storeStrong here.
- else
- useARCStrongDestroy = true;
+ // Otherwise, we have nothing to do.
+ } else {
+ continue;
}
} else {
continue;
}
- unsigned index = capture.getIndex();
- llvm::Value *srcField =
- Builder.CreateStructGEP(blockInfo.StructureType, src, index);
+ Address srcField =
+ Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
// If there's an explicit destructor, call it.
if (dtor) {
@@ -1600,15 +1627,15 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
namespace {
/// Emits the copy/dispose helper functions for a __block object of id type.
-class ObjectByrefHelpers : public CodeGenModule::ByrefHelpers {
+class ObjectByrefHelpers final : public BlockByrefHelpers {
BlockFieldFlags Flags;
public:
ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
- : ByrefHelpers(alignment), Flags(flags) {}
+ : BlockByrefHelpers(alignment), Flags(flags) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
@@ -1619,11 +1646,11 @@ public:
llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
- llvm::Value *args[] = { destField, srcValue, flagsVal };
+ llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
CGF.EmitNounwindRuntimeCall(fn, args);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
llvm::Value *value = CGF.Builder.CreateLoad(field);
@@ -1636,16 +1663,16 @@ public:
};
/// Emits the copy/dispose helpers for an ARC __block __weak variable.
-class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+class ARCWeakByrefHelpers final : public BlockByrefHelpers {
public:
- ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCWeakByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
CGF.EmitARCMoveWeak(destField, srcField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyWeak(field);
}
@@ -1657,36 +1684,31 @@ public:
/// Emits the copy/dispose helpers for an ARC __block __strong variable
/// that's not of block-pointer type.
-class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+class ARCStrongByrefHelpers final : public BlockByrefHelpers {
public:
- ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCStrongByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
// Do a "move" by copying the value and then zeroing out the old
// variable.
- llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
- value->setAlignment(Alignment.getQuantity());
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
llvm::Value *null =
llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
- llvm::StoreInst *store = CGF.Builder.CreateStore(null, destField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(null, destField);
CGF.EmitARCStoreStrongCall(destField, value, /*ignored*/ true);
CGF.EmitARCStoreStrongCall(srcField, null, /*ignored*/ true);
return;
}
- llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
- store->setAlignment(Alignment.getQuantity());
-
- store = CGF.Builder.CreateStore(null, srcField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
@@ -1698,25 +1720,22 @@ public:
/// Emits the copy/dispose helpers for an ARC __block __strong
/// variable that's of block-pointer type.
-class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers {
+class ARCStrongBlockByrefHelpers final : public BlockByrefHelpers {
public:
- ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCStrongBlockByrefHelpers(CharUnits alignment)
+ : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
// Do the copy with objc_retainBlock; that's all that
// _Block_object_assign would do anyway, and we'd have to pass the
// right arguments to make sure it doesn't get no-op'ed.
- llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
- oldValue->setAlignment(Alignment.getQuantity());
-
+ llvm::Value *oldValue = CGF.Builder.CreateLoad(srcField);
llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
-
- llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(copy, destField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
@@ -1728,23 +1747,23 @@ public:
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
-class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
+class CXXByrefHelpers final : public BlockByrefHelpers {
QualType VarType;
const Expr *CopyExpr;
public:
CXXByrefHelpers(CharUnits alignment, QualType type,
const Expr *copyExpr)
- : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+ : BlockByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
bool needsCopy() const override { return CopyExpr != nullptr; }
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
if (!CopyExpr) return;
CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
CGF.PushDestructorCleanup(VarType, field);
CGF.PopCleanupBlocks(cleanupDepth);
@@ -1757,10 +1776,8 @@ public:
} // end anonymous namespace
static llvm::Constant *
-generateByrefCopyHelper(CodeGenFunction &CGF,
- llvm::StructType &byrefType,
- unsigned valueFieldIndex,
- CodeGenModule::ByrefHelpers &byrefInfo) {
+generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1777,8 +1794,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
R, args, FunctionType::ExtInfo(), /*variadic=*/false);
- CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
// FIXME: We'd like to put these into a section mergeable by content, with
// internal linkage.
@@ -1796,26 +1812,30 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_Static,
false, false);
+ CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
CGF.StartFunction(FD, R, Fn, FI, args);
- if (byrefInfo.needsCopy()) {
- llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+ if (generator.needsCopy()) {
+ llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
// dst->x
- llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
- destField = CGF.Builder.CreateLoad(destField);
+ Address destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = Address(CGF.Builder.CreateLoad(destField),
+ byrefInfo.ByrefAlignment);
destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
- destField = CGF.Builder.CreateStructGEP(&byrefType, destField,
- valueFieldIndex, "x");
+ destField = CGF.emitBlockByrefAddress(destField, byrefInfo, false,
+ "dest-object");
// src->x
- llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src);
- srcField = CGF.Builder.CreateLoad(srcField);
+ Address srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = Address(CGF.Builder.CreateLoad(srcField),
+ byrefInfo.ByrefAlignment);
srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
- srcField =
- CGF.Builder.CreateStructGEP(&byrefType, srcField, valueFieldIndex, "x");
+ srcField = CGF.emitBlockByrefAddress(srcField, byrefInfo, false,
+ "src-object");
- byrefInfo.emitCopy(CGF, destField, srcField);
+ generator.emitCopy(CGF, destField, srcField);
}
CGF.FinishFunction();
@@ -1825,19 +1845,17 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
/// Build the copy helper for a __block variable.
static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &info) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
CodeGenFunction CGF(CGM);
- return generateByrefCopyHelper(CGF, byrefType, byrefValueIndex, info);
+ return generateByrefCopyHelper(CGF, byrefInfo, generator);
}
/// Generate code for a __block variable's dispose helper.
static llvm::Constant *
generateByrefDisposeHelper(CodeGenFunction &CGF,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &byrefInfo) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1849,8 +1867,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
R, args, FunctionType::ExtInfo(), /*variadic=*/false);
- CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
// FIXME: We'd like to put these into a section mergeable by content, with
// internal linkage.
@@ -1868,15 +1885,19 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, nullptr,
SC_Static,
false, false);
+
+ CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
CGF.StartFunction(FD, R, Fn, FI, args);
- if (byrefInfo.needsDispose()) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(&src);
- V = CGF.Builder.CreateLoad(V);
- V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0));
- V = CGF.Builder.CreateStructGEP(&byrefType, V, byrefValueIndex, "x");
+ if (generator.needsDispose()) {
+ Address addr = CGF.GetAddrOfLocalVar(&src);
+ addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+ auto byrefPtrType = byrefInfo.Type->getPointerTo(0);
+ addr = CGF.Builder.CreateBitCast(addr, byrefPtrType);
+ addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
- byrefInfo.emitDispose(CGF, V);
+ generator.emitDispose(CGF, addr);
}
CGF.FinishFunction();
@@ -1886,38 +1907,29 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
/// Build the dispose helper for a __block variable.
static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &info) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
CodeGenFunction CGF(CGM);
- return generateByrefDisposeHelper(CGF, byrefType, byrefValueIndex, info);
+ return generateByrefDisposeHelper(CGF, byrefInfo, generator);
}
/// Lazily build the copy and dispose helpers for a __block variable
/// with the given information.
-template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
- llvm::StructType &byrefTy,
- unsigned byrefValueIndex,
- T &byrefInfo) {
- // Increase the field's alignment to be at least pointer alignment,
- // since the layout of the byref struct will guarantee at least that.
- byrefInfo.Alignment = std::max(byrefInfo.Alignment,
- CharUnits::fromQuantity(CGM.PointerAlignInBytes));
-
+template <class T>
+static T *buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo,
+ T &&generator) {
llvm::FoldingSetNodeID id;
- byrefInfo.Profile(id);
+ generator.Profile(id);
void *insertPos;
- CodeGenModule::ByrefHelpers *node
+ BlockByrefHelpers *node
= CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
if (node) return static_cast<T*>(node);
- byrefInfo.CopyHelper =
- buildByrefCopyHelper(CGM, byrefTy, byrefValueIndex, byrefInfo);
- byrefInfo.DisposeHelper =
- buildByrefDisposeHelper(CGM, byrefTy, byrefValueIndex,byrefInfo);
+ generator.CopyHelper = buildByrefCopyHelper(CGM, byrefInfo, generator);
+ generator.DisposeHelper = buildByrefDisposeHelper(CGM, byrefInfo, generator);
- T *copy = new (CGM.getContext()) T(byrefInfo);
+ T *copy = new (CGM.getContext()) T(std::move(generator));
CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
return copy;
}
@@ -1925,20 +1937,25 @@ template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
/// Build the copy and dispose helpers for the given __block variable
/// emission. Places the helpers in the global cache. Returns null
/// if no helpers are required.
-CodeGenModule::ByrefHelpers *
+BlockByrefHelpers *
CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
QualType type = var.getType();
- unsigned byrefValueIndex = getByRefValueLLVMField(&var).second;
+ auto &byrefInfo = getBlockByrefInfo(&var);
+
+ // The alignment we care about for the purposes of uniquing byref
+ // helpers is the alignment of the actual byref value field.
+ CharUnits valueAlignment =
+ byrefInfo.ByrefAlignment.alignmentAtOffset(byrefInfo.FieldOffset);
if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
if (!copyExpr && record->hasTrivialDestructor()) return nullptr;
- CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr);
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo);
+ return ::buildByrefHelpers(
+ CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr));
}
// Otherwise, if we don't have a retainable type, there's nothing to do.
@@ -1949,8 +1966,6 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// If we have lifetime, that dominates.
if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
- assert(getLangOpts().ObjCAutoRefCount);
-
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("impossible");
@@ -1961,24 +1976,23 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// Tell the runtime that this is ARC __weak, called by the
// byref routines.
- case Qualifiers::OCL_Weak: {
- ARCWeakByrefHelpers byrefInfo(emission.Alignment);
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo);
- }
+ case Qualifiers::OCL_Weak:
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCWeakByrefHelpers(valueAlignment));
// ARC __strong __block variables need to be retained.
case Qualifiers::OCL_Strong:
// Block pointers need to be copied, and there's no direct
// transfer possible.
if (type->isBlockPointerType()) {
- ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment);
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo);
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongBlockByrefHelpers(valueAlignment));
// Otherwise, we transfer ownership of the retain from the stack
// to the heap.
} else {
- ARCStrongByrefHelpers byrefInfo(emission.Alignment);
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo);
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongByrefHelpers(valueAlignment));
}
}
llvm_unreachable("fell out of lifetime switch!");
@@ -1997,28 +2011,33 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
if (type.isObjCGCWeak())
flags |= BLOCK_FIELD_IS_WEAK;
- ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo);
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ObjectByrefHelpers(valueAlignment, flags));
}
-std::pair<llvm::Type *, unsigned>
-CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
- assert(ByRefValueInfo.count(VD) && "Did not find value!");
-
- return ByRefValueInfo.find(VD)->second;
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const VarDecl *var,
+ bool followForward) {
+ auto &info = getBlockByrefInfo(var);
+ return emitBlockByrefAddress(baseAddr, info, followForward, var->getName());
}
-llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
- const VarDecl *V) {
- auto P = getByRefValueLLVMField(V);
- llvm::Value *Loc =
- Builder.CreateStructGEP(P.first, BaseAddr, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Loc = Builder.CreateStructGEP(P.first, Loc, P.second, V->getNameAsString());
- return Loc;
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name) {
+ // Chase the forwarding address if requested.
+ if (followForward) {
+ Address forwardingAddr =
+ Builder.CreateStructGEP(baseAddr, 1, getPointerSize(), "forwarding");
+ baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
+ }
+
+ return Builder.CreateStructGEP(baseAddr, info.FieldIndex,
+ info.FieldOffset, name);
}
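Semantically, the forwarding chase matches what the Blocks runtime requires
once a __block variable may have migrated to the heap. A minimal sketch,
assuming an illustrative byref layout (not the real emitted type):

    struct byref_example {
      void *isa;
      byref_example *forwarding;   // self, or the heap copy after _Block_copy
      int flags, size;
      int value;                   // the field at info.FieldIndex
    };
    static int *byrefValueAddress(byref_example *base, bool followForward) {
      if (followForward)
        base = base->forwarding;   // chase to wherever the variable lives now
      return &base->value;         // then index to the stored variable
    }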
-/// BuildByRefType - This routine changes a __block variable declared as T x
+/// getBlockByrefInfo - This routine changes a __block variable declared as T x
/// into:
///
/// struct {
@@ -2033,108 +2052,116 @@ llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
/// T x;
/// } x
///
-llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
- std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
- if (Info.first)
- return Info.first;
+const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
+ auto it = BlockByrefInfos.find(D);
+ if (it != BlockByrefInfos.end())
+ return it->second;
+
+ llvm::StructType *byrefType =
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
QualType Ty = D->getType();
+ CharUnits size;
SmallVector<llvm::Type *, 8> types;
- llvm::StructType *ByRefType =
- llvm::StructType::create(getLLVMContext(),
- "struct.__block_byref_" + D->getNameAsString());
-
// void *__isa;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(ByRefType));
+ types.push_back(llvm::PointerType::getUnqual(byrefType));
+ size += getPointerSize();
// int32_t __flags;
types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
// int32_t __size;
types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
+
// Note that this must match *exactly* the logic in buildByrefHelpers.
- bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
- if (HasCopyAndDispose) {
+ bool hasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
+ if (hasCopyAndDispose) {
/// void *__copy_helper;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
/// void *__destroy_helper;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
}
+
bool HasByrefExtendedLayout = false;
Qualifiers::ObjCLifetime Lifetime;
if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
- HasByrefExtendedLayout)
+ HasByrefExtendedLayout) {
/// void *__byref_variable_layout;
types.push_back(Int8PtrTy);
+ size += CharUnits::fromQuantity(PointerSizeInBytes);
+ }
- bool Packed = false;
- CharUnits Align = getContext().getDeclAlign(D);
- if (Align >
- getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0))) {
- // We have to insert padding.
-
- // The struct above has 2 32-bit integers.
- unsigned CurrentOffsetInBytes = 4 * 2;
-
- // And either 2, 3, 4 or 5 pointers.
- unsigned noPointers = 2;
- if (HasCopyAndDispose)
- noPointers += 2;
- if (HasByrefExtendedLayout)
- noPointers += 1;
-
- CurrentOffsetInBytes += noPointers * CGM.getDataLayout().getTypeAllocSize(Int8PtrTy);
-
- // Align the offset.
- unsigned AlignedOffsetInBytes =
- llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
-
- unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
- if (NumPaddingBytes > 0) {
- llvm::Type *Ty = Int8Ty;
- // FIXME: We need a sema error for alignment larger than the minimum of
- // the maximal stack alignment and the alignment of malloc on the system.
- if (NumPaddingBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
-
- types.push_back(Ty);
+ // T x;
+ llvm::Type *varTy = ConvertTypeForMem(Ty);
- // We want a packed struct.
- Packed = true;
- }
+ bool packed = false;
+ CharUnits varAlign = getContext().getDeclAlign(D);
+ CharUnits varOffset = size.RoundUpToAlignment(varAlign);
+
+ // We may have to insert padding.
+ if (varOffset != size) {
+ llvm::Type *paddingTy =
+ llvm::ArrayType::get(Int8Ty, (varOffset - size).getQuantity());
+
+ types.push_back(paddingTy);
+ size = varOffset;
+
+ // Conversely, we might have to prevent LLVM from inserting padding.
+ } else if (CGM.getDataLayout().getABITypeAlignment(varTy)
+ > varAlign.getQuantity()) {
+ packed = true;
}
+ types.push_back(varTy);
- // T x;
- types.push_back(ConvertTypeForMem(Ty));
-
- ByRefType->setBody(types, Packed);
-
- Info.first = ByRefType;
-
- Info.second = types.size() - 1;
-
- return Info.first;
+ byrefType->setBody(types, packed);
+
+ BlockByrefInfo info;
+ info.Type = byrefType;
+ info.FieldIndex = types.size() - 1;
+ info.FieldOffset = varOffset;
+ info.ByrefAlignment = std::max(varAlign, getPointerAlign());
+
+ auto pair = BlockByrefInfos.insert({D, info});
+ assert(pair.second && "info was inserted recursively?");
+ return pair.first->second;
}
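To make the layout math concrete, a worked instance under stated assumptions
(64-bit pointers, a capture of type long double with 16-byte alignment, no
copy/dispose helpers, no extended layout):

    header:    isa(8) + forwarding(8) + flags(4) + size(4) = 24 bytes
    varOffset: 24 rounded up to 16-byte alignment          = 32
    padding:   varOffset - size = 8                        -> i8[8] member
    result:    FieldIndex = 5 (after the padding array), FieldOffset = 32,
               ByrefAlignment = max(16, pointer alignment) = 16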
/// Initialize the structural components of a __block variable, i.e.
/// everything but the actual object.
void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
// Find the address of the local.
- llvm::Value *addr = emission.Address;
+ Address addr = emission.Addr;
// That's an alloca of the byref structure type.
llvm::StructType *byrefType = cast<llvm::StructType>(
- cast<llvm::PointerType>(addr->getType())->getElementType());
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+
+ unsigned nextHeaderIndex = 0;
+ CharUnits nextHeaderOffset;
+ auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize,
+ const Twine &name) {
+ auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex,
+ nextHeaderOffset, name);
+ Builder.CreateStore(value, fieldAddr);
+
+ nextHeaderIndex++;
+ nextHeaderOffset += fieldSize;
+ };
// Build the byref helpers if necessary. This is null if we don't need any.
- CodeGenModule::ByrefHelpers *helpers =
- buildByrefHelpers(*byrefType, emission);
+ BlockByrefHelpers *helpers = buildByrefHelpers(*byrefType, emission);
const VarDecl &D = *emission.Variable;
QualType type = D.getType();
@@ -2143,7 +2170,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
Qualifiers::ObjCLifetime ByrefLifetime;
bool ByRefHasLifetime =
getContext().getByrefLifetime(type, ByrefLifetime, HasByrefExtendedLayout);
-
+
llvm::Value *V;
// Initialize the 'isa', which is just 0 or 1.
@@ -2151,12 +2178,10 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
if (type.isObjCGCWeak())
isa = 1;
V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
- Builder.CreateStore(V,
- Builder.CreateStructGEP(nullptr, addr, 0, "byref.isa"));
+ storeHeaderField(V, getPointerSize(), "byref.isa");
// Store the address of the variable into its own forwarding pointer.
- Builder.CreateStore(
- addr, Builder.CreateStructGEP(nullptr, addr, 1, "byref.forwarding"));
+ storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding");
// Blocks ABI:
// c) the flags field is set to either 0 if no helper functions are
@@ -2202,31 +2227,23 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
printf("\n");
}
}
-
- Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
- Builder.CreateStructGEP(nullptr, addr, 2, "byref.flags"));
+ storeHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "byref.flags");
CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
- Builder.CreateStore(V,
- Builder.CreateStructGEP(nullptr, addr, 3, "byref.size"));
+ storeHeaderField(V, getIntSize(), "byref.size");
if (helpers) {
- llvm::Value *copy_helper = Builder.CreateStructGEP(nullptr, addr, 4);
- Builder.CreateStore(helpers->CopyHelper, copy_helper);
-
- llvm::Value *destroy_helper = Builder.CreateStructGEP(nullptr, addr, 5);
- Builder.CreateStore(helpers->DisposeHelper, destroy_helper);
+ storeHeaderField(helpers->CopyHelper, getPointerSize(),
+ "byref.copyHelper");
+ storeHeaderField(helpers->DisposeHelper, getPointerSize(),
+ "byref.disposeHelper");
}
+
if (ByRefHasLifetime && HasByrefExtendedLayout) {
- llvm::Constant* ByrefLayoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
- llvm::Value *ByrefInfoAddr =
- Builder.CreateStructGEP(nullptr, addr, helpers ? 6 : 4, "byref.layout");
- // cast destination to pointer to source type.
- llvm::Type *DesTy = ByrefLayoutInfo->getType();
- DesTy = DesTy->getPointerTo();
- llvm::Value *BC = Builder.CreatePointerCast(ByrefInfoAddr, DesTy);
- Builder.CreateStore(ByrefLayoutInfo, BC);
+ auto layoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
+ storeHeaderField(layoutInfo, getPointerSize(), "byref.layout");
}
}
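Putting the pieces together, a hedged sketch of the header this function
initializes for a variable that needs helpers but no extended layout. Field
order follows the storeHeaderField calls above; the flag name is from the
BlockByrefFlags enum in CGBlocks.h:

    byref.isa           <- 0 (1 only for GC __weak)
    byref.forwarding    <- the byref alloca itself
    byref.flags         <- e.g. BLOCK_BYREF_HAS_COPY_DISPOSE plus layout bits
    byref.size          <- store size of struct __block_byref_x
    byref.copyHelper    <- helpers->CopyHelper
    byref.disposeHelper <- helpers->DisposeHelper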
@@ -2240,7 +2257,8 @@ void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
}
namespace {
- struct CallBlockRelease : EHScopeStack::Cleanup {
+ /// Release a __block variable.
+ struct CallBlockRelease final : EHScopeStack::Cleanup {
llvm::Value *Addr;
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
@@ -2249,7 +2267,7 @@ namespace {
CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
-}
+} // end anonymous namespace
/// Enter a cleanup to destroy a __block variable. Note that this
/// cleanup should be a no-op if the variable hasn't left the stack
@@ -2260,7 +2278,8 @@ void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
return;
- EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup,
+ emission.Addr.getPointer());
}
/// Adjust the declaration of something from the blocks API.
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index c4eed0d0e8eb..1edabef4ec74 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -140,6 +140,15 @@ inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
return BlockFieldFlags(l) | BlockFieldFlags(r);
}
+/// Information about the layout of a __block variable.
+class BlockByrefInfo {
+public:
+ llvm::StructType *Type;
+ unsigned FieldIndex;
+ CharUnits ByrefAlignment;
+ CharUnits FieldOffset;
+};
+
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
@@ -152,14 +161,19 @@ public:
class Capture {
uintptr_t Data;
EHScopeStack::stable_iterator Cleanup;
+ CharUnits::QuantityType Offset;
public:
bool isIndex() const { return (Data & 1) != 0; }
bool isConstant() const { return !isIndex(); }
- unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
- llvm::Value *getConstant() const {
- assert(isConstant());
- return reinterpret_cast<llvm::Value*>(Data);
+
+ unsigned getIndex() const {
+ assert(isIndex());
+ return Data >> 1;
+ }
+ CharUnits getOffset() const {
+ assert(isIndex());
+ return CharUnits::fromQuantity(Offset);
}
EHScopeStack::stable_iterator getCleanup() const {
assert(isIndex());
@@ -170,9 +184,15 @@ public:
Cleanup = cleanup;
}
- static Capture makeIndex(unsigned index) {
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+
+ static Capture makeIndex(unsigned index, CharUnits offset) {
Capture v;
v.Data = (index << 1) | 1;
+ v.Offset = offset.getQuantity();
return v;
}
@@ -205,12 +225,13 @@ public:
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
- llvm::AllocaInst *Address;
+ Address LocalAddress;
llvm::StructType *StructureType;
const BlockDecl *Block;
const BlockExpr *BlockExpression;
CharUnits BlockSize;
CharUnits BlockAlign;
+ CharUnits CXXThisOffset;
// Offset of the gap caused by block header having a smaller
// alignment than the alignment of the block descriptor. This
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 6610659131f7..489f3413d4b8 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -11,6 +11,8 @@
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "llvm/IR/IRBuilder.h"
+#include "Address.h"
+#include "CodeGenTypeCache.h"
namespace clang {
namespace CodeGen {
@@ -22,9 +24,9 @@ class CodeGenFunction;
/// instructions.
template <bool PreserveNames>
class CGBuilderInserter
- : protected llvm::IRBuilderDefaultInserter<PreserveNames> {
+ : protected llvm::IRBuilderDefaultInserter<PreserveNames> {
public:
- CGBuilderInserter() : CGF(nullptr) {}
+ CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
protected:
@@ -33,9 +35,7 @@ protected:
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
private:
- void operator=(const CGBuilderInserter &) = delete;
-
- CodeGenFunction *CGF;
+ CodeGenFunction *CGF = nullptr;
};
// Don't preserve names on values in an optimized build.
@@ -44,9 +44,260 @@ private:
#else
#define PreserveNames true
#endif
+
typedef CGBuilderInserter<PreserveNames> CGBuilderInserterTy;
+
typedef llvm::IRBuilder<PreserveNames, llvm::ConstantFolder,
- CGBuilderInserterTy> CGBuilderTy;
+ CGBuilderInserterTy> CGBuilderBaseTy;
+
+class CGBuilderTy : public CGBuilderBaseTy {
+ /// Storing a reference to the type cache here makes it a lot easier
+ /// to build natural-feeling, target-specific IR.
+ const CodeGenTypeCache &TypeCache;
+public:
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
+ : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache,
+ llvm::LLVMContext &C, const llvm::ConstantFolder &F,
+ const CGBuilderInserterTy &Inserter)
+ : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
+ : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
+ : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
+
+ llvm::ConstantInt *getSize(CharUnits N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
+ }
+ llvm::ConstantInt *getSize(uint64_t N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N);
+ }
+
+ // Note that we intentionally hide the CreateLoad APIs that don't
+ // take an alignment.
+ llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
+ // This overload is required to prevent string literals from
+ // ending up in the IsVolatile overload.
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ IsVolatile,
+ Name);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedLoad;
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const char *Name) {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
+ CharUnits Align,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == Ty);
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), IsVolatile, Name);
+ }
+
+ // Note that we intentionally hide the CreateStore APIs that don't
+ // take an alignment.
+ llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
+ bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr.getPointer(),
+ Addr.getAlignment().getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedStore;
+ llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
+ CharUnits Align, bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
+ }
+
+ // FIXME: these "default-aligned" APIs should be removed,
+ // but I don't feel like fixing all the builtin code right now.
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const char *Name) {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, IsVolatile, Name);
+ }
+
+ llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
+ llvm::Value *Addr,
+ bool IsVolatile = false) {
+ return CGBuilderBaseTy::CreateStore(Val, Addr, IsVolatile);
+ }
+
+ /// Emit a load from an i1 flag variable.
+ llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
+ }
+
+ /// Emit a store to an i1 flag variable.
+ llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
+ }
+
+ using CGBuilderBaseTy::CreateBitCast;
+ Address CreateBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
+ Addr.getAlignment());
+ }
+
+ /// Cast the element type of the given address to a different type,
+ /// preserving information like the alignment and address space.
+ Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
+ return CreateBitCast(Addr, PtrTy, Name);
+ }
+
+ using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
+ Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ llvm::Value *Ptr =
+ CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
+ return Address(Ptr, Addr.getAlignment());
+ }
+
+ using CGBuilderBaseTy::CreateStructGEP;
+ Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ return Address(CreateStructGEP(Addr.getElementType(),
+ Addr.getPointer(), Index, Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ /// Given
+ /// %addr = [n x T]* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 0, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// This API assumes that drilling into an array like this is always
+ /// an inbounds operation.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getPointer(),
+ {getSize(CharUnits::Zero()),
+ getSize(Index)},
+ Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
+ CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Index), Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Index), Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given a pointer to i8, adjust it by a given constant offset.
+ Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateInBoundsGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+ Address CreateConstByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateInBoundsGEP(Ptr, getSize(Offset), Name);
+ }
+ llvm::Value *CreateConstByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateGEP(Ptr, getSize(Offset), Name);
+ }
+
+ using CGBuilderBaseTy::CreateMemCpy;
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemMove;
+ llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemSet;
+ llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
+ llvm::Value *Size, bool IsVolatile = false) {
+ return CreateMemSet(Dest.getPointer(), Value, Size,
+ Dest.getAlignment().getQuantity(), IsVolatile);
+ }
+};
+
#undef PreserveNames
} // end namespace CodeGen
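As a hedged usage sketch (assuming the usual in-tree CodeGenFunction
environment; not compilable outside the Clang tree), the Address-aware
overloads let call sites stop threading raw alignment integers around:

    // Illustrative only: CGF.Builder is the CGBuilderTy defined above.
    void example(clang::CodeGen::CodeGenFunction &CGF,
                 clang::CodeGen::Address slot) {
      // The load takes its alignment from the Address itself.
      llvm::Value *v = CGF.Builder.CreateLoad(slot, "val");
      // StructGEP derives the field's alignment from the base alignment
      // and the byte offset of the field.
      clang::CodeGen::Address field = CGF.Builder.CreateStructGEP(
          slot, /*Index=*/1, clang::CharUnits::fromQuantity(8), "field");
      CGF.Builder.CreateStore(v, field);
    }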
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 9b8694f9c5f2..787ac5361bbb 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -48,7 +48,7 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
else
- Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+ Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
@@ -111,6 +111,28 @@ static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
return EmitFromInt(CGF, Result, T, ValueType);
}
+static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Val = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Address = CGF.EmitScalarExpr(E->getArg(1));
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
+ Value *BC = CGF.Builder.CreateBitCast(
+ Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
+ LV.setNontemporal(true);
+ CGF.EmitStoreOfScalar(Val, LV, false);
+ return nullptr;
+}
+
+static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Address = CGF.EmitScalarExpr(E->getArg(0));
+
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
+ LV.setNontemporal(true);
+ return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
+}
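These two helpers implement the __builtin_nontemporal_store and
__builtin_nontemporal_load builtins; the stores and loads they emit carry the
!nontemporal flag via the LValue bit set above. A minimal user-side sketch,
assuming a Clang new enough to have these builtins:

    // Streaming write that should bypass the cache hierarchy.
    void fill(float *dst, float v, int n) {
      for (int i = 0; i != n; ++i)
        __builtin_nontemporal_store(v, &dst[i]);   // store marked !nontemporal
    }
    float peek(float *p) {
      return __builtin_nontemporal_load(p);        // load marked !nontemporal
    }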
+
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
@@ -215,10 +237,20 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateBitCast(V, IntTy);
if (Ty->isPPC_FP128Ty()) {
- // The higher-order double comes first, and so we need to truncate the
- // pair to extract the overall sign. The order of the pair is the same
- // in both little- and big-Endian modes.
+ // We want the sign bit of the higher-order double. The bitcast we just
+ // did works as if the double-double was stored to memory and then
+ // read as an i128. The "store" will put the higher-order double in the
+ // lower address in both little- and big-Endian modes, but the "load"
+ // will treat those bits as a different part of the i128: the low bits in
+ // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
+ // we need to shift the high bits down to the low before truncating.
Width >>= 1;
+ if (CGF.getTarget().isBigEndian()) {
+ Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
+ V = CGF.Builder.CreateLShr(V, ShiftCst);
+ }
+ // We are truncating the value in order to extract the higher-order
+ // double, from which we will extract the sign.
IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateTrunc(V, IntTy);
}
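A worked illustration of the endianness note, assuming ppc_fp128 is stored in
memory as two doubles {hi, lo}:

    after the bitcast to i128 (as if stored then reloaded):
      little-endian: hi sits in bits [0, 64)    -> trunc to i64 keeps it
      big-endian:    hi sits in bits [64, 128)  -> lshr by 64, then trunc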
@@ -256,6 +288,125 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
+namespace {
+ struct WidthAndSignedness {
+ unsigned Width;
+ bool Signed;
+ };
+}
+
+static WidthAndSignedness
+getIntegerWidthAndSignedness(const clang::ASTContext &context,
+ const clang::QualType Type) {
+ assert(Type->isIntegerType() && "Given type is not an integer.");
+ unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
+ bool Signed = Type->isSignedIntegerType();
+ return {Width, Signed};
+}
+
+// Given one or more integer types, this function produces an integer type that
+// encompasses them: any value in one of the given types could be expressed in
+// the encompassing type.
+static struct WidthAndSignedness
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
+ assert(Types.size() > 0 && "Empty list of types.");
+
+ // If any of the given types is signed, we must return a signed type.
+ bool Signed = false;
+ for (const auto &Type : Types) {
+ Signed |= Type.Signed;
+ }
+
+ // The encompassing type must have a width greater than or equal to the width
+ // of the specified types. Additionally, if the encompassing type is signed,
+ // its width must be strictly greater than the width of any unsigned types
+ // given.
+ unsigned Width = 0;
+ for (const auto &Type : Types) {
+ unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
+ if (Width < MinWidth) {
+ Width = MinWidth;
+ }
+ }
+
+ return {Width, Signed};
+}
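A worked example of the width rule, using the two inputs shown:

    inputs: {Width=32, unsigned} and {Width=16, signed}
      Signed = true                      // any signed input forces signed
      u32 needs Width >= 32 + 1 = 33     // one extra bit to stay lossless
      s16 needs Width >= 16
    => result: {Width=33, Signed=true}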
+
+Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
+ llvm::Type *DestType = Int8PtrTy;
+ if (ArgValue->getType() != DestType)
+ ArgValue =
+ Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
+
+ Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
+ return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
+}
+
+/// Checks if using the result of __builtin_object_size(p, @p From) in place of
+/// __builtin_object_size(p, @p To) is correct.
+static bool areBOSTypesCompatible(int From, int To) {
+ // Note: Our __builtin_object_size implementation currently treats Type=0 and
+ // Type=2 identically. Encoding this implementation detail here may make
+ // improving __builtin_object_size difficult in the future, so it's omitted.
+ return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
+}
+
+static llvm::Value *
+getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
+ return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
+}
+
+llvm::Value *
+CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType) {
+ uint64_t ObjectSize;
+ if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
+ return emitBuiltinObjectSize(E, Type, ResType);
+ return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
+}
+
+/// Returns a Value corresponding to the size of the given expression.
+/// This Value may be either of the following:
+/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
+/// it)
+/// - A call to the @llvm.objectsize intrinsic
+llvm::Value *
+CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType) {
+ // We need to reference an argument if the pointer is a parameter with the
+ // pass_object_size attribute.
+ if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
+ auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
+ auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
+ if (Param != nullptr && PS != nullptr &&
+ areBOSTypesCompatible(PS->getType(), Type)) {
+ auto Iter = SizeArguments.find(Param);
+ assert(Iter != SizeArguments.end());
+
+ const ImplicitParamDecl *D = Iter->second;
+ auto DIter = LocalDeclMap.find(D);
+ assert(DIter != LocalDeclMap.end());
+
+ return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
+ getContext().getSizeType(), E->getLocStart());
+ }
+ }
+
+ // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
+ // evaluate E for side-effects. In either case, we shouldn't lower to
+ // @llvm.objectsize.
+ if (Type == 3 || E->HasSideEffects(getContext()))
+ return getDefaultBuiltinObjectSizeResult(Type, ResType);
+
+ // LLVM only supports 0 and 2; make sure that we pass that along
+ // as a boolean.
+ auto *CI = ConstantInt::get(Builder.getInt1Ty(), (Type & 2) >> 1);
+ // FIXME: Get right address space.
+ llvm::Type *Tys[] = {ResType, Builder.getInt8PtrTy(0)};
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
+ return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
+}
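User-side, a hedged sketch of the split between the two paths (constant
folding versus the @llvm.objectsize intrinsic):

    #include <cstddef>
    std::size_t known() {
      char buf[16];
      // Fully visible object: typically folded to 16 by the constant
      // evaluator before reaching this lowering.
      return __builtin_object_size(buf, 0);
    }
    std::size_t deferred(char *p) {
      // Unknown pointee: lowered to @llvm.objectsize for the optimizer.
      return __builtin_object_size(p, 0);
    }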
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -279,22 +430,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
- case Builtin::BI__builtin_va_end: {
- Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
- ? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0));
- llvm::Type *DestType = Int8PtrTy;
- if (ArgValue->getType() != DestType)
- ArgValue = Builder.CreateBitCast(ArgValue, DestType,
- ArgValue->getName().data());
-
- Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
- Intrinsic::vaend : Intrinsic::vastart;
- return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
- }
+ case Builtin::BI__builtin_va_end:
+ return RValue::get(
+ EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
+ ? EmitScalarExpr(E->getArg(0))
+ : EmitVAListRef(E->getArg(0)).getPointer(),
+ BuiltinID != Builtin::BI__builtin_va_end));
case Builtin::BI__builtin_va_copy: {
- Value *DstPtr = EmitVAListRef(E->getArg(0));
- Value *SrcPtr = EmitVAListRef(E->getArg(1));
+ Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
+ Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
llvm::Type *Type = Int8PtrTy;
@@ -455,6 +599,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_unpredictable: {
+ // Always return the argument of __builtin_unpredictable. LLVM does not
+ // handle this builtin. Metadata for this builtin should be added directly
+ // to instructions such as branches or switches that use it.
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
case Builtin::BI__builtin_expect: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
@@ -501,26 +651,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
- // We rely on constant folding to deal with expressions with side effects.
- assert(!E->getArg(0)->HasSideEffects(getContext()) &&
- "should have been constant folded");
-
- // We pass this builtin onto the optimizer so that it can
- // figure out the object size in more complex cases.
- llvm::Type *ResType = ConvertType(E->getType());
-
- // LLVM only supports 0 and 2, make sure that we pass along that
- // as a boolean.
- Value *Ty = EmitScalarExpr(E->getArg(1));
- ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
- assert(CI);
- uint64_t val = CI->getZExtValue();
- CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
- // FIXME: Get right address space.
- llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
- return RValue::get(
- Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0)), CI}));
+ unsigned Type =
+ E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
+
+ // We pass this builtin onto the optimizer so that it can figure out the
+ // object size in more complex cases.
+ return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
@@ -737,29 +874,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
- Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(Dest.second, Src.second);
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin___memcpy_chk: {
@@ -770,23 +902,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(Dest.second, Src.second);
- Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
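
For context, the fold this hunk implements, in user terms: when both size operands are compile-time constants and the copy fits, the _chk variant degenerates to a plain memcpy. A minimal sketch:

    #include <cstring>

    char dst[16];
    const char src[8] = "abcdefg";

    void copy_checked() {
      // Size (8) and DstSize (16) are constants with Size <= DstSize,
      // so this lowers to an ordinary memcpy with no runtime check.
      __builtin___memcpy_chk(dst, src, sizeof src,
                             __builtin_object_size(dst, 0));
    }
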
case Builtin::BI__builtin_objc_memmove_collectable: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
+ Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
- Address, SrcAddr, SizeVal);
- return RValue::get(Address);
+ DestAddr, SrcAddr, SizeVal);
+ return RValue::get(DestAddr.getPointer());
}
case Builtin::BI__builtin___memmove_chk: {
@@ -797,42 +926,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(Dest.second, Src.second);
- Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(Dest.second, Src.second);
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
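
For readers following the mechanical change through these hunks: a rough, hedged sketch of the Address abstraction they migrate to. The real definition lives in lib/CodeGen/Address.h; the member names below follow the calls visible in this diff, and CharUnits is stubbed so the sketch stands alone:

    namespace llvm { class Value; }

    struct CharUnits { long long Quantity = 0; }; // stand-in for clang::CharUnits

    // A pointer bundled with its known alignment, replacing the old
    // std::pair<llvm::Value*, unsigned> whose '.second' was easy to drop.
    class Address {
      llvm::Value *Pointer;
      CharUnits Alignment;
    public:
      Address(llvm::Value *P, CharUnits A) : Pointer(P), Alignment(A) {}
      static Address invalid() { return Address(nullptr, CharUnits()); }
      bool isValid() const { return Pointer != nullptr; }
      llvm::Value *getPointer() const { return Pointer; } // asserts in the real class
      CharUnits getAlignment() const { return Alignment; }
    };
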
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
@@ -842,13 +964,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
@@ -952,7 +1073,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_setjmp: {
// Buffer is a void**.
- Value *Buf = EmitScalarExpr(E->getArg(0));
+ Address Buf = EmitPointerWithAlignment(E->getArg(0));
// Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
@@ -963,14 +1084,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
- Value *StackSaveSlot =
- Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Address StackSaveSlot =
+ Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize());
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
- return RValue::get(Builder.CreateCall(F, Buf));
+ return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
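
The slot arithmetic above follows the builtin setjmp contract: the buffer is five pointer-sized words, with the frame pointer stored in word 0 and the stack pointer in word 2 (hence the in-bounds GEP to index 2). A user-level sketch, on targets that support the builtin variants:

    #include <cstdio>

    static void *buf[5]; // __builtin_setjmp requires exactly five words

    int main() {
      if (__builtin_setjmp(buf) == 0) {
        puts("first pass");
        __builtin_longjmp(buf, 1); // the builtin form requires the value 1
      }
      puts("resumed");
      return 0;
    }
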
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
@@ -1135,8 +1256,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
- Store->setAlignment(StoreSize.getQuantity());
+ Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
+ StoreSize);
Store->setAtomic(llvm::Release);
return RValue::get(nullptr);
}
@@ -1153,6 +1274,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_nontemporal_load:
+ return RValue::get(EmitNontemporalLoad(*this, E));
+ case Builtin::BI__builtin_nontemporal_store:
+ return RValue::get(EmitNontemporalStore(*this, E));
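
These two additions wire up clang's new nontemporal builtins; in user code they mark a load or store as cache-bypassing where the target supports it:

    // Streaming fill that hints the stores should not pollute the cache.
    void stream_fill(float *dst, float v, int n) {
      for (int i = 0; i != n; ++i)
        __builtin_nontemporal_store(v, &dst[i]);
    }

    float stream_read(float *p) { return __builtin_nontemporal_load(p); }
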
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
// Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
@@ -1270,15 +1395,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
+ Address Ptr = EmitPointerWithAlignment(E->getArg(0));
+ unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setAlignment(1);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
@@ -1311,7 +1435,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setAlignment(1);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
@@ -1493,8 +1616,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
- std::pair<llvm::Value*, unsigned> CarryOutPtr =
- EmitPointerWithAlignment(E->getArg(3));
+ Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
@@ -1525,11 +1647,91 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Sum1, Carryin, Carry2);
llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
X->getType());
- llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
- CarryOutPtr.first);
- CarryOutStore->setAlignment(CarryOutPtr.second);
+ Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
+
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow: {
+ const clang::Expr *LeftArg = E->getArg(0);
+ const clang::Expr *RightArg = E->getArg(1);
+ const clang::Expr *ResultArg = E->getArg(2);
+
+ clang::QualType ResultQTy =
+ ResultArg->getType()->castAs<PointerType>()->getPointeeType();
+
+ WidthAndSignedness LeftInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
+ WidthAndSignedness RightInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
+ WidthAndSignedness ResultInfo =
+ getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
+ WidthAndSignedness EncompassingInfo =
+ EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+
+ llvm::Type *EncompassingLLVMTy =
+ llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
+
+ llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
+
+ llvm::Intrinsic::ID IntrinsicId;
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unknown overflow builtin id.");
+ case Builtin::BI__builtin_add_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::sadd_with_overflow
+ : llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_sub_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::ssub_with_overflow
+ : llvm::Intrinsic::usub_with_overflow;
+ break;
+ case Builtin::BI__builtin_mul_overflow:
+ IntrinsicId = EncompassingInfo.Signed
+ ? llvm::Intrinsic::smul_with_overflow
+ : llvm::Intrinsic::umul_with_overflow;
+ break;
+ }
+
+ llvm::Value *Left = EmitScalarExpr(LeftArg);
+ llvm::Value *Right = EmitScalarExpr(RightArg);
+ Address ResultPtr = EmitPointerWithAlignment(ResultArg);
+
+ // Extend each operand to the encompassing type.
+ Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
+ Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
+
+ // Perform the operation on the extended values.
+ llvm::Value *Overflow, *Result;
+ Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
+
+ if (EncompassingInfo.Width > ResultInfo.Width) {
+ // The encompassing type is wider than the result type, so we need to
+ // truncate it.
+ llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
+
+ // To see if the truncation caused an overflow, we will extend
+ // the result and then compare it to the original result.
+ llvm::Value *ResultTruncExt = Builder.CreateIntCast(
+ ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
+ llvm::Value *TruncationOverflow =
+ Builder.CreateICmpNE(Result, ResultTruncExt);
+
+ Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
+ Result = ResultTrunc;
+ }
+
+ // Finally, store the result using the pointer.
+ bool isVolatile =
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
+ Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
+
+ return RValue::get(Overflow);
+ }
+
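
A small demonstration of the semantics the new case implements: operands are widened to an encompassing type, the checked operation runs there, and the truncation back to the result type is itself checked (the ResultTrunc/ResultTruncExt dance above):

    #include <cstdio>

    int main() {
      long long big = 1LL << 40;
      int r;
      // The multiply is exact in the encompassing 64-bit type, but the
      // result does not fit in 'int', so the truncation check fires.
      if (__builtin_mul_overflow(big, 2, &r))
        puts("overflow");
      return 0;
    }
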
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
@@ -1554,13 +1756,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
- std::pair<llvm::Value *, unsigned> SumOutPtr =
- EmitPointerWithAlignment(E->getArg(2));
+ Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
- default: llvm_unreachable("Unknown security overflow builtin id.");
+ default: llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
@@ -1596,13 +1797,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
- llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
- SumOutStore->setAlignment(SumOutPtr.second);
+ Builder.CreateStore(Sum, SumOutPtr);
return RValue::get(Carry);
}
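
The fixed-width variants handled above keep their original shape: fixed operand types, result stored through the pointer, carry returned. For example:

    #include <cstdio>

    int main() {
      unsigned Sum;
      if (__builtin_uadd_overflow(0xFFFFFFFFu, 1u, &Sum)) // Sum == 0
        puts("carry out");
      return 0;
    }
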
case Builtin::BI__builtin_addressof:
- return RValue::get(EmitLValue(E->getArg(0)).getAddress());
+ return RValue::get(EmitLValue(E->getArg(0)).getPointer());
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
E->getArg(0), false);
@@ -1777,8 +1977,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+  // Check that a call to a target-specific builtin has the correct target
+  // features.
+  // This check is done here, after the generic cases, so that builtins that
+  // are not target-specific skip it; if generic builtins ever start to
+  // require generic target features, it can move to the beginning of the
+  // function.
+ checkTargetFeatures(E, FD);
+
// See if we have a target specific intrinsic.
- const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
if (const char *Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
@@ -1856,37 +2063,54 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return GetUndefRValue(E->getType());
}
-Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- switch (getTarget().getTriple().getArch()) {
+static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
+ unsigned BuiltinID, const CallExpr *E,
+ llvm::Triple::ArchType Arch) {
+ switch (Arch) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- return EmitARMBuiltinExpr(BuiltinID, E);
+ return CGF->EmitARMBuiltinExpr(BuiltinID, E);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- return EmitAArch64BuiltinExpr(BuiltinID, E);
+ return CGF->EmitAArch64BuiltinExpr(BuiltinID, E);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- return EmitX86BuiltinExpr(BuiltinID, E);
+ return CGF->EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- return EmitPPCBuiltinExpr(BuiltinID, E);
+ return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
- return EmitAMDGPUBuiltinExpr(BuiltinID, E);
+ return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
case llvm::Triple::systemz:
- return EmitSystemZBuiltinExpr(BuiltinID, E);
+ return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- return EmitNVPTXBuiltinExpr(BuiltinID, E);
+ return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
default:
return nullptr;
}
}
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
+ assert(getContext().getAuxTargetInfo() && "Missing aux target info");
+ return EmitTargetArchBuiltinExpr(
+ this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
+ getContext().getAuxTargetInfo()->getTriple().getArch());
+ }
+
+ return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
+ getTarget().getTriple().getArch());
+}
+
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
bool V1Ty=false) {
@@ -1917,6 +2141,19 @@ static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
llvm_unreachable("Unknown vector element type!");
}
+static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags IntTypeFlags) {
+ int IsQuad = IntTypeFlags.isQuad();
+ switch (IntTypeFlags.getEltType()) {
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
+ default:
+ llvm_unreachable("Type can't be converted to floating-point!");
+ }
+}
+
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
Value* SV = llvm::ConstantVector::getSplat(nElts, C);
@@ -1940,10 +2177,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
-
- llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
- llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
- return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
+ return ConstantInt::get(Ty, neg ? -SV : SV);
}
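
The one-liner above leans on an LLVM API guarantee: ConstantInt::get, handed a vector type, returns a splat of the scalar, making the explicit ConstantVector::getSplat redundant. A hedged sketch against the API of this patch's vintage:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cstdint>

    // Returns a <4 x i16> splat of V.
    llvm::Constant *splat4xi16(llvm::LLVMContext &Ctx, uint64_t V) {
      llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt16Ty(Ctx), 4);
      return llvm::ConstantInt::get(Ty, V);
    }
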
// \brief Right-shift a vector by a constant.
@@ -1962,8 +2196,7 @@ Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
if (ShiftAmt == EltSize) {
if (usgn) {
// Right-shifting an unsigned value by its size yields 0.
- llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
- return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
+ return llvm::ConstantAggregateZero::get(VTy);
} else {
// Right-shifting a signed value by its size is equivalent
// to a shift of size-1.
@@ -1979,61 +2212,6 @@ Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
return Builder.CreateAShr(Vec, Shift, name);
}
-/// GetPointeeAlignment - Given an expression with a pointer type, find the
-/// alignment of the type referenced by the pointer. Skip over implicit
-/// casts.
-std::pair<llvm::Value*, unsigned>
-CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
- assert(Addr->getType()->isPointerType());
- Addr = Addr->IgnoreParens();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
- if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
- ICE->getSubExpr()->getType()->isPointerType()) {
- std::pair<llvm::Value*, unsigned> Ptr =
- EmitPointerWithAlignment(ICE->getSubExpr());
- Ptr.first = Builder.CreateBitCast(Ptr.first,
- ConvertType(Addr->getType()));
- return Ptr;
- } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
- LValue LV = EmitLValue(ICE->getSubExpr());
- unsigned Align = LV.getAlignment().getQuantity();
- if (!Align) {
- // FIXME: Once LValues are fixed to always set alignment,
- // zap this code.
- QualType PtTy = ICE->getSubExpr()->getType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
- else
- Align = 1;
- }
- return std::make_pair(LV.getAddress(), Align);
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
- if (UO->getOpcode() == UO_AddrOf) {
- LValue LV = EmitLValue(UO->getSubExpr());
- unsigned Align = LV.getAlignment().getQuantity();
- if (!Align) {
- // FIXME: Once LValues are fixed to always set alignment,
- // zap this code.
- QualType PtTy = UO->getSubExpr()->getType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
- else
- Align = 1;
- }
- return std::make_pair(LV.getAddress(), Align);
- }
- }
-
- unsigned Align = 1;
- QualType PtTy = Addr->getType()->getPointeeType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
-
- return std::make_pair(EmitScalarExpr(Addr), Align);
-}
-
enum {
AddRetType = (1 << 0),
Add1ArgType = (1 << 1),
@@ -2056,31 +2234,36 @@ enum {
AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};
- struct NeonIntrinsicInfo {
+namespace {
+struct NeonIntrinsicInfo {
+ const char *NameHint;
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
- const char *NameHint;
unsigned TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
+ bool operator<(const NeonIntrinsicInfo &TE) const {
+ return BuiltinID < TE.BuiltinID;
+ }
};
+} // end anonymous namespace
#define NEONMAP0(NameBase) \
- { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }
+ { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
- { NEON:: BI__builtin_neon_ ## NameBase, \
- Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }
+ { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
+ Intrinsic::LLVMIntrinsic, 0, TypeModifier }
#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
- { NEON:: BI__builtin_neon_ ## NameBase, \
+ { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
- #NameBase, TypeModifier }
+ TypeModifier }
-static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
+static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
@@ -2106,7 +2289,7 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
+ NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
@@ -2297,7 +2480,7 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vzipq_v)
};
-static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vaddhn_v),
@@ -2319,7 +2502,7 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
+ NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
@@ -2412,7 +2595,7 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vtstq_v),
};
-static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
+static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
@@ -2623,9 +2806,7 @@ findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
#ifndef NDEBUG
if (!MapProvenSorted) {
- // FIXME: use std::is_sorted once C++11 is allowed
- for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
- assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
+ assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
MapProvenSorted = true;
}
#endif
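
Side note on the std::is_sorted swap: is_sorted compares elements pairwise, which is what the second operator< overload added to NeonIntrinsicInfo above exists to satisfy (the unsigned overload only serves the lower_bound lookup). Minimal shape:

    #include <algorithm>
    #include <iterator>

    struct Entry {
      unsigned BuiltinID;
      bool operator<(const Entry &RHS) const { return BuiltinID < RHS.BuiltinID; }
    };

    static const Entry Map[] = {{1}, {2}, {5}};
    const bool Sorted = std::is_sorted(std::begin(Map), std::end(Map));
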
@@ -2744,7 +2925,7 @@ static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
- SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
+ SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1) {
// Get the last argument, which specifies the vector type.
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
@@ -2761,6 +2942,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
if (!Ty)
return nullptr;
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
Int = AltLLVMIntrinsic;
@@ -2782,9 +2967,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
- Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
- SrcTy->getScalarSizeInBits() / 2);
- ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+ Constant *ShiftAmt =
+ ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
// %res = trunc <4 x i32> %high to <4 x i16>
@@ -2822,13 +3006,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_n_f64_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *FloatTy =
- GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32,
- false, Quad));
- llvm::Type *Tys[2] = { FloatTy, Ty };
+ llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
@@ -2841,13 +3019,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *FloatTy =
- GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32,
- false, Quad));
- llvm::Type *Tys[2] = { Ty, FloatTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -2859,13 +3031,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v: {
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *FloatTy =
- GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32,
- false, Quad));
- Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
@@ -2901,13 +3067,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, Quad));
- llvm::Type *Tys[2] = { Ty, InTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vext_v:
@@ -2933,28 +3093,31 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v:
- Ops.push_back(Align);
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
+ case NEON::BI__builtin_neon_vld1q_v: {
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Ops.push_back(getAlignmentValue32(PtrOp0));
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
+ }
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
+ LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
@@ -2965,14 +3128,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
- Ops.push_back(Align);
+ Ops.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
@@ -3019,14 +3183,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vqdmlal_v:
case NEON::BI__builtin_neon_vqdmlsl_v: {
SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
- Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
- MulOps, "vqdmlal");
-
- SmallVector<Value *, 2> AccumOps;
- AccumOps.push_back(Ops[0]);
- AccumOps.push_back(Mul);
- return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
- AccumOps, NameHint);
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
+ Ops.resize(2);
+ return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
}
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
@@ -3088,9 +3248,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_lane_v:
- case NEON::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(Align);
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
+ case NEON::BI__builtin_neon_vst4q_lane_v: {
+ llvm::Type *Tys[] = {Int8PtrTy, Ty};
+ Ops.push_back(getAlignmentValue32(PtrOp0));
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
+ }
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
@@ -3101,9 +3263,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
- Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
- SrcTy->getScalarSizeInBits() / 2);
- ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+ Constant *ShiftAmt =
+ ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
// %res = trunc <4 x i32> %high to <4 x i16>
@@ -3125,7 +3286,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3153,7 +3314,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3173,7 +3334,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3252,33 +3413,37 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
}
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
+ unsigned Value;
switch (BuiltinID) {
default:
return nullptr;
case ARM::BI__builtin_arm_nop:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 0));
+ Value = 0;
+ break;
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 1));
+ Value = 1;
+ break;
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 2));
+ Value = 2;
+ break;
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 3));
+ Value = 3;
+ break;
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 4));
+ Value = 4;
+ break;
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, 5));
+ Value = 5;
+ break;
}
+
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, Value));
}
// Generates the IR for the read/write special register builtin,
@@ -3428,9 +3593,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
- SmallVector<Value*, 2> Ops;
+ Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
@@ -3504,11 +3669,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
: Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
- Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
- Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+ Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -3627,8 +3792,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
+ Address PtrOp0 = Address::invalid();
+ Address PtrOp1 = Address::invalid();
SmallVector<Value*, 4> Ops;
- llvm::Value *Align = nullptr;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
@@ -3658,10 +3828,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst4q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(Src.first);
- Align = Builder.getInt32(Src.second);
+ PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(PtrOp0.getPointer());
continue;
}
}
@@ -3684,10 +3852,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld4_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
- Ops.push_back(Src.first);
- Align = Builder.getInt32(Src.second);
+ PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(PtrOp1.getPointer());
continue;
}
}
@@ -3798,7 +3964,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1);
unsigned Int;
switch (BuiltinID) {
@@ -3809,27 +3975,25 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+ uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
+ Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
- SmallVector<Constant*, 2> Indices;
- Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
- Indices.push_back(ConstantInt::get(Int32Ty, Lane));
- SV = llvm::ConstantVector::get(Indices);
+ uint32_t Indices[] = {1 - Lane, Lane};
+ SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
}
// fall through
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
+ Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld2_dup_v:
@@ -3849,11 +4013,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ llvm::Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_dup_v:
@@ -3867,7 +4033,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::Type *Tys[] = {Ty, Int8PtrTy};
+ Function *F = CGM.getIntrinsic(Int, Tys);
llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
@@ -3876,7 +4043,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(Align);
+ Args.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
@@ -3889,7 +4056,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
@@ -3941,18 +4108,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- Ops[2] = Align;
+ Ops[2] = getAlignmentValue32(PtrOp0);
+ llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
- Ops[1]->getType()), Ops);
+ Tys), Ops);
}
// fall through
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- StoreInst *St = Builder.CreateStore(Ops[1],
- Builder.CreateBitCast(Ops[0], Ty));
- St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
@@ -4029,52 +4195,41 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
- llvm::VectorType *VTy = GetNeonType(&CGF, Type);
- llvm::Type *Ty = VTy;
+ llvm::VectorType *Ty = GetNeonType(&CGF, Type);
if (!Ty)
return nullptr;
- unsigned nElts = VTy->getNumElements();
-
CodeGen::CGBuilderTy &Builder = CGF.Builder;
// AArch64 scalar builtins are not overloaded, they do not have an extra
// argument that specifies the vector type, need to handle each case.
- SmallVector<Value *, 2> TblOps;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
- TblOps.push_back(Ops[0]);
- return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
- Intrinsic::aarch64_neon_tbl1, "vtbl1");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
+ Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
+ "vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
- TblOps.push_back(Ops[0]);
- TblOps.push_back(Ops[1]);
- return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
- Intrinsic::aarch64_neon_tbl1, "vtbl1");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
+ Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
+ "vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
- TblOps.push_back(Ops[0]);
- TblOps.push_back(Ops[1]);
- TblOps.push_back(Ops[2]);
- return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
- Intrinsic::aarch64_neon_tbl2, "vtbl2");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
+ Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
+ "vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
- TblOps.push_back(Ops[0]);
- TblOps.push_back(Ops[1]);
- TblOps.push_back(Ops[2]);
- TblOps.push_back(Ops[3]);
- return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
- Intrinsic::aarch64_neon_tbl2, "vtbl2");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
+ Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
+ "vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
- TblOps.push_back(Ops[1]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
- Intrinsic::aarch64_neon_tbl1, "vtbl1");
+ Value *TblRes =
+ packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
+ Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
- llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
- Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
+ llvm::Constant *EightV = ConstantInt::get(Ty, 8);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
@@ -4083,20 +4238,16 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx2_v: {
- TblOps.push_back(Ops[1]);
- TblOps.push_back(Ops[2]);
- return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
- Intrinsic::aarch64_neon_tbx1, "vtbx1");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
+ Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
+ "vtbx1");
}
case NEON::BI__builtin_neon_vtbx3_v: {
- TblOps.push_back(Ops[1]);
- TblOps.push_back(Ops[2]);
- TblOps.push_back(Ops[3]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
- Intrinsic::aarch64_neon_tbl2, "vtbl2");
-
- llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
- Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
+ Value *TblRes =
+ packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
+ Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
+
+ llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
TwentyFourV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
@@ -4106,12 +4257,9 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx4_v: {
- TblOps.push_back(Ops[1]);
- TblOps.push_back(Ops[2]);
- TblOps.push_back(Ops[3]);
- TblOps.push_back(Ops[4]);
- return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
- Intrinsic::aarch64_neon_tbx2, "vtbx2");
+ return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
+ Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
+ "vtbx2");
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
@@ -4156,15 +4304,6 @@ Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
return Op;
}
-Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
- llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
- Op = Builder.CreateBitCast(Op, Int8Ty);
- Value *V = UndefValue::get(VTy);
- llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
- Op = Builder.CreateInsertElement(V, Op, CI);
- return Op;
-}
-
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
unsigned HintID = static_cast<unsigned>(-1);
@@ -4236,9 +4375,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
- SmallVector<Value*, 2> Ops;
+ Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
@@ -4297,14 +4436,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
- Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
- One);
- Value *Val = EmitScalarExpr(E->getArg(0));
- Builder.CreateStore(Val, Tmp);
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
- Val = Builder.CreateLoad(LdPtr);
+ Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
@@ -4342,6 +4478,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F);
}
+ if (BuiltinID == AArch64::BI__builtin_thread_pointer) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_thread_pointer);
+ return Builder.CreateCall(F);
+ }
+
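
The new AArch64 mapping above surfaces the thread pointer to user code:

    // Lowers to llvm.aarch64.thread.pointer (a TPIDR_EL0 read on typical
    // userland targets).
    void *current_thread_pointer() { return __builtin_thread_pointer(); }
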
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
@@ -4453,12 +4594,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
- return Builder.CreateLoad(Ptr);
+ return Builder.CreateDefaultAlignedLoad(Ptr);
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
- return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
+ return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
@@ -4491,8 +4632,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vpaddd_s64: {
- llvm::Type *Ty =
- llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
@@ -4505,7 +4645,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vpaddd_f64: {
llvm::Type *Ty =
- llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
+ llvm::VectorType::get(DoubleTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
@@ -4518,7 +4658,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vpadds_f32: {
llvm::Type *Ty =
- llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
+ llvm::VectorType::get(FloatTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
@@ -4566,12 +4706,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vceqzd_u64: {
- llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
- llvm::Constant::getNullValue(Ty));
- return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+ Ops[0] =
+ Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
}
case NEON::BI__builtin_neon_vceqd_f64:
case NEON::BI__builtin_neon_vcled_f64:
@@ -4645,14 +4784,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vtstd_s64:
case NEON::BI__builtin_neon_vtstd_u64: {
- llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
- llvm::Constant::getNullValue(Ty));
- return Builder.CreateSExt(Ops[0], Ty, "vtstd");
+ llvm::Constant::getNullValue(Int64Ty));
+ return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
@@ -4675,89 +4813,80 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
+ llvm::VectorType::get(DoubleTy, 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
- Ops[0] = Builder.CreateBitCast(
- Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
+ llvm::VectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
+ llvm::VectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
+ llvm::VectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
+ llvm::VectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
+ llvm::VectorType::get(FloatTy, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
+ llvm::VectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddd_s64:
@@ -4930,7 +5059,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops,
+ /*never use addresses*/ Address::invalid(), Address::invalid());
if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
return V;
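
Address::invalid(), passed just above, belongs to the pointer-plus-alignment abstraction this patch threads through the builtin emitters; a minimal sketch, simplified from the Address class this series adds in lib/CodeGen/Address.h:

#include "clang/AST/CharUnits.h"
#include "llvm/IR/Value.h"

// An (llvm::Value*, alignment) pair; "invalid" means no address was supplied.
class Address {
  llvm::Value *Pointer;
  clang::CharUnits Alignment;
public:
  Address(llvm::Value *P, clang::CharUnits A) : Pointer(P), Alignment(A) {}
  static Address invalid() { return Address(nullptr, clang::CharUnits()); }
  bool isValid() const { return Pointer != nullptr; }
  llvm::Value *getPointer() const { return Pointer; }
  clang::CharUnits getAlignment() const { return Alignment; }
};
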
@@ -5096,15 +5226,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
- llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64: {
- llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vqshrun_n_v:
@@ -5207,13 +5335,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v: {
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, quad));
- Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
if (usgn)
return Builder.CreateFPToUI(Ops[0], Ty);
return Builder.CreateFPToSI(Ops[0], Ty);
@@ -5227,13 +5349,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, quad));
- llvm::Type *Tys[2] = { Ty, InTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s32_v:
@@ -5245,13 +5361,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, quad));
- llvm::Type *Tys[2] = { Ty, InTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s32_v:
@@ -5263,13 +5373,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, quad));
- llvm::Type *Tys[2] = { Ty, InTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s32_v:
@@ -5281,13 +5385,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
- bool Double =
- (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
- llvm::Type *InTy =
- GetNeonType(this,
- NeonTypeFlags(Double ? NeonTypeFlags::Float64
- : NeonTypeFlags::Float32, false, quad));
- llvm::Type *Tys[2] = { Ty, InTy };
+ llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
@@ -5338,232 +5436,192 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 8));
+ return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmul_n_f64: {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
@@ -5572,80 +5630,68 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0],
- llvm::IntegerType::get(getLLVMContext(), 16));
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
- Ty = llvm::IntegerType::get(getLLVMContext(), 32);
- VTy =
- llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
+ Ty = Int32Ty;
+ VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -5708,7 +5754,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
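
CreateDefaultAlignedStore, used above and throughout the vld/vst cases below, is a thin CGBuilder wrapper; a rough free-function equivalent (an assumption modeled on CGBuilder.h, not the real member) shows the intent: the store takes the stored type's ABI alignment, and the distinct name keeps such call sites easy to find until they carry a real Address:

#include "llvm/IR/IRBuilder.h"

// Forwards to a plain store, letting LLVM apply the ABI alignment of the
// stored type; the wrapper exists so these call sites remain greppable.
static llvm::StoreInst *createDefaultAlignedStore(llvm::IRBuilder<> &B,
                                                  llvm::Value *Val,
                                                  llvm::Value *Addr) {
  return B.CreateStore(Val, Addr);
}
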
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
@@ -5733,32 +5779,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_st1x4;
break;
}
- SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
- IntOps.push_back(Ops[0]);
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
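
The std::rotate call above replaces the old copy-into-IntOps sequence: a left rotation by one moves the destination pointer from the front of Ops to the back, in place, which is the operand order the st1xN intrinsics expect. A standalone illustration:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

void rotateDemo() {
  std::vector<std::string> Ops = {"ptr", "val0", "val1"};
  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
  // The pointer operand now trails the values, matching the st1xN signature.
  assert((Ops == std::vector<std::string>{"val0", "val1", "ptr"}));
}
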
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- return Builder.CreateLoad(Ops[0]);
+ return Builder.CreateDefaultAlignedLoad(Ops[0]);
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
@@ -5768,7 +5813,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ return Builder.CreateDefaultAlignedStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
@@ -5778,7 +5824,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
@@ -5789,7 +5835,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
@@ -5800,7 +5846,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
@@ -5812,7 +5858,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
@@ -5824,7 +5870,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
@@ -5836,7 +5882,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
@@ -5846,12 +5892,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Ops[3] = Builder.CreateZExt(Ops[3],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
@@ -5862,12 +5907,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops[4] = Builder.CreateZExt(Ops[4],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
@@ -5879,12 +5923,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops[5] = Builder.CreateZExt(Ops[5],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
@@ -5898,8 +5941,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst2q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
- Ops[2] = Builder.CreateZExt(Ops[2],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
Ops, "");
@@ -5916,8 +5958,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst3q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
- Ops[3] = Builder.CreateZExt(Ops[3],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
Ops, "");
@@ -5934,8 +5975,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst4q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
- Ops[4] = Builder.CreateZExt(Ops[4],
- llvm::IntegerType::get(getLLVMContext(), 64));
+ Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
Ops, "");
@@ -5956,7 +5996,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -5975,7 +6015,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -5995,7 +6035,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -6072,6 +6112,31 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ if (BuiltinID == X86::BI__builtin_ms_va_start ||
+ BuiltinID == X86::BI__builtin_ms_va_end)
+ return EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
+ BuiltinID == X86::BI__builtin_ms_va_start);
+ if (BuiltinID == X86::BI__builtin_ms_va_copy) {
+ // Lower this manually. We can't reliably determine whether or not any
+ // given va_copy() is for a Win64 va_list from the calling convention
+ // alone, because it's legal to do this from a System V ABI function.
+ // With opaque pointer types, we won't have enough information in LLVM
+ // IR to determine this from the argument types, either. Best to do it
+ // now, while we have enough information.
+ Address DestAddr = EmitMSVAListRef(E->getArg(0));
+ Address SrcAddr = EmitMSVAListRef(E->getArg(1));
+
+ llvm::Type *BPP = Int8PtrPtrTy;
+
+ DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
+ DestAddr.getAlignment());
+ SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
+ SrcAddr.getAlignment());
+
+ Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
+ return Builder.CreateStore(ArgPtr, DestAddr);
+ }
+
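
A hypothetical caller of the builtins lowered above (assuming an x86-64 clang of this vintage) illustrates the comment's point: a System V ABI function may legitimately hold and copy a Win64 va_list, so the calling convention alone cannot identify one:

// All three __builtin_ms_va_* forms below are handled by the code above.
void forward_win64_args(__builtin_ms_va_list src) {
  __builtin_ms_va_list dst;
  __builtin_ms_va_copy(dst, src); // lowered manually, as explained above
  __builtin_ms_va_end(dst);
}
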
SmallVector<Value*, 4> Ops;
// Find out if any arguments are required to be integer constant expressions.
@@ -6167,7 +6232,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
ConstantInt::get(Int32Ty, 0)
};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features = Builder.CreateLoad(CpuFeatures);
+ Value *Features = Builder.CreateAlignedLoad(CpuFeatures,
+ CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Bitset = Builder.CreateAnd(
@@ -6175,13 +6241,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
}
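
The explicit CharUnits::fromQuantity(4) above encodes the layout of the __cpu_model global that the GEP indexes into; a mirror of that layout, an assumption based on libgcc's cpuinfo support, shows why only 4-byte alignment can be assumed for the features word:

// Mirrors libgcc's struct __processor_model (illustrative, not the real
// declaration); every field is a 32-bit int, so cpu_features is 4-aligned.
struct cpu_model_sketch {
  unsigned int cpu_vendor;
  unsigned int cpu_type;
  unsigned int cpu_subtype;
  unsigned int cpu_features[1];
};
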
case X86::BI_mm_prefetch: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Address = Ops[0];
Value *RW = ConstantInt::get(Int32Ty, 0);
- Value *Locality = EmitScalarExpr(E->getArg(1));
+ Value *Locality = Ops[1];
Value *Data = ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
+ case X86::BI__builtin_ia32_undef128:
+ case X86::BI__builtin_ia32_undef256:
+ case X86::BI__builtin_ia32_undef512:
+ return UndefValue::get(ConvertType(E->getType()));
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
@@ -6191,17 +6261,57 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_ldmxcsr: {
- Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
- Builder.CreateBitCast(Tmp, Int8PtrTy));
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- Value *Tmp = CreateMemTemp(E->getType());
+ Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp, Int8PtrTy));
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64: {
+ Intrinsic::ID ID;
+#define INTRINSIC_X86_XSAVE_ID(NAME) \
+ case X86::BI__builtin_ia32_##NAME: \
+ ID = Intrinsic::x86_##NAME; \
+ break
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ INTRINSIC_X86_XSAVE_ID(xsave);
+ INTRINSIC_X86_XSAVE_ID(xsave64);
+ INTRINSIC_X86_XSAVE_ID(xrstor);
+ INTRINSIC_X86_XSAVE_ID(xrstor64);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt64);
+ INTRINSIC_X86_XSAVE_ID(xrstors);
+ INTRINSIC_X86_XSAVE_ID(xrstors64);
+ INTRINSIC_X86_XSAVE_ID(xsavec);
+ INTRINSIC_X86_XSAVE_ID(xsavec64);
+ INTRINSIC_X86_XSAVE_ID(xsaves);
+ INTRINSIC_X86_XSAVE_ID(xsaves64);
+ }
+#undef INTRINSIC_X86_XSAVE_ID
+ Value *Mhi = Builder.CreateTrunc(
+ Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
+ Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Ops[1] = Mhi;
+ Ops.push_back(Mlo);
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
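
The Mhi/Mlo split above mirrors the hardware interface: the XSAVE family takes its 64-bit feature mask in EDX:EAX, so the single i64 operand becomes two i32 call operands. The same arithmetic in plain C++:

#include <cstdint>

void splitXsaveMask(uint64_t Mask, uint32_t &Mhi, uint32_t &Mlo) {
  Mhi = static_cast<uint32_t>(Mask >> 32); // high word -> EDX operand
  Mlo = static_cast<uint32_t>(Mask);       // low word  -> EAX operand
}
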
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
@@ -6217,7 +6327,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// cast pointer to i64 & store
Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256: {
@@ -6242,18 +6352,19 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
- SmallVector<llvm::Constant*, 32> Indices;
+ uint32_t Indices[32];
// 256-bit palignr operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Idx = ShiftVal + i;
if (Idx >= NumLaneElts)
Idx += NumElts - NumLaneElts; // End of lane, switch operand.
- Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
+ Indices[l + i] = Idx + l;
}
}
- Value* SV = llvm::ConstantVector::get(Indices);
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(),
+ makeArrayRef(Indices, NumElts));
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
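
Switching from ConstantVector to ConstantDataVector above lets the shuffle mask be built from raw uint32_t storage in a single call instead of allocating one ConstantInt node per lane; the shape of the call, isolated:

#include <cstdint>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"

llvm::Constant *makeShuffleMask(llvm::LLVMContext &Ctx,
                                llvm::ArrayRef<uint32_t> Indices) {
  // One packed constant for the whole mask; no per-element IR objects.
  return llvm::ConstantDataVector::get(Ctx, Indices);
}
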
case X86::BI__builtin_ia32_pslldqi256: {
@@ -6264,13 +6375,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
if (shiftVal >= 16)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
- SmallVector<llvm::Constant*, 32> Indices;
+ uint32_t Indices[32];
// 256-bit pslldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != 32; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = 32 + i - shiftVal;
if (Idx < 32) Idx -= 16; // end of lane, switch operand.
- Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
+ Indices[l + i] = Idx + l;
}
}
@@ -6278,7 +6389,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = llvm::ConstantVector::get(Indices);
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq");
llvm::Type *ResultType = ConvertType(E->getType());
return Builder.CreateBitCast(SV, ResultType, "cast");
@@ -6291,13 +6402,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
if (shiftVal >= 16)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
- SmallVector<llvm::Constant*, 32> Indices;
+ uint32_t Indices[32];
// 256-bit psrldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != 32; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = i + shiftVal;
if (Idx >= 16) Idx += 16; // end of lane, switch operand.
- Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
+ Indices[l + i] = Idx + l;
}
}
@@ -6305,7 +6416,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = llvm::ConstantVector::get(Indices);
+ Value *SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq");
llvm::Type *ResultType = ConvertType(E->getType());
return Builder.CreateBitCast(SV, ResultType, "cast");
@@ -6325,7 +6436,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *BC = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()),
"cast");
- StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Ops[1], BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
// If the operand is an integer, we can't assume alignment. Otherwise,
@@ -6377,7 +6488,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
- Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
+ Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
    // SSE comparison intrinsics
@@ -6544,6 +6656,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return nullptr;
+ // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
+ // call __builtin_readcyclecounter.
+ case PPC::BI__builtin_ppc_get_timebase:
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
+
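
From the user's side, the equivalence the comment describes looks like this (assumes a PowerPC target):

#include <cstdint>

uint64_t read_timebase() {
  // Emits the same @llvm.readcyclecounter call that
  // __builtin_readcyclecounter() would.
  return __builtin_ppc_get_timebase();
}
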
// vec_ld, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
@@ -6775,8 +6892,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
  // Translate from the intrinsic's struct return to the builtin's out
// argument.
- std::pair<llvm::Value *, unsigned> FlagOutPtr
- = EmitPointerWithAlignment(E->getArg(3));
+ Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
@@ -6791,11 +6907,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
llvm::Type *RealFlagType
- = FlagOutPtr.first->getType()->getPointerElementType();
+ = FlagOutPtr.getPointer()->getType()->getPointerElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
- llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
- FlagStore->setAlignment(FlagOutPtr.second);
+ Builder.CreateStore(FlagExt, FlagOutPtr);
return Result;
}
case AMDGPU::BI__builtin_amdgpu_div_fmas:
@@ -6846,7 +6961,7 @@ static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
SmallVector<Value *, 8> Args(NumArgs);
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
- Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs));
+ Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
@@ -7115,23 +7230,29 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__nvvm_atom_max_gen_i:
case NVPTX::BI__nvvm_atom_max_gen_l:
case NVPTX::BI__nvvm_atom_max_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
+
case NVPTX::BI__nvvm_atom_max_gen_ui:
case NVPTX::BI__nvvm_atom_max_gen_ul:
case NVPTX::BI__nvvm_atom_max_gen_ull:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
case NVPTX::BI__nvvm_atom_min_gen_i:
case NVPTX::BI__nvvm_atom_min_gen_l:
case NVPTX::BI__nvvm_atom_min_gen_ll:
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
+
case NVPTX::BI__nvvm_atom_min_gen_ui:
case NVPTX::BI__nvvm_atom_min_gen_ul:
case NVPTX::BI__nvvm_atom_min_gen_ull:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
+ return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
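
The signed/unsigned split above matters because one bit pattern orders differently under the two comparisons, which is why the ui/ul/ull variants now map to UMax/UMin; a compile-time check of the distinction:

#include <algorithm>

// 0xFFFFFFFF is -1 as a signed 32-bit value but the maximum as unsigned.
static_assert(std::max(-1, 1) == 1, "signed order");
static_assert(std::max(0xFFFFFFFFu, 1u) == 0xFFFFFFFFu, "unsigned order");
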
case NVPTX::BI__nvvm_atom_cas_gen_i:
case NVPTX::BI__nvvm_atom_cas_gen_l:
case NVPTX::BI__nvvm_atom_cas_gen_ll:
- return MakeAtomicCmpXchgValue(*this, E, true);
+ // __nvvm_atom_cas_gen_* should return the old value rather than the
+ // success flag.
+ return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
case NVPTX::BI__nvvm_atom_add_gen_f: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
@@ -7147,3 +7268,22 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
return nullptr;
}
}
+
+Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_memory_size: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
+ return Builder.CreateCall(Callee);
+ }
+ case WebAssembly::BI__builtin_wasm_grow_memory: {
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType());
+ return Builder.CreateCall(Callee, X);
+ }
+
+ default:
+ return nullptr;
+ }
+}
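
A hypothetical use of the two builtins handled above (assuming a wasm32 clang of this vintage, where both builtins work in units of pages and memory_size takes no arguments):

unsigned long grow_then_query(unsigned long delta_pages) {
  __builtin_wasm_grow_memory(delta_pages);  // -> @llvm.wasm.grow.memory
  return __builtin_wasm_memory_size();      // -> @llvm.wasm.memory.size
}
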
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 67d0ab7a82f7..045e19b189dc 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -57,9 +57,9 @@ private:
unsigned Alignment = 0) {
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
- auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
+ ConstStr.getPointer(), Zeros);
}
void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
@@ -121,7 +121,7 @@ void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
std::vector<llvm::Type *> ArgTypes;
for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(*I);
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I).getPointer();
ArgValues.push_back(V);
assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
@@ -173,7 +173,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterKernelsFn() {
llvm::GlobalValue::InternalLinkage, "__cuda_register_kernels", &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
- CGBuilderTy Builder(Context);
+ CGBuilderTy Builder(CGM, Context);
Builder.SetInsertPoint(EntryBB);
// void __cudaRegisterFunction(void **, const char *, char *, const char *,
@@ -230,7 +230,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
- CGBuilderTy CtorBuilder(Context);
+ CGBuilderTy CtorBuilder(CGM, Context);
CtorBuilder.SetInsertPoint(CtorEntryBB);
@@ -267,7 +267,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
- CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryHandle, false);
+ CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
+ CGM.getPointerAlign());
// Call __cuda_register_kernels(GpuBinaryHandle);
CtorBuilder.CreateCall(RegisterKernelsFunc, RegisterFatbinCall);
@@ -300,12 +301,13 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
- CGBuilderTy DtorBuilder(Context);
+ CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) {
- DtorBuilder.CreateCall(UnregisterFatbinFunc,
- DtorBuilder.CreateLoad(GpuBinaryHandle, false));
+ auto HandleValue =
+ DtorBuilder.CreateAlignedLoad(GpuBinaryHandle, CGM.getPointerAlign());
+ DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
}
DtorBuilder.CreateRetVoid();
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 7d7ed784b181..6847df9b749b 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -28,6 +28,7 @@
using namespace clang;
using namespace CodeGen;
+
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
@@ -39,6 +40,12 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (getCodeGenOpts().OptimizationLevel == 0)
return true;
+ // If sanitizing memory to check for use-after-dtor, do not emit as
+ // an alias, unless this class owns no members.
+ if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ !D->getParent()->field_empty())
+ return true;
+
// If the destructor doesn't have a trivial body, we have to emit it
// separately.
if (!D->hasTrivialBody())
@@ -124,11 +131,6 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
- // Don't create a weak alias for a dllexport'd symbol.
- if (AliasDecl.getDecl()->hasAttr<DLLExportAttr>() &&
- llvm::GlobalValue::isWeakForLinker(Linkage))
- return true;
-
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
@@ -141,8 +143,8 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return false;
// Derive the type for the alias.
- llvm::PointerType *AliasType
- = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+ llvm::Type *AliasValueType = getTypes().GetFunctionType(AliasDecl);
+ llvm::PointerType *AliasType = AliasValueType->getPointerTo();
// Find the referent. Some aliases might require a bitcast, in
// which case the caller is responsible for ensuring the soundness
@@ -166,6 +168,16 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return false;
}
+ // If we have a weak, non-discardable alias (weak, weak_odr), like an extern
+ // template instantiation or a dllexported class, avoid forming it on COFF.
+ // A COFF weak external alias cannot satisfy a normal undefined symbol
+ // reference from another TU. The other TU must also mark the referenced
+ // symbol as weak, which we cannot rely on.
+ if (llvm::GlobalValue::isWeakForLinker(Linkage) &&
+ getTriple().isOSBinFormatCOFF()) {
+ return true;
+ }
+
if (!InEveryTU) {
// If we don't have a definition for the destructor yet, don't
// emit. We can't emit aliases to declarations; that's just not
@@ -182,8 +194,8 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return true;
// Create the alias with no name.
- auto *Alias =
- llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee, &getModule());
+ auto *Alias = llvm::GlobalAlias::create(AliasValueType, 0, Linkage, "",
+ Aliasee, &getModule());
// Switch any previous uses to the alias.
if (Entry) {
@@ -207,7 +219,8 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
const CGFunctionInfo &FnInfo =
getTypes().arrangeCXXStructorDeclaration(MD, Type);
auto *Fn = cast<llvm::Function>(
- getAddrOfCXXStructor(MD, Type, &FnInfo, nullptr, true));
+ getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
+ /*DontDefer=*/true, /*IsForDefinition=*/true));
GlobalDecl GD;
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
@@ -226,9 +239,9 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
return Fn;
}
-llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor(
+llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
- llvm::FunctionType *FnType, bool DontDefer) {
+ llvm::FunctionType *FnType, bool DontDefer, bool IsForDefinition) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
@@ -236,19 +249,15 @@ llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor(
GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
}
- StringRef Name = getMangledName(GD);
- if (llvm::GlobalValue *Existing = GetGlobalValue(Name))
- return Existing;
-
if (!FnType) {
if (!FnInfo)
FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
FnType = getTypes().GetFunctionType(*FnInfo);
}
- return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FnType, GD,
- /*ForVTable=*/false,
- DontDefer));
+ return GetOrCreateLLVMFunction(
+ getMangledName(GD), FnType, GD, /*ForVTable=*/false, DontDefer,
+ /*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeSet(), IsForDefinition);
}
static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
@@ -270,7 +279,7 @@ static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- return CGF.Builder.CreateLoad(VFuncPtr);
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
}
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index dc16616df9c5..e4da447eddc7 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
+#include "CGCleanup.h"
using namespace clang;
using namespace CodeGen;
@@ -73,25 +74,28 @@ CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
}
llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
+ ThisPtrForCall = This.getPointer();
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
return llvm::Constant::getNullValue(FTy->getPointerTo());
}
llvm::Value *
CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
return llvm::Constant::getNullValue(Ty);
}
@@ -159,13 +163,24 @@ void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
&CGM.getContext().Idents.get("this"),
MD->getThisType(CGM.getContext()));
params.push_back(ThisDecl);
- getThisDecl(CGF) = ThisDecl;
+ CGF.CXXABIThisDecl = ThisDecl;
+
+ // Compute the presumed alignment of 'this', which basically comes
+ // down to whether we know it's a complete object or not.
+ auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
+ if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
+ MD->getParent()->hasAttr<FinalAttr>() ||
+ !isThisCompleteObject(CGF.CurGD)) {
+ CGF.CXXABIThisAlignment = Layout.getAlignment();
+ } else {
+ CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
+ }
}
void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
  // Initialize the 'this' slot.
assert(getThisDecl(CGF) && "no 'this' variable for function");
- getThisValue(CGF)
+ CGF.CXXABIThisValue
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
"this");
}
@@ -186,14 +201,14 @@ CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
return CharUnits::Zero();
}
-llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) {
+Address CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
// Should never be called.
ErrorUnsupportedABI(CGF, "array cookie initialization");
- return nullptr;
+ return Address::invalid();
}
bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
@@ -215,31 +230,30 @@ bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
return expr->getAllocatedType().isDestructedType();
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
const CXXDeleteExpr *expr, QualType eltTy,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- unsigned AS = ptr->getType()->getPointerAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
- ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
+ ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
- allocPtr = ptr;
+ allocPtr = ptr.getPointer();
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
- allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr,
- -cookieSize.getQuantity());
- numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize);
+ Address allocAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
+ allocPtr = allocAddr.getPointer();
+ numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
}
llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *ptr,
+ Address ptr,
CharUnits cookieSize) {
ErrorUnsupportedABI(CGF, "reading a new[] cookie");
return llvm::ConstantInt::get(CGF.SizeTy, 0);
@@ -308,3 +322,11 @@ CGCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
// Just call std::terminate and ignore the violating exception.
return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}
+
+CatchTypeInfo CGCXXABI::getCatchAllTypeInfo() {
+ return CatchTypeInfo{nullptr, 0};
+}
+
+std::vector<CharUnits> CGCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) {
+ return std::vector<CharUnits>();
+}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 436b96a615ef..3f240b1802b8 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -37,6 +37,7 @@ class MangleContext;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
+struct CatchTypeInfo;
/// \brief Implements C++ ABI-specific code generation functions.
class CGCXXABI {
@@ -48,12 +49,15 @@ protected:
: CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
- ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ ImplicitParamDecl *getThisDecl(CodeGenFunction &CGF) {
return CGF.CXXABIThisDecl;
}
- llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ llvm::Value *getThisValue(CodeGenFunction &CGF) {
return CGF.CXXABIThisValue;
}
+ Address getThisAddress(CodeGenFunction &CGF) {
+ return Address(CGF.CXXABIThisValue, CGF.CXXABIThisAlignment);
+ }
/// Issue a diagnostic about unsupported features in the ABI.
void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
@@ -77,6 +81,12 @@ protected:
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
+ /// Determine whether there's something special about the rules of
+ /// the ABI that tells us that 'this' is a complete object within the
+ /// given function. Obvious common logic, like the method being
+ /// defined on a final class, will have been taken care of by the
+ /// caller.
+ virtual bool isThisCompleteObject(GlobalDecl GD) const = 0;
+
public:
virtual ~CGCXXABI();
@@ -135,13 +145,14 @@ public:
/// pointer. Apply the this-adjustment and set 'This' to the
/// adjusted value.
virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
- llvm::Value *MemPtr, const MemberPointerType *MPT);
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
/// Calculate an l-value from an object and a data member pointer.
virtual llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT);
/// Perform a derived-to-base, base-to-derived, or bitcast member
@@ -164,10 +175,6 @@ public:
return true;
}
- virtual bool isTypeInfoCalculable(QualType Ty) const {
- return !Ty->isIncompleteType();
- }
-
/// Create a null member pointer of the given type.
virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
@@ -212,12 +219,17 @@ protected:
public:
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) = 0;
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0;
virtual llvm::GlobalVariable *getThrowInfo(QualType T) { return nullptr; }
+ /// \brief Determine whether it's possible to emit a vtable for \p RD, even
+ /// though we do not know that the vtable has been marked as used by semantic
+ /// analysis.
+ virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0;
+
virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) = 0;
virtual llvm::CallInst *
@@ -225,33 +237,34 @@ public:
llvm::Value *Exn);
virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0;
- virtual llvm::Constant *
+ virtual CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) = 0;
+ virtual CatchTypeInfo getCatchAllTypeInfo();
virtual bool shouldTypeidBeNullChecked(bool IsDeref,
QualType SrcRecordTy) = 0;
virtual void EmitBadTypeidCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) = 0;
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) = 0;
virtual llvm::Value *
- EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
- llvm::Value *Value,
+ Address Value,
QualType SrcRecordTy,
QualType DestTy) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) = 0;
@@ -294,10 +307,9 @@ public:
/// Perform ABI-specific "this" argument adjustment required prior to
/// a call of a virtual function.
/// The "VirtualCall" argument is true iff the call itself is virtual.
- virtual llvm::Value *
+ virtual Address
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
- bool VirtualCall) {
+ Address This, bool VirtualCall) {
return This;
}
@@ -337,19 +349,31 @@ public:
virtual void EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This) = 0;
+ Address This) = 0;
/// Emits the VTable definitions required for the given record type.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) = 0;
+ /// Checks if the ABI requires an extra virtual offset for the vtable field.
+ virtual bool
+ isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) = 0;
+
+ /// Checks if the ABI requires the vptrs to be initialized for the given
+ /// dynamic class.
+ virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) = 0;
+
+ /// Get the address point of the vtable for the given base subobject.
+ virtual llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) = 0;
+
/// Get the address point of the vtable for the given base subobject while
- /// building a constructor or a destructor. On return, NeedsVirtualOffset
- /// tells if a virtual base adjustment is needed in order to get the offset
- /// of the base subobject.
- virtual llvm::Value *getVTableAddressPointInStructor(
- CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base,
- const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) = 0;
+ /// building a constructor or a destructor.
+ virtual llvm::Value *
+ getVTableAddressPointInStructor(CodeGenFunction &CGF, const CXXRecordDecl *RD,
+ BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) = 0;
/// Get the address point of the vtable for the given base subobject while
/// building a constexpr.
@@ -365,14 +389,14 @@ public:
/// Build a virtual function pointer in the ABI-specific way.
virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) = 0;
/// Emit the ABI-specific virtual destructor call.
virtual llvm::Value *
EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType, llvm::Value *This,
+ CXXDtorType DtorType, Address This,
const CXXMemberCallExpr *CE) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
@@ -388,11 +412,11 @@ public:
GlobalDecl GD, bool ReturnAdjustment) = 0;
virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) = 0;
virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
- llvm::Value *Ret,
+ Address Ret,
const ReturnAdjustment &RA) = 0;
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
@@ -401,6 +425,9 @@ public:
virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
FunctionArgList &Args) const = 0;
+ /// Gets the offsets of all the virtual base pointers in a given class.
+ virtual std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD);
+
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
@@ -429,11 +456,11 @@ public:
/// always a size_t
/// \param ElementType - the base element allocated type,
/// i.e. the allocated type after stripping all array types
- virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType);
+ virtual Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
/// Reads the array cookie associated with the given pointer,
/// if it has one.
@@ -448,7 +475,7 @@ public:
/// function
/// \param CookieSize - an out parameter which will be initialized
/// with the size of the cookie, or zero if there is no cookie
- virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, Address Ptr,
const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
@@ -471,8 +498,7 @@ protected:
/// Other parameters are as above.
///
/// \return a size_t
- virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &IGF,
- llvm::Value *ptr,
+ virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &IGF, Address ptr,
CharUnits cookieSize);
public:
@@ -512,11 +538,9 @@ public:
/// thread_local variables, a list of functions to perform the
/// initialization.
virtual void EmitThreadLocalInitFuncs(
- CodeGenModule &CGM,
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
- CXXThreadLocals,
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
- ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) = 0;
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) = 0;
// Determine if references to thread_local global variables can be made
// directly or require access through a thread wrapper function.
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 0bcf59bb5c3f..49b5df0c4f06 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -15,12 +15,14 @@
#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
@@ -91,15 +93,41 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
FTNP->getExtInfo(), RequiredArgs(0));
}
+/// Adds the formal parameters in FPT to the given prefix. If any parameter in
+/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
+static void appendParameterTypes(const CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ const CanQual<FunctionProtoType> &FPT,
+ const FunctionDecl *FD) {
+ // Fast path: no function decl, so there can be no pass_object_size params.
+ if (FD == nullptr) {
+ prefix.append(FPT->param_type_begin(), FPT->param_type_end());
+ return;
+ }
+
+ // In the vast majority of cases, we'll have precisely FPT->getNumParams()
+ // parameters; the only thing that can change this is the presence of
+ // pass_object_size. So, we preallocate for the common case.
+ prefix.reserve(prefix.size() + FPT->getNumParams());
+
+ assert(FD->getNumParams() == FPT->getNumParams());
+ for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
+ prefix.push_back(FPT->getParamType(I));
+ if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
+ prefix.push_back(CGT.getContext().getSizeType());
+ }
+}
+
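A minimal sketch of the source construct this handles (my own example, not from the patch):

    // Each pass_object_size parameter is lowered with an implicit
    // trailing size_t, which callers populate from
    // __builtin_object_size(buf, 0) at the call site.
    void fill(char *buf __attribute__((pass_object_size(0))), char c);
    // Effective lowered signature: void fill(char *buf, size_t n, char c);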
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
+ CanQual<FunctionProtoType> FTP,
+ const FunctionDecl *FD) {
RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
- prefix.append(FTP->param_type_begin(), FTP->param_type_end());
+ appendParameterTypes(CGT, prefix, FTP, FD);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
/*chainCall=*/false, prefix,
@@ -109,10 +137,11 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
+ const FunctionDecl *FD) {
SmallVector<CanQualType, 16> argTypes;
return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
- FTP);
+ FTP, FD);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -155,7 +184,8 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP) {
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD) {
SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
@@ -166,7 +196,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
return ::arrangeLLVMFunctionInfo(
*this, true, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}
/// Arrange the argument and result information for a declaration or
@@ -183,10 +213,10 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
if (MD->isInstance()) {
// The abstract case is perfectly fine.
const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
- return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
+ return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
}
- return arrangeFreeFunctionType(prototype);
+ return arrangeFreeFunctionType(prototype, MD);
}
const CGFunctionInfo &
@@ -207,7 +237,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
// Add the formal parameters.
- argTypes.append(FTP->param_type_begin(), FTP->param_type_end());
+ appendParameterTypes(*this, argTypes, FTP, MD);
TheCXXABI.buildStructorSignature(MD, Type, argTypes);
@@ -273,7 +303,7 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
}
assert(isa<FunctionProtoType>(FTy));
- return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
+ return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}
/// Arrange the argument and result information for the declaration or
@@ -553,6 +583,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->HasRegParm = info.getHasRegParm();
FI->RegParm = info.getRegParm();
FI->ArgStruct = nullptr;
+ FI->ArgStructAlign = 0;
FI->NumArgs = argTypes.size();
FI->getArgsBuffer()[0].type = resultType;
for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
@@ -726,6 +757,21 @@ CodeGenTypes::getExpandedTypes(QualType Ty,
}
}
+static void forConstantArrayExpansion(CodeGenFunction &CGF,
+ ConstantArrayExpansion *CAE,
+ Address BaseAddr,
+ llvm::function_ref<void(Address)> Fn) {
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
+ CharUnits EltAlign =
+ BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
+
+ for (int i = 0, n = CAE->NumElts; i < n; i++) {
+ llvm::Value *EltAddr =
+ CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
+ Fn(Address(EltAddr, EltAlign));
+ }
+}
+
void CodeGenFunction::ExpandTypeFromArgs(
QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
assert(LV.isSimple() &&
@@ -733,17 +779,16 @@ void CodeGenFunction::ExpandTypeFromArgs(
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- for (int i = 0, n = CAExp->NumElts; i < n; i++) {
- llvm::Value *EltAddr =
- Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
+ forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
+ [&](Address EltAddr) {
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
- }
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- llvm::Value *This = LV.getAddress();
+ Address This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
- llvm::Value *Base =
+ Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
LValue SubLV = MakeAddrLValue(Base, BS->getType());
@@ -756,15 +801,10 @@ void CodeGenFunction::ExpandTypeFromArgs(
LValue SubLV = EmitLValueForField(LV, FD);
ExpandTypeFromArgs(FD->getType(), SubLV, AI);
}
- } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
- llvm::Value *RealAddr =
- Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
- EmitStoreThroughLValue(RValue::get(*AI++),
- MakeAddrLValue(RealAddr, CExp->EltTy));
- llvm::Value *ImagAddr =
- Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
- EmitStoreThroughLValue(RValue::get(*AI++),
- MakeAddrLValue(ImagAddr, CExp->EltTy));
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ auto realValue = *AI++;
+ auto imagValue = *AI++;
+ EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
} else {
assert(isa<NoExpansion>(Exp.get()));
EmitStoreThroughLValue(RValue::get(*AI++), LV);
@@ -776,18 +816,17 @@ void CodeGenFunction::ExpandTypeToArgs(
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- llvm::Value *Addr = RV.getAggregateAddr();
- for (int i = 0, n = CAExp->NumElts; i < n; i++) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
+ forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
+ [&](Address EltAddr) {
RValue EltRV =
convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
- }
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- llvm::Value *This = RV.getAggregateAddr();
+ Address This = RV.getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
- llvm::Value *Base =
+ Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
RValue BaseRV = RValue::getAggregate(Base);
@@ -822,12 +861,22 @@ void CodeGenFunction::ExpandTypeToArgs(
}
}
+/// Create a temporary allocation for the purposes of coercion.
+static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
+ CharUnits MinAlign) {
+ // Don't use an alignment that's worse than what LLVM would prefer.
+ auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
+ CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
+
+ return CGF.CreateTempAlloca(Ty, Align);
+}
+
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
-static llvm::Value *
-EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+static Address
+EnterStructPointerForCoercedAccess(Address SrcPtr,
llvm::StructType *SrcSTy,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
@@ -846,11 +895,10 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
return SrcPtr;
// GEP into the first element.
- SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");
+ SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
// If the first element is a struct, recurse.
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = SrcPtr.getElementType();
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
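A sketch of the "dive" with illustrative types (my example):

    struct Inner { int x; };
    struct Outer { Inner inner; };
    // A coerced i32-sized access through an Outer* steps through the
    // zero-offset "coerce.dive" GEPs Outer* -> Inner* -> int*,
    // stopping before it would enter an element smaller than the
    // access being performed.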
@@ -918,21 +966,19 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
-static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
- llvm::Type *Ty, CharUnits SrcAlign,
+static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
CodeGenFunction &CGF) {
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = Src.getElementType();
// If SrcTy and Ty are the same, just do a load.
if (SrcTy == Ty)
- return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
+ return CGF.Builder.CreateLoad(Src);
uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
- SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
- SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
+ SrcTy = Src.getType()->getElementType();
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -941,8 +987,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
(isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
- llvm::LoadInst *Load =
- CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
+ llvm::Value *Load = CGF.Builder.CreateLoad(Src);
return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
}
@@ -954,22 +999,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
//
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
- return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
- }
-
- // Otherwise do coercion through memory. This is stupid, but
- // simple.
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
- Tmp->setAlignment(SrcAlign.getQuantity());
- llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
- llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
- llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
+ Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
+ return CGF.Builder.CreateLoad(Src);
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but simple.
+ Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
CGF.Builder.CreateMemCpy(Casted, SrcCasted,
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
- SrcAlign.getQuantity(), false);
- return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
+ false);
+ return CGF.Builder.CreateLoad(Tmp);
}
// Function to store a first-class aggregate into memory. We prefer to
@@ -977,8 +1018,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
- llvm::Value *DestPtr, bool DestIsVolatile,
- CharUnits DestAlign) {
+ Address Dest, bool DestIsVolatile) {
// Prefer scalar stores to first-class aggregate stores.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
@@ -986,17 +1026,13 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
CGF.CGM.getDataLayout().getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
+ auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
+ Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
- uint64_t EltOffset = Layout->getElementOffset(i);
- CharUnits EltAlign =
- DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
- CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
- DestIsVolatile);
+ CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
} else {
- CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
- DestIsVolatile);
+ CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
}
}
@@ -1007,24 +1043,21 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
- llvm::Value *DstPtr,
+ Address Dst,
bool DstIsVolatile,
- CharUnits DstAlign,
CodeGenFunction &CGF) {
llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy =
- cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ llvm::Type *DstTy = Dst.getType()->getElementType();
if (SrcTy == DstTy) {
- CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
- DstIsVolatile);
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
- DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
- DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
+ DstTy = Dst.getType()->getElementType();
}
// If the source and destination are integer or pointer types, just do an
@@ -1032,8 +1065,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
(isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
- CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
- DstIsVolatile);
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
}
@@ -1041,9 +1073,8 @@ static void CreateCoercedStore(llvm::Value *Src,
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
- BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
+ Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
+ BuildAggStore(CGF, Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -1054,16 +1085,25 @@ static void CreateCoercedStore(llvm::Value *Src,
//
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
- Tmp->setAlignment(DstAlign.getQuantity());
- CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
- llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
- llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
- llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
+ Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
+ CGF.Builder.CreateStore(Src, Tmp);
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
CGF.Builder.CreateMemCpy(DstCasted, Casted,
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
- DstAlign.getQuantity(), false);
+ false);
+ }
+}
+
+static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
+ const ABIArgInfo &info) {
+ if (unsigned offset = info.getDirectOffset()) {
+ addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
+ addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
+ CharUnits::fromQuantity(offset));
+ addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
}
+ return addr;
}
namespace {
@@ -1380,8 +1420,19 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
return GetFunctionType(*Info);
}
+static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
+ llvm::AttrBuilder &FuncAttrs,
+ const FunctionProtoType *FPT) {
+ if (!FPT)
+ return;
+
+ if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
+ FPT->isNothrow(Ctx))
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+}
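The practical effect, as I read it, is that nounwind can now come from a callee's type rather than only its declaration, e.g. for indirect calls:

    void (*fp)() noexcept;
    void caller() {
      fp();   // the call site can be marked nounwind from the noexcept
              // prototype, even when no callee decl is available
    }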
+
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
- const Decl *TargetDecl,
+ CGCalleeInfo CalleeInfo,
AttributeListType &PAL,
unsigned &CallingConv,
bool AttrOnCallSite) {
@@ -1394,6 +1445,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (FI.isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+ // If we have information about the function prototype, we can learn
+ // attributes from there.
+ AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
+ CalleeInfo.getCalleeFunctionProtoType());
+
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
@@ -1406,9 +1464,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
- const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
- if (FPT && FPT->isNothrow(getContext()))
- FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ AddAttributesFromFunctionProtoType(
+ getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
// Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
// These attributes are not inherited by overloads.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
@@ -1416,13 +1473,16 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
}
- // 'const' and 'pure' attribute functions are also nounwind.
+ // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
} else if (TargetDecl->hasAttr<PureAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
+ FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
if (TargetDecl->hasAttr<RestrictAttr>())
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
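For reference, a sketch of the source-level attributes involved; the noalias case is the Microsoft __declspec spelling, per my reading of NoAliasAttr:

    __attribute__((const)) int sq(int x);          // readnone + nounwind
    __attribute__((pure)) int hash(const char *s); // readonly + nounwind
    __declspec(noalias) void scale(double *v);     // argmemonly + nounwind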
@@ -1466,8 +1526,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
}
+ bool DisableTailCalls =
+ CodeGenOpts.DisableTailCalls ||
+ (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
FuncAttrs.addAttribute("disable-tail-calls",
- llvm::toStringRef(CodeGenOpts.DisableTailCalls));
+ llvm::toStringRef(DisableTailCalls));
+
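A sketch of the new per-function opt-out (assuming the disable_tail_calls attribute spelling):

    // Marks just this function with "disable-tail-calls"="true",
    // independent of the global codegen option:
    __attribute__((disable_tail_calls))
    void trace(const char *msg);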
FuncAttrs.addAttribute("less-precise-fpmad",
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
FuncAttrs.addAttribute("no-infs-fp-math",
@@ -1481,77 +1545,53 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute("stack-protector-buffer-size",
llvm::utostr(CodeGenOpts.SSPBufferSize));
- if (!CodeGenOpts.StackRealignment)
- FuncAttrs.addAttribute("no-realign-stack");
+ if (CodeGenOpts.StackRealignment)
+ FuncAttrs.addAttribute("stackrealign");
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
-
- // TODO: Features gets us the features on the command line including
- // feature dependencies. For canonicalization purposes we might want to
- // avoid putting features in the target-features set if we know it'll be
- // one of the default features in the backend, e.g. corei7-avx and +avx or
- // figure out non-explicit dependencies.
- // Canonicalize the existing features in a new feature map.
- // TODO: Migrate the existing backends to keep the map around rather than
- // the vector.
- llvm::StringMap<bool> FeatureMap;
- for (auto F : getTarget().getTargetOpts().Features) {
- const char *Name = F.c_str();
- bool Enabled = Name[0] == '+';
- getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
- }
-
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
- if (FD) {
- if (const auto *TD = FD->getAttr<TargetAttr>()) {
- StringRef FeaturesStr = TD->getFeatures();
- SmallVector<StringRef, 1> AttrFeatures;
- FeaturesStr.split(AttrFeatures, ",");
-
- // Grab the various features and prepend a "+" to turn on the feature to
- // the backend and add them to our existing set of features.
- for (auto &Feature : AttrFeatures) {
- // Go ahead and trim whitespace rather than either erroring or
- // accepting it weirdly.
- Feature = Feature.trim();
-
- // While we're here iterating check for a different target cpu.
- if (Feature.startswith("arch="))
- TargetCPU = Feature.split("=").second.trim();
- else if (Feature.startswith("tune="))
- // We don't support cpu tuning this way currently.
- ;
- else if (Feature.startswith("fpmath="))
- // TODO: Support the fpmath option this way. It will require checking
- // overall feature validity for the function with the rest of the
- // attributes on the function.
- ;
- else if (Feature.startswith("mno-"))
- getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second,
- false);
- else
- getTarget().setFeatureEnabled(FeatureMap, Feature, true);
- }
+ if (FD && FD->hasAttr<TargetAttr>()) {
+ llvm::StringMap<bool> FeatureMap;
+ getFunctionFeatureMap(FeatureMap, FD);
+
+ // Produce the canonical string for this set of features.
+ std::vector<std::string> Features;
+ for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
+ ie = FeatureMap.end();
+ it != ie; ++it)
+ Features.push_back((it->second ? "+" : "-") + it->first().str());
+
+ // Now add the target-cpu and target-features to the function.
+ // While we populated the feature map above, we still need to
+ // get and parse the target attribute so we can get the cpu for
+ // the function.
+ const auto *TD = FD->getAttr<TargetAttr>();
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+ if (ParsedAttr.second != "")
+ TargetCPU = ParsedAttr.second;
+ if (TargetCPU != "")
+ FuncAttrs.addAttribute("target-cpu", TargetCPU);
+ if (!Features.empty()) {
+ std::sort(Features.begin(), Features.end());
+ FuncAttrs.addAttribute(
+ "target-features",
+ llvm::join(Features.begin(), Features.end(), ","));
+ }
+ } else {
+ // Otherwise just add the existing target cpu and target features to the
+ // function.
+ std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
+ if (TargetCPU != "")
+ FuncAttrs.addAttribute("target-cpu", TargetCPU);
+ if (!Features.empty()) {
+ std::sort(Features.begin(), Features.end());
+ FuncAttrs.addAttribute(
+ "target-features",
+ llvm::join(Features.begin(), Features.end(), ","));
}
- }
-
- // Produce the canonical string for this set of features.
- std::vector<std::string> Features;
- for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
- ie = FeatureMap.end();
- it != ie; ++it)
- Features.push_back((it->second ? "+" : "-") + it->first().str());
-
- // Now add the target-cpu and target-features to the function.
- if (TargetCPU != "")
- FuncAttrs.addAttribute("target-cpu", TargetCPU);
- if (!Features.empty()) {
- std::sort(Features.begin(), Features.end());
- FuncAttrs.addAttribute("target-features",
- llvm::join(Features.begin(), Features.end(), ","));
}
}
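A sketch of what this path canonicalizes, with illustrative cpu/feature names on an assumed x86 target:

    __attribute__((target("arch=haswell,sse4.2")))
    void fast_path();
    // -> "target-cpu"="haswell" plus a sorted, comma-joined
    //    "target-features" string such as "+popcnt,+sse4.2,..."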
@@ -1655,20 +1695,37 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs.addAttribute(llvm::Attribute::InReg);
break;
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
if (AI.getIndirectByVal())
Attrs.addAttribute(llvm::Attribute::ByVal);
- Attrs.addAlignmentAttr(AI.getIndirectAlign());
+ CharUnits Align = AI.getIndirectAlign();
+
+ // In a byval argument, it is important that the required
+ // alignment of the type is honored, as LLVM might be creating a
+ // *new* stack object, and needs to know what alignment to give
+ // it. (Sometimes it can deduce a sensible alignment on its own,
+ // but not if clang decides it must emit a packed struct, or the
+ // user specifies increased alignment requirements.)
+ //
+ // This is different from indirect *not* byval, where the object
+ // exists already, and the align attribute is purely
+ // informative.
+ assert(!Align.isZero());
+
+ // For now, only add this when we have a byval argument.
+ // TODO: be less lazy about updating test cases.
+ if (AI.getIndirectByVal())
+ Attrs.addAlignmentAttr(Align.getQuantity());
// byval disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
-
+ }
case ABIArgInfo::Ignore:
case ABIArgInfo::Expand:
continue;
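A sketch of the byval case the comment above describes (hypothetical over-aligned type):

    struct alignas(32) Vec8 { float v[8]; };
    void consume(Vec8 v);
    // 'v' is passed indirect byval; the 'align 32' argument attribute
    // tells LLVM what alignment to give the fresh stack object it may
    // create for the copy.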
@@ -1788,10 +1845,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
- llvm::Value *ArgStruct = nullptr;
+ Address ArgStruct = Address::invalid();
+ const llvm::StructLayout *ArgStructLayout = nullptr;
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
- assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
+ ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
+ ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
+ FI.getArgStructAlignment());
+
+ assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
}
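For context, a sketch of when the inalloca argument struct appears (assuming the i686 Windows ABI):

    struct S { S(const S &); int x; };
    void f(int a, S s);
    // On i686-pc-windows-msvc, 's' must be constructed directly in the
    // argument area, so all memory arguments become GEPs into one
    // inalloca argument struct whose layout FI.getArgStruct() records.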
// Name the struct return parameter.
@@ -1805,9 +1866,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
// into a local alloca for us.
- enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
- typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
- SmallVector<ValueAndIsPtr, 16> ArgVals;
+ SmallVector<ParamValue, 16> ArgVals;
ArgVals.reserve(Args.size());
// Create a pointer value for every parameter declaration. This usually
@@ -1833,49 +1892,47 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
- llvm::Value *V =
- Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
- ArgI.getInAllocaFieldIndex(), Arg->getName());
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ auto FieldIndex = ArgI.getInAllocaFieldIndex();
+ CharUnits FieldOffset =
+ CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
+ Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
+ Arg->getName());
+ ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- llvm::Value *V = FnArgs[FirstIRArg];
+ Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
- // need to do is realign the value, if requested
+ // need to do is realign the value, if requested.
+ Address V = ParamAddr;
if (ArgI.getIndirectRealign()) {
- llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+ Address AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
// appropriate alignment.
//
// FIXME: We should have a common utility for generating an aggregate
// copy.
- llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
- llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
- llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
- Builder.CreateMemCpy(Dst,
- Src,
- llvm::ConstantInt::get(IntPtrTy,
- Size.getQuantity()),
- ArgI.getIndirectAlign(),
- false);
+ auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
+ Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
+ Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
+ Builder.CreateMemCpy(Dst, Src, SizeVal, false);
V = AlignedTemp;
}
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(V));
} else {
// Load scalar value from indirect argument.
- V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
- Arg->getLocStart());
+ llvm::Value *V =
+ EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
}
break;
}
@@ -1980,87 +2037,66 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (V->getType() != LTy)
V = Builder.CreateBitCast(V, LTy);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
break;
}
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
-
- // The alignment we need to use is the max of the requested alignment for
- // the argument plus the alignment required by our access code below.
- unsigned AlignmentToUse =
- CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
- AlignmentToUse = std::max(AlignmentToUse,
- (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
+ Arg->getName());
- Alloca->setAlignment(AlignmentToUse);
- llvm::Value *V = Alloca;
- llvm::Value *Ptr = V; // Pointer to store into.
- CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);
-
- // If the value is offset in memory, apply the offset now.
- if (unsigned Offs = ArgI.getDirectOffset()) {
- Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
- Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
- Ptr = Builder.CreateBitCast(Ptr,
- llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
- PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ // Pointer to store into.
+ Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::Type *DstTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ llvm::Type *DstTy = Ptr.getElementType();
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
+ Address AddrToStoreInto = Address::invalid();
if (SrcSize <= DstSize) {
- Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
-
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ AddrToStoreInto =
+ Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
} else {
- llvm::AllocaInst *TempAlloca =
- CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
- TempAlloca->setAlignment(AlignmentToUse);
- llvm::Value *TempV = TempAlloca;
-
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr =
- Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
- Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr =
+ Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
+ Builder.CreateStore(AI, EltPtr);
+ }
+
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
}
+
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
}
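A sketch of the flattened case (assuming the x86-64 SysV ABI):

    struct Point { double x, y; };
    double len(Point p);
    // 'p' is coerced to { double, double } and flattened into two
    // scalar IR arguments, p.coerce0 and p.coerce1, which are stored
    // field-by-field into the parameter's alloca here.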
-
// Match to what EmitParmDecl is expecting for this type.
if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
- V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
+ llvm::Value *V =
+ EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
} else {
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
}
break;
}
@@ -2069,11 +2105,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
- CharUnits Align = getContext().getDeclAlign(Arg);
- Alloca->setAlignment(Align.getQuantity());
- LValue LV = MakeAddrLValue(Alloca, Ty, Align);
- ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
+ LValue LV = MakeAddrLValue(Alloca, Ty);
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
auto FnArgIter = FnArgs.begin() + FirstIRArg;
ExpandTypeFromArgs(Ty, LV, FnArgIter);
@@ -2089,10 +2123,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
- ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
} else {
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
- ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(U));
}
break;
}
@@ -2100,12 +2134,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
- EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
- I + 1);
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
} else {
for (unsigned I = 0, E = Args.size(); I != E; ++I)
- EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
- I + 1);
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
}
}
@@ -2158,9 +2190,9 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
bool doRetainAutorelease;
- if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
doRetainAutorelease = true;
- } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
.objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
@@ -2169,7 +2201,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
// for that call. If we can't find it, we can't do this
// optimization. But it should always be the immediately previous
// instruction, unless we needed bitcasts around the call.
- if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
+ if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
llvm::Instruction *prev = call->getPrevNode();
assert(prev);
if (isa<llvm::BitCastInst>(prev)) {
@@ -2178,7 +2210,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
}
assert(isa<llvm::CallInst>(prev));
assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
- CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
+ CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
insnsToKill.push_back(prev);
}
} else {
@@ -2223,7 +2255,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::CallInst *retainCall =
dyn_cast<llvm::CallInst>(result->stripPointerCasts());
if (!retainCall ||
- retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
return nullptr;
// Look for an ordinary load of 'self'.
@@ -2231,7 +2263,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::LoadInst *load =
dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
if (!load || load->isAtomic() || load->isVolatile() ||
- load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
return nullptr;
// Okay! Burn it all down. This relies for correctness on the
@@ -2268,11 +2300,23 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // Check whether a User is a store whose pointer operand is the
+ // ReturnValue. We are looking for stores to the ReturnValue, not for
+ // stores of the ReturnValue to some other location.
+ auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
+ auto *SI = dyn_cast<llvm::StoreInst>(U);
+ if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
+ return nullptr;
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!SI->isAtomic() && !SI->isVolatile());
+ return SI;
+ };
// If there are multiple uses of the return-value slot, just check
// for something immediately preceding the IP. Sometimes this can
// happen with how we generate implicit-returns; it can also happen
// with noreturn cleanups.
- if (!CGF.ReturnValue->hasOneUse()) {
+ if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
llvm::Instruction *I = &IP->back();
@@ -2296,21 +2340,13 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
break;
}
- llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
- if (!store) return nullptr;
- if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
- assert(!store->isAtomic() && !store->isVolatile()); // see below
- return store;
+ return GetStoreIfValid(I);
}
llvm::StoreInst *store =
- dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
+ GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
if (!store) return nullptr;
- // These aren't actually possible for non-coerced returns, and we
- // only care about non-coerced returns on this code path.
- assert(!store->isAtomic() && !store->isVolatile());
-
// Now do a quick-and-dirty dominance check: just walk up the
// single-predecessors chain from the current insertion point.
llvm::BasicBlock *StoreBB = store->getParent();
@@ -2335,7 +2371,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
}
// Functions with no result always return void.
- if (!ReturnValue) {
+ if (!ReturnValue.isValid()) {
Builder.CreateRetVoid();
return;
}
@@ -2353,10 +2389,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
if (RetAI.getInAllocaSRet()) {
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
- llvm::Value *ArgStruct = EI;
+ llvm::Value *ArgStruct = &*EI;
llvm::Value *SRet = Builder.CreateStructGEP(
nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
- RV = Builder.CreateLoad(SRet, "sret");
+ RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
}
break;
@@ -2367,9 +2403,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
switch (getEvaluationKind(RetTy)) {
case TEK_Complex: {
ComplexPairTy RT =
- EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
- EndLoc);
- EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
+ EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
+ EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
/*isInit*/ true);
break;
}
@@ -2378,7 +2413,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
break;
case TEK_Scalar:
EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
- MakeNaturalAlignAddrLValue(AI, RetTy),
+ MakeNaturalAlignAddrLValue(&*AI, RetTy),
/*isInit*/ true);
break;
}
@@ -2406,9 +2441,12 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
SI->eraseFromParent();
// If that was the only use of the return value, nuke it as well now.
- if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
- cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
- ReturnValue = nullptr;
+ auto returnValueInst = ReturnValue.getPointer();
+ if (returnValueInst->use_empty()) {
+ if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
+ alloca->eraseFromParent();
+ ReturnValue = Address::invalid();
+ }
}
// Otherwise, we have to do a simple load.
@@ -2416,18 +2454,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
RV = Builder.CreateLoad(ReturnValue);
}
} else {
- llvm::Value *V = ReturnValue;
- CharUnits Align = getContext().getTypeAlignInChars(RetTy);
// If the value is offset in memory, apply the offset now.
- if (unsigned Offs = RetAI.getDirectOffset()) {
- V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
- V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
- V = Builder.CreateBitCast(V,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
- Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
- RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
// In ARC, end functions that return a retainable type with a call
@@ -2450,8 +2480,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm::Instruction *Ret;
if (RV) {
- if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
- if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
+ if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
+ if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
SanitizerScope SanScope(this);
llvm::Value *Cond = Builder.CreateICmpNE(
RV, llvm::Constant::getNullValue(RV->getType()));
@@ -2477,14 +2507,20 @@ static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}
-static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
+static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
+ QualType Ty) {
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Placeholder =
- llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
- Placeholder = CGF.Builder.CreateLoad(Placeholder);
- return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
+ llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
+ Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
+
+ // FIXME: When we generate this IR in one pass, we shouldn't need
+ // this win32-specific alignment hack.
+ CharUnits Align = CharUnits::fromQuantity(4);
+
+ return AggValueSlot::forAddr(Address(Placeholder, Align),
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -2497,7 +2533,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// StartFunction converted the ABI-lowered parameter(s) into a
// local alloca. We need to turn that into an r-value suitable
// for EmitCall.
- llvm::Value *local = GetAddrOfLocalVar(param);
+ Address local = GetAddrOfLocalVar(param);
QualType type = param->getType();
@@ -2532,20 +2568,21 @@ static bool isProvablyNonNull(llvm::Value *addr) {
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
- llvm::Value *srcAddr = srcLV.getAddress();
- assert(!isProvablyNull(srcAddr) &&
+ Address srcAddr = srcLV.getAddress();
+ assert(!isProvablyNull(srcAddr.getPointer()) &&
"shouldn't have writeback for provably null argument");
llvm::BasicBlock *contBB = nullptr;
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = isProvablyNonNull(srcAddr);
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
- llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
CGF.EmitBlock(writebackBB);
}
@@ -2554,9 +2591,8 @@ static void emitWriteback(CodeGenFunction &CGF,
llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
// Cast it back, in case we're writing an id to a Foo* or something.
- value = CGF.Builder.CreateBitCast(value,
- cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
- "icr.writeback-cast");
+ value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
+ "icr.writeback-cast");
// Perform the writeback.
@@ -2606,10 +2642,9 @@ static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
ArrayRef<CallArgList::CallArgCleanup> Cleanups =
CallArgs.getCleanupsToDeactivate();
// Iterate in reverse to increase the likelihood of popping the cleanup.
- for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
- I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
- CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
- I->IsActiveIP->eraseFromParent();
+ for (const auto &I : llvm::reverse(Cleanups)) {
+ CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
+ I.IsActiveIP->eraseFromParent();
}
}
@@ -2621,7 +2656,9 @@ static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
}
/// Emit an argument that's being passed call-by-writeback. That is,
-/// we are passing the address of
+/// we are passing the address of an __autoreleased temporary; it
+/// might be copy-initialized with the current value of the given
+/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
const ObjCIndirectCopyRestoreExpr *CRE) {
LValue srcLV;
@@ -2633,13 +2670,13 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Otherwise, just emit it as a scalar.
} else {
- llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+ Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
QualType srcAddrType =
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
- srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
+ srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
}
- llvm::Value *srcAddr = srcLV.getAddress();
+ Address srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -2648,15 +2685,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
// If the address is a constant null, just pass the appropriate null.
- if (isProvablyNull(srcAddr)) {
+ if (isProvablyNull(srcAddr.getPointer())) {
args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
CRE->getType());
return;
}
// Create the temporary.
- llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
- "icr.temp");
+ Address temp = CGF.CreateTempAlloca(destType->getElementType(),
+ CGF.getPointerAlign(),
+ "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -2678,15 +2716,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = isProvablyNonNull(srcAddr);
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
if (provablyNonNull) {
- finalArgument = temp;
+ finalArgument = temp.getPointer();
} else {
- llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
finalArgument = CGF.Builder.CreateSelect(isNull,
llvm::ConstantPointerNull::get(destType),
- temp, "icr.argument");
+ temp.getPointer(), "icr.argument");
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
@@ -2753,24 +2792,12 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
// Save the stack.
llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
-
- // Control gets really tied up in landing pads, so we have to spill the
- // stacksave to an alloca to avoid violating SSA form.
- // TODO: This is dead if we never emit the cleanup. We should create the
- // alloca and store lazily on the first cleanup emission.
- StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
- CGF.Builder.CreateStore(StackBase, StackBaseMem);
- CGF.pushStackRestore(EHCleanup, StackBaseMem);
- StackCleanup = CGF.EHStack.getInnermostEHScope();
- assert(StackCleanup.isValid());
}
void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
if (StackBase) {
- CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
+ // Restore the stack after the call.
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
- // We could load StackBase from StackBaseMem, but in the non-exceptional
- // case we can skip it.
CGF.Builder.CreateCall(F, StackBase);
}
}
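With the cleanup-based spill gone, the restore is emitted straight-line after the call; the exceptional path presumably relies on stack unwinding itself to reset the stack pointer, which is why the EH cleanup could be dropped. The resulting pairing, roughly (a sketch inside CodeGenFunction, not verbatim emitted code):

llvm::Function *Save = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::CallInst *StackBase = Builder.CreateCall(Save, {}, "inalloca.save");
// ... alloca the argument struct, evaluate arguments, emit the call ...
llvm::Value *Restore = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
Builder.CreateCall(Restore, StackBase);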
@@ -2800,12 +2827,26 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
"nonnull_arg", StaticData, None);
}
-void CodeGenFunction::EmitCallArgs(CallArgList &Args,
- ArrayRef<QualType> ArgTypes,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- const FunctionDecl *CalleeDecl,
- unsigned ParamsToSkip) {
+void CodeGenFunction::EmitCallArgs(
+ CallArgList &Args, ArrayRef<QualType> ArgTypes,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
+ const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
+ assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
+
+ auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
+ if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
+ return;
+ auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
+ if (PS == nullptr)
+ return;
+
+ const auto &Context = getContext();
+ auto SizeTy = Context.getSizeType();
+ auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
+ Args.add(RValue::get(V), SizeTy);
+ };
+
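The new MaybeEmitImplicitObjectSize lambda lowers Clang's pass_object_size attribute: when the callee's formal parameter carries the attribute, an implicit size argument is added to the call right after the corresponding real argument. At the source level ('fill' is a made-up function, for illustration only):

extern "C" void fill(char *buf __attribute__((pass_object_size(0))), char c);

void test() {
  char arr[16];
  fill(arr, 'x');  // emitted roughly as fill(arr, /*implicit size*/ 16, 'x');
                   // the 16 comes from evaluateOrEmitBuiltinObjectSize.
}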
// We *have* to evaluate arguments from right to left in the MS C++ ABI,
// because arguments are destroyed left to right in the callee.
if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
@@ -2822,10 +2863,11 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
// Evaluate each argument.
size_t CallArgsStart = Args.size();
for (int I = ArgTypes.size() - 1; I >= 0; --I) {
- CallExpr::const_arg_iterator Arg = ArgBeg + I;
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
CalleeDecl, ParamsToSkip + I);
+ MaybeEmitImplicitObjectSize(I, *Arg);
}
// Un-reverse the arguments we just evaluated so they match up with the LLVM
@@ -2835,21 +2877,22 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
}
for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
- CallExpr::const_arg_iterator Arg = ArgBeg + I;
- assert(Arg != ArgEnd);
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
+ assert(Arg != ArgRange.end());
EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
CalleeDecl, ParamsToSkip + I);
+ MaybeEmitImplicitObjectSize(I, *Arg);
}
}
namespace {
-struct DestroyUnpassedArg : EHScopeStack::Cleanup {
- DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
+struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
+ DestroyUnpassedArg(Address Addr, QualType Ty)
: Addr(Addr), Ty(Ty) {}
- llvm::Value *Addr;
+ Address Addr;
QualType Ty;
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -2860,8 +2903,6 @@ struct DestroyUnpassedArg : EHScopeStack::Cleanup {
}
};
-}
-
struct DisableDebugLocationUpdates {
CodeGenFunction &CGF;
bool disabledDebugInfo;
@@ -2875,6 +2916,8 @@ struct DisableDebugLocationUpdates {
}
};
+} // end anonymous namespace
+
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
DisableDebugLocationUpdates Dis(*this, E);
@@ -2923,7 +2966,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// Create a no-op GEP between the placeholder and the cleanup so we can
// RAUW it successfully. It also serves as a marker of the first
// instruction where the cleanup is active.
- pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
+ pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
+ type);
// This unreachable is a temporary marker which will be removed later.
llvm::Instruction *IsActive = Builder.CreateUnreachable();
args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
@@ -2940,9 +2984,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
} else {
// We can't represent a misaligned lvalue in the CallArgList, so copy
// to an aligned temporary now.
- llvm::Value *tmp = CreateMemTemp(type);
- EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
- L.getAlignment());
+ Address tmp = CreateMemTemp(type);
+ EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
args.add(RValue::getAggregate(tmp), type);
}
return;
@@ -3015,19 +3058,41 @@ CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
return call;
}
+// Calls which may throw must have operand bundles indicating which funclet
+// they are nested within.
+static void
+getBundlesForFunclet(llvm::Value *Callee,
+ llvm::Instruction *CurrentFuncletPad,
+ SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
+ // There is no need for a funclet operand bundle if we aren't inside a funclet.
+ if (!CurrentFuncletPad)
+ return;
+
+ // Skip intrinsics which cannot throw.
+ auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
+ if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
+ return;
+
+ BundleList.emplace_back("funclet", CurrentFuncletPad);
+}
+
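getBundlesForFunclet exists because, under Windows exception handling, a potentially-throwing call emitted inside a catchpad or cleanuppad funclet must carry a "funclet" operand bundle naming its parent pad; otherwise later passes cannot tell which funclet the call belongs to. A hedged usage sketch (names illustrative):

SmallVector<llvm::OperandBundleDef, 1> BundleList;
getBundlesForFunclet(CalleeV, CurrentFuncletPad, BundleList);
llvm::CallInst *Call = Builder.CreateCall(CalleeV, Args, BundleList);
// Resulting IR, roughly: call void @f() [ "funclet"(token %cleanup.pad) ]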
/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args) {
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+ getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
+
if (getInvokeDest()) {
llvm::InvokeInst *invoke =
Builder.CreateInvoke(callee,
getUnreachableBlock(),
getInvokeDest(),
- args);
+ args,
+ BundleList);
invoke->setDoesNotReturn();
invoke->setCallingConv(getRuntimeCC());
} else {
- llvm::CallInst *call = Builder.CreateCall(callee, args);
+ llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
call->setDoesNotReturn();
call->setCallingConv(getRuntimeCC());
Builder.CreateUnreachable();
@@ -3052,12 +3117,6 @@ CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
return callSite;
}
-llvm::CallSite
-CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- const Twine &Name) {
- return EmitCallOrInvoke(Callee, None, Name);
-}
-
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
@@ -3102,7 +3161,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- const Decl *TargetDecl,
+ CGCalleeInfo CalleeInfo,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -3117,8 +3176,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
- llvm::AllocaInst *ArgMemory = nullptr;
+ Address ArgMemory = Address::invalid();
+ const llvm::StructLayout *ArgMemoryLayout = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
+ ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
@@ -3127,36 +3188,44 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
AI = CreateTempAlloca(ArgStruct, "argmem");
}
+ auto Align = CallInfo.getArgStructAlignment();
+ AI->setAlignment(Align.getQuantity());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = AI;
+ ArgMemory = Address(AI, Align);
}
+ // Helper function to drill into the inalloca allocation.
+ auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
+ auto FieldOffset =
+ CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
+ return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
+ };
+
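Capturing the llvm::StructLayout up front lets the lambda attach a correct alignment to every field address: CreateStructGEP derives the field's alignment from ArgMemory's alignment adjusted to the field's byte offset. Usage sketch (field index and stored value are made up):

Address Field = createInAllocaStructGEP(2);  // field 2 of the argument struct
Builder.CreateStore(SomeValue, Field);       // store honors Field's alignment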
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
- llvm::Value *SRetPtr = nullptr;
+ Address SRetPtr = Address::invalid();
size_t UnusedReturnSize = 0;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
- SRetPtr = ReturnValue.getValue();
- if (!SRetPtr) {
+ if (!ReturnValue.isNull()) {
+ SRetPtr = ReturnValue.getValue();
+ } else {
SRetPtr = CreateMemTemp(RetTy);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
uint64_t size =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- if (EmitLifetimeStart(size, SRetPtr))
+ if (EmitLifetimeStart(size, SRetPtr.getPointer()))
UnusedReturnSize = size;
}
}
if (IRFunctionArgs.hasSRetArg()) {
- IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
} else {
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- RetAI.getInAllocaFieldIndex());
- Builder.CreateStore(SRetPtr, Addr);
+ Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
+ Builder.CreateStore(SRetPtr.getPointer(), Addr);
}
}
@@ -3169,8 +3238,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
-
// Insert a padding argument to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
@@ -3186,27 +3253,23 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
llvm::Instruction *Placeholder =
- cast<llvm::Instruction>(RV.getAggregateAddr());
+ cast<llvm::Instruction>(RV.getAggregatePointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- ArgInfo.getInAllocaFieldIndex());
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
- deferPlaceholderReplacement(Placeholder, Addr);
+ deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
// Store the RValue into the argument struct.
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- ArgInfo.getInAllocaFieldIndex());
- unsigned AS = Addr->getType()->getPointerAddressSpace();
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ unsigned AS = Addr.getType()->getPointerAddressSpace();
llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
// definition of a type later in a translation unit may change its type
// from {}* to (%struct.foo*)*.
- if (Addr->getType() != MemType)
+ if (Addr.getType() != MemType)
Addr = Builder.CreateBitCast(Addr, MemType);
- LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
break;
@@ -3216,12 +3279,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
- llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
- if (ArgInfo.getIndirectAlign() > AI->getAlignment())
- AI->setAlignment(ArgInfo.getIndirectAlign());
- IRCallArgs[FirstIRArg] = AI;
+ Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
- LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
@@ -3232,27 +3293,27 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// we cannot force it to be sufficiently aligned.
// 3. If the argument is byval, but RV is located in an address space
// different than that of the argument (0).
- llvm::Value *Addr = RV.getAggregateAddr();
- unsigned Align = ArgInfo.getIndirectAlign();
+ Address Addr = RV.getAggregateAddress();
+ CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
- const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
+ const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
const unsigned ArgAddrSpace =
(FirstIRArg < IRFuncTy->getNumParams()
? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
: 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
- (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
- llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
+ (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
+ Align.getQuantity(), *TD)
+ < Align.getQuantity()) ||
(ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
// Create an aligned temporary, and copy to it.
- llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
- if (Align > AI->getAlignment())
- AI->setAlignment(Align);
- IRCallArgs[FirstIRArg] = AI;
+ Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = AI.getPointer();
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
} else {
// Skip the extra memcpy call.
- IRCallArgs[FirstIRArg] = Addr;
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
}
}
break;
@@ -3272,7 +3333,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (RV.isScalar())
V = RV.getScalarVal();
else
- V = Builder.CreateLoad(RV.getAggregateAddr());
+ V = Builder.CreateLoad(RV.getAggregateAddress());
// We might have to widen integers, but we should never truncate.
if (ArgInfo.getCoerceToType() != V->getType() &&
@@ -3289,35 +3350,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
// FIXME: Avoid the conversion through memory if possible.
- llvm::Value *SrcPtr;
- CharUnits SrcAlign;
+ Address Src = Address::invalid();
if (RV.isScalar() || RV.isComplex()) {
- SrcPtr = CreateMemTemp(I->Ty, "coerce");
- SrcAlign = TypeAlign;
- LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
+ Src = CreateMemTemp(I->Ty, "coerce");
+ LValue SrcLV = MakeAddrLValue(Src, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
} else {
- SrcPtr = RV.getAggregateAddr();
- // This alignment is guaranteed by EmitCallArg.
- SrcAlign = TypeAlign;
+ Src = RV.getAggregateAddress();
}
// If the value is offset in memory, apply the offset now.
- if (unsigned Offs = ArgInfo.getDirectOffset()) {
- SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
- SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
- SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ Src = emitAddressAtOffset(*this, Src, ArgInfo);
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = Src.getType()->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
@@ -3326,29 +3376,28 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
if (SrcSize < DstSize) {
- llvm::AllocaInst *TempAlloca
- = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
- SrcPtr = TempAlloca;
+ Address TempAlloca
+ = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
} else {
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(STy));
+ Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
}
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
- llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
- // We don't know what we're loading from.
- LI->setAlignment(1);
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
assert(NumIRArgs == 1);
IRCallArgs[FirstIRArg] =
- CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- SrcAlign, *this);
+ CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
}
break;
@@ -3362,8 +3411,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
- if (ArgMemory) {
- llvm::Value *Arg = ArgMemory;
+ if (ArgMemory.isValid()) {
+ llvm::Value *Arg = ArgMemory.getPointer();
if (CallInfo.isVariadic()) {
// When passing non-POD arguments by value to variadic functions, we will
// end up with a variadic prototype and an inalloca call site. In such
@@ -3441,23 +3490,37 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
- CallingConv, true);
+ CGM.ConstructAttributeList(CallInfo, CalleeInfo, AttributeList, CallingConv,
+ true);
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
AttributeList);
- llvm::BasicBlock *InvokeDest = nullptr;
- if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoUnwind) ||
- currentFunctionUsesSEHTry())
- InvokeDest = getInvokeDest();
+ bool CannotThrow;
+ if (currentFunctionUsesSEHTry()) {
+ // SEH cares about asynchronous exceptions, so everything can "throw."
+ CannotThrow = false;
+ } else if (isCleanupPadScope() &&
+ EHPersonality::get(*this).isMSVCXXPersonality()) {
+ // The MSVC++ personality will implicitly terminate the program if an
+ // exception is thrown. An unwind edge cannot be reached.
+ CannotThrow = true;
+ } else {
+ // Otherwise, nounwind callsites will never throw.
+ CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+ }
+ llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
+
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+ getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, IRCallArgs);
+ CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
+ BundleList);
EmitBlock(Cont);
}
if (callOrInvoke)
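The old rule ("invoke unless nounwind") gains two refinements here: SEH forces everything to be invoked, and calls inside a cleanup funclet under the MSVC++ personality never get an unwind edge, since a C++ exception escaping such a funclet terminates the program anyway. A source-level illustration of the cleanup case (a sketch, not emitted IR):

struct Guard { ~Guard(); };   // non-trivial destructor, may contain calls
void mayThrow();
void f() {
  Guard g;
  mayThrow();   // invoke; its unwind edge runs ~Guard() in a cleanuppad.
                // Calls emitted inside that cleanuppad are plain calls:
                // if they threw during unwinding, the MSVC++ runtime
                // would terminate, so no unwind edge is reachable.
}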
@@ -3489,7 +3552,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CS.doesNotReturn()) {
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr);
+ SRetPtr.getPointer());
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
@@ -3516,6 +3579,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
+ Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
+ }
+
RValue Ret = [&] {
switch (RetAI.getKind()) {
case ABIArgInfo::InAlloca:
@@ -3523,7 +3592,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr);
+ SRetPtr.getPointer());
return ret;
}
@@ -3543,15 +3612,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return RValue::getComplex(std::make_pair(Real, Imag));
}
case TEK_Aggregate: {
- llvm::Value *DestPtr = ReturnValue.getValue();
+ Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
- CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
- if (!DestPtr) {
+ if (!DestPtr.isValid()) {
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar: {
@@ -3566,28 +3634,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("bad evaluation kind");
}
- llvm::Value *DestPtr = ReturnValue.getValue();
+ Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
- CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
- if (!DestPtr) {
+ if (!DestPtr.isValid()) {
DestPtr = CreateMemTemp(RetTy, "coerce");
DestIsVolatile = false;
}
// If the value is offset in memory, apply the offset now.
- llvm::Value *StorePtr = DestPtr;
- CharUnits StoreAlign = DestAlign;
- if (unsigned Offs = RetAI.getDirectOffset()) {
- StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
- StorePtr =
- Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
- StoreAlign =
- StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
@@ -3599,6 +3656,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("Unhandled ABIArgInfo::Kind");
} ();
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
@@ -3617,6 +3676,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
/* VarArg handling */
-llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
- return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
+ VAListAddr = VE->isMicrosoftABI()
+ ? EmitMSVAListRef(VE->getSubExpr())
+ : EmitVAListRef(VE->getSubExpr());
+ QualType Ty = VE->getType();
+ if (VE->isMicrosoftABI())
+ return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
+ return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
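EmitVAArg now consumes the whole VAArgExpr, fills in the va_list address as an out-parameter (so the caller can still refer to the list itself), and returns an Address carrying both pointer and alignment for the argument slot, dispatching to EmitMSVAArg for the Microsoft va_list ABI. A hypothetical caller, sketched:

Address VAListAddr = Address::invalid();
Address ArgAddr = EmitVAArg(VE, VAListAddr);   // also fills in VAListAddr
llvm::Value *Arg = Builder.CreateLoad(ArgAddr, "va.arg");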
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 7a4708e5ccfa..2ebd09b9eb57 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -56,7 +56,7 @@ namespace CodeGen {
class CallArgList :
public SmallVector<CallArg, 16> {
public:
- CallArgList() : StackBase(nullptr), StackBaseMem(nullptr) {}
+ CallArgList() : StackBase(nullptr) {}
struct Writeback {
/// The original argument. Note that the argument l-value
@@ -64,7 +64,7 @@ namespace CodeGen {
LValue Source;
/// The temporary alloca.
- llvm::Value *Temporary;
+ Address Temporary;
/// A value to "use" after the writeback, or null.
llvm::Value *ToUse;
@@ -88,12 +88,9 @@ namespace CodeGen {
other.Writebacks.begin(), other.Writebacks.end());
}
- void addWriteback(LValue srcLV, llvm::Value *temporary,
+ void addWriteback(LValue srcLV, Address temporary,
llvm::Value *toUse) {
- Writeback writeback;
- writeback.Source = srcLV;
- writeback.Temporary = temporary;
- writeback.ToUse = toUse;
+ Writeback writeback = { srcLV, temporary, toUse };
Writebacks.push_back(writeback);
}
@@ -137,9 +134,6 @@ namespace CodeGen {
/// The stacksave call. It dominates all of the argument evaluation.
llvm::CallInst *StackBase;
- /// The alloca holding the stackbase. We need it to maintain SSA form.
- llvm::AllocaInst *StackBaseMem;
-
/// The iterator pointing to the stack restore cleanup. We manually run and
/// deactivate this cleanup after the call in the unexceptional case because
/// it doesn't run in the normal order.
@@ -156,6 +150,7 @@ namespace CodeGen {
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
+ CharUnits Alignment;
// Return value slot flags
enum Flags {
@@ -165,14 +160,15 @@ namespace CodeGen {
public:
ReturnValueSlot() {}
- ReturnValueSlot(llvm::Value *Value, bool IsVolatile, bool IsUnused = false)
- : Value(Value,
- (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)) {}
+ ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
+ : Value(Addr.isValid() ? Addr.getPointer() : nullptr,
+ (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
+ Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
- bool isNull() const { return !getValue(); }
+ bool isNull() const { return !getValue().isValid(); }
bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
- llvm::Value *getValue() const { return Value.getPointer(); }
+ Address getValue() const { return Address(Value.getPointer(), Alignment); }
bool isUnused() const { return Value.getInt() & IS_UNUSED; }
};
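ReturnValueSlot keeps its packed PointerIntPair representation and simply carries the alignment alongside, so an Address round-trips through it intact. A quick sketch (names illustrative):

Address Slot = CreateMemTemp(RetTy, "ret.slot");
ReturnValueSlot RVS(Slot, /*IsVolatile=*/false);
assert(!RVS.isNull());
Address Back = RVS.getValue();
assert(Back.getPointer() == Slot.getPointer() &&
       Back.getAlignment() == Slot.getAlignment());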
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index c49f182c21d8..2e566de6d8ac 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -1,4 +1,4 @@
-//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,10 +25,124 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
using namespace clang;
using namespace CodeGen;
+/// Return the best known alignment for an unknown pointer to a
+/// particular class.
+CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
+ if (!RD->isCompleteDefinition())
+ return CharUnits::One(); // Hopefully won't be used anywhere.
+
+ auto &layout = getContext().getASTRecordLayout(RD);
+
+ // If the class is final, then we know that the pointer points to an
+ // object of that type and can use the full alignment.
+ if (RD->hasAttr<FinalAttr>()) {
+ return layout.getAlignment();
+
+ // Otherwise, we have to assume it could be a subclass.
+ } else {
+ return layout.getNonVirtualAlignment();
+ }
+}
+
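The final/non-final split matters whenever virtual bases raise the full object alignment above the non-virtual one. An illustrative example, assuming a typical 64-bit Itanium-ABI target:

struct alignas(16) VB {};
struct B : virtual VB { virtual ~B(); };  // full align 16, non-virtual 8
struct D final : B {};
// A 'B *' may point at a base subobject inside some derived object, where
// only B's non-virtual alignment (8) is guaranteed. A 'D *' must point at
// a complete D, so the full 16-byte alignment can be assumed.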
+/// Return the best known alignment for a pointer to a virtual base,
+/// given the alignment of a pointer to the derived class.
+CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *vbaseClass) {
+ // The basic idea here is that an underaligned derived pointer might
+ // indicate an underaligned base pointer.
+
+ assert(vbaseClass->isCompleteDefinition());
+ auto &baseLayout = getContext().getASTRecordLayout(vbaseClass);
+ CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass,
+ expectedVBaseAlign);
+}
+
+CharUnits
+CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
+ const CXXRecordDecl *baseDecl,
+ CharUnits expectedTargetAlign) {
+ // If the base is an incomplete type (which is, alas, possible with
+ // member pointers), be pessimistic.
+ if (!baseDecl->isCompleteDefinition())
+ return std::min(actualBaseAlign, expectedTargetAlign);
+
+ auto &baseLayout = getContext().getASTRecordLayout(baseDecl);
+ CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ // If the class is properly aligned, assume the target offset is, too.
+ //
+ // This actually isn't necessarily the right thing to do --- if the
+ // class is a complete object, but it's only properly aligned for a
+ // base subobject, then the alignments of things relative to it are
+ // probably off as well. (Note that this requires the alignment of
+ // the target to be greater than the NV alignment of the derived
+ // class.)
+ //
+ // However, our approach to this kind of under-alignment can only
+ // ever be best effort; after all, we're never going to propagate
+ // alignments through variables or parameters. Note, in particular,
+ // that constructing a polymorphic type in an address that's less
+ // than pointer-aligned will generally trap in the constructor,
+ // unless we someday add some sort of attribute to change the
+ // assumed alignment of 'this'. So our goal here is pretty much
+ // just to allow the user to explicitly say that a pointer is
+ // under-aligned and then safely access its fields and v-tables.
+ if (actualBaseAlign >= expectedBaseAlign) {
+ return expectedTargetAlign;
+ }
+
+ // Otherwise, we might be offset by an arbitrary multiple of the
+ // actual alignment. The correct adjustment is to take the min of
+ // the two alignments.
+ return std::min(actualBaseAlign, expectedTargetAlign);
+}
+
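A worked instance of the rule above, with illustrative numbers (inside CodeGenModule; RD is some base class whose expected non-virtual alignment is 8):

CharUnits a1 = getDynamicOffsetAlignment(CharUnits::fromQuantity(16), RD,
                                         CharUnits::fromQuantity(16));
// a1 == 16: the base is at least properly aligned, so trust the target.
CharUnits a2 = getDynamicOffsetAlignment(CharUnits::fromQuantity(4), RD,
                                         CharUnits::fromQuantity(16));
// a2 == 4: an underaligned base may sit at any multiple of 4, so a
// dynamically-offset target is only known to be 4-aligned.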
+Address CodeGenFunction::LoadCXXThisAddress() {
+ assert(CurFuncDecl && "loading 'this' without a func declaration?");
+ assert(isa<CXXMethodDecl>(CurFuncDecl));
+
+ // Lazily compute CXXThisAlignment.
+ if (CXXThisAlignment.isZero()) {
+ // Just use the best known alignment for the parent.
+ // TODO: if we're currently emitting a complete-object ctor/dtor,
+ // we can always use the complete-object alignment.
+ auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
+ CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ }
+
+ return Address(LoadCXXThis(), CXXThisAlignment);
+}
+
+/// Emit the address of a field using a member data pointer.
+///
+/// \param E Only used for emergency diagnostics
+Address
+CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *alignSource) {
+ // Ask the ABI to compute the actual address.
+ llvm::Value *ptr =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, E, base,
+ memberPtr, memberPtrType);
+
+ QualType memberType = memberPtrType->getPointeeType();
+ CharUnits memberAlign = getNaturalTypeAlignment(memberType, alignSource);
+ memberAlign =
+ CGM.getDynamicOffsetAlignment(base.getAlignment(),
+ memberPtrType->getClass()->getAsCXXRecordDecl(),
+ memberAlign);
+ return Address(ptr, memberAlign);
+}
+
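For reference, the source construct this path handles:

struct S { int a; double b; };
double S::*mp = &S::b;
double get(S &s) { return s.*mp; }
// The address of s.*mp is computed by the C++ ABI; its alignment is
// double's natural alignment, clamped via getDynamicOffsetAlignment by
// what the alignment of 's' actually guarantees at a runtime offset.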
CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
CastExpr::path_const_iterator End) {
@@ -78,15 +192,13 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
-llvm::Value *
-CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+Address
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
bool BaseIsVirtual) {
// 'this' must be a pointer (in some address space) to Derived.
- assert(This->getType()->isPointerTy() &&
- cast<llvm::PointerType>(This->getType())->getElementType()
- == ConvertType(Derived));
+ assert(This.getElementType() == ConvertType(Derived));
// Compute the offset of the virtual base.
CharUnits Offset;
@@ -98,20 +210,22 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// Shift and cast down to the base type.
// TODO: for complete types, this should be possible with a GEP.
- llvm::Value *V = This;
- if (Offset.isPositive()) {
- V = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
+ Address V = This;
+ if (!Offset.isZero()) {
+ V = Builder.CreateElementBitCast(V, Int8Ty);
+ V = Builder.CreateConstInBoundsByteGEP(V, Offset);
}
- V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+ V = Builder.CreateElementBitCast(V, ConvertType(Base));
return V;
}
-static llvm::Value *
-ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
+static Address
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
CharUnits nonVirtualOffset,
- llvm::Value *virtualOffset) {
+ llvm::Value *virtualOffset,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *nearestVBase) {
// Assert that we have something to do.
assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
@@ -128,13 +242,27 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
}
// Apply the base offset.
+ llvm::Value *ptr = addr.getPointer();
ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
- return ptr;
+
+ // If we have a virtual component, the alignment of the result will
+ // be relative only to the known alignment of that vbase.
+ CharUnits alignment;
+ if (virtualOffset) {
+ assert(nearestVBase && "virtual offset without vbase?");
+ alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(),
+ derivedClass, nearestVBase);
+ } else {
+ alignment = addr.getAlignment();
+ }
+ alignment = alignment.alignmentAtOffset(nonVirtualOffset);
+
+ return Address(ptr, alignment);
}
-llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
- llvm::Value *Value, const CXXRecordDecl *Derived,
+Address CodeGenFunction::GetAddressOfBaseClass(
+ Address Value, const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
SourceLocation Loc) {
@@ -174,14 +302,14 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
ConvertType((PathEnd[-1])->getType())->getPointerTo();
QualType DerivedTy = getContext().getRecordType(Derived);
- CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
+ CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
// If the static offset is zero and we don't have a virtual step,
// just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
if (sanitizePerformTypeCheck()) {
- EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
- !NullCheckValue);
+ EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
+ DerivedTy, DerivedAlign, !NullCheckValue);
}
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -196,14 +324,14 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
endBB = createBasicBlock("cast.end");
- llvm::Value *isNull = Builder.CreateIsNull(Value);
+ llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
Builder.CreateCondBr(isNull, endBB, notNullBB);
EmitBlock(notNullBB);
}
if (sanitizePerformTypeCheck()) {
- EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
- DerivedTy, DerivedAlign, true);
+ EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
+ Value.getPointer(), DerivedTy, DerivedAlign, true);
}
// Compute the virtual offset.
@@ -214,9 +342,8 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
}
// Apply both offsets.
- Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
- NonVirtualOffset,
- VirtualOffset);
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset, Derived, VBase);
// Cast to the destination type.
Value = Builder.CreateBitCast(Value, BasePtrTy);
@@ -228,16 +355,16 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
EmitBlock(endBB);
llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
- PHI->addIncoming(Value, notNullBB);
+ PHI->addIncoming(Value.getPointer(), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = PHI;
+ Value = Address(PHI, Value.getAlignment());
}
return Value;
}
-llvm::Value *
-CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+Address
+CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
@@ -253,7 +380,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateBitCast(Value, DerivedPtrTy);
+ return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -265,19 +392,20 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
// Apply the offset.
- Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
"sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+ // Produce a PHI if we had a null-check.
if (NullCheckValue) {
Builder.CreateBr(CastEnd);
EmitBlock(CastNull);
@@ -286,12 +414,11 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
- CastNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
Value = PHI;
}
- return Value;
+ return Address(Value, CGM.getClassPointerAlignment(Derived));
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -345,7 +472,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
namespace {
/// Call the destructor for a direct base class.
- struct CallBaseDtor : EHScopeStack::Cleanup {
+ struct CallBaseDtor final : EHScopeStack::Cleanup {
const CXXRecordDecl *BaseClass;
bool BaseIsVirtual;
CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
@@ -356,8 +483,8 @@ namespace {
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
const CXXDestructorDecl *D = BaseClass->getDestructor();
- llvm::Value *Addr =
- CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ Address Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
DerivedClass, BaseClass,
BaseIsVirtual);
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
@@ -381,7 +508,7 @@ namespace {
// external code might potentially access the vtable.
void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; }
};
-}
+} // end anonymous namespace
static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
DynamicThisUseChecker Checker(C);
@@ -396,7 +523,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
assert(BaseInit->isBaseInitializer() &&
"Must have base initializer!");
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ Address ThisPtr = CGF.LoadCXXThisAddress();
const Type *BaseType = BaseInit->getBaseClass();
CXXRecordDecl *BaseClassDecl =
@@ -416,13 +543,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
- llvm::Value *V =
+ Address V =
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
BaseClassDecl,
isBaseVirtual);
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(V, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -438,17 +564,17 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
Expr *Init,
- llvm::Value *ArrayIndexVar,
+ Address ArrayIndexVar,
QualType T,
ArrayRef<VarDecl *> ArrayIndexes,
unsigned Index) {
if (Index == ArrayIndexes.size()) {
LValue LV = LHS;
- if (ArrayIndexVar) {
+ if (ArrayIndexVar.isValid()) {
// If we have an array index variable, load it and use it as an offset.
// Then, increment the value.
- llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *Dest = LHS.getPointer();
llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
@@ -456,9 +582,9 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Next, ArrayIndexVar);
// Update the LValue.
- LV.setAddress(Dest);
- CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
- LV.setAlignment(std::min(Align, LV.getAlignment()));
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T);
+ CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize);
+ LV.setAddress(Address(Dest, Align));
}
switch (CGF.getEvaluationKind(T)) {
@@ -485,14 +611,11 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
- llvm::Value *IndexVar
- = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
- assert(IndexVar && "Array index variable not loaded");
+ Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
// Initialize this index variable to zero.
llvm::Value* Zero
- = llvm::Constant::getNullValue(
- CGF.ConvertType(CGF.getContext().getSizeType()));
+ = llvm::Constant::getNullValue(IndexVar.getElementType());
CGF.Builder.CreateStore(Zero, IndexVar);
// Start the loop with a block that tests the condition.
@@ -626,9 +749,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}
-void CodeGenFunction::EmitInitializerForField(
- FieldDecl *Field, LValue LHS, Expr *Init,
- ArrayRef<VarDecl *> ArrayIndexes) {
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
+ Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -643,26 +765,23 @@ void CodeGenFunction::EmitInitializerForField(
EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
break;
case TEK_Aggregate: {
- llvm::Value *ArrayIndexVar = nullptr;
+ Address ArrayIndexVar = Address::invalid();
if (ArrayIndexes.size()) {
- llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
QualType BaseElementTy = getContext().getBaseElementType(FieldType);
llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
- BasePtr);
+ Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr);
LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
- ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index");
+ llvm::Value *Zero =
+ llvm::Constant::getNullValue(ArrayIndexVar.getElementType());
Builder.CreateStore(Zero, ArrayIndexVar);
-
// Emit the block variables for the array indices, if any.
for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
EmitAutoVarDecl(*ArrayIndexes[I]);
@@ -811,7 +930,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
return;
}
- const FunctionDecl *Definition = 0;
+ const FunctionDecl *Definition = nullptr;
Stmt *Body = Ctor->getBody(Definition);
assert(Definition == Ctor && "emitting wrong constructor body");
@@ -868,7 +987,7 @@ namespace {
SanitizerSet OldSanOpts;
};
}
-
+
namespace {
class FieldMemcpyizer {
public:
@@ -930,19 +1049,16 @@ namespace {
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ Address ThisPtr = CGF.LoadCXXThisAddress();
+ LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
- CharUnits Offset = CGF.getContext().toCharUnitsFromBits(FirstByteOffset);
- CharUnits Alignment = DestLV.getAlignment().alignmentAtOffset(Offset);
-
- emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
- Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
- MemcpySize, Alignment);
+ emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
+ MemcpySize);
reset();
}
@@ -956,20 +1072,18 @@ namespace {
private:
- void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- CharUnits Size, CharUnits Alignment) {
- llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
+ llvm::PointerType *DPT = DestPtr.getType();
llvm::Type *DBP =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
- llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::PointerType *SPT = SrcPtr.getType();
llvm::Type *SBP =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
- CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
- Alignment.getQuantity());
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
void addInitialField(FieldDecl *F) {
@@ -1089,9 +1203,9 @@ namespace {
}
void pushEHDestructors() {
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ Address ThisPtr = CGF.LoadCXXThisAddress();
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
CXXCtorInitializer *MemberInit = AggregatedInits[i];
@@ -1228,7 +1342,13 @@ namespace {
emitAggregatedStmts();
}
};
+} // end anonymous namespace
+static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
+ const Type *BaseType = BaseInit->getBaseClass();
+ const auto *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+ return BaseClassDecl->isDynamicClass();
}
/// EmitCtorPrologue - This routine generates necessary code to initialize
@@ -1254,8 +1374,13 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
assert(BaseCtorContinueBB);
}
+ llvm::Value *const OldThis = CXXThisValue;
// Virtual base initializers first.
for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isInitializerOfDynamicClass(*B))
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
@@ -1268,13 +1393,20 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
// Then, non-virtual base initializers.
for (; B != E && (*B)->isBaseInitializer(); B++) {
assert(!(*B)->isBaseVirtual());
+
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ isInitializerOfDynamicClass(*B))
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
+ CXXThisValue = OldThis;
+
InitializeVTablePointers(ClassDecl);
// And finally, initialize class members.
- FieldConstructionScope FCS(*this, CXXThisValue);
+ FieldConstructionScope FCS(*this, LoadCXXThisAddress());
ConstructorMemcpyizer CM(*this, CD, Args);
for (; B != E; B++) {
CXXCtorInitializer *Member = (*B);
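The barriers inserted above address -fstrict-vtable-pointers: vptr stores carry invariant.group metadata, so without a barrier the optimizer could assume the vptr seen while a dynamic base is under construction equals the one installed afterwards. An illustration of the hazard (a sketch of the source situation, not of emitted IR):

struct A { virtual int f(); };
struct B : A { int f() override; B(); };
// Inside B::B(), a virtual call made while A's subobject is constructed
// dispatches through A's vtable; after the prologue installs B's vptr it
// dispatches through B's. Laundering 'this' through
// llvm.invariant.group.barrier between those states keeps the two vptr
// loads in distinct invariant groups, so they are never folded together.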
@@ -1334,7 +1466,7 @@ HasTrivialDestructorBody(ASTContext &Context,
static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
- const FieldDecl *Field)
+ const FieldDecl *Field)
{
QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
@@ -1353,39 +1485,23 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
-static bool CanSkipVTablePointerInitialization(ASTContext &Context,
+static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor) {
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ if (!ClassDecl->isDynamicClass())
+ return true;
+
if (!Dtor->hasTrivialBody())
return false;
// Check the fields.
- const CXXRecordDecl *ClassDecl = Dtor->getParent();
for (const auto *Field : ClassDecl->fields())
- if (!FieldHasTrivialDestructorBody(Context, Field))
+ if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field))
return false;
return true;
}
-// Generates function call for handling object poisoning, passing in
-// references to 'this' and its size as arguments.
-static void EmitDtorSanitizerCallback(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor) {
- const ASTRecordLayout &Layout =
- CGF.getContext().getASTRecordLayout(Dtor->getParent());
-
- llvm::Value *Args[] = {
- CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.VoidPtrTy),
- llvm::ConstantInt::get(CGF.SizeTy, Layout.getSize().getQuantity())};
- llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
-
- llvm::FunctionType *FnType =
- llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
- llvm::Value *Fn =
- CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
- CGF.EmitNounwindRuntimeCall(Fn, Args);
-}
-
/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
@@ -1402,7 +1518,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
if (DtorType == Dtor_Deleting) {
EnterDtorCleanups(Dtor, Dtor_Deleting);
EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThis());
+ /*Delegating=*/false, LoadCXXThisAddress());
PopCleanupBlock();
return;
}
@@ -1437,7 +1553,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
if (!isTryBody) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThis());
+ /*Delegating=*/false, LoadCXXThisAddress());
break;
}
// Fallthrough: act like we're in the base variant.
@@ -1449,8 +1565,14 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
EnterDtorCleanups(Dtor, Dtor_Base);
// Initialize the vtable pointers before entering the body.
- if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
- InitializeVTablePointers(Dtor->getParent());
+ if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
+ // Insert the llvm.invariant.group.barrier intrinsic before initializing
+ // the vptrs to cancel any previous assumptions we might have made.
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0)
+ CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ InitializeVTablePointers(Dtor->getParent());
+ }
if (isTryBody)
EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
@@ -1464,6 +1586,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// the caller's body.
if (getLangOpts().AppleKext)
CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+
break;
}
@@ -1473,10 +1596,6 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Exit the try if applicable.
if (isTryBody)
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
-
- // Insert memory-poisoning instrumentation.
- if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor)
- EmitDtorSanitizerCallback(*this, Dtor);
}
void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
@@ -1496,7 +1615,7 @@ void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
namespace {
/// Call the operator delete associated with the current destructor.
- struct CallDtorDelete : EHScopeStack::Cleanup {
+ struct CallDtorDelete final : EHScopeStack::Cleanup {
CallDtorDelete() {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -1507,11 +1626,11 @@ namespace {
}
};
- struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
+ struct CallDtorDeleteConditional final : EHScopeStack::Cleanup {
llvm::Value *ShouldDeleteCondition;
public:
CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
- : ShouldDeleteCondition(ShouldDeleteCondition) {
+ : ShouldDeleteCondition(ShouldDeleteCondition) {
assert(ShouldDeleteCondition != nullptr);
}
@@ -1533,7 +1652,7 @@ namespace {
}
};
- class DestroyField : public EHScopeStack::Cleanup {
+ class DestroyField final : public EHScopeStack::Cleanup {
const FieldDecl *field;
CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
@@ -1541,12 +1660,12 @@ namespace {
public:
DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : field(field), destroyer(destroyer),
- useEHCleanupForArray(useEHCleanupForArray) {}
+ : field(field), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Find the address of the field.
- llvm::Value *thisValue = CGF.LoadCXXThis();
+ Address thisValue = CGF.LoadCXXThisAddress();
QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
LValue LV = CGF.EmitLValueForField(ThisLV, field);
@@ -1556,7 +1675,133 @@ namespace {
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
-}
+
+ static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
+ CharUnits::QuantityType PoisonSize) {
+ // Pass in a void pointer and the size of the region as arguments to the
+ // runtime function.
+ llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
+ llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
+
+ llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
+ llvm::Value *Fn =
+ CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ }
+
+ class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+
+ public:
+ SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+
+ // Generate a function call for handling object poisoning.
+ // Disables tail-call elimination so the current stack frame does not
+ // disappear from the stack trace.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(Dtor->getParent());
+
+ // Nothing to poison.
+ if (Layout.getFieldCount() == 0)
+ return;
+
+ // Prevent the current stack frame from disappearing from the stack trace.
+ CGF.CurFn->addFnAttr("disable-tail-calls", "true");
+
+ // Construct pointer to region to begin poisoning, and calculate poison
+ // size, so that only members declared in this class are poisoned.
+ ASTContext &Context = CGF.getContext();
+ unsigned fieldIndex = 0;
+ int startIndex = -1;
+ for (const FieldDecl *Field : Dtor->getParent()->fields()) {
+ // Poison field if it is trivial
+ if (FieldHasTrivialDestructorBody(Context, Field)) {
+ // Start sanitizing at this field
+ if (startIndex < 0)
+ startIndex = fieldIndex;
+
+ // Currently on the last field, and it must be poisoned with the
+ // current block.
+ if (fieldIndex == Layout.getFieldCount() - 1) {
+ PoisonMembers(CGF, startIndex, Layout.getFieldCount());
+ }
+ } else if (startIndex >= 0) {
+ // No longer within a block of memory to poison, so poison the block
+ PoisonMembers(CGF, startIndex, fieldIndex);
+ // Reset the start index
+ startIndex = -1;
+ }
+ fieldIndex += 1;
+ }
+ }
+
+ private:
+ /// \param layoutStartOffset index of the ASTRecordLayout field to
+ /// start poisoning (inclusive)
+ /// \param layoutEndOffset index of the ASTRecordLayout field to
+ /// end poisoning (exclusive)
+ void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
+ unsigned layoutEndOffset) {
+ ASTContext &Context = CGF.getContext();
+ const ASTRecordLayout &Layout =
+ Context.getASTRecordLayout(Dtor->getParent());
+
+ llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(
+ CGF.SizeTy,
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity());
+
+ llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
+ CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
+ OffsetSizePtr);
+
+ CharUnits::QuantityType PoisonSize;
+ if (layoutEndOffset >= Layout.getFieldCount()) {
+ PoisonSize = Layout.getNonVirtualSize().getQuantity() -
+ Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity();
+ } else {
+ PoisonSize = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(layoutEndOffset) -
+ Layout.getFieldOffset(layoutStartOffset))
+ .getQuantity();
+ }
+
+ if (PoisonSize == 0)
+ return;
+
+ EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize);
+ }
+ };
+
+ class SanitizeDtorVTable final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+
+ public:
+ SanitizeDtorVTable(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+
+ // Generate a function call for handling vtable pointer poisoning.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ assert(Dtor->getParent()->isDynamicClass());
+ (void)Dtor;
+ ASTContext &Context = CGF.getContext();
+ // Poison vtable and vtable ptr if they exist for this class.
+ llvm::Value *VTablePtr = CGF.LoadCXXThis();
+
+ CharUnits::QuantityType PoisonSize =
+ Context.toCharUnitsFromBits(CGF.PointerWidthInBits).getQuantity();
+ // Pass in a void pointer and the size of the region as arguments to the
+ // runtime function.
+ EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize);
+ }
+ };
+} // end anonymous namespace
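The field walk in SanitizeDtorMembers::Emit above coalesces consecutive trivially destructible fields into maximal runs, so each run costs one poisoning callback. A minimal sketch of that partitioning, with a bool vector standing in for FieldHasTrivialDestructorBody and printed ranges standing in for the PoisonMembers calls:

#include <cstdio>
#include <vector>

static void poisonRuns(const std::vector<bool> &trivial) {
  const unsigned fieldCount = static_cast<unsigned>(trivial.size());
  int startIndex = -1;
  for (unsigned i = 0; i != fieldCount; ++i) {
    if (trivial[i]) {
      if (startIndex < 0)
        startIndex = static_cast<int>(i); // open a new run
      if (i == fieldCount - 1)            // the last field closes the run too
        std::printf("poison [%d, %u)\n", startIndex, fieldCount);
    } else if (startIndex >= 0) {
      std::printf("poison [%d, %u)\n", startIndex, i); // close the open run
      startIndex = -1;
    }
  }
}

int main() {
  // Fields 0-1 trivial, field 2 owns a destructor, fields 3-4 trivial:
  poisonRuns({true, true, false, true, true}); // poison [0, 2) and [3, 5)
}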
/// \brief Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
@@ -1590,6 +1835,12 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// The complete-destructor phase just destructs all the virtual bases.
if (DtorType == Dtor_Complete) {
+ // Poison the vtable pointer so that any access after the base and member
+ // destructors have run is invalid.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && ClassDecl->getNumVBases() &&
+ ClassDecl->isPolymorphic())
+ EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);
// We push them in the forward order so that they'll be popped in
// the reverse order.
@@ -1610,6 +1861,12 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
}
assert(DtorType == Dtor_Base);
+ // Poison the vtable pointer if the class has no virtual bases but
+ // inherits virtual functions.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !ClassDecl->getNumVBases() &&
+ ClassDecl->isPolymorphic())
+ EHStack.pushCleanup<SanitizeDtorVTable>(NormalAndEHCleanup, DD);
// Destroy non-virtual bases.
for (const auto &Base : ClassDecl->bases()) {
@@ -1628,6 +1885,12 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
/*BaseIsVirtual*/ false);
}
+ // Poison the fields so that any access after their destructors have run,
+ // and before the base class destructor runs, is invalid.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory))
+ EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD);
+
// Destroy direct fields.
for (const auto *Field : ClassDecl->fields()) {
QualType type = Field->getType();
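These cleanups are what back the use-after-destruction reports. A tiny reproducer, assuming a clang with MemorySanitizer support, built with -fsanitize=memory -fsanitize-memory-use-after-dtor (without those flags the read below is just silent undefined behavior):

#include <new>

struct S {
  int val;
  S() : val(42) {}
  ~S() {} // the pushed cleanups poison 'val' when this destructor finishes
};

int main() {
  alignas(S) unsigned char buf[sizeof(S)];
  S *p = new (buf) S;
  p->~S();
  return p->val; // use-after-dtor: MSan reports use-of-uninitialized-value
}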
@@ -1655,7 +1918,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
/// zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
- llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
+ Address arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
QualType elementType;
llvm::Value *numElements =
emitArrayLength(arrayType, elementType, arrayBegin);
@@ -1669,15 +1932,14 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
/// may be zero
-/// \param arrayBegin a T*, where T is the type constructed by ctor
+/// \param arrayBase a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
llvm::Value *numElements,
- llvm::Value *arrayBegin,
+ Address arrayBase,
const CXXConstructExpr *E,
bool zeroInitialize) {
-
// It's legal for numElements to be zero. This can happen both
// dynamically, because x can be zero in 'new A[x]', and statically,
// because of GCC extensions that permit zero-length arrays. There
@@ -1701,6 +1963,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
// Find the end of the array.
+ llvm::Value *arrayBegin = arrayBase.getPointer();
llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
"arrayctor.end");
@@ -1714,11 +1977,21 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Inside the loop body, emit the constructor call on the array element.
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment =
+ arrayBase.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
+ Address curAddr = Address(cur, eltAlignment);
// Zero initialize the storage, if requested.
if (zeroInitialize)
- EmitNullInitialization(cur, type);
+ EmitNullInitialization(curAddr, type);
// C++ [class.temporary]p4:
// There are two contexts in which temporaries are destroyed at a different
@@ -1736,11 +2009,12 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
if (getLangOpts().Exceptions &&
!ctor->getParent()->hasTrivialDestructor()) {
Destroyer *destroyer = destroyCXXObject;
- pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment,
+ *destroyer);
}
EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, cur, E);
+ /*Delegating=*/false, curAddr, E);
}
// Go to the next element.
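The eltAlignment computed above is the conservative alignment of an arbitrary array element: an element at offset i * eltSize from a baseAlign-aligned pointer is guaranteed only the largest power of two dividing both quantities. A self-contained sketch of that rule (illustrative names, not the CharUnits API):

#include <cassert>
#include <cstdint>

static uint64_t alignmentOfArrayElement(uint64_t baseAlign, uint64_t eltSize) {
  assert(baseAlign && !(baseAlign & (baseAlign - 1)) && "power-of-two align");
  assert(eltSize && "element size must be nonzero");
  uint64_t sizeAlign = eltSize & -eltSize; // largest power of 2 dividing size
  return baseAlign < sizeAlign ? baseAlign : sizeAlign;
}

int main() {
  // A 16-byte-aligned array of 12-byte elements: offsets 0, 12, 24, ... are
  // only guaranteed 4-byte alignment.
  assert(alignmentOfArrayElement(16, 12) == 4);
  // A 16-byte-aligned array of 32-byte elements: every element keeps 16.
  assert(alignmentOfArrayElement(16, 32) == 16);
}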
@@ -1761,7 +2035,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
const RecordType *rtype = type->castAs<RecordType>();
const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
@@ -1774,14 +2048,16 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type,
bool ForVirtualBase,
- bool Delegating, llvm::Value *This,
+ bool Delegating, Address This,
const CXXConstructExpr *E) {
+ const CXXRecordDecl *ClassDecl = D->getParent();
+
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
// FIXME: Provide a source location here.
- EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
- getContext().getRecordType(D->getParent()));
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(),
+ This.getPointer(), getContext().getRecordType(ClassDecl));
if (D->isTrivial() && D->isDefaultConstructor()) {
assert(E->getNumArgs() == 0 && "trivial default ctor with args");
@@ -1796,8 +2072,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const Expr *Arg = E->getArg(0);
QualType SrcTy = Arg->getType();
- llvm::Value *Src = EmitLValue(Arg).getAddress();
- QualType DestTy = getContext().getTypeDeclType(D->getParent());
+ Address Src = EmitLValue(Arg).getAddress();
+ QualType DestTy = getContext().getTypeDeclType(ClassDecl);
EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
return;
}
@@ -1805,11 +2081,11 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This), D->getThisType(getContext()));
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Add the rest of the user-supplied arguments.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());
+ EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor());
// Insert any ABI-specific implicit constructor arguments.
unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
@@ -1820,19 +2096,64 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const CGFunctionInfo &Info =
CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
+
+ // Generate vtable assumptions if we're constructing a complete object
+ // with a vtable. We don't do this for base subobjects for two reasons:
+ // first, it's incorrect for classes with virtual bases, and second, we're
+ // about to overwrite the vptrs anyway.
+ // We also have to make sure we can refer to the vtable:
+ // - We may refer to the vtable only if it is safe to emit it speculatively.
+ // FIXME: If the vtable is used by the ctor/dtor, or if it is external and
+ // we are sure its definition is not hidden, then it is always safe to
+ // refer to it.
+ // FIXME: It looks like InstCombine is very inefficient at dealing with
+ // assumes. Make assumption loads require -fstrict-vtable-pointers temporarily.
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ ClassDecl->isDynamicClass() && Type != Ctor_Base &&
+ CGM.getCXXABI().canSpeculativelyEmitVTable(ClassDecl) &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ EmitVTableAssumptionLoads(ClassDecl, This);
+}
+
+void CodeGenFunction::EmitVTableAssumptionLoad(const VPtr &Vptr, Address This) {
+ llvm::Value *VTableGlobal =
+ CGM.getCXXABI().getVTableAddressPoint(Vptr.Base, Vptr.VTableClass);
+ if (!VTableGlobal)
+ return;
+
+ // We can just use the base offset in the complete class.
+ CharUnits NonVirtualOffset = Vptr.Base.getBaseOffset();
+
+ if (!NonVirtualOffset.isZero())
+ This =
+ ApplyNonVirtualAndVirtualOffset(*this, This, NonVirtualOffset, nullptr,
+ Vptr.VTableClass, Vptr.NearestVBase);
+
+ llvm::Value *VPtrValue =
+ GetVTablePtr(This, VTableGlobal->getType(), Vptr.VTableClass);
+ llvm::Value *Cmp =
+ Builder.CreateICmpEQ(VPtrValue, VTableGlobal, "cmp.vtables");
+ Builder.CreateAssumption(Cmp);
+}
+
+void CodeGenFunction::EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl,
+ Address This) {
+ if (CGM.getCXXABI().doStructorsInitializeVPtrs(ClassDecl))
+ for (const VPtr &Vptr : getVTablePointers(ClassDecl))
+ EmitVTableAssumptionLoad(Vptr, This);
}
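What the assumption loads buy, sketched at the source level. Whether the fold actually happens depends on the optimizer pipeline, but this is the shape of code the llvm.assume(vptr == vtable) pattern is meant to unlock under -O2 -fstrict-vtable-pointers:

struct A {
  virtual int f() { return 1; }
  virtual ~A() {}
};

int callTwice() {
  A a;                  // complete-object ctor: vptr store + assumption load
  return a.f() + a.f(); // candidates for devirtualization (ideally 'return 2')
}

int main() { return callTwice() == 2 ? 0 : 1; }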
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
- llvm::Value *This, llvm::Value *Src,
- const CXXConstructExpr *E) {
+ Address This, Address Src,
+ const CXXConstructExpr *E) {
if (isMemcpyEquivalentSpecialMember(D)) {
assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
"trivial 1-arg ctor not a copy/move ctor");
EmitAggregateCopyCtor(This, Src,
getContext().getTypeDeclType(D->getParent()),
- E->arg_begin()->getType());
+ (*E->arg_begin())->getType());
return;
}
llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
@@ -1844,16 +2165,16 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This), D->getThisType(getContext()));
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
Src = Builder.CreateBitCast(Src, t);
- Args.add(RValue::get(Src), QT);
+ Args.add(RValue::get(Src.getPointer()), QT);
// Skip over first argument (Src).
- EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
+ EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
/*ParamsToSkip*/ 1);
EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
@@ -1903,12 +2224,12 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
}
namespace {
- struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
+ struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
- llvm::Value *Addr;
+ Address Addr;
CXXDtorType Type;
- CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr,
CXXDtorType Type)
: Dtor(D), Addr(Addr), Type(Type) {}
@@ -1917,19 +2238,17 @@ namespace {
/*Delegating=*/true, Addr);
}
};
-}
+} // end anonymous namespace
void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
const FunctionArgList &Args) {
assert(Ctor->isDelegatingConstructor());
- llvm::Value *ThisPtr = LoadCXXThis();
+ Address ThisPtr = LoadCXXThisAddress();
- QualType Ty = getContext().getTagDeclType(Ctor->getParent());
- CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1951,17 +2270,17 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
bool ForVirtualBase,
bool Delegating,
- llvm::Value *This) {
+ Address This) {
CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
Delegating, This);
}
namespace {
- struct CallLocalDtor : EHScopeStack::Cleanup {
+ struct CallLocalDtor final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
- llvm::Value *Addr;
+ Address Addr;
- CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ CallLocalDtor(const CXXDestructorDecl *D, Address Addr)
: Dtor(D), Addr(Addr) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -1973,11 +2292,11 @@ namespace {
}
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
- llvm::Value *Addr) {
+ Address Addr) {
EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}
-void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
if (!ClassDecl) return;
if (ClassDecl->hasTrivialDestructor()) return;
@@ -1987,24 +2306,12 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
PushDestructorCleanup(D, Addr);
}
-void
-CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
- const CXXRecordDecl *NearestVBase,
- CharUnits OffsetFromNearestVBase,
- const CXXRecordDecl *VTableClass) {
- const CXXRecordDecl *RD = Base.getBase();
-
- // Don't initialize the vtable pointer if the class is marked with the
- // 'novtable' attribute.
- if ((RD == VTableClass || RD == NearestVBase) &&
- VTableClass->hasAttr<MSNoVTableAttr>())
- return;
-
+void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Compute the address point.
- bool NeedsVirtualOffset;
llvm::Value *VTableAddressPoint =
CGM.getCXXABI().getVTableAddressPointInStructor(
- *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
+ *this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase);
+
if (!VTableAddressPoint)
return;
@@ -2012,26 +2319,25 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
llvm::Value *VirtualOffset = nullptr;
CharUnits NonVirtualOffset = CharUnits::Zero();
- if (NeedsVirtualOffset) {
+ if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
- VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
- LoadCXXThis(),
- VTableClass,
- NearestVBase);
- NonVirtualOffset = OffsetFromNearestVBase;
+
+ VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(
+ *this, LoadCXXThisAddress(), Vptr.VTableClass, Vptr.NearestVBase);
+ NonVirtualOffset = Vptr.OffsetFromNearestVBase;
} else {
// We can just use the base offset in the complete class.
- NonVirtualOffset = Base.getBaseOffset();
+ NonVirtualOffset = Vptr.Base.getBaseOffset();
}
// Apply the offsets.
- llvm::Value *VTableField = LoadCXXThis();
+ Address VTableField = LoadCXXThisAddress();
if (!NonVirtualOffset.isZero() || VirtualOffset)
- VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
- NonVirtualOffset,
- VirtualOffset);
+ VTableField = ApplyNonVirtualAndVirtualOffset(
+ *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
+ Vptr.NearestVBase);
// Finally, store the address point. Use the same LLVM types as the field to
// support optimization.
@@ -2041,23 +2347,39 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
+
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
- CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
+ CGM.DecorateInstructionWithTBAA(Store, CGM.getTBAAInfoForVTablePtr());
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ CGM.DecorateInstructionWithInvariantGroup(Store, Vptr.VTableClass);
}
-void
-CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
- const CXXRecordDecl *NearestVBase,
- CharUnits OffsetFromNearestVBase,
- bool BaseIsNonVirtualPrimaryBase,
- const CXXRecordDecl *VTableClass,
- VisitedVirtualBasesSetTy& VBases) {
+CodeGenFunction::VPtrsVector
+CodeGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) {
+ CodeGenFunction::VPtrsVector VPtrsResult;
+ VisitedVirtualBasesSetTy VBases;
+ getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()),
+ /*NearestVBase=*/nullptr,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases,
+ VPtrsResult);
+ return VPtrsResult;
+}
+
+void CodeGenFunction::getVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases,
+ VPtrsVector &Vptrs) {
// If this base is a non-virtual primary base the address point has already
// been set.
if (!BaseIsNonVirtualPrimaryBase) {
// Initialize the vtable pointer for this base.
- InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
- VTableClass);
+ VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass};
+ Vptrs.push_back(Vptr);
}
const CXXRecordDecl *RD = Base.getBase();
@@ -2095,11 +2417,10 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
}
- InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
- I.isVirtual() ? BaseDecl : NearestVBase,
- BaseOffsetFromNearestVBase,
- BaseDeclIsNonVirtualPrimaryBase,
- VTableClass, VBases);
+ getVTablePointers(
+ BaseSubobject(BaseDecl, BaseOffset),
+ I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs);
}
}
@@ -2109,21 +2430,25 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
return;
// Initialize the vtable pointers for this class and all of its bases.
- VisitedVirtualBasesSetTy VBases;
- InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
- /*NearestVBase=*/nullptr,
- /*OffsetFromNearestVBase=*/CharUnits::Zero(),
- /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);
+ if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD))
+ for (const VPtr &Vptr : getVTablePointers(RD))
+ InitializeVTablePointer(Vptr);
if (RD->getNumVBases())
CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}
-llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
- llvm::Type *Ty) {
- llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
+ llvm::Type *VTableTy,
+ const CXXRecordDecl *RD) {
+ Address VTablePtrSrc = Builder.CreateElementBitCast(This, VTableTy);
llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
- CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ CGM.DecorateInstructionWithTBAA(VTable, CGM.getTBAAInfoForVTablePtr());
+
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ CGM.DecorateInstructionWithInvariantGroup(VTable, RD);
+
return VTable;
}
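Unlike the assumption loads above, the !invariant.group decoration on ordinary vtable loads pays off even when the dynamic type is unknown: two loads of the same object's vptr with no barrier in between may be merged. A source-level sketch of that payoff (again optimizer-dependent, gated on the same flags):

struct T {
  virtual int f() { return 1; }
  virtual ~T() {}
};

int sum(T *p, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i)
    s += p->f(); // the vptr loads share one invariant group: hoistable
  return s;
}

int main() {
  T t;
  return sum(&t, 3) == 3 ? 0 : 1;
}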
@@ -2190,19 +2515,10 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
return;
- SmallString<64> MangledName;
- llvm::raw_svector_ostream Out(MangledName);
- CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T.getUnqualifiedType(),
- Out);
-
- // Blacklist based on the mangled type.
- if (CGM.getContext().getSanitizerBlacklist().isBlacklistedType(Out.str()))
- return;
-
if (!SanOpts.has(SanitizerKind::CFICastStrict))
ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
- llvm::BasicBlock *ContBlock = 0;
+ llvm::BasicBlock *ContBlock = nullptr;
if (MayBeNull) {
llvm::Value *DerivedNotNull =
@@ -2216,7 +2532,9 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
EmitBlock(CheckBlock);
}
- llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy);
+ llvm::Value *VTable =
+ GetVTablePtr(Address(Derived, getPointerAlign()), Int8PtrTy, ClassDecl);
+
EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
if (MayBeNull) {
@@ -2234,18 +2552,22 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
SanitizerScope SanScope(this);
- std::string OutName;
- llvm::raw_string_ostream Out(OutName);
- CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);
-
- llvm::Value *BitSetName = llvm::MetadataAsValue::get(
- getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str()));
+ llvm::Metadata *MD =
+ CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ llvm::Value *BitSetName = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
llvm::Value *BitSetTest =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
{CastedVTable, BitSetName});
+ if (CGM.getCodeGenOpts().SanitizeCfiCrossDso) {
+ if (auto TypeId = CGM.CreateCfiIdForTypeMetadata(MD)) {
+ EmitCfiSlowPathCheck(BitSetTest, TypeId, CastedVTable);
+ return;
+ }
+ }
+
SanitizerMask M;
switch (TCK) {
case CFITCK_VCall:
@@ -2263,9 +2585,9 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
}
llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(Loc),
- EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
- llvm::ConstantInt::get(Int8Ty, TCK),
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
+ llvm::ConstantInt::get(Int8Ty, TCK),
};
EmitCheck(std::make_pair(BitSetTest, M), "cfi_bad_type", StaticData,
CastedVTable);
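A reproducer for the class of bug this bit-set test catches. Assuming a CFI-enabled toolchain (the derived-cast check needs LTO, e.g. clang++ -flto -fvisibility=hidden -fsanitize=cfi-derived-cast), the downcast below fails the vtable membership test at run time:

struct Base {
  virtual ~Base() {}
};

struct Derived : Base {
  virtual void extra() {}
};

int main() {
  Base b;
  // 'b' is not a Derived, so its vptr is not in Derived's bit set:
  Derived *d = static_cast<Derived *>(&b); // CFI check fails here
  (void)d;
  return 0;
}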
@@ -2405,8 +2727,8 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
CallArgList CallArgs;
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
- llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
- CallArgs.add(RValue::get(ThisPtr), ThisType);
+ Address ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
for (auto param : BD->params())
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index d97e40554ef2..ba7dcf7de6c7 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -19,6 +19,7 @@
#include "CGCleanup.h"
#include "CodeGenFunction.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
@@ -27,7 +28,7 @@ bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
if (rv.isScalar())
return DominatingLLVMValue::needsSaving(rv.getScalarVal());
if (rv.isAggregate())
- return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
return true;
}
@@ -41,9 +42,10 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
return saved_type(V, ScalarLiteral);
// Everything else needs an alloca.
- llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ Address addr =
+ CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
CGF.Builder.CreateStore(V, addr);
- return saved_type(addr, ScalarAddress);
+ return saved_type(addr.getPointer(), ScalarAddress);
}
if (rv.isComplex()) {
@@ -51,42 +53,56 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
llvm::Type *ComplexTy =
llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) nullptr);
- llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
CGF.Builder.CreateStore(V.first,
- CGF.Builder.CreateStructGEP(ComplexTy, addr, 0));
+ CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
CGF.Builder.CreateStore(V.second,
- CGF.Builder.CreateStructGEP(ComplexTy, addr, 1));
- return saved_type(addr, ComplexAddress);
+ CGF.Builder.CreateStructGEP(addr, 1, offset));
+ return saved_type(addr.getPointer(), ComplexAddress);
}
assert(rv.isAggregate());
- llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
- if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, AggregateLiteral);
-
- llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
- CGF.Builder.CreateStore(V, addr);
- return saved_type(addr, AggregateAddress);
+ Address V = rv.getAggregateAddress(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V.getPointer()))
+ return saved_type(V.getPointer(), AggregateLiteral,
+ V.getAlignment().getQuantity());
+
+ Address addr =
+ CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
+ CGF.Builder.CreateStore(V.getPointer(), addr);
+ return saved_type(addr.getPointer(), AggregateAddress,
+ V.getAlignment().getQuantity());
}
/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ auto getSavingAddress = [&](llvm::Value *value) {
+ auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
+ return Address(value, CharUnits::fromQuantity(alignment));
+ };
switch (K) {
case ScalarLiteral:
return RValue::get(Value);
case ScalarAddress:
- return RValue::get(CGF.Builder.CreateLoad(Value));
+ return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
case AggregateLiteral:
- return RValue::getAggregate(Value);
- case AggregateAddress:
- return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
+ case AggregateAddress: {
+ auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
+ return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
+ }
case ComplexAddress: {
- llvm::Value *real =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0));
- llvm::Value *imag =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1));
+ Address address = getSavingAddress(Value);
+ llvm::Value *real = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(real->getType()));
+ llvm::Value *imag = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 1, offset));
return RValue::getComplex(real, imag);
}
}
@@ -96,6 +112,7 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t Size) {
+ Size = llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
if (!StartOfBuffer) {
unsigned Capacity = 1024;
while (Capacity < Size) Capacity *= 2;
@@ -125,6 +142,10 @@ char *EHScopeStack::allocate(size_t Size) {
return StartOfData;
}
+void EHScopeStack::deallocate(size_t Size) {
+ StartOfData += llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
+}
+
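allocate and deallocate now round by the same rule; if they ever disagreed, StartOfData would drift out of sync with the pushed scopes. The rounding is the usual power-of-two round-up, shown standalone:

#include <cassert>
#include <cstddef>

constexpr std::size_t ScopeStackAlignment = 8;

constexpr std::size_t roundUpToAlignment(std::size_t value, std::size_t align) {
  return (value + align - 1) & ~(align - 1); // align must be a power of two
}

int main() {
  assert(roundUpToAlignment(1, ScopeStackAlignment) == 8);
  assert(roundUpToAlignment(8, ScopeStackAlignment) == 8);
  assert(roundUpToAlignment(13, ScopeStackAlignment) == 16);
}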
bool EHScopeStack::containsOnlyLifetimeMarkers(
EHScopeStack::stable_iterator Old) const {
for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
@@ -147,26 +168,8 @@ EHScopeStack::getInnermostActiveNormalCleanup() const {
return stable_end();
}
-EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
- for (stable_iterator si = getInnermostEHScope(), se = stable_end();
- si != se; ) {
- // Skip over inactive cleanups.
- EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
- if (cleanup && !cleanup->isActive()) {
- si = cleanup->getEnclosingEHScope();
- continue;
- }
-
- // All other scopes are always active.
- return si;
- }
-
- return stable_end();
-}
-
void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
- assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
@@ -194,7 +197,7 @@ void EHScopeStack::popCleanup() {
EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
InnermostEHScope = Cleanup.getEnclosingEHScope();
- StartOfData += Cleanup.getAllocatedSize();
+ deallocate(Cleanup.getAllocatedSize());
// Destroy the cleanup.
Cleanup.Destroy();
@@ -224,7 +227,7 @@ void EHScopeStack::popFilter() {
assert(!empty() && "popping exception stack when not empty");
EHFilterScope &filter = cast<EHFilterScope>(*begin());
- StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());
+ deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));
InnermostEHScope = filter.getEnclosingEHScope();
}
@@ -264,8 +267,8 @@ void EHScopeStack::popNullFixups() {
void CodeGenFunction::initFullExprCleanup() {
// Create a variable to decide whether the cleanup needs to be run.
- llvm::AllocaInst *active
- = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+ Address active = CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
@@ -276,7 +279,7 @@ void CodeGenFunction::initFullExprCleanup() {
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
- assert(!cleanup.getActiveFlag() && "cleanup already has active flag?");
+ assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
cleanup.setActiveFlag(active);
if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
@@ -285,6 +288,19 @@ void CodeGenFunction::initFullExprCleanup() {
void EHScopeStack::Cleanup::anchor() {}
+static void createStoreInstBefore(llvm::Value *value, Address addr,
+ llvm::Instruction *beforeInst) {
+ auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
+ store->setAlignment(addr.getAlignment().getQuantity());
+}
+
+static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
+ llvm::Instruction *beforeInst) {
+ auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
+ load->setAlignment(addr.getAlignment().getQuantity());
+ return load;
+}
+
/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
@@ -307,9 +323,9 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
// i.e. where there's an unresolved fixup inside a single cleanup
// entry which we're currently popping.
if (Fixup.OptimisticBranchBlock == nullptr) {
- new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
- CGF.getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
}
@@ -335,8 +351,8 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional());
- llvm::LoadInst *Load =
- new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest", Term);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
Br->eraseFromParent();
@@ -481,20 +497,11 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
static void EmitCleanup(CodeGenFunction &CGF,
EHScopeStack::Cleanup *Fn,
EHScopeStack::Cleanup::Flags flags,
- llvm::Value *ActiveFlag) {
- // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't
- // have this behavior, and the Microsoft C++ runtime will call terminate for
- // us if the cleanup throws.
- bool PushedTerminate = false;
- if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) {
- CGF.EHStack.pushTerminate();
- PushedTerminate = true;
- }
-
+ Address ActiveFlag) {
// If there's an active flag, load it and skip the cleanup if it's
// false.
llvm::BasicBlock *ContBB = nullptr;
- if (ActiveFlag) {
+ if (ActiveFlag.isValid()) {
ContBB = CGF.createBasicBlock("cleanup.done");
llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
llvm::Value *IsActive
@@ -508,12 +515,8 @@ static void EmitCleanup(CodeGenFunction &CGF,
assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
// Emit the continuation block if there was an active flag.
- if (ActiveFlag)
+ if (ActiveFlag.isValid())
CGF.EmitBlock(ContBB);
-
- // Leave the terminate scope.
- if (PushedTerminate)
- CGF.EHStack.popTerminate();
}
static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
@@ -588,10 +591,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Remember activation information.
bool IsActive = Scope.isActive();
- llvm::Value *NormalActiveFlag =
- Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr;
- llvm::Value *EHActiveFlag =
- Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr;
+ Address NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
+ Address EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
// Check whether we need an EH cleanup. This is only true if we've
// generated a lazy EH cleanup block.
@@ -671,16 +676,25 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
return;
}
- // Copy the cleanup emission data out. Note that SmallVector
- // guarantees maximal alignment for its buffer regardless of its
- // type parameter.
- SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
- CleanupBuffer.reserve(Scope.getCleanupSize());
- memcpy(CleanupBuffer.data(),
- Scope.getCleanupBuffer(), Scope.getCleanupSize());
- CleanupBuffer.set_size(Scope.getCleanupSize());
- EHScopeStack::Cleanup *Fn =
- reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+ // Copy the cleanup emission data out. This uses either a stack
+ // array or malloc'd memory, depending on the size, which is
+ // behavior that SmallVector would provide, if we could use it
+ // here. Unfortunately, if you ask for a SmallVector<char>, the
+ // alignment isn't sufficient.
+ auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
+ llvm::AlignedCharArray<EHScopeStack::ScopeStackAlignment,
+                        8 * sizeof(void *)> CleanupBufferStack;
+ std::unique_ptr<char[]> CleanupBufferHeap;
+ size_t CleanupSize = Scope.getCleanupSize();
+ EHScopeStack::Cleanup *Fn;
+
+ if (CleanupSize <= sizeof(CleanupBufferStack)) {
+ memcpy(CleanupBufferStack.buffer, CleanupSource, CleanupSize);
+ Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack.buffer);
+ } else {
+ CleanupBufferHeap.reset(new char[CleanupSize]);
+ memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
+ Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
+ }
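The stack-or-heap copy above, reduced to a standalone pattern. std::aligned_storage stands in for llvm::AlignedCharArray, and 64/8 are illustrative capacity and alignment values:

#include <cstddef>
#include <cstring>
#include <memory>
#include <type_traits>

void copyOut(const char *src, std::size_t size) {
  std::aligned_storage<64, 8>::type stackBuf;
  std::unique_ptr<char[]> heapBuf;
  char *dest;
  if (size <= sizeof(stackBuf)) {
    dest = reinterpret_cast<char *>(&stackBuf); // common case: no allocation
  } else {
    heapBuf.reset(new char[size]); // payload too big for the fixed buffer
    dest = heapBuf.get();
  }
  std::memcpy(dest, src, size);
  // ... use dest; heapBuf frees itself if the heap path was taken.
}

int main() {
  char data[100] = {};
  copyOut(data, sizeof(data));
}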
EHScopeStack::Cleanup::Flags cleanupFlags;
if (Scope.isNormalCleanup())
@@ -761,7 +775,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Clean up the possibly dead store to the cleanup dest slot.
llvm::Instruction *NormalCleanupDestSlot =
- cast<llvm::Instruction>(getNormalCleanupDestSlot());
+ cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
if (NormalCleanupDestSlot->hasOneUse()) {
NormalCleanupDestSlot->user_back()->eraseFromParent();
NormalCleanupDestSlot->eraseFromParent();
@@ -787,7 +801,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
const unsigned SwitchCapacity = 10;
llvm::LoadInst *Load =
- new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
+ nullptr);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
@@ -833,9 +848,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
BranchFixup &Fixup = EHStack.getBranchFixup(I);
if (!Fixup.Destination) continue;
if (!Fixup.OptimisticBranchBlock) {
- new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
Fixup.InitialBranch->setSuccessor(0, NormalEntry);
}
Fixup.OptimisticBranchBlock = NormalExit;
@@ -893,15 +908,40 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitBlock(EHEntry);
+ llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent);
+
+ // Push a terminate scope or cleanupendpad scope around the potentially
+ // throwing cleanups. For funclet EH personalities, the cleanupendpad models
+ // program termination when cleanups throw.
+ bool PushedTerminate = false;
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
+ CurrentFuncletPad);
+ llvm::CleanupPadInst *CPI = nullptr;
+ if (!EHPersonality::get(*this).usesFuncletPads()) {
+ EHStack.pushTerminate();
+ PushedTerminate = true;
+ } else {
+ llvm::Value *ParentPad = CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
+ CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
+ }
+
// We only actually emit the cleanup code if the cleanup is either
// active or was used before it was deactivated.
- if (EHActiveFlag || IsActive) {
-
+ if (EHActiveFlag.isValid() || IsActive) {
cleanupFlags.setIsForEHCleanup();
EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
}
- Builder.CreateBr(getEHDispatchBlock(EHParent));
+ if (CPI)
+ Builder.CreateCleanupRet(CPI, NextAction);
+ else
+ Builder.CreateBr(NextAction);
+
+ // Leave the terminate scope.
+ if (PushedTerminate)
+ EHStack.popTerminate();
Builder.restoreIP(SavedIP);
@@ -977,7 +1017,7 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
// Store the index at the start.
llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+ createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);
// Adjust BI to point to the first cleanup block.
{
@@ -1096,23 +1136,24 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
// If it hasn't yet been used as either, we're done.
if (!needFlag) return;
- llvm::AllocaInst *var = Scope.getActiveFlag();
- if (!var) {
- var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Address var = Scope.getActiveFlag();
+ if (!var.isValid()) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.isactive");
Scope.setActiveFlag(var);
assert(dominatingIP && "no existing variable and no dominating IP!");
// Initialize to true or false depending on whether it was
// active up to this point.
- llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+ llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);
// If we're in a conditional block, ignore the dominating IP and
// use the outermost conditional branch.
if (CGF.isInConditionalBranch()) {
CGF.setBeforeOutermostConditional(value, var);
} else {
- new llvm::StoreInst(value, var, dominatingIP);
+ createStoreInstBefore(value, var, dominatingIP);
}
}
@@ -1154,17 +1195,17 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
Scope.setActive(false);
}
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+Address CodeGenFunction::getNormalCleanupDestSlot() {
if (!NormalCleanupDest)
NormalCleanupDest =
CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return NormalCleanupDest;
+ return Address(NormalCleanupDest, CharUnits::fromQuantity(4));
}
/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
QualType TempType,
- llvm::Value *Ptr) {
+ Address Ptr) {
pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
/*useEHCleanup*/ true);
}
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 81c64123dfdb..909f00b05925 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -15,6 +15,8 @@
#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#include "EHScopeStack.h"
+
+#include "Address.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -26,7 +28,17 @@ class AllocaInst;
}
namespace clang {
+class FunctionDecl;
namespace CodeGen {
+class CodeGenModule;
+class CodeGenFunction;
+
+/// The MS C++ ABI needs a pointer to RTTI data plus some flags to describe the
+/// type of a catch handler, so we use this wrapper.
+struct CatchTypeInfo {
+ llvm::Constant *RTTI;
+ unsigned Flags;
+};
/// A protected scope for zero-cost EH handling.
class EHScope {
@@ -37,9 +49,9 @@ class EHScope {
class CommonBitFields {
friend class EHScope;
- unsigned Kind : 2;
+ unsigned Kind : 3;
};
- enum { NumCommonBits = 2 };
+ enum { NumCommonBits = 3 };
protected:
class CatchBitFields {
@@ -78,7 +90,7 @@ protected:
/// The number of fixups required by enclosing scopes (not including
/// this one). If this is the top cleanup scope, all the fixups
/// from this index onwards belong to this scope.
- unsigned FixupDepth : 32 - 18 - NumCommonBits; // currently 13
+ unsigned FixupDepth : 32 - 18 - NumCommonBits; // currently 12
};
class FilterBitFields {
@@ -96,7 +108,7 @@ protected:
};
public:
- enum Kind { Cleanup, Catch, Terminate, Filter };
+ enum Kind { Cleanup, Catch, Terminate, Filter, PadEnd };
EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
: CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
@@ -148,12 +160,12 @@ public:
struct Handler {
/// A type info value, or null (C++ null, not an LLVM null pointer)
/// for a catch-all.
- llvm::Constant *Type;
+ CatchTypeInfo Type;
/// The catch handler for this type.
llvm::BasicBlock *Block;
- bool isCatchAll() const { return Type == nullptr; }
+ bool isCatchAll() const { return Type.RTTI == nullptr; }
};
private:
@@ -183,11 +195,17 @@ public:
}
void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
- setHandler(I, /*catchall*/ nullptr, Block);
+ setHandler(I, CatchTypeInfo{nullptr, 0}, Block);
}
void setHandler(unsigned I, llvm::Constant *Type, llvm::BasicBlock *Block) {
assert(I < getNumHandlers());
+ getHandlers()[I].Type = CatchTypeInfo{Type, 0};
+ getHandlers()[I].Block = Block;
+ }
+
+ void setHandler(unsigned I, CatchTypeInfo Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
getHandlers()[I].Type = Type;
getHandlers()[I].Block = Block;
}
@@ -216,7 +234,7 @@ public:
};
/// A cleanup scope which generates the cleanup blocks lazily.
-class EHCleanupScope : public EHScope {
+class LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) EHCleanupScope : public EHScope {
/// The nearest normal cleanup scope enclosing this one.
EHScopeStack::stable_iterator EnclosingNormal;
@@ -302,8 +320,14 @@ public:
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
- llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
- void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+ bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ Address getActiveFlag() const {
+ return Address(ActiveFlag, CharUnits::One());
+ }
+ void setActiveFlag(Address Var) {
+ assert(Var.getAlignment().isOne());
+ ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ }
void setTestFlagInNormalCleanup() {
CleanupBits.TestFlagInNormalCleanup = true;
@@ -396,6 +420,15 @@ public:
return (Scope->getKind() == Cleanup);
}
};
+// NOTE: there's a bunch of different data classes tacked on after an
+// EHCleanupScope. It is asserted (in EHScopeStack::pushCleanup*) that
+// they don't require greater alignment than ScopeStackAlignment. So,
+// EHCleanupScope ought to have alignment equal to that -- not more
+// (would be misaligned by the stack allocator), and not less (would
+// break the appended classes).
+static_assert(llvm::AlignOf<EHCleanupScope>::Alignment ==
+ EHScopeStack::ScopeStackAlignment,
+ "EHCleanupScope expected alignment");
/// An exceptions scope which filters exceptions thrown through it.
/// Only exceptions matching the filter types will be permitted to be
@@ -454,6 +487,17 @@ public:
}
};
+class EHPadEndScope : public EHScope {
+public:
+ EHPadEndScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(PadEnd, enclosingEHScope) {}
+ static size_t getSize() { return sizeof(EHPadEndScope); }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == PadEnd;
+ }
+};
+
/// A non-stable pointer into the scope stack.
class EHScopeStack::iterator {
char *Ptr;
@@ -472,27 +516,31 @@ public:
EHScope &operator*() const { return *get(); }
iterator &operator++() {
+ size_t Size;
switch (get()->getKind()) {
case EHScope::Catch:
- Ptr += EHCatchScope::getSizeForNumHandlers(
- static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ Size = EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope *>(get())->getNumHandlers());
break;
case EHScope::Filter:
- Ptr += EHFilterScope::getSizeForNumFilters(
- static_cast<const EHFilterScope*>(get())->getNumFilters());
+ Size = EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope *>(get())->getNumFilters());
break;
case EHScope::Cleanup:
- Ptr += static_cast<const EHCleanupScope*>(get())
- ->getAllocatedSize();
+ Size = static_cast<const EHCleanupScope *>(get())->getAllocatedSize();
break;
case EHScope::Terminate:
- Ptr += EHTerminateScope::getSize();
+ Size = EHTerminateScope::getSize();
break;
- }
+ case EHScope::PadEnd:
+ Size = EHPadEndScope::getSize();
+ break;
+ }
+ Ptr += llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
return *this;
}
@@ -528,7 +576,7 @@ inline void EHScopeStack::popCatch() {
EHCatchScope &scope = cast<EHCatchScope>(*begin());
InnermostEHScope = scope.getEnclosingEHScope();
- StartOfData += EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers());
+ deallocate(EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers()));
}
inline void EHScopeStack::popTerminate() {
@@ -536,7 +584,7 @@ inline void EHScopeStack::popTerminate() {
EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
InnermostEHScope = scope.getEnclosingEHScope();
- StartOfData += EHTerminateScope::getSize();
+ deallocate(EHTerminateScope::getSize());
}
inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
@@ -551,6 +599,43 @@ EHScopeStack::stabilize(iterator ir) const {
return stable_iterator(EndOfBuffer - ir.Ptr);
}
+/// The exceptions personality for a function.
+struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(CodeGenModule &CGM, const FunctionDecl *FD);
+ static const EHPersonality &get(CodeGenFunction &CGF);
+
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_C_SEH;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNUstep_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ static const EHPersonality GNU_CPlusPlus_SEH;
+ static const EHPersonality MSVC_except_handler;
+ static const EHPersonality MSVC_C_specific_handler;
+ static const EHPersonality MSVC_CxxFrameHandler3;
+
+ /// Does this personality use landingpads or the family of pad instructions
+ /// designed to form funclets?
+ bool usesFuncletPads() const { return isMSVCPersonality(); }
+
+ bool isMSVCPersonality() const {
+ return this == &MSVC_except_handler || this == &MSVC_C_specific_handler ||
+ this == &MSVC_CxxFrameHandler3;
+ }
+
+ bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; }
+};
}
}
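// Hedged usage sketch for the struct above: EH lowering branches on the
// personality to pick landingpad-style or funclet-style IR. The mirror
// struct and lowerEHScope below are illustrative placeholders, not Clang code.
struct PersonalitySketch {
  bool UsesFuncletPads; // what usesFuncletPads() reports for MSVC personalities
};

void lowerEHScope(const PersonalitySketch &P) {
  if (P.UsesFuncletPads) {
    // MSVC personalities: emit catchswitch/catchpad/cleanuppad funclets.
  } else {
    // Itanium-style personalities: emit a landingpad plus selector compares.
  }
}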
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 93a2287b1e01..78e3978e0ff9 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -28,6 +28,7 @@
#include "clang/Basic/Version.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -37,7 +38,6 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
using namespace clang;
@@ -45,7 +45,10 @@ using namespace clang::CodeGen;
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
+ DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
DBuilder(CGM.getModule()) {
+ for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap)
+ DebugPrefixMap[KV.first] = KV.second;
CreateCompileUnit();
}
@@ -56,54 +59,63 @@ CGDebugInfo::~CGDebugInfo() {
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
SourceLocation TemporaryLocation)
- : CGF(CGF) {
+ : CGF(&CGF) {
init(TemporaryLocation);
}
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
bool DefaultToEmpty,
SourceLocation TemporaryLocation)
- : CGF(CGF) {
+ : CGF(&CGF) {
init(TemporaryLocation, DefaultToEmpty);
}
void ApplyDebugLocation::init(SourceLocation TemporaryLocation,
bool DefaultToEmpty) {
- if (auto *DI = CGF.getDebugInfo()) {
- OriginalLocation = CGF.Builder.getCurrentDebugLocation();
- if (TemporaryLocation.isInvalid()) {
- if (DefaultToEmpty)
- CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc());
- else {
- // Construct a location that has a valid scope, but no line info.
- assert(!DI->LexicalBlockStack.empty());
- CGF.Builder.SetCurrentDebugLocation(
- llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back()));
- }
- } else
- DI->EmitLocation(CGF.Builder, TemporaryLocation);
+ auto *DI = CGF->getDebugInfo();
+ if (!DI) {
+ CGF = nullptr;
+ return;
+ }
+
+ OriginalLocation = CGF->Builder.getCurrentDebugLocation();
+ if (TemporaryLocation.isValid()) {
+ DI->EmitLocation(CGF->Builder, TemporaryLocation);
+ return;
}
+
+ if (DefaultToEmpty) {
+ CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ return;
+ }
+
+ // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty());
+ CGF->Builder.SetCurrentDebugLocation(
+ llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back()));
}
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E)
- : CGF(CGF) {
+ : CGF(&CGF) {
init(E->getExprLoc());
}
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc)
- : CGF(CGF) {
- if (CGF.getDebugInfo()) {
- OriginalLocation = CGF.Builder.getCurrentDebugLocation();
- if (Loc)
- CGF.Builder.SetCurrentDebugLocation(std::move(Loc));
+ : CGF(&CGF) {
+ if (!CGF.getDebugInfo()) {
+ this->CGF = nullptr;
+ return;
}
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (Loc)
+ CGF.Builder.SetCurrentDebugLocation(std::move(Loc));
}
ApplyDebugLocation::~ApplyDebugLocation() {
// Query CGF so the location isn't overwritten when location updates are
// temporarily disabled (for C++ default function arguments)
- if (CGF.getDebugInfo())
- CGF.Builder.SetCurrentDebugLocation(std::move(OriginalLocation));
+ if (CGF)
+ CGF->Builder.SetCurrentDebugLocation(std::move(OriginalLocation));
}
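// Standalone sketch of the RAII shape the rewrite above settles on: hold a
// pointer instead of a reference so a disabled (or moved-from) guard can be
// nulled out, making the destructor a no-op. BuilderSketch and the line
// numbers are simplified stand-ins for CGBuilderTy and llvm::DebugLoc.
struct BuilderSketch { unsigned CurLine = 0; };

class ScopedLocSketch {
  BuilderSketch *B; // null => nothing to restore
  unsigned SavedLine = 0;

public:
  ScopedLocSketch(BuilderSketch &Bld, unsigned Line, bool Enabled)
      : B(Enabled ? &Bld : nullptr) {
    if (!B)
      return;               // debug info disabled: leave the builder alone
    SavedLine = B->CurLine; // save the original location
    B->CurLine = Line;      // apply the temporary one
  }
  ScopedLocSketch(ScopedLocSketch &&O) : B(O.B), SavedLine(O.SavedLine) {
    O.B = nullptr;          // a moved-from guard must not restore
  }
  ~ScopedLocSketch() {
    if (B)
      B->CurLine = SavedLine; // restore on scope exit
  }
};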
void CGDebugInfo::setLocation(SourceLocation Loc) {
@@ -138,9 +150,16 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
}
}
-llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context) {
+llvm::DIScope *CGDebugInfo::getDeclContextDescriptor(const Decl *D) {
+ llvm::DIScope *Mod = getParentModuleOrNull(D);
+ return getContextDescriptor(cast<Decl>(D->getDeclContext()),
+ Mod ? Mod : TheCU);
+}
+
+llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
+ llvm::DIScope *Default) {
if (!Context)
- return TheCU;
+ return Default;
auto I = RegionMap.find(Context);
if (I != RegionMap.end()) {
@@ -156,7 +175,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!RDecl->isDependentType())
return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
getOrCreateMainFile());
- return TheCU;
+ return Default;
}
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
@@ -164,22 +183,31 @@ StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
IdentifierInfo *FII = FD->getIdentifier();
FunctionTemplateSpecializationInfo *Info =
FD->getTemplateSpecializationInfo();
- if (!Info && FII)
+
+ if (!Info && FII && !CGM.getCodeGenOpts().EmitCodeView)
return FII->getName();
// Otherwise construct human readable name for debug info.
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
- FD->printName(OS);
+ PrintingPolicy Policy(CGM.getLangOpts());
- // Add any template specialization args.
- if (Info) {
- const TemplateArgumentList *TArgs = Info->TemplateArguments;
- const TemplateArgument *Args = TArgs->data();
- unsigned NumArgs = TArgs->size();
- PrintingPolicy Policy(CGM.getLangOpts());
- TemplateSpecializationType::PrintTemplateArgumentList(OS, Args, NumArgs,
- Policy);
+ if (CGM.getCodeGenOpts().EmitCodeView) {
+ // Print a fully qualified name like MSVC would.
+ Policy.MSVCFormatting = true;
+ FD->printQualifiedName(OS, Policy);
+ } else {
+ // Print the unqualified name with some template arguments. This is what
+ // DWARF-based debuggers expect.
+ FD->printName(OS);
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ TemplateSpecializationType::PrintTemplateArgumentList(OS, Args, NumArgs,
+ Policy);
+ }
}
// Copy this name on the side and use its reference.
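// Worked example for the branch above (hypothetical declaration, not from
// this patch): for  namespace ns { template <typename T> T f(T); }
// instantiated as f<int>, the two paths would produce roughly:
//   DWARF path    (printName + template args):          "f<int>"
//   CodeView path (printQualifiedName, MSVCFormatting): "ns::f<int>"
// DWARF consumers rebuild qualification from DW_TAG_namespace scopes, while
// CodeView records carry the fully qualified display name inline.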
@@ -197,6 +225,13 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
} else if (const ObjCInterfaceDecl *OID =
dyn_cast<const ObjCInterfaceDecl>(DC)) {
OS << OID->getName();
+ } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
+ if (OC->IsClassExtension()) {
+ OS << OC->getClassInterface()->getName();
+ } else {
+ OS << ((const NamedDecl *)OC)->getIdentifier()->getNameStart() << '('
+ << OC->getIdentifier()->getNameStart() << ')';
+ }
} else if (const ObjCCategoryImplDecl *OCD =
dyn_cast<const ObjCCategoryImplDecl>(DC)) {
OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
@@ -238,14 +273,16 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory());
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
// If the location is not valid then use main input file.
- return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory());
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
// Cache the results.
const char *fname = PLoc.getFilename();
@@ -257,15 +294,23 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return cast<llvm::DIFile>(V);
}
- llvm::DIFile *F =
- DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
+ llvm::DIFile *F = DBuilder.createFile(remapDIPath(PLoc.getFilename()),
+ remapDIPath(getCurrentDirname()));
DIFileCache[fname].reset(F);
return F;
}
llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
- return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory());
+ return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
+ remapDIPath(TheCU->getDirectory()));
+}
+
+std::string CGDebugInfo::remapDIPath(StringRef Path) const {
+ for (const auto &Entry : DebugPrefixMap)
+ if (Path.startswith(Entry.first))
+ return (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
+ return Path.str();
}
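// Standalone sketch of the first-match prefix rewrite remapDIPath performs,
// with std::pair/std::string replacing the LLVM types. Given an illustrative
// map entry {"/tmp/build", "/src"}, "/tmp/build/lib/a.c" becomes
// "/src/lib/a.c", and a path matching no prefix passes through unchanged.
#include <string>
#include <utility>
#include <vector>

std::string remapPathSketch(
    const std::vector<std::pair<std::string, std::string>> &Map,
    const std::string &Path) {
  for (const auto &E : Map)
    if (Path.compare(0, E.first.size(), E.first) == 0) // starts with prefix?
      return E.second + Path.substr(E.first.size());
  return Path; // no entry matched; emit the path as-is
}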
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
@@ -321,7 +366,7 @@ void CGDebugInfo::CreateCompileUnit() {
// file to determine the real absolute path for the file.
std::string MainFileDir;
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- MainFileDir = MainFile->getDir()->getName();
+ MainFileDir = remapDIPath(MainFile->getDir()->getName());
if (MainFileDir != ".") {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
llvm::sys::path::append(MainFileDirSS, MainFileName);
@@ -329,13 +374,6 @@ void CGDebugInfo::CreateCompileUnit() {
}
}
- // Save filename string.
- StringRef Filename = internString(MainFileName);
-
- // Save split dwarf file string.
- std::string SplitDwarfFile = CGM.getCodeGenOpts().SplitDwarfFile;
- StringRef SplitDwarfFilename = internString(SplitDwarfFile);
-
llvm::dwarf::SourceLanguage LangTag;
const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
@@ -361,13 +399,13 @@ void CGDebugInfo::CreateCompileUnit() {
// Create new compile unit.
// FIXME - Eliminate TheCU.
TheCU = DBuilder.createCompileUnit(
- LangTag, Filename, getCurrentDirname(), Producer, LO.Optimize,
- CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers, SplitDwarfFilename,
+ LangTag, remapDIPath(MainFileName), remapDIPath(getCurrentDirname()),
+ Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
+ CGM.getCodeGenOpts().SplitDwarfFile,
DebugKind <= CodeGenOptions::DebugLineTablesOnly
? llvm::DIBuilder::LineTablesOnly
: llvm::DIBuilder::FullDebug,
- 0 /* DWOid */,
- DebugKind != CodeGenOptions::LocTrackingOnly);
+ 0 /* DWOid */, DebugKind != CodeGenOptions::LocTrackingOnly);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -438,6 +476,24 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::OCLImage2dArray:
return getOrCreateStructPtrType("opencl_image2d_array_t",
OCLImage2dArrayDITy);
+ case BuiltinType::OCLImage2dDepth:
+ return getOrCreateStructPtrType("opencl_image2d_depth_t",
+ OCLImage2dDepthDITy);
+ case BuiltinType::OCLImage2dArrayDepth:
+ return getOrCreateStructPtrType("opencl_image2d_array_depth_t",
+ OCLImage2dArrayDepthDITy);
+ case BuiltinType::OCLImage2dMSAA:
+ return getOrCreateStructPtrType("opencl_image2d_msaa_t",
+ OCLImage2dMSAADITy);
+ case BuiltinType::OCLImage2dArrayMSAA:
+ return getOrCreateStructPtrType("opencl_image2d_array_msaa_t",
+ OCLImage2dArrayMSAADITy);
+ case BuiltinType::OCLImage2dMSAADepth:
+ return getOrCreateStructPtrType("opencl_image2d_msaa_depth_t",
+ OCLImage2dMSAADepthDITy);
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ return getOrCreateStructPtrType("opencl_image2d_array_msaa_depth_t",
+ OCLImage2dArrayMSAADepthDITy);
case BuiltinType::OCLImage3d:
return getOrCreateStructPtrType("opencl_image3d_t", OCLImage3dDITy);
case BuiltinType::OCLSampler:
@@ -446,6 +502,14 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
case BuiltinType::OCLEvent:
return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
+ case BuiltinType::OCLClkEvent:
+ return getOrCreateStructPtrType("opencl_clk_event_t", OCLClkEventDITy);
+ case BuiltinType::OCLQueue:
+ return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
+ case BuiltinType::OCLNDRange:
+ return getOrCreateStructPtrType("opencl_ndrange_t", OCLNDRangeDITy);
+ case BuiltinType::OCLReserveID:
+ return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
case BuiltinType::UChar:
case BuiltinType::Char_U:
@@ -604,7 +668,6 @@ static SmallString<256> getUniqueTagTypeName(const TagType *Ty,
// a unique string for a type?
llvm::raw_svector_ostream Out(FullName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out);
- Out.flush();
return FullName;
}
@@ -658,10 +721,6 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile *Unit) {
- if (Tag == llvm::dwarf::DW_TAG_reference_type ||
- Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
- return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit));
-
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
@@ -669,8 +728,13 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
- Align);
+ if (Tag == llvm::dwarf::DW_TAG_reference_type ||
+ Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
+ return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit),
+ Size, Align);
+ else
+ return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
+ Align);
}
llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
@@ -760,9 +824,9 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl();
SourceLocation Loc = AliasDecl->getLocation();
- return DBuilder.createTypedef(
- Src, internString(OS.str()), getOrCreateFile(Loc), getLineNumber(Loc),
- getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext())));
+ return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
+ getLineNumber(Loc),
+ getDeclContextDescriptor(AliasDecl));
}
llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
@@ -775,7 +839,7 @@ llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
return DBuilder.createTypedef(
getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit),
Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc),
- getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext())));
+ getDeclContextDescriptor(Ty->getDecl()));
}
llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
@@ -797,7 +861,7 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
}
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(Unit, EltTypeArray);
+ return DBuilder.createSubroutineType(EltTypeArray);
}
/// Convert an AccessSpecifier into the corresponding DINode flag.
@@ -972,7 +1036,7 @@ void CGDebugInfo::CollectRecordFields(
if (MI != StaticDataMemberCache.end()) {
assert(MI->second &&
"Static data member declaration should still exist");
- elements.push_back(cast<llvm::DIDerivedTypeBase>(MI->second));
+ elements.push_back(MI->second);
} else {
auto Field = CreateRecordStaticField(V, RecordTy, record);
elements.push_back(Field);
@@ -1048,7 +1112,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
Flags |= llvm::DINode::FlagRValueReference;
- return DBuilder.createSubroutineType(Unit, EltTypeArray, Flags);
+ return DBuilder.createSubroutineType(EltTypeArray, Flags);
}
/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
@@ -1129,7 +1193,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
/* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags,
- CGM.getLangOpts().Optimize, nullptr, TParamsArray.get());
+ CGM.getLangOpts().Optimize, TParamsArray.get());
SPCache[Method->getCanonicalDecl()].reset(SP);
@@ -1348,7 +1412,7 @@ llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
/* Function type */
llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit);
llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy);
- llvm::DIType *SubTy = DBuilder.createSubroutineType(Unit, SElements);
+ llvm::DIType *SubTy = DBuilder.createSubroutineType(SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
llvm::DIType *vtbl_ptr_type =
DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type");
@@ -1389,8 +1453,21 @@ llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy,
llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
+ return getOrCreateStandaloneType(D, Loc);
+}
+
+llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
+ SourceLocation Loc) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
+ assert(!D.isNull() && "null type");
llvm::DIType *T = getOrCreateType(D, getOrCreateFile(Loc));
+ assert(T && "could not create debug info for type");
+
+ // Composite types with UIDs were already retained by DIBuilder
+ // because they are only referenced by name in the IR.
+ if (auto *CTy = dyn_cast<llvm::DICompositeType>(T))
+ if (!CTy->getIdentifier().empty())
+ return T;
RetainedTypes.push_back(D.getAsOpaquePtr());
return T;
}
@@ -1422,6 +1499,9 @@ void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
if (CXXDecl->isDynamicClass())
return;
+ if (DebugTypeExtRefs && RD->isFromASTFile())
+ return;
+
QualType Ty = CGM.getContext().getRecordType(RD);
llvm::DIType *T = getTypeOrNull(Ty);
if (T && T->isForwardDecl())
@@ -1452,8 +1532,13 @@ static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
}
static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind,
+ bool DebugTypeExtRefs,
const RecordDecl *RD,
const LangOptions &LangOpts) {
+ // Does the type exist in an imported clang module?
+ if (DebugTypeExtRefs && RD->isFromASTFile() && RD->getDefinition())
+ return true;
+
if (DebugKind > CodeGenOptions::LimitedDebugInfo)
return false;
@@ -1487,10 +1572,10 @@ static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind,
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
- if (T || shouldOmitDefinition(DebugKind, RD, CGM.getLangOpts())) {
+ if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
+ CGM.getLangOpts())) {
if (!T)
- T = getOrCreateRecordFwdDecl(
- Ty, getContextDescriptor(cast<Decl>(RD->getDeclContext())));
+ T = getOrCreateRecordFwdDecl(Ty, getDeclContextDescriptor(RD));
return T;
}
@@ -1509,9 +1594,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
-
- auto *FwdDecl =
- cast<llvm::DICompositeType>(getOrCreateLimitedType(Ty, DefUnit));
+ llvm::DICompositeType *FwdDecl = getOrCreateLimitedType(Ty, DefUnit);
const RecordDecl *D = RD->getDefinition();
if (!D || !D->isCompleteDefinition())
@@ -1593,6 +1676,12 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (!ID)
return nullptr;
+ // Return a forward declaration if this type was imported from a clang module.
+ if (DebugTypeExtRefs && ID->isFromASTFile() && ID->getDefinition())
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(),
+ getDeclContextDescriptor(ID), Unit, 0);
+
// Get overall information about the record type for the debug info.
llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation());
unsigned Line = getLineNumber(ID->getLocation());
@@ -1603,9 +1692,10 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// debug type since we won't be able to lay out the entire type.
ObjCInterfaceDecl *Def = ID->getDefinition();
if (!Def || !Def->getImplementation()) {
+ llvm::DIScope *Mod = getParentModuleOrNull(ID);
llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType(
- llvm::dwarf::DW_TAG_structure_type, ID->getName(), TheCU, DefUnit, Line,
- RuntimeLang);
+ llvm::dwarf::DW_TAG_structure_type, ID->getName(), Mod ? Mod : TheCU,
+ DefUnit, Line, RuntimeLang);
ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit));
return FwdDecl;
}
@@ -1614,10 +1704,15 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
}
llvm::DIModule *
-CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod) {
- auto it = ModuleRefCache.find(Mod.Signature);
- if (it != ModuleRefCache.end())
- return it->second;
+CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU) {
+ // Use the Module pointer as the key into the cache. This is a
+ // nullptr if the "Module" is a PCH, which is safe because we don't
+ // support chained PCH debug info, so there can only be a single PCH.
+ const Module *M = Mod.getModuleOrNull();
+ auto ModRef = ModuleCache.find(M);
+ if (ModRef != ModuleCache.end())
+ return cast<llvm::DIModule>(ModRef->second);
// Macro definitions that were defined with "-D" on the command line.
SmallString<128> ConfigMacros;
@@ -1641,17 +1736,26 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod) {
OS << '\"';
}
}
- llvm::DIBuilder DIB(CGM.getModule());
- auto *CU = DIB.createCompileUnit(
- TheCU->getSourceLanguage(), internString(Mod.ModuleName),
- internString(Mod.Path), TheCU->getProducer(), true, StringRef(), 0,
- internString(Mod.ASTFile), llvm::DIBuilder::FullDebug, Mod.Signature);
- llvm::DIModule *ModuleRef =
- DIB.createModule(CU, Mod.ModuleName, ConfigMacros, internString(Mod.Path),
- internString(CGM.getHeaderSearchOpts().Sysroot));
- DIB.finalize();
- ModuleRefCache.insert(std::make_pair(Mod.Signature, ModuleRef));
- return ModuleRef;
+
+ bool IsRootModule = M ? !M->Parent : true;
+ if (CreateSkeletonCU && IsRootModule) {
+ llvm::DIBuilder DIB(CGM.getModule());
+ DIB.createCompileUnit(TheCU->getSourceLanguage(), Mod.getModuleName(),
+ Mod.getPath(), TheCU->getProducer(), true,
+ StringRef(), 0, Mod.getASTFile(),
+ llvm::DIBuilder::FullDebug, Mod.getSignature());
+ DIB.finalize();
+ }
+ llvm::DIModule *Parent =
+ IsRootModule ? nullptr
+ : getOrCreateModuleRef(
+ ExternalASTSource::ASTSourceDescriptor(*M->Parent),
+ CreateSkeletonCU);
+ llvm::DIModule *DIMod =
+ DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros,
+ Mod.getPath(), CGM.getHeaderSearchOpts().Sysroot);
+ ModuleCache[M].reset(DIMod);
+ return DIMod;
}
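// Minimal sketch (illustrative types, not Clang APIs) of the caching scheme
// above: DIModules are keyed by the Module pointer -- nullptr standing in
// for "the PCH" -- and parents are created recursively before children, so a
// submodule such as Foo.Bar hangs off the node built for Foo.
#include <map>
#include <string>

struct ModSketch { const ModSketch *Parent; std::string Name; };
struct DIModSketch { const DIModSketch *Parent; std::string Name; };

std::map<const ModSketch *, DIModSketch> ModCacheSketch;

const DIModSketch *getOrCreateSketch(const ModSketch *M) {
  auto It = ModCacheSketch.find(M);
  if (It != ModCacheSketch.end())
    return &It->second; // cache hit: one node per module, ever
  const DIModSketch *Parent =
      (M && M->Parent) ? getOrCreateSketch(M->Parent) : nullptr; // root: none
  auto &Slot = ModCacheSketch[M];
  Slot = DIModSketch{Parent, M ? M->Name : "<pch>"};
  return &Slot;
}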
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
@@ -1669,9 +1773,10 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
if (ID->getImplementation())
Flags |= llvm::DINode::FlagObjcClassComplete;
+ llvm::DIScope *Mod = getParentModuleOrNull(ID);
llvm::DICompositeType *RealDecl = DBuilder.createStructType(
- Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, nullptr,
- llvm::DINodeArray(), RuntimeLang);
+ Mod ? Mod : Unit, ID->getName(), DefUnit, Line, Size, Align, Flags,
+ nullptr, llvm::DINodeArray(), RuntimeLang);
QualType QTy(Ty, 0);
TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl);
@@ -1695,7 +1800,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
}
// Create entries for all of the properties.
- for (const auto *PD : ID->properties()) {
+ auto AddProperty = [&](const ObjCPropertyDecl *PD) {
SourceLocation Loc = PD->getLocation();
llvm::DIFile *PUnit = getOrCreateFile(Loc);
unsigned PLine = getLineNumber(Loc);
@@ -1709,6 +1814,21 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
: getSelectorName(PD->getSetterName()),
PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit));
EltTys.push_back(PropertyNode);
+ };
+ {
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (const ObjCCategoryDecl *ClassExt : ID->known_extensions())
+ for (auto *PD : ClassExt->properties()) {
+ PropertySet.insert(PD->getIdentifier());
+ AddProperty(PD);
+ }
+ for (const auto *PD : ID->properties()) {
+ // Don't emit duplicate metadata for properties that were already in a
+ // class extension.
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ AddProperty(PD);
+ }
}
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
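// Sketch of the deduplication idiom used in the hunk above: set insert()
// returns {iterator, bool}, and the bool is false when the key was already
// present, so class-extension properties win and later duplicates are
// skipped. A plain std::set of names stands in for the SmallPtrSet of
// IdentifierInfo pointers.
#include <set>
#include <string>
#include <vector>

std::vector<std::string>
dedupProps(const std::vector<std::string> &ExtProps,
           const std::vector<std::string> &ClassProps) {
  std::set<std::string> Seen;
  std::vector<std::string> Out;
  for (const auto &P : ExtProps) { // extensions are visited first
    Seen.insert(P);
    Out.push_back(P);
  }
  for (const auto &P : ClassProps)
    if (Seen.insert(P).second)     // false => already emitted above
      Out.push_back(P);
  return Out;
}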
@@ -1883,9 +2003,8 @@ llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile *U) {
- uint64_t Size = CGM.getCXXABI().isTypeInfoCalculable(QualType(Ty, 0))
- ? CGM.getContext().getTypeSize(Ty)
- : 0;
+ uint64_t Size =
+ !Ty->isIncompleteType() ? CGM.getContext().getTypeSize(Ty) : 0;
llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
if (Ty->isMemberDataPointerType())
return DBuilder.createMemberPointerType(
@@ -1908,6 +2027,7 @@ llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
const EnumDecl *ED = Ty->getDecl();
+
uint64_t Size = 0;
uint64_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
@@ -1917,11 +2037,13 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ bool isImportedFromModule =
+ DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
+
// If this is just a forward declaration, construct an appropriately
// marked node and just return it.
- if (!ED->getDefinition()) {
- llvm::DIScope *EDContext =
- getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ if (isImportedFromModule || !ED->getDefinition()) {
+ llvm::DIScope *EDContext = getDeclContextDescriptor(ED);
llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
StringRef EDName = ED->getName();
@@ -1961,8 +2083,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
- llvm::DIScope *EnumContext =
- getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIScope *EnumContext = getDeclContextDescriptor(ED);
llvm::DIType *ClassTy =
ED->isFixed() ? getOrCreateType(ED->getIntegerType(), DefUnit) : nullptr;
return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
@@ -2061,9 +2182,8 @@ llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
if (auto *T = getTypeOrNull(Ty))
return T;
- // Otherwise create the type.
llvm::DIType *Res = CreateTypeNode(Ty, Unit);
- void *TyPtr = Ty.getAsOpaquePtr();
+ void* TyPtr = Ty.getAsOpaquePtr();
// And update the type cache.
TypeCache[TyPtr].reset(Res);
@@ -2071,28 +2191,36 @@ llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
return Res;
}
-unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl *ID) {
- // The assumption is that the number of ivars can only increase
- // monotonically, so it is safe to just use their current number as
- // a checksum.
- unsigned Sum = 0;
- for (const ObjCIvarDecl *Ivar = ID->all_declared_ivar_begin();
- Ivar != nullptr; Ivar = Ivar->getNextIvar())
- ++Sum;
-
- return Sum;
-}
-
-ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) {
- switch (Ty->getTypeClass()) {
- case Type::ObjCObjectPointer:
- return getObjCInterfaceDecl(
- cast<ObjCObjectPointerType>(Ty)->getPointeeType());
- case Type::ObjCInterface:
- return cast<ObjCInterfaceType>(Ty)->getDecl();
- default:
+llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
+ // A forward declaration inside a module header does not belong to the module.
+ if (isa<RecordDecl>(D) && !cast<RecordDecl>(D)->getDefinition())
return nullptr;
+ if (DebugTypeExtRefs && D->isFromASTFile()) {
+ // Record a reference to an imported clang module or precompiled header.
+ auto *Reader = CGM.getContext().getExternalSource();
+ auto Idx = D->getOwningModuleID();
+ auto Info = Reader->getSourceDescriptor(Idx);
+ if (Info)
+ return getOrCreateModuleRef(*Info, /*SkeletonCU=*/true);
+ } else if (ClangModuleMap) {
+ // We are building a clang module or a precompiled header.
+ //
+ // TODO: When D is a CXXRecordDecl or a C++ Enum, the ODR applies
+ // and it wouldn't be necessary to specify the parent scope
+ // because the type is already unique by definition (it would look
+ // like the output of -fno-standalone-debug). On the other hand,
+ // the parent scope helps a consumer to quickly locate the object
+ // file where the type's definition is located, so it might be
+ // best to make this behavior a command line or debugger tuning
+ // option.
+ FullSourceLoc Loc(D->getLocation(), CGM.getContext().getSourceManager());
+ if (Module *M = ClangModuleMap->inferModuleFromLocation(Loc)) {
+ auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ return getOrCreateModuleRef(Info, /*SkeletonCU=*/false);
+ }
}
+
+ return nullptr;
}
llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
@@ -2175,11 +2303,11 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
llvm_unreachable("type should have been unwrapped!");
}
-llvm::DIType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
- llvm::DIFile *Unit) {
+llvm::DICompositeType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
+ llvm::DIFile *Unit) {
QualType QTy(Ty, 0);
- auto *T = cast_or_null<llvm::DICompositeTypeBase>(getTypeOrNull(QTy));
+ auto *T = cast_or_null<llvm::DICompositeType>(getTypeOrNull(QTy));
// We may have cached a forward decl when we could have created
// a non-forward decl. Go ahead and create a non-forward decl
@@ -2209,8 +2337,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
unsigned Line = getLineNumber(RD->getLocation());
StringRef RDName = getClassName(RD);
- llvm::DIScope *RDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+ llvm::DIScope *RDContext = getDeclContextDescriptor(RD);
// If we ended up creating the type during the context chain construction,
// just return that.
@@ -2306,8 +2433,10 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl));
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) {
+ llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
+ FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
+ }
// Collect template parameters.
TParamsArray = CollectFunctionTemplateParams(FD, Unit);
}
@@ -2355,7 +2484,9 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
// outside the class by putting it in the global scope.
if (DC->isRecord())
DC = CGM.getContext().getTranslationUnitDecl();
- VDContext = getContextDescriptor(dyn_cast<Decl>(DC));
+
+ llvm::DIScope *Mod = getParentModuleOrNull(VD);
+ VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
}
llvm::DISubprogram *
@@ -2380,7 +2511,7 @@ CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
DContext, Name, LinkageName, Unit, Line,
getOrCreateFunctionType(FD, FnType, Unit), !FD->isExternallyVisible(),
- false /*declaration*/, 0, Flags, CGM.getLangOpts().Optimize, nullptr,
+ /* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
TParamsArray.get(), getFunctionDeclaration(FD));
const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
@@ -2441,7 +2572,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
return nullptr;
// Setup context.
- auto *S = getContextDescriptor(cast<Decl>(D->getDeclContext()));
+ auto *S = getDeclContextDescriptor(D);
auto MI = SPCache.find(FD->getCanonicalDecl());
if (MI == SPCache.end()) {
@@ -2476,8 +2607,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly)
// Create fake but valid subroutine type. Otherwise -verify would fail, and
// subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
- return DBuilder.createSubroutineType(F,
- DBuilder.getOrCreateTypeArray(None));
+ return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
@@ -2495,11 +2625,17 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
Elts.push_back(getOrCreateType(ResultTy, F));
// "self" pointer is always first argument.
- QualType SelfDeclTy = OMethod->getSelfDecl()->getType();
- Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
+ QualType SelfDeclTy;
+ if (auto *SelfDecl = OMethod->getSelfDecl())
+ SelfDeclTy = SelfDecl->getType();
+ else if (auto *FPT = dyn_cast<FunctionProtoType>(FnType))
+ if (FPT->getNumParams() > 1)
+ SelfDeclTy = FPT->getParamType(0);
+ if (!SelfDeclTy.isNull())
+ Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
// "_cmd" pointer is always second argument.
Elts.push_back(DBuilder.createArtificialType(
- getOrCreateType(OMethod->getCmdDecl()->getType(), F)));
+ getOrCreateType(CGM.getContext().getObjCSelType(), F)));
// Get rest of the arguments.
for (const auto *PI : OMethod->params())
Elts.push_back(getOrCreateType(PI->getType(), F));
@@ -2508,7 +2644,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
Elts.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- return DBuilder.createSubroutineType(F, EltTypeArray);
+ return DBuilder.createSubroutineType(EltTypeArray);
}
// Handle variadic function types; they need an additional
@@ -2522,7 +2658,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
EltTys.push_back(getOrCreateType(FPT->getParamType(i), F));
EltTys.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(F, EltTypeArray);
+ return DBuilder.createSubroutineType(EltTypeArray);
}
return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
@@ -2588,8 +2724,9 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
llvm::DISubprogram *SP = DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(),
- true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize, Fn,
+ true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
TParamsArray.get(), getFunctionDeclaration(D));
+ Fn->setSubprogram(SP);
// We might get here with a VarDecl in the case we're generating
// code for the initialization of globals. Do not record these decls
// as they will overwrite the actual VarDecl Decl in the cache.
@@ -2603,6 +2740,48 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
RegionMap[D].reset(SP);
}
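// Usage note for the hunk above: the DISubprogram is now attached to the
// llvm::Function directly rather than threaded through createFunction.
// Assuming Fn is an llvm::Function* and SP the DISubprogram built above:
//
//   Fn->setSubprogram(SP);             // hang SP off the function
//   assert(Fn->getSubprogram() == SP); // and it is queryable afterwards
//
// setSubprogram/getSubprogram are the LLVM accessors this patch relies on;
// everything else here is context from the hunk.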
+void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
+ QualType FnType) {
+ StringRef Name;
+ StringRef LinkageName;
+
+ const Decl *D = GD.getDecl();
+ if (!D)
+ return;
+
+ unsigned Flags = 0;
+ llvm::DIFile *Unit = getOrCreateFile(Loc);
+ llvm::DIScope *FDContext = getDeclContextDescriptor(D);
+ llvm::DINodeArray TParamsArray;
+ if (isa<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
+ TParamsArray, Flags);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ Flags |= llvm::DINode::FlagPrototyped;
+ } else {
+ llvm_unreachable("not a function or ObjC method");
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ if (D->isImplicit()) {
+ Flags |= llvm::DINode::FlagArtificial;
+ // Artificial functions without a location should not silently reuse CurLoc.
+ if (Loc.isInvalid())
+ CurLoc = SourceLocation();
+ }
+ unsigned LineNo = getLineNumber(Loc);
+ unsigned ScopeLine = 0;
+
+ DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit),
+ false /*internalLinkage*/, true /*definition*/,
+ ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get(), getFunctionDeclaration(D));
+}
+
void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
// Update our current location
setLocation(Loc);
@@ -2740,8 +2919,8 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
nullptr, Elements);
}
-void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
- llvm::Value *Storage, unsigned ArgNo,
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
+ llvm::Optional<unsigned> ArgNo,
CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
@@ -2780,7 +2959,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
// FIXME: There has to be a better way to do this, but for static
// functions there won't be an implicit param at arg1 and
// otherwise it is 'self' or 'this'.
- if (isa<ImplicitParamDecl>(VD) && ArgNo == 1)
+ if (isa<ImplicitParamDecl>(VD) && ArgNo && *ArgNo == 1)
Flags |= llvm::DINode::FlagObjectPointer;
if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage))
if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() &&
@@ -2805,8 +2984,11 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
Expr.push_back(offset.getQuantity());
// Create the descriptor for the variable.
- auto *D = DBuilder.createLocalVariable(Tag, Scope, VD->getName(), Unit,
- Line, Ty, ArgNo);
+ auto *D = ArgNo
+ ? DBuilder.createParameterVariable(Scope, VD->getName(),
+ *ArgNo, Unit, Line, Ty)
+ : DBuilder.createAutoVariable(Scope, VD->getName(), Unit,
+ Line, Ty);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -2836,10 +3018,9 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
continue;
// Use VarDecl's Tag, Scope and Line number.
- auto *D = DBuilder.createLocalVariable(
- Tag, Scope, FieldName, Unit, Line, FieldTy,
- CGM.getLangOpts().Optimize, Flags | llvm::DINode::FlagArtificial,
- ArgNo);
+ auto *D = DBuilder.createAutoVariable(
+ Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize,
+ Flags | llvm::DINode::FlagArtificial);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -2851,8 +3032,12 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
// Create the descriptor for the variable.
auto *D =
- DBuilder.createLocalVariable(Tag, Scope, Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags, ArgNo);
+ ArgNo
+ ? DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line,
+ Ty, CGM.getLangOpts().Optimize,
+ Flags)
+ : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -2864,7 +3049,7 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
- EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
+ EmitDeclare(VD, Storage, llvm::None, Builder);
}
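// Sketch of the Optional-based dispatch above: one entry point serves both
// parameters and locals, and the presence of ArgNo picks the DIBuilder hook
// (createParameterVariable vs. createAutoVariable). Simplified types;
// std::optional mirrors llvm::Optional purely for this sketch.
#include <optional>
#include <string>

struct VarDescSketch { std::string Name; bool IsParam; unsigned ArgNo; };

VarDescSketch describeVar(const std::string &Name,
                          std::optional<unsigned> ArgNo) {
  if (ArgNo)
    return {Name, /*IsParam=*/true, *ArgNo}; // parameter-variable path
  return {Name, /*IsParam=*/false, 0};       // auto-variable path
}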
llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
@@ -2929,8 +3114,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
}
// Create the descriptor for the variable.
- auto *D = DBuilder.createLocalVariable(
- llvm::dwarf::DW_TAG_auto_variable,
+ auto *D = DBuilder.createAutoVariable(
cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
Line, Ty);
@@ -2948,7 +3132,7 @@ void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
- EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
+ EmitDeclare(VD, AI, ArgNo, Builder);
}
namespace {
@@ -2977,7 +3161,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
unsigned column = getColumnNumber(loc);
// Build the debug-info type for the block literal.
- getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
+ getDeclContextDescriptor(blockDecl);
const llvm::StructLayout *blockLayout =
CGM.getDataLayout().getStructLayout(block.StructureType);
@@ -3090,9 +3274,9 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());
// Create the descriptor for the parameter.
- auto *debugVar = DBuilder.createLocalVariable(
- llvm::dwarf::DW_TAG_arg_variable, scope, Arg->getName(), tunit, line,
- type, CGM.getLangOpts().Optimize, flags, ArgNo);
+ auto *debugVar = DBuilder.createParameterVariable(
+ scope, Arg->getName(), ArgNo, tunit, line, type,
+ CGM.getLangOpts().Optimize, flags);
if (LocalAddr) {
// Insert an llvm.dbg.value into the current block.
@@ -3115,14 +3299,13 @@ CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
assert(MI->second && "Static data member declaration should still exist");
- return cast<llvm::DIDerivedType>(MI->second);
+ return MI->second;
}
// If the member wasn't found in the cache, lazily construct and add it to the
// type (used when a limited form of the type is emitted).
auto DC = D->getDeclContext();
- auto *Ctxt =
- cast<llvm::DICompositeType>(getContextDescriptor(cast<Decl>(DC)));
+ auto *Ctxt = cast<llvm::DICompositeType>(getDeclContextDescriptor(D));
return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}
@@ -3170,7 +3353,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// variable for each member of the anonymous union so that it's possible
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
- const RecordDecl *RD = cast<RecordType>(T)->getDecl();
+ const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
@@ -3207,15 +3390,14 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
auto *VarD = cast<VarDecl>(VD);
if (VarD->isStaticDataMember()) {
auto *RD = cast<RecordDecl>(VarD->getDeclContext());
- getContextDescriptor(RD);
+ getDeclContextDescriptor(VarD);
// Ensure that the type is retained even though it's otherwise unreferenced.
RetainedTypes.push_back(
CGM.getContext().getRecordType(RD).getAsOpaquePtr());
return;
}
- llvm::DIScope *DContext =
- getContextDescriptor(dyn_cast<Decl>(VD->getDeclContext()));
+ llvm::DIScope *DContext = getDeclContextDescriptor(VD);
auto &GV = DeclCache[VD];
if (GV)
@@ -3228,16 +3410,21 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
if (!LexicalBlockStack.empty())
return LexicalBlockStack.back();
- return getContextDescriptor(D);
+ llvm::DIScope *Mod = getParentModuleOrNull(D);
+ return getContextDescriptor(D, Mod ? Mod : TheCU);
}
void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
return;
- DBuilder.createImportedModule(
- getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
- getOrCreateNameSpace(UD.getNominatedNamespace()),
- getLineNumber(UD.getLocation()));
+ const NamespaceDecl *NSDecl = UD.getNominatedNamespace();
+ if (!NSDecl->isAnonymousNamespace() ||
+ CGM.getCodeGenOpts().DebugExplicitImport) {
+ DBuilder.createImportedModule(
+ getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
+ getOrCreateNameSpace(NSDecl),
+ getLineNumber(UD.getLocation()));
+ }
}
void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
@@ -3256,12 +3443,13 @@ void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
}
void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
- auto *Reader = CGM.getContext().getExternalSource();
- auto Info = Reader->getSourceDescriptor(*ID.getImportedModule());
- DBuilder.createImportedDeclaration(
- getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
- getOrCreateModuleRef(Info),
- getLineNumber(ID.getLocation()));
+ if (Module *M = ID.getImportedModule()) {
+ auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
+ getOrCreateModuleRef(Info, DebugTypeExtRefs),
+ getLineNumber(ID.getLocation()));
+ }
}
llvm::DIImportedEntity *
@@ -3297,14 +3485,19 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
unsigned LineNo = getLineNumber(NSDecl->getLocation());
llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation());
- llvm::DIScope *Context =
- getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
+ llvm::DIScope *Context = getDeclContextDescriptor(NSDecl);
llvm::DINamespace *NS =
DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
NameSpaceCache[NSDecl].reset(NS);
return NS;
}
+void CGDebugInfo::setDwoId(uint64_t Signature) {
+ assert(TheCU && "no main compile unit");
+ TheCU->setDWOId(Signature);
+}
+
void CGDebugInfo::finalize() {
// Creating types might create further types - invalidating the current
// element and the size(), so don't cache/reference them.
@@ -3348,9 +3541,9 @@ void CGDebugInfo::finalize() {
// We keep our own list of retained types, because we need to look
// up the final type in the type cache.
- for (std::vector<void *>::const_iterator RI = RetainedTypes.begin(),
- RE = RetainedTypes.end(); RI != RE; ++RI)
- DBuilder.retainType(cast<llvm::DIType>(TypeCache[*RI]));
+ for (auto &RT : RetainedTypes)
+ if (auto MD = TypeCache[RT])
+ DBuilder.retainType(cast<llvm::DIType>(MD));
DBuilder.finalize();
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 82680a84d328..57d5c808f297 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -20,6 +20,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/ValueHandle.h"
@@ -31,12 +32,13 @@ class MDNode;
namespace clang {
class CXXMethodDecl;
-class VarDecl;
-class ObjCInterfaceDecl;
-class ObjCIvarDecl;
class ClassTemplateSpecializationDecl;
class GlobalDecl;
+class ModuleMap;
+class ObjCInterfaceDecl;
+class ObjCIvarDecl;
class UsingDecl;
+class VarDecl;
namespace CodeGen {
class CodeGenModule;
@@ -51,8 +53,10 @@ class CGDebugInfo {
friend class SaveAndRestoreLocation;
CodeGenModule &CGM;
const CodeGenOptions::DebugInfoKind DebugKind;
+ bool DebugTypeExtRefs;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
+ ModuleMap *ClangModuleMap = nullptr;
SourceLocation CurLoc;
llvm::DIType *VTablePtrType = nullptr;
llvm::DIType *ClassTy = nullptr;
@@ -63,12 +67,24 @@ class CGDebugInfo {
llvm::DIType *OCLImage1dBufferDITy = nullptr;
llvm::DIType *OCLImage2dDITy = nullptr;
llvm::DIType *OCLImage2dArrayDITy = nullptr;
+ llvm::DIType *OCLImage2dDepthDITy = nullptr;
+ llvm::DIType *OCLImage2dArrayDepthDITy = nullptr;
+ llvm::DIType *OCLImage2dMSAADITy = nullptr;
+ llvm::DIType *OCLImage2dArrayMSAADITy = nullptr;
+ llvm::DIType *OCLImage2dMSAADepthDITy = nullptr;
+ llvm::DIType *OCLImage2dArrayMSAADepthDITy = nullptr;
llvm::DIType *OCLImage3dDITy = nullptr;
llvm::DIType *OCLEventDITy = nullptr;
+ llvm::DIType *OCLClkEventDITy = nullptr;
+ llvm::DIType *OCLQueueDITy = nullptr;
+ llvm::DIType *OCLNDRangeDITy = nullptr;
+ llvm::DIType *OCLReserveIDDITy = nullptr;
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
+ llvm::SmallDenseMap<llvm::StringRef, llvm::StringRef> DebugPrefixMap;
+
struct ObjCInterfaceCacheEntry {
const ObjCInterfaceType *Type;
llvm::DIType *Decl;
@@ -81,8 +97,8 @@ class CGDebugInfo {
/// Cache of previously constructed interfaces which may change.
llvm::SmallVector<ObjCInterfaceCacheEntry, 32> ObjCInterfaceCache;
- /// Cache of references to AST files such as PCHs or modules.
- llvm::DenseMap<uint64_t, llvm::DIModule *> ModuleRefCache;
+ /// Cache of references to clang modules and precompiled headers.
+ llvm::DenseMap<const Module *, llvm::TrackingMDRef> ModuleCache;
/// List of interfaces we want to keep even if orphaned.
std::vector<void *> RetainedTypes;
@@ -117,13 +133,13 @@ class CGDebugInfo {
llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NameSpaceCache;
llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
NamespaceAliasCache;
- llvm::DenseMap<const Decl *, llvm::TrackingMDRef> StaticDataMemberCache;
+ llvm::DenseMap<const Decl *, llvm::TypedTrackingMDRef<llvm::DIDerivedType>>
+ StaticDataMemberCache;
/// Helper functions for getOrCreateType.
/// @{
/// Currently the checksum of an interface includes the number of
/// ivars and property accessors.
- unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl);
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
@@ -182,11 +198,8 @@ class CGDebugInfo {
llvm::DIType *getOrCreateVTablePtrType(llvm::DIFile *F);
/// \return namespace descriptor for the given namespace decl.
llvm::DINamespace *getOrCreateNameSpace(const NamespaceDecl *N);
- llvm::DIType *getOrCreateTypeDeclaration(QualType PointeeTy, llvm::DIFile *F);
llvm::DIType *CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty,
QualType PointeeTy, llvm::DIFile *F);
-
- llvm::Value *getCachedInterfaceTypeOrNull(const QualType Ty);
llvm::DIType *getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache);
/// A helper function to create a subprogram for a single member
@@ -261,6 +274,14 @@ public:
void finalize();
+ /// Set the main CU's DwoId field to \p Signature.
+ void setDwoId(uint64_t Signature);
+
+ /// When generating debug information for a clang module or
+ /// precompiled header, this module map will be used to determine
+ /// the module of origin of each Decl.
+ void setModuleMap(ModuleMap &MMap) { ClangModuleMap = &MMap; }
+
/// Update the current source location. If \arg loc is invalid it is
/// ignored.
void setLocation(SourceLocation Loc);
@@ -278,6 +299,9 @@ public:
SourceLocation ScopeLoc, QualType FnType,
llvm::Function *Fn, CGBuilderTy &Builder);
+ /// Emit debug info for a function declaration.
+ void EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, QualType FnType);
+
/// Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder);
@@ -300,7 +324,7 @@ public:
llvm::Value *storage,
CGBuilderTy &Builder,
const CGBlockInfo &blockInfo,
- llvm::Instruction *InsertPoint = 0);
+ llvm::Instruction *InsertPoint = nullptr);
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
@@ -341,6 +365,9 @@ public:
/// Emit an Objective-C interface type standalone debug info.
llvm::DIType *getOrCreateInterfaceType(QualType Ty, SourceLocation Loc);
+ /// Emit standalone debug info for a type.
+ llvm::DIType *getOrCreateStandaloneType(QualType Ty, SourceLocation Loc);
+
void completeType(const EnumDecl *ED);
void completeType(const RecordDecl *RD);
void completeRequiredType(const RecordDecl *RD);
@@ -350,17 +377,18 @@ public:
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
- /// Tag accepts custom types DW_TAG_arg_variable and DW_TAG_auto_variable,
- /// otherwise would be of type llvm::dwarf::Tag.
- void EmitDeclare(const VarDecl *decl, llvm::dwarf::Tag Tag, llvm::Value *AI,
- unsigned ArgNo, CGBuilderTy &Builder);
+ void EmitDeclare(const VarDecl *decl, llvm::Value *AI,
+ llvm::Optional<unsigned> ArgNo, CGBuilderTy &Builder);
/// Build up structure info for the byref. See \a BuildByRefType.
llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *OffSet);
- /// Get context info for the decl.
- llvm::DIScope *getContextDescriptor(const Decl *Decl);
+ /// Get context info for the DeclContext of \p Decl.
+ llvm::DIScope *getDeclContextDescriptor(const Decl *D);
+ /// Get context info for a given DeclContext \p Decl.
+ llvm::DIScope *getContextDescriptor(const Decl *Context,
+ llvm::DIScope *Default);
llvm::DIScope *getCurrentContextDescriptor(const Decl *Decl);
@@ -374,6 +402,9 @@ private:
/// Create new compile unit.
void CreateCompileUnit();
+ /// Remap a given path with the current debug prefix map
+ std::string remapDIPath(StringRef) const;
+
/// Get the file debug info descriptor for the input location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
@@ -383,21 +414,23 @@ private:
/// Get the type from the cache or create a new type if necessary.
llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
- /// Get a reference to a clang module.
+ /// Get a reference to a clang module. If \p CreateSkeletonCU is true,
+ /// this also creates a split dwarf skeleton compile unit.
llvm::DIModule *
- getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod);
+ getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU);
+
+ /// DebugTypeExtRefs: If \p D originated in a clang module, return it.
+ llvm::DIModule *getParentModuleOrNull(const Decl *D);
/// Get the type from the cache or create a new partial type if
/// necessary.
- llvm::DIType *getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile *F);
+ llvm::DICompositeType *getOrCreateLimitedType(const RecordType *Ty,
+ llvm::DIFile *F);
/// Create type metadata for a source language type.
llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg);
- /// Return the underlying ObjCInterfaceDecl if \arg Ty is an
- /// ObjCInterface or a pointer to one.
- ObjCInterfaceDecl *getObjCInterfaceDecl(QualType Ty);
-
/// Create new member and increase Offset by FType's size.
llvm::DIType *CreateMemberType(llvm::DIFile *Unit, QualType FType,
StringRef Name, uint64_t *Offset);
@@ -501,13 +534,16 @@ private:
SourceLocation TemporaryLocation);
llvm::DebugLoc OriginalLocation;
- CodeGenFunction &CGF;
+ CodeGenFunction *CGF;
public:
/// Set the location to the (valid) TemporaryLocation.
ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation);
ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E);
ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc);
+ ApplyDebugLocation(ApplyDebugLocation &&Other) : CGF(Other.CGF) {
+ Other.CGF = nullptr;
+ }
~ApplyDebugLocation();
@@ -538,20 +574,14 @@ public:
/// passing an empty SourceLocation to \a CGDebugInfo::setLocation()
/// will result in the last valid location being reused. Note that
/// all instructions that do not have a location at the beginning of
- /// a function are counted towards to funciton prologue.
+ /// a function are counted towards the function prologue.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF) {
return ApplyDebugLocation(CGF, true, SourceLocation());
}
- /// \brief Apply TemporaryLocation if it is valid. Otherwise set the IRBuilder
- /// to not attach debug locations.
- static ApplyDebugLocation
- CreateDefaultEmpty(CodeGenFunction &CGF, SourceLocation TemporaryLocation) {
- return ApplyDebugLocation(CGF, true, TemporaryLocation);
- }
};
} // namespace CodeGen
} // namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 96aa8c68e004..b78e80d79ddd 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
@@ -34,6 +35,7 @@ using namespace CodeGen;
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
+ case Decl::BuiltinTemplate:
case Decl::TranslationUnit:
case Decl::ExternCContext:
case Decl::Namespace:
@@ -142,7 +144,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
// Don't emit it now, allow it to be emitted lazily on its first use.
return;
- if (D.getStorageClass() == SC_OpenCLWorkGroupLocal)
+ if (D.getType().getAddressSpace() == LangAS::opencl_local)
return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
assert(D.hasLocalStorage());
@@ -311,6 +313,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->getThreadLocalMode(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
+ GV->setComdat(OldGV->getComdat());
// Steal the name of the old global
GV->takeName(OldGV);
@@ -339,17 +342,15 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
-
// Check to see if we already have a global variable for this
// declaration. This can happen when double-emitting function
// bodies, e.g. with complete and base constructors.
llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
+ CharUnits alignment = getContext().getDeclAlign(&D);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- DMEntry = addr;
+ setAddrOfLocalVar(&D, Address(addr, alignment));
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
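The rewritten block drops the up-front LocalDeclMap assertion but keeps the ordering that matters: the address is registered before the initializer is emitted, because a static local's initializer may refer to the variable itself. A tiny C++ case that needs this, as a sketch:

    #include <cstdio>

    void *self() {
      static void *p = &p;   // initializer refers to the variable being defined
      return p;
    }

    int main() {
      std::printf("%p\n", self());
    }

While emitting the initializer &p, codegen must already be able to resolve p to the global it just created.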
@@ -366,7 +367,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
if (D.getInit())
var = AddInitializerToStaticVarDecl(D, var);
- var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ var->setAlignment(alignment.getQuantity());
if (D.hasAttr<AnnotateAttr>())
CGM.AddGlobalAnnotations(&D, var);
@@ -384,7 +385,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// RAUW's the GV uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
- DMEntry = castedAddr;
+ if (var != castedAddr)
+ LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
@@ -399,14 +401,14 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
}
namespace {
- struct DestroyObject : EHScopeStack::Cleanup {
- DestroyObject(llvm::Value *addr, QualType type,
+ struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
: addr(addr), type(type), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
- llvm::Value *addr;
+ Address addr;
QualType type;
CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
@@ -420,15 +422,15 @@ namespace {
}
};
- struct DestroyNRVOVariable : EHScopeStack::Cleanup {
- DestroyNRVOVariable(llvm::Value *addr,
+ struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(Address addr,
const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag)
: Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
- llvm::Value *Loc;
+ Address Loc;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Along the exceptions path we always execute the dtor.
@@ -439,7 +441,8 @@ namespace {
// If we exited via NRVO, we skip the destructor call.
llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
- llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ llvm::Value *DidNRVO =
+ CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
CGF.EmitBlock(RunDtorBB);
}
@@ -453,9 +456,9 @@ namespace {
}
};
- struct CallStackRestore : EHScopeStack::Cleanup {
- llvm::Value *Stack;
- CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ struct CallStackRestore final : EHScopeStack::Cleanup {
+ Address Stack;
+ CallStackRestore(Address Stack) : Stack(Stack) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
@@ -463,7 +466,7 @@ namespace {
}
};
- struct ExtendGCLifetime : EHScopeStack::Cleanup {
+ struct ExtendGCLifetime final : EHScopeStack::Cleanup {
const VarDecl &Var;
ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
@@ -478,7 +481,7 @@ namespace {
}
};
- struct CallCleanupFunction : EHScopeStack::Cleanup {
+ struct CallCleanupFunction final : EHScopeStack::Cleanup {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
const VarDecl &Var;
@@ -492,7 +495,7 @@ namespace {
Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
- llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
+ llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();
// In some cases, the type of the function argument will be different from
// the type of the pointer. An example of this is
@@ -512,12 +515,12 @@ namespace {
};
/// A cleanup to call @llvm.lifetime.end.
- class CallLifetimeEnd : public EHScopeStack::Cleanup {
+ class CallLifetimeEnd final : public EHScopeStack::Cleanup {
llvm::Value *Addr;
llvm::Value *Size;
public:
- CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
- : Addr(addr), Size(size) {}
+ CallLifetimeEnd(Address addr, llvm::Value *size)
+ : Addr(addr.getPointer()), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitLifetimeEnd(Size, Addr);
@@ -528,7 +531,7 @@ namespace {
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
- llvm::Value *addr,
+ Address addr,
Qualifiers::ObjCLifetime lifetime) {
switch (lifetime) {
case Qualifiers::OCL_None:
@@ -595,10 +598,61 @@ static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
return isAccessedBy(*var, e);
}
+static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
+ const LValue &destLV, const Expr *init) {
+ bool needsCast = false;
+
+ while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
+ switch (castExpr->getCastKind()) {
+ // Look through casts that don't require representation changes.
+ case CK_NoOp:
+ case CK_BitCast:
+ case CK_BlockPointerToObjCPointerCast:
+ needsCast = true;
+ break;
+
+ // If we find an l-value to r-value cast from a __weak variable,
+ // emit this operation as a copy or move.
+ case CK_LValueToRValue: {
+ const Expr *srcExpr = castExpr->getSubExpr();
+ if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
+ return false;
+
+ // Emit the source l-value.
+ LValue srcLV = CGF.EmitLValue(srcExpr);
+
+ // Handle a formal type change to avoid asserting.
+ auto srcAddr = srcLV.getAddress();
+ if (needsCast) {
+ srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
+ destLV.getAddress().getElementType());
+ }
+
+ // If it was an l-value, use objc_copyWeak.
+ if (srcExpr->getValueKind() == VK_LValue) {
+ CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
+ } else {
+ assert(srcExpr->getValueKind() == VK_XValue);
+ CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
+ }
+ return true;
+ }
+
+ // Stop at anything else.
+ default:
+ return false;
+ }
+
+ init = castExpr->getSubExpr();
+ continue;
+ }
+ return false;
+}
+
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
- lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
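tryEmitARCCopyWeakInit above peels off representation-preserving casts until it reaches the l-value-to-r-value load; only if the loaded operand is __weak can the whole initialization collapse into a single objc_copyWeak or objc_moveWeak. The peeling loop is the usual strip-the-wrappers shape; here is a toy version over an invented miniature expression tree (Expr, CastExpr, WeakLoad are illustrative stand-ins, not clang's types):

    #include <iostream>
    #include <memory>

    struct Expr { virtual ~Expr() = default; };

    enum class CastKind { NoOp, BitCast, LValueToRValue, Other };

    struct CastExpr : Expr {
      CastKind Kind;
      std::unique_ptr<Expr> Sub;
      CastExpr(CastKind K, std::unique_ptr<Expr> S)
          : Kind(K), Sub(std::move(S)) {}
    };

    struct WeakLoad : Expr {};   // stands in for a load from a __weak variable

    // True if E is a chain of value-preserving casts around a weak load.
    bool isCopyableWeakInit(const Expr *E) {
      while (auto *CE = dynamic_cast<const CastExpr *>(E)) {
        switch (CE->Kind) {
        case CastKind::NoOp:
        case CastKind::BitCast:
          E = CE->Sub.get();     // look through, keep peeling
          continue;
        case CastKind::LValueToRValue:
          return dynamic_cast<const WeakLoad *>(CE->Sub.get()) != nullptr;
        default:
          return false;          // anything else defeats the optimization
        }
      }
      return false;
    }

    int main() {
      auto E = std::make_unique<CastExpr>(
          CastKind::BitCast,
          std::make_unique<CastExpr>(CastKind::LValueToRValue,
                                     std::make_unique<WeakLoad>()));
      std::cout << isCopyableWeakInit(E.get()) << '\n';   // prints 1
    }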
@@ -636,15 +690,12 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
- tempLV.setAddress(Builder.CreateStructGEP(
- nullptr, tempLV.getAddress(),
- getByRefValueLLVMField(cast<VarDecl>(D)).second));
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
+ cast<VarDecl>(D),
+ /*follow*/ false));
}
- llvm::PointerType *ty
- = cast<llvm::PointerType>(tempLV.getAddress()->getType());
- ty = cast<llvm::PointerType>(ty->getElementType());
-
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
// If __weak, we want to use a barrier under certain conditions.
@@ -674,6 +725,12 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
}
case Qualifiers::OCL_Weak: {
+ // If it's not accessed by the initializer, try to emit the
+ // initialization with a copy or move.
+ if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
+ return;
+ }
+
// No way to optimize a producing initializer into this. It's not
// worth optimizing for, because the value will immediately
// disappear in the common case.
@@ -788,7 +845,7 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- Builder.CreateStore(Init, Loc, isVolatile);
+ Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
return;
}
@@ -891,13 +948,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
emission.IsByRef = isByRef;
CharUnits alignment = getContext().getDeclAlign(&D);
- emission.Alignment = alignment;
// If the type is variably-modified, emit all the VLA sizes for it.
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
- llvm::Value *DeclPtr;
+ Address address = Address::invalid();
if (Ty->isConstantSizeType()) {
bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
@@ -923,7 +979,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
CGM.isTypeConstant(Ty, true)) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
- emission.Address = nullptr; // signal this condition to later callbacks
+ // Signal this condition to later callbacks.
+ emission.Addr = Address::invalid();
assert(emission.wasEmittedAsGlobal());
return emission;
}
@@ -934,13 +991,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// A normal fixed sized variable becomes an alloca in the entry block,
// unless it's an NRVO variable.
- llvm::Type *LTy = ConvertTypeForMem(Ty);
if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
- DeclPtr = ReturnValue;
+ address = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
@@ -948,34 +1004,46 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
- llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ Address NRVOFlag =
+ CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
// Record the NRVO flag for this variable.
- NRVOFlags[&D] = NRVOFlag;
- emission.NRVOFlag = NRVOFlag;
+ NRVOFlags[&D] = NRVOFlag.getPointer();
+ emission.NRVOFlag = NRVOFlag.getPointer();
}
}
} else {
- if (isByRef)
- LTy = BuildByRefType(&D);
+ CharUnits allocaAlignment;
+ llvm::Type *allocaTy;
+ if (isByRef) {
+ auto &byrefInfo = getBlockByrefInfo(&D);
+ allocaTy = byrefInfo.Type;
+ allocaAlignment = byrefInfo.ByrefAlignment;
+ } else {
+ allocaTy = ConvertTypeForMem(Ty);
+ allocaAlignment = alignment;
+ }
- llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getName());
+ // Create the alloca. Note that we set the name separately from
+ // building the instruction so that it's there even in no-asserts
+ // builds.
+ address = CreateTempAlloca(allocaTy, allocaAlignment);
+ address.getPointer()->setName(D.getName());
- CharUnits allocaAlignment = alignment;
- if (isByRef)
- allocaAlignment = std::max(allocaAlignment,
- getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0)));
- Alloc->setAlignment(allocaAlignment.getQuantity());
- DeclPtr = Alloc;
+ // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
+ // the catch parameter starts in the catchpad instruction, and we can't
+ // insert code in those basic blocks.
+ bool IsMSCatchParam =
+ D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
// Emit a lifetime intrinsic if meaningful. There's no point
// in doing this if we don't have a valid insertion point (?).
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy);
- if (HaveInsertPoint()) {
- emission.SizeForLifetimeMarkers = EmitLifetimeStart(size, Alloc);
+ if (HaveInsertPoint() && !IsMSCatchParam) {
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ emission.SizeForLifetimeMarkers =
+ EmitLifetimeStart(size, address.getPointer());
} else {
assert(!emission.useLifetimeMarkers());
}
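Most of the churn in this file comes from threading a pointer-plus-alignment pair (Address) through code that previously passed a bare llvm::Value*, so that loads and stores can use the declaration's real alignment rather than a default. The core of such a wrapper is small; a sketch with plain types standing in for llvm::Value* and CharUnits:

    #include <cassert>
    #include <cstdint>

    class Address {
      void *Ptr;
      uint64_t Align;   // in bytes; 0 encodes "invalid"
    public:
      Address(void *P, uint64_t A) : Ptr(P), Align(A) {
        assert(A != 0 && "a valid address must carry an alignment");
      }
      static Address invalid() { return Address(); }
      bool isValid() const { return Ptr != nullptr; }
      void *getPointer() const { assert(isValid()); return Ptr; }
      uint64_t getAlignment() const { assert(isValid()); return Align; }
    private:
      Address() : Ptr(nullptr), Align(0) {}
    };

Address::invalid() mirrors the sentinel used above to signal that a variable was emitted as a global rather than an alloca.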
@@ -985,11 +1053,11 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (!DidCallStackSave) {
// Save the stack.
- llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
+ Address Stack =
+ CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
-
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
@@ -1009,13 +1077,11 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
vla->setAlignment(alignment.getQuantity());
- DeclPtr = vla;
+ address = Address(vla, alignment);
}
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
- DMEntry = DeclPtr;
- emission.Address = DeclPtr;
+ setAddrOfLocalVar(&D, address);
+ emission.Addr = address;
// Emit debug info for local var declaration.
if (HaveInsertPoint())
@@ -1023,12 +1089,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, emission.Address);
+ EmitVarAnnotations(&D, address.getPointer());
return emission;
}
@@ -1124,15 +1190,13 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (isTrivialInitializer(Init))
return;
- CharUnits alignment = emission.Alignment;
-
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
- llvm::Value *Loc =
- capturedByInit ? emission.Address : emission.getObjectAddress(*this);
+ Address Loc =
+ capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
llvm::Constant *constant = nullptr;
if (emission.IsConstantAggregate || D.isConstexpr()) {
@@ -1141,14 +1205,14 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
if (!constant) {
- LValue lv = MakeAddrLValue(Loc, type, alignment);
+ LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
if (!emission.IsConstantAggregate) {
// For simple scalar/complex initialization, store the value directly.
- LValue lv = MakeAddrLValue(Loc, type, alignment);
+ LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
@@ -1162,7 +1226,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
getContext().getTypeSizeInChars(type).getQuantity());
llvm::Type *BP = Int8PtrTy;
- if (Loc->getType() != BP)
+ if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
// If the initializer is all or mostly zeros, codegen with memset then do
@@ -1170,11 +1234,12 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (shouldUseMemSetPlusStoresToInitialize(constant,
CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- alignment.getQuantity(), isVolatile);
+ isVolatile);
// Zero and undef don't require any stores.
if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
- emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
+ emitStoresForInitAfterMemset(constant, Loc.getPointer(),
+ isVolatile, Builder);
}
} else {
// Otherwise, create a temporary global with the initializer then
@@ -1184,15 +1249,14 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
constant, Name);
- GV->setAlignment(alignment.getQuantity());
+ GV->setAlignment(Loc.getAlignment().getQuantity());
GV->setUnnamedAddr(true);
- llvm::Value *SrcPtr = GV;
- if (SrcPtr->getType() != BP)
+ Address SrcPtr = Address(GV, Loc.getAlignment());
+ if (SrcPtr.getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
- isVolatile);
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
}
}
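The surrounding function chooses between two lowering strategies for a constant aggregate initializer: if it is all or mostly zero, memset the object and then store only the nonzero fields; otherwise emit a private global holding the constant and memcpy from it. Written out by hand, the memset-plus-stores strategy amounts to something like this (a sketch of the generated shape, not compiler code):

    #include <cstdio>
    #include <cstring>

    struct Big {
      int header;
      int zeros[62];
      int trailer;
    };

    int main() {
      Big b;
      // Mostly-zero initializer: clear everything once...
      std::memset(&b, 0, sizeof(Big));
      // ...then store just the nonzero fields.
      b.header = 1;
      b.trailer = 7;
      std::printf("%d %d %d\n", b.header, b.zeros[0], b.trailer);
    }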
@@ -1253,7 +1317,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
// Note that for __block variables, we want to destroy the
// original stack object, not the possibly forwarded object.
- llvm::Value *addr = emission.getObjectAddress(*this);
+ Address addr = emission.getObjectAddress(*this);
const VarDecl *var = emission.Variable;
QualType type = var->getType();
@@ -1271,8 +1335,8 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
- EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
- emission.NRVOFlag);
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
+ dtor, emission.NRVOFlag);
return;
}
break;
@@ -1369,7 +1433,7 @@ CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type) {
+ Address addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
assert(needsEHCleanup(dtorKind));
@@ -1379,7 +1443,7 @@ void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type) {
+ Address addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
CleanupKind cleanupKind = getCleanupKind(dtorKind);
@@ -1387,19 +1451,19 @@ void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
cleanupKind & EHCleanup);
}
-void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
destroyer, useEHCleanupForArray);
}
-void CodeGenFunction::pushStackRestore(CleanupKind Kind, llvm::Value *SPMem) {
+void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
void CodeGenFunction::pushLifetimeExtendedDestroy(
- CleanupKind cleanupKind, llvm::Value *addr, QualType type,
+ CleanupKind cleanupKind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray) {
assert(!isInConditionalBranch() &&
"performing lifetime extension from within conditional");
@@ -1429,15 +1493,18 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(
/// \param useEHCleanupForArray - whether an EH cleanup should be
/// used when destroying array elements, in case one of the
/// destructions throws an exception
-void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+void CodeGenFunction::emitDestroy(Address addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray) {
const ArrayType *arrayType = getContext().getAsArrayType(type);
if (!arrayType)
return destroyer(*this, addr, type);
- llvm::Value *begin = addr;
- llvm::Value *length = emitArrayLength(arrayType, type, begin);
+ llvm::Value *length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign =
+ addr.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
// Normally we have to check whether the array is zero-length.
bool checkZeroLength = true;
@@ -1449,8 +1516,9 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
checkZeroLength = false;
}
+ llvm::Value *begin = addr.getPointer();
llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
- emitArrayDestroy(begin, end, type, destroyer,
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer,
checkZeroLength, useEHCleanupForArray);
}
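The element alignment is computed from the array's alignment and the element size: element i lives at offset i times the element size, so the guaranteed per-element alignment is the largest power of two dividing both quantities, which is what CharUnits::alignmentOfArrayElement boils down to. A sketch of that arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Largest power of two dividing both the array's alignment and the
    // element size: every element offset is a multiple of elemSize, so
    // this is the alignment each element is guaranteed to have.
    uint64_t alignmentOfArrayElement(uint64_t arrayAlign, uint64_t elemSize) {
      uint64_t v = arrayAlign | elemSize;
      return v & (~v + 1);   // isolate the lowest set bit
    }

    int main() {
      // A 16-byte aligned array of 12-byte elements: elements are only
      // guaranteed 4-byte alignment (offsets 0, 12, 24, ...).
      std::printf("%llu\n",
                  (unsigned long long)alignmentOfArrayElement(16, 12));  // 4
    }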
@@ -1459,18 +1527,19 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
-/// \param type - the element type of the array
+/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
/// the remaining elements in case the destruction of a single
/// element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
llvm::Value *end,
- QualType type,
+ QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer,
bool checkZeroLength,
bool useEHCleanup) {
- assert(!type->isArrayType());
+ assert(!elementType->isArrayType());
// The basic structure here is a do-while loop, because we don't
// need to check for the zero-element case.
@@ -1496,10 +1565,11 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
"arraydestroy.element");
if (useEHCleanup)
- pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+ pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
+ destroyer);
// Perform the actual destruction there.
- destroyer(*this, element, type);
+ destroyer(*this, Address(element, elementAlign), elementType);
if (useEHCleanup)
PopCleanupBlock();
@@ -1517,7 +1587,7 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
- QualType type,
+ QualType type, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
@@ -1529,9 +1599,9 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
}
if (arrayDepth) {
- llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, arrayDepth+1);
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
- SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
+ SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
}
@@ -1539,7 +1609,7 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
// Destroy the array. We don't ever need an EH cleanup because we
// assume that we're in an EH cleanup ourselves, so a throwing
// destructor causes an immediate terminate.
- CGF.emitArrayDestroy(begin, end, type, destroyer,
+ CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
/*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
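For a nested array such as int a[2][3], begin and end point at the outer array type while the destroy loop wants a pointer to the scalar element, so the code drills down with one zero GEP index per nesting level, arrayDepth+1 indices in total. The same drilling in plain C++, as an illustration:

    #include <cstdio>

    int main() {
      int a[2][3] = {{1, 2, 3}, {4, 5, 6}};

      // Pointer to the inner array type, analogous to a [3 x i32]* begin.
      int (*outer)[3] = a;

      // One extra zero index per nesting level reaches the first scalar
      // element, like the GEP built from arrayDepth+1 zero indices.
      int *first = &outer[0][0];
      int *last = first + 2 * 3;   // flat one-past-the-end bound

      for (int *p = first; p != last; ++p)
        std::printf("%d ", *p);
      std::printf("\n");
    }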
@@ -1547,44 +1617,49 @@ namespace {
/// RegularPartialArrayDestroy - a cleanup which performs a partial
/// array destroy where the end pointer is regularly determined and
/// does not need to be loaded from a local.
- class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEnd;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
- QualType elementType,
+ QualType elementType, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
- ElementType(elementType), Destroyer(destroyer) {}
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
- ElementType, Destroyer);
+ ElementType, ElementAlign, Destroyer);
}
};
/// IrregularPartialArrayDestroy - a cleanup which performs a
/// partial array destroy where the end pointer is irregularly
/// determined and must be loaded from a local.
- class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
- llvm::Value *ArrayEndPointer;
+ Address ArrayEndPointer;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
- ElementType(elementType), Destroyer(destroyer) {}
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
- ElementType, Destroyer);
+ ElementType, ElementAlign, Destroyer);
}
};
}
@@ -1596,12 +1671,14 @@ namespace {
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
- elementType, destroyer);
+ elementType, elementAlign,
+ destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
@@ -1613,10 +1690,12 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer) {
pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEnd,
- elementType, destroyer);
+ elementType, elementAlign,
+ destroyer);
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
@@ -1640,7 +1719,7 @@ namespace {
/// function. This is used to balance out the incoming +1 of a
/// ns_consumed argument when we can't reasonably do that just by
/// not doing the initial retain for a __block argument.
- struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ struct ConsumeARCParameter final : EHScopeStack::Cleanup {
ConsumeARCParameter(llvm::Value *param,
ARCPreciseLifetime_t precise)
: Param(param), Precise(precise) {}
@@ -1656,56 +1735,38 @@ namespace {
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
-void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
- bool ArgIsPointer, unsigned ArgNo) {
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
+ unsigned ArgNo) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
- Arg->setName(D.getName());
+ Arg.getAnyValue()->setName(D.getName());
QualType Ty = D.getType();
// Use better IR generation for certain implicit parameters.
- if (isa<ImplicitParamDecl>(D)) {
+ if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
// The only implicit argument a block has is its literal.
+ // We assume this is always passed directly.
if (BlockInfo) {
- LocalDeclMap[&D] = Arg;
- llvm::Value *LocalAddr = nullptr;
- if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- // Allocate a stack slot to let the debug info survive the RA.
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
- D.getName() + ".addr");
- Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- LValue lv = MakeAddrLValue(Alloc, Ty, getContext().getDeclAlign(&D));
- EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
- LocalAddr = Builder.CreateLoad(Alloc);
- }
-
- if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().getDebugInfo()
- >= CodeGenOptions::LimitedDebugInfo) {
- DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, ArgNo,
- LocalAddr, Builder);
- }
- }
-
+ setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
return;
}
}
- llvm::Value *DeclPtr;
+ Address DeclPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
- CharUnits Align = getContext().getDeclAlign(&D);
// If we already have a pointer to the argument, reuse the input pointer.
- if (ArgIsPointer) {
+ if (Arg.isIndirect()) {
+ DeclPtr = Arg.getIndirectAddress();
// If we have a prettier pointer type at this point, bitcast to that.
- unsigned AS = cast<llvm::PointerType>(Arg->getType())->getAddressSpace();
+ unsigned AS = DeclPtr.getType()->getAddressSpace();
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
- DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
- D.getName());
+ if (DeclPtr.getType() != IRTy)
+ DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
@@ -1717,14 +1778,14 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
}
} else {
// Otherwise, create a temporary to hold the value.
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
- D.getName() + ".addr");
- Alloc->setAlignment(Align.getQuantity());
- DeclPtr = Alloc;
+ DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
+ D.getName() + ".addr");
DoStore = true;
}
- LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
+ llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
+
+ LValue lv = MakeAddrLValue(DeclPtr, Ty);
if (IsScalar) {
Qualifiers qs = Ty.getQualifiers();
if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
@@ -1754,26 +1815,26 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
- EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
DoStore = false;
}
else
// Don't use objc_retainBlock for block pointers, because we
// don't want to Block_copy something just because we got it
// as a parameter.
- Arg = EmitARCRetainNonBlock(Arg);
+ ArgVal = EmitARCRetainNonBlock(ArgVal);
}
} else {
// Push the cleanup for a consumed parameter.
if (isConsumed) {
ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
? ARCPreciseLifetime : ARCImpreciseLifetime);
- EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg,
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
precise);
}
if (lt == Qualifiers::OCL_Weak) {
- EmitARCInitWeak(DeclPtr, Arg);
+ EmitARCInitWeak(DeclPtr, ArgVal);
DoStore = false; // The weak init is a store, no need to do two.
}
}
@@ -1785,20 +1846,18 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Store the initial value into the alloca.
if (DoStore)
- EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+ EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
- DMEntry = DeclPtr;
+ setAddrOfLocalVar(&D, DeclPtr);
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
- DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, DeclPtr);
+ EmitVarAnnotations(&D, DeclPtr.getPointer());
}
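EmitParmDecl's old pair of llvm::Value* plus an ArgIsPointer flag becomes a ParamValue that is explicitly one of two things: a direct SSA value, or the address the argument was passed at. A sketch of such a two-state wrapper with invented stand-in types (the real one carries llvm::Value* and Address):

    #include <cassert>
    #include <variant>

    struct DirectValue { int ssaValue; };                   // llvm::Value* stand-in
    struct IndirectAddress { void *ptr; unsigned align; };  // Address stand-in

    class ParamValue {
      std::variant<DirectValue, IndirectAddress> V;
    public:
      static ParamValue forDirect(DirectValue D) { return ParamValue{D}; }
      static ParamValue forIndirect(IndirectAddress A) { return ParamValue{A}; }

      bool isIndirect() const {
        return std::holds_alternative<IndirectAddress>(V);
      }
      DirectValue getDirectValue() const {
        assert(!isIndirect() && "parameter was passed in memory");
        return std::get<DirectValue>(V);
      }
      IndirectAddress getIndirectAddress() const {
        assert(isIndirect() && "parameter was passed as a value");
        return std::get<IndirectAddress>(V);
      }
    private:
      template <class T> explicit ParamValue(T t) : V(t) {}
    };

    int main() {
      ParamValue P = ParamValue::forIndirect({nullptr, 8});
      return P.isIndirect() ? 0 : 1;
    }

The asserts encode the invariant the diff relies on: only indirect parameters have an address to reuse, and only direct ones have a value to store.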
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 00d6d5cee749..adba73168797 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -24,16 +24,13 @@ using namespace clang;
using namespace CodeGen;
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ ConstantAddress DeclPtr) {
assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
- ASTContext &Context = CGF.getContext();
-
- CharUnits alignment = Context.getDeclAlign(&D);
QualType type = D.getType();
- LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type);
const Expr *Init = D.getInit();
switch (CGF.getEvaluationKind(type)) {
@@ -64,7 +61,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *addr) {
+ ConstantAddress addr) {
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
@@ -99,7 +96,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
argument = llvm::ConstantExpr::getBitCast(
- addr, CGF.getTypes().ConvertType(type)->getPointerTo());
+ addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
@@ -162,25 +159,26 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
+ ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
(void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
- &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
+ &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
if (PerformInit)
- EmitDeclInit(*this, D, DeclPtr);
+ EmitDeclInit(*this, D, DeclAddr);
if (CGM.isTypeConstant(D.getType(), true))
EmitDeclInvariant(*this, D, DeclPtr);
else
- EmitDeclDestroy(*this, D, DeclPtr);
+ EmitDeclDestroy(*this, D, DeclAddr);
return;
}
assert(PerformInit && "cannot have constant initializer which needs "
"destruction for reference");
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init);
- EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}
/// Create a stub function, suitable for being passed to atexit,
@@ -195,13 +193,15 @@ llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
llvm::raw_svector_ostream Out(FnName);
CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
}
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
+ FI,
VD.getLocation());
CodeGenFunction CGF(CGM);
- CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn,
- CGM.getTypes().arrangeNullaryFunction(), FunctionArgList());
+ CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
@@ -249,7 +249,8 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
}
llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
- llvm::FunctionType *FTy, const Twine &Name, SourceLocation Loc, bool TLS) {
+ llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
+ SourceLocation Loc, bool TLS) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &getModule());
@@ -259,7 +260,7 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
Fn->setSection(Section);
}
- SetLLVMFunctionAttributes(nullptr, getTypes().arrangeNullaryFunction(), Fn);
+ SetInternalFunctionAttributes(nullptr, Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -317,7 +318,9 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// Create a variable initialization function.
llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(FTy, FnName.str(), D->getLocation());
+ CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
+ getTypes().arrangeNullaryFunction(),
+ D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
@@ -334,7 +337,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// FIXME: We only need to register one __cxa_thread_atexit function for the
// entire TU.
CXXThreadLocalInits.push_back(Fn);
- CXXThreadLocalInitVars.push_back(Addr);
+ CXXThreadLocalInitVars.push_back(D);
} else if (PerformInit && ISA) {
EmitPointerToInitFunc(D, Addr, Fn, ISA);
} else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
@@ -392,7 +395,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
-
+ const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
// Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
@@ -416,7 +419,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- FTy, "_GLOBAL__I_" + PrioritySuffix);
+ FTy, "_GLOBAL__I_" + PrioritySuffix, FI);
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
@@ -446,7 +449,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
}
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
+ FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -461,7 +464,9 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
- llvm::Function *Fn = CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a");
+ const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);
CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
@@ -498,7 +503,7 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> Decls,
- llvm::GlobalVariable *Guard) {
+ Address Guard) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -507,20 +512,20 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::BasicBlock *ExitBlock = nullptr;
- if (Guard) {
+ if (Guard.isValid()) {
// If we have a guard variable, check whether we've already performed
// these initializations. This happens for TLS initialization functions.
llvm::Value *GuardVal = Builder.CreateLoad(Guard);
llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
"guard.uninitialized");
- // Mark as initialized before initializing anything else. If the
- // initializers use previously-initialized thread_local vars, that's
- // probably supposed to be OK, but the standard doesn't say.
- Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
llvm::BasicBlock *InitBlock = createBasicBlock("init");
ExitBlock = createBasicBlock("exit");
Builder.CreateCondBr(Uninit, InitBlock, ExitBlock);
EmitBlock(InitBlock);
+ // Mark as initialized before initializing anything else. If the
+ // initializers use previously-initialized thread_local vars, that's
+ // probably supposed to be OK, but the standard doesn't say.
+ Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
}
RunCleanupsScope Scope(*this);
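Moving the guard store below the conditional branch means the flag is written only on the path that actually runs the initializers, instead of unconditionally on every entry. The emitted control flow corresponds roughly to this straight-line sketch (per-thread guard assumed, as for TLS init):

    extern void runInitializers();

    static unsigned char guard = 0;

    void tlsInitFunc() {
      if (guard == 0) {        // "guard.uninitialized"
        // init block: mark initialized *before* running initializers, so
        // initializers that re-enter see the variables as ready.
        guard = 1;
        runInitializers();
      }
      // exit block: nothing to do when already initialized
    }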
@@ -572,9 +577,10 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
}
/// generateDestroyHelper - Generates a helper function which, when
-/// invoked, destroys the given object.
+/// invoked, destroys the given object. The address of the object
+/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
- llvm::Constant *addr, QualType type, Destroyer *destroyer,
+ Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray, const VarDecl *VD) {
FunctionArgList args;
ImplicitParamDecl dst(getContext(), nullptr, SourceLocation(), nullptr,
@@ -585,7 +591,7 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
- FTy, "__cxx_global_array_dtor", VD->getLocation());
+ FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
CurEHLocation = VD->getLocStart();
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 7b8368ee2b32..fce2e7581962 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -1,4 +1,4 @@
-//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -81,38 +81,6 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM,
return CGM.CreateRuntimeFunction(FTy, Name);
}
-namespace {
- /// The exceptions personality for a function.
- struct EHPersonality {
- const char *PersonalityFn;
-
- // If this is non-null, this personality requires a non-standard
- // function for rethrowing an exception after a catchall cleanup.
- // This function must have prototype void(void*).
- const char *CatchallRethrowFn;
-
- static const EHPersonality &get(CodeGenModule &CGM,
- const FunctionDecl *FD);
- static const EHPersonality &get(CodeGenFunction &CGF) {
- return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl));
- }
-
- static const EHPersonality GNU_C;
- static const EHPersonality GNU_C_SJLJ;
- static const EHPersonality GNU_C_SEH;
- static const EHPersonality GNU_ObjC;
- static const EHPersonality GNUstep_ObjC;
- static const EHPersonality GNU_ObjCXX;
- static const EHPersonality NeXT_ObjC;
- static const EHPersonality GNU_CPlusPlus;
- static const EHPersonality GNU_CPlusPlus_SJLJ;
- static const EHPersonality GNU_CPlusPlus_SEH;
- static const EHPersonality MSVC_except_handler;
- static const EHPersonality MSVC_C_specific_handler;
- static const EHPersonality MSVC_CxxFrameHandler3;
- };
-}
-
const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
@@ -161,6 +129,7 @@ static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
return getCPersonality(T, L);
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
return EHPersonality::NeXT_ObjC;
case ObjCRuntime::GNUstep:
if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
@@ -192,6 +161,7 @@ static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
// function on targets using (backend-driven) SJLJ EH.
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
return EHPersonality::NeXT_ObjC;
// In the fragile ABI, just use C++ exception handling and hope
@@ -221,14 +191,16 @@ const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
const llvm::Triple &T = CGM.getTarget().getTriple();
const LangOptions &L = CGM.getLangOpts();
+ // Functions using SEH get an SEH personality.
+ if (FD && FD->usesSEHTry())
+ return getSEHPersonalityMSVC(T);
+
// Try to pick a personality function that is compatible with MSVC if we're
// not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports
// the GCC-style personality function.
if (T.isWindowsMSVCEnvironment() && !L.ObjC1) {
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
- else if (FD && FD->usesSEHTry())
- return getSEHPersonalityMSVC(T);
else
return EHPersonality::MSVC_CxxFrameHandler3;
}
@@ -243,6 +215,10 @@ const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
return getCPersonality(T, L);
}
+const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
+ return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl));
+}
+
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn =
@@ -257,6 +233,36 @@ static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
}
+/// Check whether a landingpad instruction only uses C++ features.
+static bool LandingPadHasOnlyCXXUses(llvm::LandingPadInst *LPI) {
+ for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
+ if (LPI->isCatch(I)) {
+ // Check if the catch value has the ObjC prefix.
+ if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ } else {
+ // Check if any of the filter values have the ObjC prefix.
+ llvm::Constant *CVal = cast<llvm::Constant>(Val);
+ for (llvm::User::op_iterator
+ II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
+ if (llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
/// Check whether a personality function could reasonably be swapped
/// for a C++ personality function.
static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
@@ -269,34 +275,14 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
continue;
}
- // Otherwise, it has to be a landingpad instruction.
- llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(U);
- if (!LPI) return false;
-
- for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
- // Look for something that would've been returned by the ObjC
- // runtime's GetEHType() method.
- llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
- if (LPI->isCatch(I)) {
- // Check if the catch value has the ObjC prefix.
- if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
- // ObjC EH selector entries are always global variables with
- // names starting like this.
- if (GV->getName().startswith("OBJC_EHTYPE"))
- return false;
- } else {
- // Check if any of the filter values have the ObjC prefix.
- llvm::Constant *CVal = cast<llvm::Constant>(Val);
- for (llvm::User::op_iterator
- II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
- if (llvm::GlobalVariable *GV =
- cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
- // ObjC EH selector entries are always global variables with
- // names starting like this.
- if (GV->getName().startswith("OBJC_EHTYPE"))
- return false;
- }
- }
+ // Otherwise it must be a function.
+ llvm::Function *F = dyn_cast<llvm::Function>(U);
+ if (!F) return false;
+
+ for (auto BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ if (BB->isLandingPad())
+ if (!LandingPadHasOnlyCXXUses(BB->getLandingPadInst()))
+ return false;
}
}
@@ -355,29 +341,29 @@ static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeException : EHScopeStack::Cleanup {
+ struct FreeException final : EHScopeStack::Cleanup {
llvm::Value *exn;
FreeException(llvm::Value *exn) : exn(exn) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn);
}
};
-}
+} // end anonymous namespace
// Emits an exception expression into the given location. This
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
-void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) {
+void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
- llvm::Value *typedAddr = Builder.CreateBitCast(addr, ty);
+ Address typedAddr = Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -390,19 +376,20 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) {
/*IsInit*/ true);
// Deactivate the cleanup block.
- DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
+ DeactivateCleanupBlock(cleanup,
+ cast<llvm::Instruction>(typedAddr.getPointer()));
}
-llvm::Value *CodeGenFunction::getExceptionSlot() {
+Address CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
- return ExceptionSlot;
+ return Address(ExceptionSlot, getPointerAlign());
}
-llvm::Value *CodeGenFunction::getEHSelectorSlot() {
+Address CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
- return EHSelectorSlot;
+ return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
@@ -571,22 +558,25 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
- llvm::Constant *TypeInfo = nullptr;
+ CatchTypeInfo TypeInfo{nullptr, 0};
if (CaughtType->isObjCObjectPointerType())
- TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
+ TypeInfo.RTTI = CGM.getObjCRuntime().GetEHType(CaughtType);
else
- TypeInfo =
- CGM.getAddrOfCXXCatchHandlerType(CaughtType, C->getCaughtType());
+ TypeInfo = CGM.getCXXABI().getAddrOfCXXCatchHandlerType(
+ CaughtType, C->getCaughtType());
CatchScope->setHandler(I, TypeInfo, Handler);
} else {
// No exception decl indicates '...', a catch-all.
- CatchScope->setCatchAllHandler(I, Handler);
+ CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler);
}
}
}
llvm::BasicBlock *
CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+ if (EHPersonality::get(*this).usesFuncletPads())
+ return getMSVCDispatchBlock(si);
+
// The dispatch block for the end of the scope chain is a block that
// just resumes unwinding.
if (si == EHStack.stable_end())
@@ -623,12 +613,58 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
case EHScope::Terminate:
dispatchBlock = getTerminateHandler();
break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
}
scope.setCachedEHDispatchBlock(dispatchBlock);
}
return dispatchBlock;
}
+llvm::BasicBlock *
+CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
+ // Returning nullptr indicates that the previous dispatch block should unwind
+ // to caller.
+ if (SI == EHStack.stable_end())
+ return nullptr;
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &EHS = *EHStack.find(SI);
+
+ llvm::BasicBlock *DispatchBlock = EHS.getCachedEHDispatchBlock();
+ if (DispatchBlock)
+ return DispatchBlock;
+
+ if (EHS.getKind() == EHScope::Terminate)
+ DispatchBlock = getTerminateHandler();
+ else
+ DispatchBlock = createBasicBlock();
+ CGBuilderTy Builder(*this, DispatchBlock);
+
+ switch (EHS.getKind()) {
+ case EHScope::Catch:
+ DispatchBlock->setName("catch.dispatch");
+ break;
+
+ case EHScope::Cleanup:
+ DispatchBlock->setName("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ llvm_unreachable("exception specifications not handled yet!");
+
+ case EHScope::Terminate:
+ DispatchBlock->setName("terminate");
+ break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd dispatch block missing!");
+ }
+ EHS.setCachedEHDispatchBlock(DispatchBlock);
+ return DispatchBlock;
+}
+
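With a funclet-style personality there is no landing pad to emit lazily; each EH scope instead gets a dispatch block that is created once, named after the scope kind, and cached, with nullptr standing for unwind-to-caller at the end of the scope stack. The memoization shape, reduced to a standalone sketch with invented Scope and Block types:

    #include <string>
    #include <unordered_map>

    struct Scope { enum Kind { Catch, Cleanup, Terminate } K; };
    struct Block { std::string Name; };

    // Node-based map: Block addresses stay stable across insertions.
    std::unordered_map<const Scope *, Block> DispatchCache;

    // A null scope means the end of the stack: unwind to the caller.
    Block *getDispatchBlock(const Scope *S) {
      if (!S)
        return nullptr;
      auto It = DispatchCache.find(S);
      if (It != DispatchCache.end())
        return &It->second;          // created once, reused by later edges

      Block &B = DispatchCache[S];
      switch (S->K) {
      case Scope::Catch:     B.Name = "catch.dispatch"; break;
      case Scope::Cleanup:   B.Name = "ehcleanup";      break;
      case Scope::Terminate: B.Name = "terminate";      break;
      }
      return &B;
    }

    int main() {
      Scope S{Scope::Cleanup};
      return getDispatchBlock(&S) == getDispatchBlock(&S) ? 0 : 1;
    }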
/// Check whether this is a non-EH scope, i.e. a scope which doesn't
/// affect exception handling. Currently, the only non-EH scopes are
/// normal-only cleanup scopes.
@@ -639,6 +675,7 @@ static bool isNonEHScope(const EHScope &S) {
case EHScope::Filter:
case EHScope::Catch:
case EHScope::Terminate:
+ case EHScope::PadEnd:
return false;
}
@@ -664,8 +701,19 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
if (LP) return LP;
- // Build the landing pad for this scope.
- LP = EmitLandingPad();
+ const EHPersonality &Personality = EHPersonality::get(*this);
+
+ if (!CurFn->hasPersonalityFn())
+ CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
+
+ if (Personality.usesFuncletPads()) {
+ // We don't need separate landing pads in the funclet model.
+ LP = getEHDispatchBlock(EHStack.getInnermostEHScope());
+ } else {
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ }
+
assert(LP);
// Cache the landing pad on the innermost scope. If this is a
@@ -686,6 +734,9 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Terminate:
return getTerminateLandingPad();
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
+
case EHScope::Catch:
case EHScope::Cleanup:
case EHScope::Filter:
@@ -697,11 +748,6 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, CurEHLocation);
- const EHPersonality &personality = EHPersonality::get(*this);
-
- if (!CurFn->hasPersonalityFn())
- CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, personality));
-
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
EmitBlock(lpad);
@@ -756,23 +802,28 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Catch:
break;
+
+ case EHScope::PadEnd:
+ llvm_unreachable("PadEnd unnecessary for Itanium!");
}
EHCatchScope &catchScope = cast<EHCatchScope>(*I);
for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
EHCatchScope::Handler handler = catchScope.getHandler(hi);
+ assert(handler.Type.Flags == 0 &&
+ "landingpads do not support catch handler flags");
// If this is a catch-all, register that and abort.
- if (!handler.Type) {
+ if (!handler.Type.RTTI) {
assert(!hasCatchAll);
hasCatchAll = true;
goto done;
}
// Check whether we already have a handler for this type.
- if (catchTypes.insert(handler.Type).second)
+ if (catchTypes.insert(handler.Type.RTTI).second)
// If not, add it directly to the landingpad.
- LPadInst->addClause(handler.Type);
+ LPadInst->addClause(handler.Type.RTTI);
}
}
@@ -820,10 +871,53 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
return lpad;
}
+static void emitCatchPadBlock(CodeGenFunction &CGF, EHCatchScope &CatchScope) {
+ llvm::BasicBlock *DispatchBlock = CatchScope.getCachedEHDispatchBlock();
+ assert(DispatchBlock);
+
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(DispatchBlock);
+
+ llvm::Value *ParentPad = CGF.CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGF.getLLVMContext());
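+ // A 'none' token marks this catchswitch as top-level, i.e. not nested
+ // inside another funclet.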
+ llvm::BasicBlock *UnwindBB =
+ CGF.getEHDispatchBlock(CatchScope.getEnclosingEHScope());
+
+ unsigned NumHandlers = CatchScope.getNumHandlers();
+ llvm::CatchSwitchInst *CatchSwitch =
+ CGF.Builder.CreateCatchSwitch(ParentPad, UnwindBB, NumHandlers);
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned I = 0; I < NumHandlers; ++I) {
+ const EHCatchScope::Handler &Handler = CatchScope.getHandler(I);
+
+ CatchTypeInfo TypeInfo = Handler.Type;
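+ // Catch-all handlers carry no RTTI; encode them as a null type descriptor.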
+ if (!TypeInfo.RTTI)
+ TypeInfo.RTTI = llvm::Constant::getNullValue(CGF.VoidPtrTy);
+
+ CGF.Builder.SetInsertPoint(Handler.Block);
+
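+ // The MSVC C++ personality expects three catchpad operands: the type
+ // descriptor, the handler flags, and an exception-object slot (null here);
+ // other funclet personalities take just the type info.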
+ if (EHPersonality::get(CGF).isMSVCXXPersonality()) {
+ CGF.Builder.CreateCatchPad(
+ CatchSwitch, {TypeInfo.RTTI, CGF.Builder.getInt32(TypeInfo.Flags),
+ llvm::Constant::getNullValue(CGF.VoidPtrTy)});
+ } else {
+ CGF.Builder.CreateCatchPad(CatchSwitch, {TypeInfo.RTTI});
+ }
+
+ CatchSwitch->addHandler(Handler.Block);
+ }
+ CGF.Builder.restoreIP(SavedIP);
+}
+
/// Emit the structure of the dispatch block for the given catch scope.
/// It is an invariant that the dispatch block already exists.
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
EHCatchScope &catchScope) {
+ if (EHPersonality::get(CGF).usesFuncletPads())
+ return emitCatchPadBlock(CGF, catchScope);
+
llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
assert(dispatchBlock);
@@ -850,7 +944,9 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
assert(i < e && "ran off end of handlers!");
const EHCatchScope::Handler &handler = catchScope.getHandler(i);
- llvm::Value *typeValue = handler.Type;
+ llvm::Value *typeValue = handler.Type.RTTI;
+ assert(handler.Type.Flags == 0 &&
+ "landingpads do not support catch handler flags");
assert(typeValue && "fell into catch-all case!");
typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
@@ -919,9 +1015,8 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Copy the handler blocks off before we pop the EH stack. Emitting
// the handlers might scribble on this memory.
- SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
- memcpy(Handlers.data(), CatchScope.begin(),
- NumHandlers * sizeof(EHCatchScope::Handler));
+ SmallVector<EHCatchScope::Handler, 8> Handlers(
+ CatchScope.begin(), CatchScope.begin() + NumHandlers);
EHStack.popCatch();
@@ -958,6 +1053,8 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
RunCleanupsScope CatchScope(*this);
// Initialize the catch variable and set up the cleanups.
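+ // emitBeginCatch may install the handler's catchpad as CurrentFuncletPad;
+ // save it here so it is restored once this handler has been emitted.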
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
+ CurrentFuncletPad);
CGM.getCXXABI().emitBeginCatch(*this, C);
// Emit the PGO counter increment.
@@ -994,7 +1091,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
}
namespace {
- struct CallEndCatchForFinally : EHScopeStack::Cleanup {
+ struct CallEndCatchForFinally final : EHScopeStack::Cleanup {
llvm::Value *ForEHVar;
llvm::Value *EndCatchFn;
CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
@@ -1006,7 +1103,7 @@ namespace {
CGF.createBasicBlock("finally.cleanup.cont");
llvm::Value *ShouldEndCatch =
- CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.endcatch");
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
CGF.EmitBlock(EndCatchBB);
CGF.EmitRuntimeCallOrInvoke(EndCatchFn); // catch-all, so might throw
@@ -1014,7 +1111,7 @@ namespace {
}
};
- struct PerformFinally : EHScopeStack::Cleanup {
+ struct PerformFinally final : EHScopeStack::Cleanup {
const Stmt *Body;
llvm::Value *ForEHVar;
llvm::Value *EndCatchFn;
@@ -1049,13 +1146,13 @@ namespace {
llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
llvm::Value *ShouldRethrow =
- CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.shouldthrow");
CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
CGF.EmitRuntimeCallOrInvoke(RethrowFn,
- CGF.Builder.CreateLoad(SavedExnVar));
+ CGF.Builder.CreateAlignedLoad(SavedExnVar, CGF.getPointerAlign()));
} else {
CGF.EmitRuntimeCallOrInvoke(RethrowFn);
}
@@ -1082,7 +1179,7 @@ namespace {
CGF.EnsureInsertPoint();
}
};
-}
+} // end anonymous namespace
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
@@ -1130,7 +1227,7 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
// Whether the finally block is being executed for EH purposes.
ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
- CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
+ CGF.Builder.CreateFlagStore(false, ForEHVar);
// Enter a normal cleanup which will perform the @finally block.
CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
@@ -1168,11 +1265,11 @@ void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
// If we need to remember the exception pointer to rethrow later, do so.
if (SavedExnVar) {
if (!exn) exn = CGF.getExceptionFromSlot();
- CGF.Builder.CreateStore(exn, SavedExnVar);
+ CGF.Builder.CreateAlignedStore(exn, SavedExnVar, CGF.getPointerAlign());
}
// Tell the cleanups in the finally block that we're doing this for EH.
- CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
+ CGF.Builder.CreateFlagStore(true, ForEHVar);
// Thread a jump through the finally cleanup.
CGF.EmitBranchThroughCleanup(RethrowDest);
@@ -1204,7 +1301,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), 0);
LPadInst->addClause(getCatchAllValue(*this));
- llvm::Value *Exn = 0;
+ llvm::Value *Exn = nullptr;
if (getLangOpts().CPlusPlus)
Exn = Builder.CreateExtractValue(LPadInst, 0);
llvm::CallInst *terminateCall =
@@ -1228,9 +1325,16 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
// end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
Builder.SetInsertPoint(TerminateHandler);
- llvm::Value *Exn = 0;
- if (getLangOpts().CPlusPlus)
- Exn = getExceptionFromSlot();
+ llvm::Value *Exn = nullptr;
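+ // Funclet personalities require the handler to start with a pad
+ // instruction, and no exception slot is available, so Exn stays null.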
+ if (EHPersonality::get(*this).usesFuncletPads()) {
+ llvm::Value *ParentPad = CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
+ Builder.CreateCleanupPad(ParentPad);
+ } else {
+ if (getLangOpts().CPlusPlus)
+ Exn = getExceptionFromSlot();
+ }
llvm::CallInst *terminateCall =
CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
terminateCall->setDoesNotReturn();
@@ -1297,7 +1401,7 @@ void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
}
namespace {
-struct PerformSEHFinally : EHScopeStack::Cleanup {
+struct PerformSEHFinally final : EHScopeStack::Cleanup {
llvm::Function *OutlinedFinally;
PerformSEHFinally(llvm::Function *OutlinedFinally)
: OutlinedFinally(OutlinedFinally) {}
@@ -1328,21 +1432,21 @@ struct PerformSEHFinally : EHScopeStack::Cleanup {
CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args);
}
};
-}
+} // end anonymous namespace
namespace {
/// Find all local variable captures in the statement.
struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
CodeGenFunction &ParentCGF;
const VarDecl *ParentThis;
- SmallVector<const VarDecl *, 4> Captures;
- llvm::Value *SEHCodeSlot = nullptr;
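+ // A SmallSetVector deduplicates repeated captures while preserving
+ // discovery order.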
+ llvm::SmallSetVector<const VarDecl *, 4> Captures;
+ Address SEHCodeSlot = Address::invalid();
CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis)
: ParentCGF(ParentCGF), ParentThis(ParentThis) {}
// Return true if we need to do any capturing work.
bool foundCaptures() {
- return !Captures.empty() || SEHCodeSlot;
+ return !Captures.empty() || SEHCodeSlot.isValid();
}
void Visit(const Stmt *S) {
@@ -1356,17 +1460,17 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
void VisitDeclRefExpr(const DeclRefExpr *E) {
// If this is already a capture, just make sure we capture 'this'.
if (E->refersToEnclosingVariableOrCapture()) {
- Captures.push_back(ParentThis);
+ Captures.insert(ParentThis);
return;
}
const auto *D = dyn_cast<VarDecl>(E->getDecl());
if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage())
- Captures.push_back(D);
+ Captures.insert(D);
}
void VisitCXXThisExpr(const CXXThisExpr *E) {
- Captures.push_back(ParentThis);
+ Captures.insert(ParentThis);
}
void VisitCallExpr(const CallExpr *E) {
@@ -1381,19 +1485,20 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
// This is the simple case where we are the outermost finally. All we
// have to do here is make sure we escape this and recover it in the
// outlined handler.
- if (!SEHCodeSlot)
+ if (!SEHCodeSlot.isValid())
SEHCodeSlot = ParentCGF.SEHCodeSlotStack.back();
break;
}
}
};
-}
+} // end anonymous namespace
-llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
- CodeGenFunction &ParentCGF, llvm::Value *ParentVar, llvm::Value *ParentFP) {
+Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
+ Address ParentVar,
+ llvm::Value *ParentFP) {
llvm::CallInst *RecoverCall = nullptr;
- CGBuilderTy Builder(AllocaInsertPt);
- if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar)) {
+ CGBuilderTy Builder(*this, AllocaInsertPt);
+ if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
// Mark the variable escaped if nobody else referenced it and compute the
// localescape index.
auto InsertPair = ParentCGF.EscapedLocals.insert(
@@ -1413,7 +1518,7 @@ llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
// Just clone the existing localrecover call, but tweak the FP argument to
// use our FP value. All other arguments are constants.
auto *ParentRecover =
- cast<llvm::IntrinsicInst>(ParentVar->stripPointerCasts());
+ cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
"expected alloca or localrecover in parent LocalDeclMap");
RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
@@ -1423,9 +1528,9 @@ llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
// Bitcast the variable, rename it, and insert it in the local decl map.
llvm::Value *ChildVar =
- Builder.CreateBitCast(RecoverCall, ParentVar->getType());
- ChildVar->setName(ParentVar->getName());
- return ChildVar;
+ Builder.CreateBitCast(RecoverCall, ParentVar.getType());
+ ChildVar->setName(ParentVar.getName());
+ return Address(ChildVar, ParentVar.getAlignment());
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
@@ -1444,27 +1549,32 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
return;
}
- llvm::Value *EntryEBP = nullptr;
- llvm::Value *ParentFP;
+ llvm::Value *EntryFP = nullptr;
+ CGBuilderTy Builder(CGM, AllocaInsertPt);
if (IsFilter && CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
// 32-bit SEH filters need to be careful about FP recovery. The end of the
// EH registration is passed in as the EBP physical register. We can
- // recover that with llvm.frameaddress(1), and adjust that to recover the
- // parent's true frame pointer.
- CGBuilderTy Builder(AllocaInsertPt);
- EntryEBP = Builder.CreateCall(
+ // recover that with llvm.frameaddress(1).
+ EntryFP = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::frameaddress), {Builder.getInt32(1)});
- llvm::Function *RecoverFPIntrin =
- CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
- llvm::Constant *ParentI8Fn =
- llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
- ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryEBP});
} else {
// Otherwise, for x64 and 32-bit finally functions, the parent FP is the
// second parameter.
auto AI = CurFn->arg_begin();
++AI;
- ParentFP = AI;
+ EntryFP = &*AI;
+ }
+
+ llvm::Value *ParentFP = EntryFP;
+ if (IsFilter) {
+ // Given whatever FP the runtime provided us in EntryFP, recover the true
+ // frame pointer of the parent function. We only need to do this in filters,
+ // since finally funclets recover the parent FP for us.
+ llvm::Function *RecoverFPIntrin =
+ CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
+ llvm::Constant *ParentI8Fn =
+ llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
+ ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
}
// Create llvm.localrecover calls for all captures.
@@ -1486,19 +1596,19 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
auto I = ParentCGF.LocalDeclMap.find(VD);
if (I == ParentCGF.LocalDeclMap.end())
continue;
- llvm::Value *ParentVar = I->second;
- LocalDeclMap[VD] =
- recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP);
+ Address ParentVar = I->second;
+ setAddrOfLocalVar(
+ VD, recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP));
}
- if (Finder.SEHCodeSlot) {
+ if (Finder.SEHCodeSlot.isValid()) {
SEHCodeSlotStack.push_back(
recoverAddrOfEscapedLocal(ParentCGF, Finder.SEHCodeSlot, ParentFP));
}
if (IsFilter)
- EmitSEHExceptionCodeSave(ParentCGF, ParentFP, EntryEBP);
+ EmitSEHExceptionCodeSave(ParentCGF, ParentFP, EntryFP);
}
/// Arrange a function prototype that can be called by Windows exception
@@ -1614,13 +1724,12 @@ CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Value *ParentFP,
- llvm::Value *EntryEBP) {
+ llvm::Value *EntryFP) {
// Get the pointer to the EXCEPTION_POINTERS struct. This is returned by the
// __exception_info intrinsic.
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
// On Win64, the info is passed as the first parameter to the filter.
- auto AI = CurFn->arg_begin();
- SEHInfo = AI;
+ SEHInfo = &*CurFn->arg_begin();
SEHCodeSlotStack.push_back(
CreateMemTemp(getContext().IntTy, "__exception_code"));
} else {
@@ -1628,9 +1737,9 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// exception registration object. It contains 6 32-bit fields, and the info
// pointer is stored in the second field. So, GEP 20 bytes backwards and
// load the pointer.
- SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryEBP, -20);
+ SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryFP, -20);
SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
- SEHInfo = Builder.CreateLoad(Int8PtrTy, SEHInfo);
+ SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
}
@@ -1646,8 +1755,8 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy, nullptr);
llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
- Rec = Builder.CreateLoad(Rec);
- llvm::Value *Code = Builder.CreateLoad(Rec);
+ Rec = Builder.CreateAlignedLoad(Rec, getPointerAlign());
+ llvm::Value *Code = Builder.CreateAlignedLoad(Rec, getIntAlign());
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
@@ -1663,7 +1772,7 @@ llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() {
llvm::Value *CodeGenFunction::EmitSEHExceptionCode() {
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
- return Builder.CreateLoad(Int32Ty, SEHCodeSlotStack.back());
+ return Builder.CreateLoad(SEHCodeSlotStack.back());
}
llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
@@ -1709,7 +1818,7 @@ void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
HelperCGF.GenerateSEHFilterFunction(*this, *Except);
llvm::Constant *OpaqueFunc =
llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy);
- CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except"));
+ CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except.ret"));
}
void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
@@ -1745,16 +1854,24 @@ void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
emitCatchDispatchBlock(*this, CatchScope);
// Grab the block before we pop the handler.
- llvm::BasicBlock *ExceptBB = CatchScope.getHandler(0).Block;
+ llvm::BasicBlock *CatchPadBB = CatchScope.getHandler(0).Block;
EHStack.popCatch();
- EmitBlockAfterUses(ExceptBB);
+ EmitBlockAfterUses(CatchPadBB);
+
+ // __except blocks don't get outlined into funclets, so immediately do a
+ // catchret.
+ llvm::CatchPadInst *CPI =
+ cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
+ llvm::BasicBlock *ExceptBB = createBasicBlock("__except");
+ Builder.CreateCatchRet(CPI, ExceptBB);
+ EmitBlock(ExceptBB);
- // On Win64, the exception pointer is the exception code. Copy it to the slot.
+ // On Win64, the exception code is returned in EAX. Copy it into the slot.
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
- llvm::Value *Code =
- Builder.CreatePtrToInt(getExceptionFromSlot(), IntPtrTy);
- Code = Builder.CreateTrunc(Code, Int32Ty);
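+ // llvm.eh.exceptioncode takes the catchpad token and yields the 32-bit
+ // exception code recorded for that pad.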
+ llvm::Function *SEHCodeIntrin =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exceptioncode);
+ llvm::Value *Code = Builder.CreateCall(SEHCodeIntrin, {CPI});
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 9c3dfe52716b..dabd2b1528bb 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -54,6 +54,15 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
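+/// This overload also applies the requested alignment to the alloca and
+/// returns it wrapped in an Address.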
+Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name) {
+ auto Alloca = CreateTempAlloca(Ty, Name);
+ Alloca->setAlignment(Align.getQuantity());
+ return Address(Alloca, Align);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name) {
if (!Builder.isNamePreserving())
@@ -61,29 +70,38 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
}
-void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
- llvm::Value *Init) {
- auto *Store = new llvm::StoreInst(Init, Var);
+/// CreateDefaultAlignTempAlloca - This creates an alloca with the
+/// default alignment of the corresponding LLVM type, which is *not*
+/// guaranteed to be related in any way to the expected alignment of
+/// an AST type that might have been lowered to Ty.
+Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ CharUnits Align =
+ CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
+ return CreateTempAlloca(Ty, Align, Name);
+}
+
+void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
+ assert(isa<llvm::AllocaInst>(Var.getPointer()));
+ auto *Store = new llvm::StoreInst(Init, Var.getPointer());
+ Store->setAlignment(Var.getAlignment().getQuantity());
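+ // Place the store right after the allocas in the entry block so the
+ // initialization dominates every use.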
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
- Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+ Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}
-llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
- const Twine &Name) {
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
- // FIXME: Should we prefer the preferred type alignment here?
+Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
- Alloc->setAlignment(Align.getQuantity());
- return Alloc;
+ return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
-llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
- const Twine &Name) {
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name) {
// FIXME: Should we prefer the preferred type alignment here?
- CharUnits Align = getContext().getTypeAlignInChars(Ty);
- Alloc->setAlignment(Align.getQuantity());
- return Alloc;
+ return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name);
+}
+
+Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+ const Twine &Name) {
+ return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
@@ -96,10 +114,12 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
}
QualType BoolTy = getContext().BoolTy;
+ SourceLocation Loc = E->getExprLoc();
if (!E->getType()->isAnyComplexType())
- return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
- return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
+ Loc);
}
/// EmitIgnoredExpr - Emit code to compute the specified expression,
@@ -146,20 +166,18 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
- llvm::Value *Location,
+ Address Location,
Qualifiers Quals,
bool IsInit) {
// FIXME: This function should take an LValue as an argument.
switch (getEvaluationKind(E->getType())) {
case TEK_Complex:
- EmitComplexExprIntoLValue(E,
- MakeNaturalAlignAddrLValue(Location, E->getType()),
+ EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
/*isInit*/ false);
return;
case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit)));
@@ -178,17 +196,14 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
- const Expr *E, llvm::Value *ReferenceTemporary) {
+ const Expr *E, Address ReferenceTemporary) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
//
// FIXME: This should be looking at E, not M.
- if (CGF.getLangOpts().ObjCAutoRefCount &&
- M->getType()->isObjCLifetimeType()) {
- QualType ObjCARCReferenceLifetimeType = M->getType();
- switch (Qualifiers::ObjCLifetime Lifetime =
- ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ if (auto Lifetime = M->getType().getObjCLifetime()) {
+ switch (Lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
// Carry on to normal cleanup handling.
@@ -229,11 +244,11 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
}
if (Duration == SD_FullExpression)
CGF.pushDestroy(CleanupKind, ReferenceTemporary,
- ObjCARCReferenceLifetimeType, *Destroy,
+ M->getType(), *Destroy,
CleanupKind & EHCleanup);
else
CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
- ObjCARCReferenceLifetimeType,
+ M->getType(),
*Destroy, CleanupKind & EHCleanup);
return;
@@ -264,14 +279,14 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
llvm::Constant *CleanupArg;
if (E->getType()->isArrayType()) {
CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
- cast<llvm::Constant>(ReferenceTemporary), E->getType(),
+ ReferenceTemporary, E->getType(),
CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
StructorType::Complete);
- CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
}
CGF.CGM.getCXXABI().registerGlobalDtor(
CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
@@ -296,7 +311,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
}
}
-static llvm::Value *
+static Address
createReferenceTemporary(CodeGenFunction &CGF,
const MaterializeTemporaryExpr *M, const Expr *Inner) {
switch (M->getStorageDuration()) {
@@ -314,10 +329,10 @@ createReferenceTemporary(CodeGenFunction &CGF,
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp");
- GV->setAlignment(
- CGF.getContext().getTypeAlignInChars(Ty).getQuantity());
+ CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
+ GV->setAlignment(alignment.getQuantity());
// FIXME: Should we put the new global into a COMDAT?
- return GV;
+ return Address(GV, alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp");
}
@@ -337,20 +352,22 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
// FIXME: ideally this would use EmitAnyExprToMem; however, we cannot do so,
// as that would cause the lifetime adjustment to be lost for ARC.
- if (getLangOpts().ObjCAutoRefCount &&
- M->getType()->isObjCLifetimeType() &&
- M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
- M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
- llvm::Value *Object = createReferenceTemporary(*this, M, E);
- if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
- Object = llvm::ConstantExpr::getBitCast(
- Var, ConvertTypeForMem(E->getType())->getPointerTo());
+ auto ownership = M->getType().getObjCLifetime();
+ if (ownership != Qualifiers::OCL_None &&
+ ownership != Qualifiers::OCL_ExplicitNone) {
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(Var,
+ ConvertTypeForMem(E->getType())
+ ->getPointerTo(Object.getAddressSpace())),
+ Object.getAlignment());
// We should not have emitted the initializer for this temporary as a
// constant.
assert(!Var->hasInitializer());
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
- LValue RefTempDst = MakeAddrLValue(Object, M->getType());
+ LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
+ AlignmentSource::Decl);
switch (getEvaluationKind(E->getType())) {
default: llvm_unreachable("expected scalar or aggregate expression");
@@ -358,8 +375,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
break;
case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment,
+ EmitAggExpr(E, AggValueSlot::forAddr(Object,
E->getType().getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -387,10 +403,11 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
}
// Create and initialize the reference temporary.
- llvm::Value *Object = createReferenceTemporary(*this, M, E);
- if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
- Object = llvm::ConstantExpr::getBitCast(
- Var, ConvertTypeForMem(E->getType())->getPointerTo());
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(
+ Var, ConvertTypeForMem(E->getType())->getPointerTo()),
+ Object.getAlignment());
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
// initialized it.
@@ -418,7 +435,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
break;
case SubobjectAdjustment::FieldAdjustment: {
- LValue LV = MakeAddrLValue(Object, E->getType());
+ LValue LV = MakeAddrLValue(Object, E->getType(),
+ AlignmentSource::Decl);
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
@@ -428,14 +446,14 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
case SubobjectAdjustment::MemberPointerAdjustment: {
llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
- Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
- *this, E, Object, Ptr, Adjustment.Ptr.MPT);
+ Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
+ Adjustment.Ptr.MPT);
break;
}
}
}
- return MakeAddrLValue(Object, M->getType());
+ return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}
RValue
@@ -443,7 +461,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
// Emit the expression as an lvalue.
LValue LV = EmitLValue(E);
assert(LV.isSimple());
- llvm::Value *Value = LV.getAddress();
+ llvm::Value *Value = LV.getPointer();
if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
// C++11 [dcl.ref]p5 (as amended by core issue 453):
@@ -487,7 +505,7 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
- llvm::Value *Address, QualType Ty,
+ llvm::Value *Ptr, QualType Ty,
CharUnits Alignment, bool SkipNullCheck) {
if (!sanitizePerformTypeCheck())
return;
@@ -495,7 +513,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Don't check pointers outside the default address space. The null check
// isn't correct, the object-size check isn't supported by LLVM, and we can't
// communicate the addresses to the runtime handler for the vptr check.
- if (Address->getType()->getPointerAddressSpace())
+ if (Ptr->getType()->getPointerAddressSpace())
return;
SanitizerScope SanScope(this);
@@ -508,8 +526,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
!SkipNullCheck) {
// The glvalue must not be an empty glvalue.
- llvm::Value *IsNonNull = Builder.CreateICmpNE(
- Address, llvm::Constant::getNullValue(Address->getType()));
+ llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
if (AllowNullPointers) {
// When performing pointer casts, it's OK if the value is null.
@@ -533,7 +550,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
llvm::Value *Min = Builder.getFalse();
- llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
+ llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
llvm::Value *LargeEnough =
Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}),
llvm::ConstantInt::get(IntPtrTy, Size));
@@ -550,7 +567,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// The glvalue must be suitably aligned.
if (AlignVal) {
llvm::Value *Align =
- Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
+ Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
@@ -565,7 +582,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Checks, "type_mismatch", StaticData, Address);
+ EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -600,7 +617,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
- llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
+ Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
@@ -617,7 +634,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
CacheSize-1));
llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
llvm::Value *CacheVal =
- Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
+ getPointerAlign());
// If the hash isn't in the cache, call a runtime handler to perform the
// hard work of checking whether the vptr is for an object of the right
@@ -630,7 +648,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- llvm::Value *DynamicData[] = { Address, Hash };
+ llvm::Value *DynamicData[] = { Ptr, Hash };
EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
"dynamic_type_cache_miss", StaticData, DynamicData);
}
@@ -758,10 +776,104 @@ EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
return isPre ? IncVal : InVal;
}
+void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
+ CodeGenFunction *CGF) {
+ // Bind VLAs in the cast type.
+ if (CGF && E->getType()->isVariablyModifiedType())
+ CGF->EmitVariablyModifiedType(E->getType());
+
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitExplicitCastType(E->getType());
+}
+
//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//
+/// EmitPointerWithAlignment - Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
+ AlignmentSource *Source) {
+ // We allow this with ObjC object pointers because of fragile ABIs.
+ assert(E->getType()->isPointerType() ||
+ E->getType()->isObjCObjectPointerType());
+ E = E->IgnoreParens();
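+ // Peel casts and unary & below to refine the bound; anything unhandled
+ // falls through to the type-based alignment at the end.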
+
+ // Casts:
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
+ CGM.EmitExplicitCastExprType(ECE, this);
+
+ switch (CE->getCastKind()) {
+ // Non-converting casts (but not C's implicit conversion from void*).
+ case CK_BitCast:
+ case CK_NoOp:
+ if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
+ if (PtrTy->getPointeeType()->isVoidType())
+ break;
+
+ AlignmentSource InnerSource;
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), &InnerSource);
+ if (Source) *Source = InnerSource;
+
+ // If this is an explicit bitcast, and the source l-value is
+ // opaque, honor the alignment of the casted-to type.
+ if (isa<ExplicitCastExpr>(CE) &&
+ InnerSource != AlignmentSource::Decl) {
+ Addr = Address(Addr.getPointer(),
+ getNaturalPointeeTypeAlignment(E->getType(), Source));
+ }
+
+ if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
+ if (auto PT = E->getType()->getAs<PointerType>())
+ EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getLocStart());
+ }
+
+ return Builder.CreateBitCast(Addr, ConvertType(E->getType()));
+ }
+ break;
+
+ // Array-to-pointer decay.
+ case CK_ArrayToPointerDecay:
+ return EmitArrayToPointerDecay(CE->getSubExpr(), Source);
+
+ // Derived-to-base conversions.
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), Source);
+ auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
+ return GetAddressOfBaseClass(Addr, Derived,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE),
+ CE->getExprLoc());
+ }
+
+ // TODO: Is there any reason to treat base-to-derived conversions
+ // specially?
+ default:
+ break;
+ }
+ }
+
+ // Unary &.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ LValue LV = EmitLValue(UO->getSubExpr());
+ if (Source) *Source = LV.getAlignmentSource();
+ return LV.getAddress();
+ }
+ }
+
+ // TODO: conditional operators, comma.
+
+ // Otherwise, use the alignment of the type.
+ CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), Source);
+ return Address(EmitScalarExpr(E), Align);
+}
+
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
if (Ty->isVoidType())
return RValue::get(nullptr);
@@ -778,7 +890,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
// identifiable address. Just because the contents of the value are undefined
// doesn't mean that the address can't be taken and compared.
case TEK_Aggregate: {
- llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
return RValue::getAggregate(DestPtr);
}
@@ -798,7 +910,8 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
+ return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ E->getType());
}
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
@@ -808,7 +921,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
else
LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
+ EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
E->getType(), LV.getAlignment());
return LV;
}
@@ -909,6 +1022,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitUnaryOpLValue(cast<UnaryOperator>(E));
case Expr::ArraySubscriptExprClass:
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::OMPArraySectionExprClass:
+ return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
case Expr::MemberExprClass:
@@ -1055,9 +1170,10 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment().getQuantity(),
- lvalue.getType(), Loc, lvalue.getTBAAInfo(),
- lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
+ lvalue.getType(), Loc, lvalue.getAlignmentSource(),
+ lvalue.getTBAAInfo(),
+ lvalue.getTBAABaseType(), lvalue.getTBAAOffset(),
+ lvalue.isNontemporal());
}
static bool hasBooleanRepresentation(QualType Ty) {
@@ -1117,68 +1233,56 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
return MDHelper.createRange(Min, End);
}
-llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty,
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
+ QualType Ty,
SourceLocation Loc,
+ AlignmentSource AlignSource,
llvm::MDNode *TBAAInfo,
QualType TBAABaseType,
- uint64_t TBAAOffset) {
+ uint64_t TBAAOffset,
+ bool isNontemporal) {
// For better performance, handle vector loads differently.
if (Ty->isVectorType()) {
- llvm::Value *V;
- const llvm::Type *EltTy =
- cast<llvm::PointerType>(Addr->getType())->getElementType();
+ const llvm::Type *EltTy = Addr.getElementType();
const auto *VTy = cast<llvm::VectorType>(EltTy);
- // Handle vectors of size 3, like size 4 for better performance.
+ // Handle vectors of size 3 like size 4 for better performance.
if (VTy->getNumElements() == 3) {
// Bitcast to vec4 type.
llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
4);
- llvm::PointerType *ptVec4Ty =
- llvm::PointerType::get(vec4Ty,
- (cast<llvm::PointerType>(
- Addr->getType()))->getAddressSpace());
- llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
- "castToVec4");
+ Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
// Now load the value.
- llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
- llvm::Constant *Mask[] = {
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
- };
-
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- V = Builder.CreateShuffleVector(LoadVal,
- llvm::UndefValue::get(vec4Ty),
- MaskV, "extractVec");
+ V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
+ {0, 1, 2}, "extractVec");
return EmitFromMemory(V, Ty);
}
}
// Atomic operations have to be done on integral types.
if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
- LValue lvalue = LValue::MakeAddr(Addr, Ty,
- CharUnits::fromQuantity(Alignment),
- getContext(), TBAAInfo);
+ LValue lvalue =
+ LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
return EmitAtomicLoad(lvalue, Loc).getScalarVal();
}
- llvm::LoadInst *Load = Builder.CreateLoad(Addr);
- if (Volatile)
- Load->setVolatile(true);
- if (Alignment)
- Load->setAlignment(Alignment);
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
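+ // The !nontemporal metadata hints that the loaded data will not be reused
+ // soon, so the backend may use cache-bypassing (streaming) loads.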
+ if (isNontemporal) {
+ llvm::MDNode *Node = llvm::MDNode::get(
+ Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ }
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
if (TBAAPath)
- CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
+ CGM.DecorateInstructionWithTBAA(Load, TBAAPath,
+ false /*ConvertTypeToTag*/);
}
bool NeedsBoolCheck =
@@ -1241,11 +1345,13 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
return Value;
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment,
- QualType Ty, llvm::MDNode *TBAAInfo,
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource,
+ llvm::MDNode *TBAAInfo,
bool isInit, QualType TBAABaseType,
- uint64_t TBAAOffset) {
+ uint64_t TBAAOffset,
+ bool isNontemporal) {
// Handle vectors differently to get better performance.
if (Ty->isVectorType()) {
@@ -1253,29 +1359,18 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
auto *VecTy = cast<llvm::VectorType>(SrcTy);
// Handle vec3 specially.
if (VecTy->getNumElements() == 3) {
- llvm::LLVMContext &VMContext = getLLVMContext();
-
// Our source is a vec3; shuffle it to make a vec4.
- SmallVector<llvm::Constant*, 4> Mask;
- Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 0));
- Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 1));
- Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 2));
- Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
-
+ llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
+ Builder.getInt32(2),
+ llvm::UndefValue::get(Builder.getInt32Ty())};
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Value = Builder.CreateShuffleVector(Value,
llvm::UndefValue::get(VecTy),
MaskV, "extractVec");
SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
}
- auto *DstPtr = cast<llvm::PointerType>(Addr->getType());
- if (DstPtr->getElementType() != SrcTy) {
- llvm::Type *MemTy =
- llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
- Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
+ if (Addr.getElementType() != SrcTy) {
+ Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
}
}
@@ -1284,30 +1379,34 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
if (Ty->isAtomicType() ||
(!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
EmitAtomicStore(RValue::get(Value),
- LValue::MakeAddr(Addr, Ty,
- CharUnits::fromQuantity(Alignment),
- getContext(), TBAAInfo),
+ LValue::MakeAddr(Addr, Ty, getContext(),
+ AlignSource, TBAAInfo),
isInit);
return;
}
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
- if (Alignment)
- Store->setAlignment(Alignment);
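+ // As with loads, tag nontemporal stores with metadata so the backend may
+ // emit streaming stores.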
+ if (isNontemporal) {
+ llvm::MDNode *Node =
+ llvm::MDNode::get(Store->getContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ }
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
if (TBAAPath)
- CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
+ CGM.DecorateInstructionWithTBAA(Store, TBAAPath,
+ false /*ConvertTypeToTag*/);
}
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getType(), lvalue.getAlignmentSource(),
lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
- lvalue.getTBAAOffset());
+ lvalue.getTBAAOffset(), lvalue.isNontemporal());
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -1316,11 +1415,17 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
- llvm::Value *AddrWeakObj = LV.getAddress();
+ Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ // In MRC mode, we do a load+autorelease.
+ if (!getLangOpts().ObjCAutoRefCount) {
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
+ }
+
+ // In ARC mode, we load retained and then consume the value.
llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
Object = EmitObjCConsumeObject(LV.getType(), Object);
return RValue::get(Object);
@@ -1334,9 +1439,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
}
if (LV.isVectorElt()) {
- llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
LV.isVolatileQualified());
- Load->setAlignment(LV.getAlignment().getQuantity());
return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
@@ -1356,15 +1460,12 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
- CharUnits Align = LV.getAlignment().alignmentAtOffset(Info.StorageOffset);
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
- llvm::Value *Ptr = LV.getBitFieldAddr();
- llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(),
- LV.isVolatileQualified(),
- "bf.load");
+ Address Ptr = LV.getBitFieldAddress();
+ llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
if (Info.IsSigned) {
assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
@@ -1389,10 +1490,8 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
- llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified());
- Load->setAlignment(LV.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
+ LV.isVolatileQualified());
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -1419,24 +1518,24 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
}
/// @brief Generates an lvalue for a partial ext_vector access.
-llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
- llvm::Value *VectorAddress = LV.getExtVectorAddr();
+Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
+ Address VectorAddress = LV.getExtVectorAddress();
const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
QualType EQT = ExprVT->getElementType();
llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
- llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();
- llvm::Value *CastToPointerElement =
- Builder.CreateBitCast(VectorAddress,
- VectorElementPtrToTy, "conv.ptr.element");
+ Address CastToPointerElement =
+ Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
+ "conv.ptr.element");
const llvm::Constant *Elts = LV.getExtVectorElts();
unsigned ix = getAccessedFieldNo(0, Elts);
- llvm::Value *VectorBasePtrPlusIx =
- Builder.CreateInBoundsGEP(CastToPointerElement,
- llvm::ConstantInt::get(SizeTy, ix), "add.ptr");
-
+ Address VectorBasePtrPlusIx =
+ Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
+ getContext().getTypeSizeInChars(EQT),
+ "vector.elt");
+
return VectorBasePtrPlusIx;
}
@@ -1471,15 +1570,12 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
- llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified());
- Load->setAlignment(Dst.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
- llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
- Dst.isVolatileQualified());
- Store->setAlignment(Dst.getAlignment().getQuantity());
+ Builder.CreateStore(Vec, Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
return;
}
@@ -1523,7 +1619,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// Assignment to a __weak object.
- llvm::Value *LvalueDst = Dst.getAddress();
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
@@ -1531,16 +1627,17 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// Assignment to a __strong object.
- llvm::Value *LvalueDst = Dst.getAddress();
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
- llvm::Type *ResultType = ConvertType(getContext().LongTy);
- llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
- llvm::Value *dst = RHS;
+ llvm::Type *ResultType = IntPtrTy;
+ Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
+ llvm::Value *RHS = dst.getPointer();
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
llvm::Value *LHS =
- Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
+ "sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
BytesBetween);
@@ -1560,16 +1657,14 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
- CharUnits Align = Dst.getAlignment().alignmentAtOffset(Info.StorageOffset);
llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
- llvm::Value *Ptr = Dst.getBitFieldAddr();
+ Address Ptr = Dst.getBitFieldAddress();
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
// Cast the source to the storage type and shift it into place.
- SrcVal = Builder.CreateIntCast(SrcVal,
- Ptr->getType()->getPointerElementType(),
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
/*IsSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
@@ -1577,9 +1672,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// and mask together with source before storing.
if (Info.StorageSize != Info.Size) {
assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
- llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(),
- Dst.isVolatileQualified(),
- "bf.load");
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
@@ -1605,8 +1699,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
// Write the new value back out.
- Builder.CreateAlignedStore(SrcVal, Ptr, Align.getQuantity(),
- Dst.isVolatileQualified());
+ Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
// Return the new value of the bit-field, if requested.
if (Result) {
@@ -1632,10 +1725,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
- Load->setAlignment(Dst.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1697,9 +1788,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
- llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
- Store->setAlignment(Dst.getAlignment().getQuantity());
+ Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
}
/// @brief Stores to global named registers are always calls to intrinsics.
@@ -1834,11 +1924,27 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
}
static LValue EmitThreadPrivateVarDeclLValue(
- CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V,
- llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) {
- V = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, V, Loc);
- V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
- return CGF.MakeAddrLValue(V, T, Alignment);
+ CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
+ llvm::Type *RealVarTy, SourceLocation Loc) {
+ Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
+ return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
+}
+
+Address CodeGenFunction::EmitLoadOfReference(Address Addr,
+ const ReferenceType *RefTy,
+ AlignmentSource *Source) {
+ llvm::Value *Ptr = Builder.CreateLoad(Addr);
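+ // The alignment of the result is derived from the referent's type, not
+ // from the slot the reference itself was loaded from.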
+ return Address(Ptr, getNaturalTypeAlignment(RefTy->getPointeeType(),
+ Source, /*forPointee*/ true));
+}
+
+LValue CodeGenFunction::EmitLoadOfReferenceLValue(Address RefAddr,
+ const ReferenceType *RefTy) {
+ AlignmentSource Source;
+ Address Addr = EmitLoadOfReference(RefAddr, RefTy, &Source);
+ return MakeAddrLValue(Addr, RefTy->getPointeeType(), Source);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
@@ -1854,19 +1960,17 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, Alignment);
LValue LV;
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
- return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment,
+ return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
E->getExprLoc());
- if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
- LI->setAlignment(Alignment.getQuantity());
- V = LI;
- LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = CGF.EmitLoadOfReferenceLValue(Addr, RefTy);
} else {
- LV = CGF.MakeAddrLValue(V, T, Alignment);
+ LV = CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
@@ -1888,7 +1992,7 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
}
}
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
- return CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl);
}
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
@@ -1904,9 +2008,7 @@ static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
-static LValue EmitGlobalNamedRegister(const VarDecl *VD,
- CodeGenModule &CGM,
- CharUnits Alignment) {
+static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
SmallString<64> Name("llvm.named.register.");
AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
assert(Asm->getLabel().size() < 64-Name.size() &&
@@ -1920,33 +2022,43 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD,
llvm::Metadata *Ops[] = {Str};
M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
- return LValue::MakeGlobalReg(
- llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)),
- VD->getType(), Alignment);
+
+ CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
+
+ llvm::Value *Ptr =
+ llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
+ return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- CharUnits Alignment = getContext().getDeclAlign(ND);
QualType T = E->getType();
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
// Global named registers are accessed via intrinsics only.
if (VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
- return EmitGlobalNamedRegister(VD, CGM, Alignment);
+ return EmitGlobalNamedRegister(VD, CGM);
// A DeclRefExpr for a reference initialized by a constant expression can
// appear without being odr-used. Directly emit the constant initializer.
const Expr *Init = VD->getAnyInitializer(VD);
if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
VD->isUsableInConstantExpressions(getContext()) &&
- VD->checkInitIsICE()) {
+ VD->checkInitIsICE() &&
+ // Do not emit if it is a private OpenMP variable.
+ !(E->refersToEnclosingVariableOrCapture() && CapturedStmtInfo &&
+ LocalDeclMap.count(VD))) {
llvm::Constant *Val =
CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
assert(Val && "failed to emit reference constant expression");
// FIXME: Eventually we will want to emit vector element references.
- return MakeAddrLValue(Val, T, Alignment);
+
+ // Should we be using the alignment of the constant pointer we emitted?
+ CharUnits Alignment = getNaturalTypeAlignment(E->getType(), nullptr,
+ /*pointee*/ true);
+
+ return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
}
// Check for captured variables.
@@ -1954,15 +2066,24 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (auto *FD = LambdaCaptureFields.lookup(VD))
return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
else if (CapturedStmtInfo) {
- if (auto *V = LocalDeclMap.lookup(VD))
- return MakeAddrLValue(V, T, Alignment);
- else
- return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
- CapturedStmtInfo->getContextValue());
+ auto it = LocalDeclMap.find(VD);
+ if (it != LocalDeclMap.end()) {
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ return EmitLoadOfReferenceLValue(it->second, RefTy);
+ }
+ return MakeAddrLValue(it->second, T);
+ }
+ LValue CapLVal =
+ EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
+ CapturedStmtInfo->getContextValue());
+ return MakeAddrLValue(
+ Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)),
+ CapLVal.getType(), AlignmentSource::Decl);
}
+
assert(isa<BlockDecl>(CurCodeDecl));
- return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()),
- T, Alignment);
+ Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
+ return MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
}
@@ -1975,8 +2096,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (ND->hasAttr<WeakRefAttr>()) {
const auto *VD = cast<ValueDecl>(ND);
- llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
- return MakeAddrLValue(Aliasee, T, Alignment);
+ ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
}
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
@@ -1984,39 +2105,52 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->hasLinkage() || VD->isStaticDataMember())
return EmitGlobalVarDeclLValue(*this, E, VD);
- bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+ Address addr = Address::invalid();
- llvm::Value *V = LocalDeclMap.lookup(VD);
- if (!V && VD->isStaticLocal())
- V = CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ // The variable should generally be present in the local decl map.
+ auto iter = LocalDeclMap.find(VD);
+ if (iter != LocalDeclMap.end()) {
+ addr = iter->second;
- // Check if variable is threadprivate.
- if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
- return EmitThreadPrivateVarDeclLValue(
- *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()),
- Alignment, E->getExprLoc());
+ // Otherwise, it might be a static local we haven't emitted yet for
+ // some reason; most likely, because it's in an outer function.
+ } else if (VD->isStaticLocal()) {
+ addr = Address(CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
+ getContext().getDeclAlign(VD));
+
+ // No other cases for now.
+ } else {
+ llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
+ }
- assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (isBlockVariable)
- V = BuildBlockByrefAddress(V, VD);
+ // Check for OpenMP threadprivate variables.
+ if (getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+ return EmitThreadPrivateVarDeclLValue(
+ *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
+ E->getExprLoc());
+ }
+ // Drill into block byref variables.
+ bool isBlockByref = VD->hasAttr<BlocksAttr>();
+ if (isBlockByref) {
+ addr = emitBlockByrefAddress(addr, VD);
+ }
+
+ // Drill into reference types.
LValue LV;
- if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = Builder.CreateLoad(V);
- LI->setAlignment(Alignment.getQuantity());
- V = LI;
- LV = MakeNaturalAlignAddrLValue(V, T);
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = EmitLoadOfReferenceLValue(addr, RefTy);
} else {
- LV = MakeAddrLValue(V, T, Alignment);
+ LV = MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
bool isLocalStorage = VD->hasLocalStorage();
bool NonGCable = isLocalStorage &&
!VD->getType()->isReferenceType() &&
- !isBlockVariable;
+ !isBlockByref;
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
@@ -2048,7 +2182,9 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &AlignSource);
+ LValue LV = MakeAddrLValue(Addr, T, AlignSource);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate __weak write barrier on indirect reference
@@ -2065,22 +2201,22 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
- llvm::Value *Addr = LV.getAddress();
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
- !cast<llvm::PointerType>(Addr->getType())
- ->getElementType()->isStructTy()) {
+ !LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
assert(E->getSubExpr()->getType()->isAnyComplexType());
- unsigned Idx = E->getOpcode() == UO_Imag;
- return MakeAddrLValue(
- Builder.CreateStructGEP(nullptr, LV.getAddress(), Idx, "idx"), ExprTy);
+ Address Component =
+ (E->getOpcode() == UO_Real
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
+ return MakeAddrLValue(Component, ExprTy, LV.getAlignmentSource());
}
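[editor's sketch] The component helpers used above resolve to fixed offsets within the { real, imag } pair, which is what lets the alignment be carried precisely. An illustration using the GNU &__real/&__imag extension; offsets assume the usual complex layout:

    // _Complex double z is laid out as { double real, double imag }:
    //   __real z -> base + 0               (keeps z's alignment)
    //   __imag z -> base + sizeof(double)  (alignmentAtOffset(8))
    _Complex double z;
    double *re = &__real z;
    double *im = &__imag z;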
case UO_PreInc:
case UO_PreDec: {
@@ -2098,12 +2234,12 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
- E->getType());
+ E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
- E->getType());
+ E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
@@ -2116,11 +2252,11 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1);
- return MakeAddrLValue(C, E->getType());
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
- return MakeAddrLValue(C, E->getType());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
/// Emit a type description suitable for use by a runtime sanitizer library. The
@@ -2194,9 +2330,9 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
- llvm::Value *Ptr = CreateTempAlloca(V->getType());
+ Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
- V = Ptr;
+ V = Ptr.getPointer();
}
return Builder.CreatePtrToInt(V, TargetTy);
}
@@ -2217,8 +2353,9 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
if (PLoc.isValid()) {
auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src");
- CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV);
- Filename = FilenameGV;
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
+ cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
+ Filename = FilenameGV.getPointer();
Line = PLoc.getLine();
Column = PLoc.getColumn();
} else {
@@ -2395,6 +2532,34 @@ void CodeGenFunction::EmitCheck(
EmitBlock(Cont);
}
+void CodeGenFunction::EmitCfiSlowPathCheck(llvm::Value *Cond,
+ llvm::ConstantInt *TypeId,
+ llvm::Value *Ptr) {
+ auto &Ctx = getLLVMContext();
+ llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
+
+ llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
+ llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
+
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
+ BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
+
+ EmitBlock(CheckBB);
+
+ llvm::Constant *SlowPathFn = CGM.getModule().getOrInsertFunction(
+ "__cfi_slowpath",
+ llvm::FunctionType::get(
+ llvm::Type::getVoidTy(Ctx),
+ {llvm::Type::getInt64Ty(Ctx),
+ llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(Ctx))},
+ false));
+ llvm::CallInst *CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
+ CheckCall->setDoesNotThrow();
+
+ EmitBlock(Cont);
+}
+
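[editor's sketch] On a failed check in cross-DSO mode, control branches to a cold block that calls the runtime entry point rather than trapping. A prototype matching the FunctionType built by getOrInsertFunction above (the runtime's behavior lives in compiler-rt and is not shown here):

    #include <cstdint>
    // (i64, i8*) -> void, as constructed above.
    extern "C" void __cfi_slowpath(uint64_t CallSiteTypeId, void *Ptr);
    // Emitted shape, in outline:
    //   br i1 %Cond, label %cfi.cont, label %cfi.slowpath ; weights ~2^20:1
    // cfi.slowpath:
    //   call void @__cfi_slowpath(i64 %TypeId, i8* %Ptr)  ; nounwind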
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
llvm::BasicBlock *Cont = createBasicBlock("cont");
@@ -2426,6 +2591,33 @@ llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
return TrapCall;
}
+Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
+ AlignmentSource *AlignSource) {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ // Expressions of array type can't be bitfields or vector elements.
+ LValue LV = EmitLValue(E);
+ Address Addr = LV.getAddress();
+ if (AlignSource) *AlignSource = LV.getAlignmentSource();
+
+ // If the array type was an incomplete type, we need to make sure
+ // the decay ends up being the right type.
+ llvm::Type *NewTy = ConvertType(E->getType());
+ Addr = Builder.CreateElementBitCast(Addr, NewTy);
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
+ "Expected pointer to array");
+ Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
+ }
+
+ QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
+ return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
+}
+
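[editor's sketch] A worked example of the decay helper above for a constant-size array; the struct-GEP-at-zero step is skipped for VLAs because a VLA's address is already an element pointer:

    // Sketch: int buf[16]; f(buf);
    //   %buf has type [16 x i32]* with the declaration's alignment;
    //   CreateStructGEP(Addr, 0, 0) yields the i32* "arraydecay" at the
    //   same alignment, and the trailing element bitcast is a no-op here
    //   (it matters when the array type was incomplete at the decl).
    void f(int *);
    int buf[16];
    void call() { f(buf); }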
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
@@ -2442,6 +2634,69 @@ static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
return SubExpr;
}
+static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ ArrayRef<llvm::Value*> indices,
+ bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ if (inbounds) {
+ return CGF.Builder.CreateInBoundsGEP(ptr, indices, name);
+ } else {
+ return CGF.Builder.CreateGEP(ptr, indices, name);
+ }
+}
+
+static CharUnits getArrayElementAlign(CharUnits arrayAlign,
+ llvm::Value *idx,
+ CharUnits eltSize) {
+ // If we have a constant index, we can use the exact offset of the
+ // element we're accessing.
+ if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
+ CharUnits offset = constantIdx->getZExtValue() * eltSize;
+ return arrayAlign.alignmentAtOffset(offset);
+
+ // Otherwise, use the worst-case alignment for any element.
+ } else {
+ return arrayAlign.alignmentOfArrayElement(eltSize);
+ }
+}
+
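[editor's sketch] Both branches above reduce to MinAlign, the largest power of two dividing both quantities. A standalone sketch with raw byte counts and hypothetical helper names:

    // minAlign(a, b): lowest set bit of (a | b); for power-of-two inputs
    // this matches MinAlign, and minAlign(a, 0) == a.
    unsigned minAlign(unsigned a, unsigned b) { return (a | b) & -(a | b); }

    unsigned arrayEltAlign(unsigned arrayAlign, unsigned eltSize,
                           bool idxKnown, unsigned idx) {
      if (idxKnown)                           // exact offset of this element
        return minAlign(arrayAlign, idx * eltSize);
      return minAlign(arrayAlign, eltSize);   // worst case over all elements
    }
    // e.g. arrayEltAlign(16, 4, true, 3) == 4: offset 12 from a 16-aligned
    // base is only 4-byte aligned.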
+static QualType getFixedSizeElementType(const ASTContext &ctx,
+ const VariableArrayType *vla) {
+ QualType eltType;
+ do {
+ eltType = vla->getElementType();
+ } while ((vla = ctx.getAsVariableArrayType(eltType)));
+ return eltType;
+}
+
+static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
+ ArrayRef<llvm::Value*> indices,
+ QualType eltType, bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ // All the indices except the last must be zero.
+#ifndef NDEBUG
+ for (auto idx : indices.drop_back())
+ assert(isa<llvm::ConstantInt>(idx) &&
+ cast<llvm::ConstantInt>(idx)->isZero());
+#endif
+
+ // Determine the element size of the statically-sized base. This is
+ // the thing that the indices are expressed in terms of.
+ if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
+ eltType = getFixedSizeElementType(CGF.getContext(), vla);
+ }
+
+ // We can use that to compute the best alignment of the element.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ CharUnits eltAlign =
+ getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+
+ llvm::Value *eltPtr =
+ emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
+ return Address(eltPtr, eltAlign);
+}
+
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
// The index must always be an integer, which is not an aggregate. Emit it.
@@ -2460,32 +2715,34 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType(), LHS.getAlignment());
+ E->getBase()->getType(),
+ LHS.getAlignmentSource());
}
+ // All the other cases basically behave like simple offsetting.
+
// Extend or truncate the index type to 32 or 64-bits.
if (Idx->getType() != IntPtrTy)
Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
- // We know that the pointer points to a type of the correct size, unless the
- // size is a VLA or Objective-C interface.
- llvm::Value *Address = nullptr;
- CharUnits ArrayAlignment;
+ // Handle the extvector case we ignored above.
if (isa<ExtVectorElementExpr>(E->getBase())) {
LValue LV = EmitLValue(E->getBase());
- Address = EmitExtVectorElementLValue(LV);
- Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
- const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
- QualType EQT = ExprVT->getElementType();
- return MakeAddrLValue(Address, EQT,
- getContext().getTypeAlignInChars(EQT));
- }
- else if (const VariableArrayType *vla =
+ Address Addr = EmitExtVectorElementLValue(LV);
+
+ QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
+ return MakeAddrLValue(Addr, EltType, LV.getAlignmentSource());
+ }
+
+ AlignmentSource AlignSource;
+ Address Addr = Address::invalid();
+ if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
- Address = EmitScalarExpr(E->getBase());
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).first;
@@ -2496,24 +2753,40 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// multiply. We suppress this if overflow is not undefined behavior.
if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
- Address = Builder.CreateGEP(Address, Idx, "arrayidx");
} else {
Idx = Builder.CreateNSWMul(Idx, numElements);
- Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
}
- } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
- // Indexing over an interface, as in "NSString *P; P[4];"
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- getContext().getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
+ !getLangOpts().isSignedOverflowDefined());
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
- Address = EmitCastToVoidPtr(Base);
- Address = Builder.CreateGEP(Address, Idx, "arrayidx");
- Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
+ llvm::Value *InterfaceSizeVal =
+ llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
+
+ llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
+
+ // Emit the base pointer.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+
+ // We don't necessarily build correct LLVM struct types for ObjC
+ // interfaces, so we can't rely on GEP to do this scaling correctly;
+ // instead, we cast to i8*. FIXME: is this actually true? A lot of
+ // other things in the fragile ABI would break...
+ llvm::Type *OrigBaseTy = Addr.getType();
+ Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
+
+ // Do the GEP.
+ CharUnits EltAlign =
+ getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
+ llvm::Value *EltPtr =
+ emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
+ Addr = Address(EltPtr, EltAlign);
+
+ // Cast back.
+ Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -2528,42 +2801,23 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
- llvm::Value *ArrayPtr = ArrayLV.getAddress();
- llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
- llvm::Value *Args[] = { Zero, Idx };
// Propagate the alignment from the array itself to the result.
- ArrayAlignment = ArrayLV.getAlignment();
-
- if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
- else
- Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
+ {CGM.getSize(CharUnits::Zero()), Idx},
+ E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
+ AlignSource = ArrayLV.getAlignmentSource();
} else {
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(Base, Idx, "arrayidx");
- else
- Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ // The base must be a pointer; emit it with an estimate of its alignment.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
}
- QualType T = E->getBase()->getType()->getPointeeType();
- assert(!T.isNull() &&
- "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
-
+ LValue LV = MakeAddrLValue(Addr, E->getType(), AlignSource);
- // Limit the alignment to that of the result type.
- LValue LV;
- if (!ArrayAlignment.isZero()) {
- CharUnits Align = getContext().getTypeAlignInChars(T);
- ArrayAlignment = std::min(Align, ArrayAlignment);
- LV = MakeAddrLValue(Address, T, ArrayAlignment);
- } else {
- LV = MakeNaturalAlignAddrLValue(Address, T);
- }
-
- LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
+ // TODO: Preserve/extend path TBAA metadata?
if (getLangOpts().ObjC1 &&
getLangOpts().getGC() != LangOptions::NonGC) {
@@ -2573,14 +2827,150 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
return LV;
}
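[editor's sketch] For the VLA branch above, the explicit NSW multiply stands in for the scaling a GEP would normally do over a complete type. A compact example of the source form it handles:

    // Sketch: the row index is scaled by the per-row element count first,
    // then a single (inbounds, unless signed overflow is defined) GEP over
    // the fixed-size element type performs the offset.
    void g(int n, int i) {
      int a[n][n];     // VLA: per-row count is the runtime value n
      a[i][0] = 0;     // Idx = i * n, then GEP; inner subscript repeats it
    }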
-static
-llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
- SmallVectorImpl<unsigned> &Elts) {
- SmallVector<llvm::Constant*, 4> CElts;
- for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(Builder.getInt32(Elts[i]));
+LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
+ bool IsLowerBound) {
+ LValue Base;
+ if (auto *ASE =
+ dyn_cast<OMPArraySectionExpr>(E->getBase()->IgnoreParenImpCasts()))
+ Base = EmitOMPArraySectionExpr(ASE, IsLowerBound);
+ else
+ Base = EmitLValue(E->getBase());
+ QualType BaseTy = Base.getType();
+ llvm::Value *Idx = nullptr;
+ QualType ResultExprTy;
+ if (auto *AT = getContext().getAsArrayType(BaseTy))
+ ResultExprTy = AT->getElementType();
+ else
+ ResultExprTy = BaseTy->getPointeeType();
+ if (IsLowerBound || E->getColonLoc().isInvalid()) {
+ // Requesting the lower bound, or the upper bound without a provided
+ // length and without ':' for the default length -> length = 1.
+ // Idx = LowerBound ?: 0;
+ if (auto *LowerBound = E->getLowerBound()) {
+ Idx = Builder.CreateIntCast(
+ EmitScalarExpr(LowerBound), IntPtrTy,
+ LowerBound->getType()->hasSignedIntegerRepresentation());
+ } else
+ Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
+ } else {
+ // Try to emit the length or the lower bound as a constant. If possible,
+ // 1 is subtracted from the constant length or lower bound. Otherwise,
+ // emit LLVM IR computing (LB + Len) - 1.
+ auto &C = CGM.getContext();
+ auto *Length = E->getLength();
+ llvm::APSInt ConstLength;
+ if (Length) {
+ // Idx = LowerBound + Length - 1;
+ if (Length->isIntegerConstantExpr(ConstLength, C)) {
+ ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
+ Length = nullptr;
+ }
+ auto *LowerBound = E->getLowerBound();
+ llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
+ if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
+ ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
+ LowerBound = nullptr;
+ }
+ if (!Length)
+ --ConstLength;
+ else if (!LowerBound)
+ --ConstLowerBound;
+
+ if (Length || LowerBound) {
+ auto *LowerBoundVal =
+ LowerBound
+ ? Builder.CreateIntCast(
+ EmitScalarExpr(LowerBound), IntPtrTy,
+ LowerBound->getType()->hasSignedIntegerRepresentation())
+ : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
+ auto *LengthVal =
+ Length
+ ? Builder.CreateIntCast(
+ EmitScalarExpr(Length), IntPtrTy,
+ Length->getType()->hasSignedIntegerRepresentation())
+ : llvm::ConstantInt::get(IntPtrTy, ConstLength);
+ Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
+ /*HasNUW=*/false,
+ !getLangOpts().isSignedOverflowDefined());
+ if (Length && LowerBound) {
+ Idx = Builder.CreateSub(
+ Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
+ /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
+ }
+ } else
+ Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
+ } else {
+ // Idx = ArraySize - 1;
+ if (auto *VAT = C.getAsVariableArrayType(BaseTy)) {
+ Length = VAT->getSizeExpr();
+ if (Length->isIntegerConstantExpr(ConstLength, C))
+ Length = nullptr;
+ } else {
+ auto *CAT = C.getAsConstantArrayType(BaseTy);
+ ConstLength = CAT->getSize();
+ }
+ if (Length) {
+ auto *LengthVal = Builder.CreateIntCast(
+ EmitScalarExpr(Length), IntPtrTy,
+ Length->getType()->hasSignedIntegerRepresentation());
+ Idx = Builder.CreateSub(
+ LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
+ /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
+ } else {
+ ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
+ --ConstLength;
+ Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
+ }
+ }
+ }
+ assert(Idx);
- return llvm::ConstantVector::get(CElts);
+ llvm::Value *EltPtr;
+ QualType FixedSizeEltType = ResultExprTy;
+ if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(VLA).first;
+ FixedSizeEltType = getFixedSizeElementType(getContext(), VLA);
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOpts().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
+ }
+ } else if (BaseTy->isConstantArrayType()) {
+ llvm::Value *ArrayPtr = Base.getPointer();
+ llvm::Value *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
+ llvm::Value *Args[] = {Zero, Idx};
+
+ if (getLangOpts().isSignedOverflowDefined())
+ EltPtr = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
+ else
+ EltPtr = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ } else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ if (getLangOpts().isSignedOverflowDefined())
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
+ else
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
+ }
+
+ CharUnits EltAlign =
+ Base.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(FixedSizeEltType));
+
+ // Form the lvalue using the element alignment derived from the base.
+ LValue LV = MakeAddrLValue(Address(EltPtr, EltAlign), ResultExprTy,
+ Base.getAlignmentSource());
+
+ LV.getQuals().setAddressSpace(BaseTy.getAddressSpace());
+
+ return LV;
}
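[editor's sketch] The bound selection above can be summarized with a concrete section; the names and numbers are illustrative only:

    // For the section a[10:20] (lower bound 10, length 20):
    //   IsLowerBound               -> Idx = 10
    //   upper bound, length given  -> Idx = 10 + 20 - 1 = 29
    // For a[:] on int a[100], the upper bound falls back to 100 - 1 = 99.
    int a[100];
    void touch() {
    #pragma omp target map(tofrom: a[10:20])
      { a[10] = 1; }
    }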
LValue CodeGenFunction::
@@ -2592,9 +2982,10 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (E->isArrow()) {
// If it is a pointer to a vector, emit the address and form an lvalue with
// it.
- llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ AlignmentSource AlignSource;
+ Address Ptr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
- Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType(), AlignSource);
Base.getQuals().removeObjCGCAttr();
} else if (E->getBase()->isGLValue()) {
// Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
@@ -2608,22 +2999,24 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
llvm::Value *Vec = EmitScalarExpr(E->getBase());
// Store the vector to memory (because LValue wants an address).
- llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Address VecMem = CreateMemTemp(E->getBase()->getType());
Builder.CreateStore(Vec, VecMem);
- Base = MakeAddrLValue(VecMem, E->getBase()->getType());
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
+ AlignmentSource::Decl);
}
QualType type =
E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
// Encode the element access list into a vector of unsigned indices.
- SmallVector<unsigned, 4> Indices;
+ SmallVector<uint32_t, 4> Indices;
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ llvm::Constant *CV =
+ llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
- Base.getAlignment());
+ Base.getAlignmentSource());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -2633,8 +3026,8 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
for (unsigned i = 0, e = Indices.size(); i != e; ++i)
CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
- Base.getAlignment());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
+ Base.getAlignmentSource());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -2643,10 +3036,11 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
LValue BaseLV;
if (E->isArrow()) {
- llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(BaseExpr, &AlignSource);
QualType PtrTy = BaseExpr->getType()->getPointeeType();
- EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
- BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy);
+ BaseLV = MakeAddrLValue(Addr, PtrTy, AlignSource);
} else
BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
@@ -2677,41 +3071,65 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
return EmitLValueForField(LambdaLV, Field);
}
+/// Drill down to the storage of a field without walking into
+/// reference types.
+///
+/// The resulting address doesn't necessarily have the right type.
+static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
+ const FieldDecl *field) {
+ const RecordDecl *rec = field->getParent();
+
+ unsigned idx =
+ CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
+
+ CharUnits offset;
+ // Adjust the alignment down to the given offset.
+ // As a special case, if the LLVM field index is 0, we know the
+ // offset is zero.
+ assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
+ .getFieldOffset(field->getFieldIndex()) == 0) &&
+ "LLVM field at index zero had non-zero offset?");
+ if (idx != 0) {
+ auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
+ auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
+ offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
+ }
+
+ return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
+}
+
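[editor's sketch] A concrete instance of the index/offset pairing computed above (offsets assume the usual C layout with no packing pragmas):

    // struct S { char c; int i; };
    //   'c': LLVM field index 0 -> offset must be 0 (asserted above)
    //   'i': LLVM field index 1, AST field offset 32 bits -> CharUnits 4,
    //        so from a 16-aligned S, 'i' gets alignmentAtOffset(4) == 4.
    struct S { char c; int i; };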
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
+ AlignmentSource fieldAlignSource =
+ getFieldAlignmentSource(base.getAlignmentSource());
+
if (field->isBitField()) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
- llvm::Value *Addr = base.getAddress();
+ Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
- Addr = Builder.CreateStructGEP(nullptr, Addr, Idx, field->getName());
+ Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
+ field->getName());
// Get the access type.
- llvm::Type *PtrTy = llvm::Type::getIntNPtrTy(
- getLLVMContext(), Info.StorageSize,
- CGM.getContext().getTargetAddressSpace(base.getType()));
- if (Addr->getType() != PtrTy)
- Addr = Builder.CreateBitCast(Addr, PtrTy);
+ llvm::Type *FieldIntTy =
+ llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
+ if (Addr.getElementType() != FieldIntTy)
+ Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
QualType fieldType =
field->getType().withCVRQualifiers(base.getVRQualifiers());
- return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment());
+ return LValue::MakeBitfield(Addr, Info, fieldType, fieldAlignSource);
}
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
- CharUnits alignment = getContext().getDeclAlign(field);
-
- // FIXME: It should be impossible to have an LValue without alignment for a
- // complete type.
- if (!base.getAlignment().isZero())
- alignment = std::min(alignment, base.getAlignment());
bool mayAlias = rec->hasAttr<MayAliasAttr>();
- llvm::Value *addr = base.getAddress();
+ Address addr = base.getAddress();
unsigned cvr = base.getVRQualifiers();
bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
if (rec->isUnion()) {
@@ -2721,14 +3139,12 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
TBAAPath = false;
} else {
// For structs, we GEP to the field that the record layout suggests.
- unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- addr = Builder.CreateStructGEP(nullptr, addr, idx, field->getName());
+ addr = emitAddrOfFieldStorage(*this, addr, field);
// If this is a reference field, load the reference right now.
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
- load->setAlignment(alignment.getQuantity());
// Loading the reference will disable path-aware TBAA.
TBAAPath = false;
@@ -2739,17 +3155,20 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
else
tbaa = CGM.getTBAAInfo(type);
if (tbaa)
- CGM.DecorateInstruction(load, tbaa);
+ CGM.DecorateInstructionWithTBAA(load, tbaa);
}
- addr = load;
mayAlias = false;
type = refType->getPointeeType();
- if (type->isIncompleteType())
- alignment = CharUnits();
- else
- alignment = getContext().getTypeAlignInChars(type);
- cvr = 0; // qualifiers don't recursively apply to referencee
+
+ CharUnits alignment =
+ getNaturalTypeAlignment(type, &fieldAlignSource, /*pointee*/ true);
+ addr = Address(load, alignment);
+
+ // Qualifiers on the struct don't apply to the referencee, and
+ // we'll pick up CVR from the actual type later, so reset these
+ // additional qualifiers now.
+ cvr = 0;
}
}
@@ -2757,14 +3176,14 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// for both unions and structs. A union needs a bitcast, a struct element
// will need a bitcast if the LLVM type laid out doesn't match the desired
// type.
- addr = EmitBitCastOfLValueToProperType(*this, addr,
- CGM.getTypes().ConvertTypeForMem(type),
- field->getName());
+ addr = Builder.CreateElementBitCast(addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
- LValue LV = MakeAddrLValue(addr, type, alignment);
+ LValue LV = MakeAddrLValue(addr, type, fieldAlignSource);
LV.getQuals().addCVRQualifiers(cvr);
if (TBAAPath) {
const ASTRecordLayout &Layout =
@@ -2798,41 +3217,29 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(Field->getParent());
- unsigned idx = RL.getLLVMFieldNo(Field);
- llvm::Value *V = Builder.CreateStructGEP(nullptr, Base.getAddress(), idx);
- assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
- // Make sure that the address is pointing to the right type. This is critical
- // for both unions and structs. A union needs a bitcast, a struct element
- // will need a bitcast if the LLVM type laid out doesn't match the desired
- // type.
+ // Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
- V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
-
- CharUnits Alignment = getContext().getDeclAlign(Field);
-
- // FIXME: It should be impossible to have an LValue without alignment for a
- // complete type.
- if (!Base.getAlignment().isZero())
- Alignment = std::min(Alignment, Base.getAlignment());
+ V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
- return MakeAddrLValue(V, FieldType, Alignment);
+ // TODO: access-path TBAA?
+ auto FieldAlignSource = getFieldAlignmentSource(Base.getAlignmentSource());
+ return MakeAddrLValue(V, FieldType, FieldAlignSource);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
if (E->isFileScope()) {
- llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
- return MakeAddrLValue(GlobalPtr, E->getType());
+ ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
}
if (E->getType()->isVariablyModifiedType())
// make sure to emit the VLA size.
EmitVariablyModifiedType(E->getType());
- llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
- LValue Result = MakeAddrLValue(DeclPtr, E->getType());
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
@@ -2923,11 +3330,14 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
EmitBlock(contBlock);
if (lhs && rhs) {
- llvm::PHINode *phi = Builder.CreatePHI(lhs->getAddress()->getType(),
+ llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
2, "cond-lvalue");
- phi->addIncoming(lhs->getAddress(), lhsBlock);
- phi->addIncoming(rhs->getAddress(), rhsBlock);
- return MakeAddrLValue(phi, expr->getType());
+ phi->addIncoming(lhs->getPointer(), lhsBlock);
+ phi->addIncoming(rhs->getPointer(), rhsBlock);
+ Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ AlignmentSource alignSource =
+ std::max(lhs->getAlignmentSource(), rhs->getAlignmentSource());
+ return MakeAddrLValue(result, expr->getType(), alignSource);
} else {
assert((lhs || rhs) &&
"both operands of glvalue conditional are throw-expressions?");
@@ -2996,9 +3406,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *V = LV.getAddress();
+ Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
- return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
case CK_ConstructorConversion:
@@ -3016,14 +3426,14 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *This = LV.getAddress();
+ Address This = LV.getAddress();
// Perform the derived-to-base conversion
- llvm::Value *Base = GetAddressOfBaseClass(
+ Address Base = GetAddressOfBaseClass(
This, DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false, E->getExprLoc());
- return MakeAddrLValue(Base, E->getType());
+ return MakeAddrLValue(Base, E->getType(), LV.getAlignmentSource());
}
case CK_ToUnion:
return EmitAggExprToLValue(E);
@@ -3034,7 +3444,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
// Perform the base-to-derived conversion
- llvm::Value *Derived =
+ Address Derived =
GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
@@ -3043,34 +3453,36 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// performed and the object is not of the derived type.
if (sanitizePerformTypeCheck())
EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
- Derived, E->getType());
+ Derived.getPointer(), E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
- EmitVTablePtrCheckForCast(E->getType(), Derived, /*MayBeNull=*/false,
+ EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
+ /*MayBeNull=*/false,
CFITCK_DerivedCast, E->getLocStart());
- return MakeAddrLValue(Derived, E->getType());
+ return MakeAddrLValue(Derived, E->getType(), LV.getAlignmentSource());
}
case CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const auto *CE = cast<ExplicitCastExpr>(E);
+ CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
- ConvertType(CE->getTypeAsWritten()));
+ Address V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
- EmitVTablePtrCheckForCast(E->getType(), V, /*MayBeNull=*/false,
+ EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
+ /*MayBeNull=*/false,
CFITCK_UnrelatedCast, E->getLocStart());
- return MakeAddrLValue(V, E->getType());
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- QualType ToType = getContext().getLValueReferenceType(E->getType());
- llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
- ConvertType(ToType));
- return MakeAddrLValue(V, E->getType());
+ Address V = Builder.CreateElementBitCast(LV.getAddress(),
+ ConvertType(E->getType()));
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
case CK_ZeroToOCLEvent:
llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
@@ -3129,20 +3541,17 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const auto *PseudoDtor =
dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getLangOpts().ObjCAutoRefCount &&
- DestroyedType->isObjCLifetimeType() &&
- (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
- DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
// Automatic Reference Counting:
// If the pseudo-expression names a retainable object with weak or
// strong lifetime, the object shall be released.
Expr *BaseExpr = PseudoDtor->getBase();
- llvm::Value *BaseValue = nullptr;
+ Address BaseValue = Address::invalid();
Qualifiers BaseQuals;
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
if (PseudoDtor->isArrow()) {
- BaseValue = EmitScalarExpr(BaseExpr);
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
BaseQuals = PTy->getPointeeType().getQualifiers();
} else {
@@ -3152,7 +3561,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
BaseQuals = BaseTy.getQualifiers();
}
- switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ switch (DestroyedType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
@@ -3237,13 +3646,14 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
RValue RV = EmitCallExpr(E);
if (!RV.isScalar())
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
assert(E->getCallReturnType(getContext())->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return MakeAddrLValue(RV.getScalarVal(), E->getType());
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
@@ -3256,21 +3666,23 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
&& "binding l-value to type which needs a temporary");
AggValueSlot Slot = CreateAggTemp(E->getType());
EmitCXXConstructExpr(E, Slot);
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
- return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+ return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
-llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateBitCast(CGM.GetAddrOfUuidDescriptor(E),
- ConvertType(E->getType())->getPointerTo());
+Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ ConvertType(E->getType()));
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
- return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
+ return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
+ AlignmentSource::Decl);
}
LValue
@@ -3278,34 +3690,37 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
- EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
EmitLambdaExpr(E, Slot);
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
if (!RV.isScalar())
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return MakeAddrLValue(RV.getScalarVal(), E->getType());
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
- llvm::Value *V =
- CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true);
- return MakeAddrLValue(V, E->getType());
+ Address V =
+ CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
+ return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -3333,8 +3748,7 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
BaseQuals = ObjectTy.getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- // FIXME: this isn't right for bitfields.
- BaseValue = BaseLV.getAddress();
+ BaseValue = BaseLV.getPointer();
ObjectTy = BaseExpr->getType();
BaseQuals = ObjectTy.getQualifiers();
}
@@ -3349,17 +3763,38 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const CallExpr *E, ReturnValueSlot ReturnValue,
- const Decl *TargetDecl, llvm::Value *Chain) {
+ CGCalleeInfo CalleeInfo, llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
+ // Preserve the non-canonical function type because things like exception
+ // specifications disappear in the canonical type. That information is useful
+ // to drive the generation of more accurate code for this call later on.
+ const FunctionProtoType *NonCanonicalFTP = CalleeType->getAs<PointerType>()
+ ->getPointeeType()
+ ->getAs<FunctionProtoType>();
+
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ // We can only guarantee that a function is called from the correct
+ // context/function based on the appropriate target attributes, so only
+ // check when the callee has both always_inline and target attributes;
+ // otherwise we could be making a conditional call after a check for
+ // the proper cpu features (and it won't cause code generation issues
+ // due to function-based code generation).
+ if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
+ TargetDecl->hasAttr<TargetAttr>())
+ checkTargetFeatures(E, FD);
+
CalleeType = getContext().getCanonicalType(CalleeType);
const auto *FnType =
@@ -3383,7 +3818,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
Callee, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
- llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr);
+ llvm::Value *CalleeSig =
+ Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
llvm::BasicBlock *Cont = createBasicBlock("cont");
@@ -3393,7 +3829,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitBlock(TypeCheck);
llvm::Value *CalleeRTTIPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
- llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr);
+ llvm::Value *CalleeRTTI =
+ Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
llvm::Constant *StaticData[] = {
@@ -3408,12 +3845,39 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
}
}
+ // If we are checking indirect calls and this call is indirect, check that the
+ // function pointer is a member of the bit set for the function type.
+ if (SanOpts.has(SanitizerKind::CFIICall) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ SanitizerScope SanScope(this);
+
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
+ llvm::Value *BitSetName = llvm::MetadataAsValue::get(getLLVMContext(), MD);
+
+ llvm::Value *CastedCallee = Builder.CreateBitCast(Callee, Int8PtrTy);
+ llvm::Value *BitSetTest =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
+ {CastedCallee, BitSetName});
+
+ auto TypeId = CGM.CreateCfiIdForTypeMetadata(MD);
+ if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && TypeId) {
+ EmitCfiSlowPathCheck(BitSetTest, TypeId, CastedCallee);
+ } else {
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(E->getLocStart()),
+ EmitCheckTypeDescriptor(QualType(FnType, 0)),
+ };
+ EmitCheck(std::make_pair(BitSetTest, SanitizerKind::CFIICall),
+ "cfi_bad_icall", StaticData, CastedCallee);
+ }
+ }
+
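[editor's sketch] The indirect-call check above applies exactly when the callee is not a direct FunctionDecl reference; a sketch of the guarded call shape (the bitset identifier shown is illustrative):

    // Calls through a function pointer get a membership test against the
    // bitset for the function type before the call:
    void callThrough(void (*fp)(int)) {
      fp(42); //   %p  = bitcast void (i32)* %fp to i8*
              //   %ok = call i1 @llvm.bitset.test(i8* %p, metadata !"...")
              // then EmitCfiSlowPathCheck (cross-DSO) or EmitCheck above.
    }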
CallArgList Args;
if (Chain)
Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
CGM.getContext().VoidPtrTy);
- EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arg_begin(),
- E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0);
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
+ E->getDirectCallee(), /*ParamsToSkip*/ 0);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
Args, FnType, /*isChainCall=*/Chain);
@@ -3444,34 +3908,38 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
}
- return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args,
+ CGCalleeInfo(NonCanonicalFTP, TargetDecl));
}
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
- llvm::Value *BaseV;
- if (E->getOpcode() == BO_PtrMemI)
- BaseV = EmitScalarExpr(E->getLHS());
- else
- BaseV = EmitLValue(E->getLHS()).getAddress();
+ Address BaseAddr = Address::invalid();
+ if (E->getOpcode() == BO_PtrMemI) {
+ BaseAddr = EmitPointerWithAlignment(E->getLHS());
+ } else {
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
+ }
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
const MemberPointerType *MPT
= E->getRHS()->getType()->getAs<MemberPointerType>();
- llvm::Value *AddV = CGM.getCXXABI().EmitMemberDataPointerAddress(
- *this, E, BaseV, OffsetV, MPT);
+ AlignmentSource AlignSource;
+ Address MemberAddr =
+ EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT,
+ &AlignSource);
- return MakeAddrLValue(AddV, MPT->getPointeeType());
+ return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), AlignSource);
}
/// Given the address of a temporary variable, produce an r-value of
/// its type.
-RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
+RValue CodeGenFunction::convertTempToRValue(Address addr,
QualType type,
SourceLocation loc) {
- LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
+ LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
switch (getEvaluationKind(type)) {
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
@@ -3527,7 +3995,8 @@ static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
CGF.EmitAggExpr(ov->getSourceExpr(), slot);
- LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
+ AlignmentSource::Decl);
opaqueData = OVMA::bind(CGF, ov, LV);
result.RV = slot.asRValue();
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 883b76bcfab0..20838db044c9 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -49,7 +49,8 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
if (!shouldUseDestForReturnSlot())
return ReturnValueSlot();
- return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile(), IsResultUnused);
+ return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
+ IsResultUnused);
}
AggValueSlot EnsureSlot(QualType T) {
@@ -77,14 +78,13 @@ public:
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void EmitFinalDestCopy(QualType type, const LValue &src);
- void EmitFinalDestCopy(QualType type, RValue src,
- CharUnits srcAlignment = CharUnits::Zero());
+ void EmitFinalDestCopy(QualType type, RValue src);
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
- void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E);
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
@@ -199,7 +199,8 @@ public:
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
void VisitAtomicExpr(AtomicExpr *E) {
- CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ RValue Res = CGF.EmitAtomicExpr(E);
+ EmitFinalDestCopy(E->getType(), Res);
}
};
} // end anonymous namespace.
@@ -259,17 +260,14 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
}
// Otherwise, copy from there to the destination.
- assert(Dest.getAddr() != src.getAggregateAddr());
- std::pair<CharUnits, CharUnits> typeInfo =
- CGF.getContext().getTypeInfoInChars(E->getType());
- EmitFinalDestCopy(E->getType(), src, typeInfo.second);
+ assert(Dest.getPointer() != src.getAggregatePointer());
+ EmitFinalDestCopy(E->getType(), src);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
- CharUnits srcAlign) {
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
assert(src.isAggregate() && "value must be aggregate value!");
- LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
+ LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
EmitFinalDestCopy(type, srcLV);
}
@@ -298,8 +296,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- dest.getAddr(),
- src.getAddr(),
+ dest.getAddress(),
+ src.getAddress(),
size);
return;
}
@@ -307,9 +305,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
// If the result of the assignment is used, copy the LHS there also.
// It's volatile if either side is.
- CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
- dest.isVolatile() || src.isVolatile(),
- std::min(dest.getAlignment(), src.getAlignment()));
+ CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
+ dest.isVolatile() || src.isVolatile());
}
/// \brief Emit the initializer for a std::initializer_list initialized with a
@@ -321,7 +318,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- llvm::Value *ArrayPtr = Array.getAddress();
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
@@ -344,13 +341,12 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
}
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxStart[] = { Zero, Zero };
llvm::Value *ArrayStart =
- Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
@@ -367,7 +363,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
// End pointer.
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd =
- Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
@@ -402,7 +398,7 @@ static bool isTrivialFiller(Expr *E) {
}
/// \brief Emit initialization of an array from an initializer list.
-void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E) {
uint64_t NumInitElements = E->getNumInits();
@@ -414,13 +410,17 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+ Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
+
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
// For that, we'll need an EH cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
- llvm::AllocaInst *endOfInit = nullptr;
+ Address endOfInit = Address::invalid();
EHScopeStack::stable_iterator cleanup;
llvm::Instruction *cleanupDominator = nullptr;
if (CGF.needsEHCleanup(dtorKind)) {
@@ -428,10 +428,11 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
- endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
"arrayinit.endOfInit");
cleanupDominator = Builder.CreateStore(begin, endOfInit);
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ elementAlign,
CGF.getDestroyer(dtorKind));
cleanup = CGF.EHStack.stable_begin();
@@ -458,10 +459,11 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (endOfInit) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
- LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
EmitInitializationToLValue(E->getInit(i), elementLV);
}
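
    [editor's note] The elementAlign computation introduced above derives a
    per-element alignment from the array's base alignment: element i sits at
    offset i * elementSize, so the guaranteed alignment of an arbitrary element
    is the largest power of two dividing both quantities. A standalone sketch of
    that arithmetic (an assumption about alignmentOfArrayElement's semantics,
    modeled on llvm::MinAlign):

    #include <cstdint>
    #include <iostream>

    // Largest power of two dividing both a and b (the lowest set bit of a|b).
    uint64_t minAlign(uint64_t a, uint64_t b) {
      uint64_t x = a | b;
      return x & ~(x - 1);
    }

    int main() {
      // A 16-byte-aligned array of 12-byte elements: elements at offsets
      // 0, 12, 24, ... are only guaranteed 4-byte alignment.
      std::cout << minAlign(16, 12) << '\n'; // 4
      // With 8-byte elements, an arbitrary element is 8-aligned.
      std::cout << minAlign(16, 8) << '\n';  // 8
      return 0;
    }
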
@@ -482,7 +484,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// Advance to the start of the rest of the array.
if (NumInitElements) {
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
- if (endOfInit) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
// Compute the end of the array.
@@ -500,7 +502,8 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
currentElement->addIncoming(element, entryBB);
// Emit the actual filler expression.
- LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
if (filler)
EmitInitializationToLValue(filler, elementLV);
else
@@ -511,7 +514,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
// Tell the EH cleanup that we finished with the last element.
- if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
// Leave the loop if we're done.
llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
@@ -569,6 +572,8 @@ static Expr *findPeephole(Expr *op, CastKind kind) {
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
switch (E->getCastKind()) {
case CK_Dynamic: {
// FIXME: Can this actually happen? We have no test coverage for it.
@@ -596,9 +601,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
- QualType PtrTy = CGF.getContext().getPointerType(Ty);
- llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
- CGF.ConvertType(PtrTy));
+ Address CastPtr =
+ Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
EmitInitializationToLValue(E->getSubExpr(),
CGF.MakeAddrLValue(CastPtr, Ty));
break;
@@ -649,13 +653,13 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// Zero-initialize. (Strictly speaking, we only need to initialize
// the padding at the end, but this is simpler.)
if (!Dest.isZeroed())
- CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
+ CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
// Build a GEP to refer to the subobject.
- llvm::Value *valueAddr =
- CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0);
+ Address valueAddr =
+ CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
+ CharUnits());
valueDest = AggValueSlot::forAddr(valueAddr,
- valueDest.getAlignment(),
valueDest.getQualifiers(),
valueDest.isExternallyDestructed(),
valueDest.requiresGCollection(),
@@ -673,8 +677,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
- llvm::Value *valueAddr =
- Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0);
+ Address valueAddr =
+ Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
return EmitFinalDestCopy(valueType, rvalue);
}
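
    [editor's note] Both atomic hunks above project the value out of a padded
    atomic with a struct GEP at index 0, offset zero. This relies on CodeGen
    laying a widened atomic out as the value followed by padding, so member 0
    is the value representation. A standalone illustration of that layout
    assumption:

    #include <cstddef>
    #include <iostream>

    struct ThreeBytes { char c[3]; };

    // Model of a padded atomic: the value is member 0 at offset 0, padding
    // follows, and the whole object is rounded up to a power-of-two size.
    struct PaddedAtomic {
      ThreeBytes Value;
      char Padding[1];
    };

    int main() {
      std::cout << offsetof(PaddedAtomic, Value) << '\n'; // 0
      std::cout << sizeof(PaddedAtomic) << '\n';          // 4
      return 0;
    }
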
@@ -959,15 +963,15 @@ void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
}
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
- llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
- if (!ArgPtr) {
+ if (!ArgPtr.isValid()) {
// If EmitVAArg fails, we fall back to the LLVM instruction.
- llvm::Value *Val =
- Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
+ llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
+ CGF.ConvertType(VE->getType()));
if (!Dest.isIgnored())
- Builder.CreateStore(Val, Dest.getAddr());
+ Builder.CreateStore(Val, Dest.getAddress());
return;
}
@@ -987,7 +991,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Push that destructor we promised.
if (!wasExternallyDestructed)
- CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
void
@@ -1011,13 +1015,13 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
@@ -1135,8 +1139,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
@@ -1146,12 +1149,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
QualType elementType =
CGF.getContext().getAsArrayType(E->getType())->getElementType();
- llvm::PointerType *APType =
- cast<llvm::PointerType>(Dest.getAddr()->getType());
- llvm::ArrayType *AType =
- cast<llvm::ArrayType>(APType->getElementType());
-
- EmitArrayInit(Dest.getAddr(), AType, elementType, E);
+ auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
+ EmitArrayInit(Dest.getAddress(), AType, elementType, E);
return;
}
@@ -1175,7 +1174,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
// Prepare a 'this' for CXXDefaultInitExprs.
- CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
+ CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
@@ -1253,7 +1252,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
if (!cleanupDominator)
- cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
+ cleanupDominator = CGF.Builder.CreateAlignedLoad(
+ CGF.Int8Ty,
+ llvm::Constant::getNullValue(CGF.Int8PtrTy),
+ CharUnits::One()); // placeholder
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
@@ -1266,7 +1268,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// else, clean it up for -O0 builds and general tidiness.
if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
+ dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
if (GEP->use_empty())
GEP->eraseFromParent();
}
@@ -1284,8 +1286,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
EmitInitializationToLValue(E->getBase(), DestLV);
VisitInitListExpr(E->getUpdater());
}
@@ -1355,7 +1356,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CodeGenFunction &CGF) {
// If the slot is already known to be zeroed, nothing to do. Don't mess with
// volatile stores.
- if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
+ if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
return;
// C++ objects with a user-declared constructor don't need zero'ing.
@@ -1368,26 +1369,22 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
}
// If the type is 16 bytes or smaller, prefer individual stores over memset.
- std::pair<CharUnits, CharUnits> TypeInfo =
- CGF.getContext().getTypeInfoInChars(E->getType());
- if (TypeInfo.first <= CharUnits::fromQuantity(16))
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
+ if (Size <= CharUnits::fromQuantity(16))
return;
// Check to see if over 3/4 of the initializer is known to be zero. If so,
// we prefer to emit memset + individual stores for the rest.
CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
- if (NumNonZeroBytes*4 > TypeInfo.first)
+ if (NumNonZeroBytes*4 > Size)
return;
// Okay, it seems like a good idea to use an initial memset, emit the call.
- llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
- CharUnits Align = TypeInfo.second;
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
- llvm::Value *Loc = Slot.getAddr();
-
- Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
- CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
- Align.getQuantity(), false);
+ Address Loc = Slot.getAddress();
+ Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
// Tell the AggExprEmitter that the slot is known zero.
Slot.setZeroed();
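
    [editor's note] CheckAggExprForMemSetUse keeps its two thresholds, now
    phrased over a single CharUnits size: aggregates of 16 bytes or fewer are
    initialized with individual stores, and a leading memset is emitted only
    when at least three quarters of the initializer bytes are known zero.
    The decision as a standalone sketch:

    #include <cstdint>
    #include <iostream>

    bool shouldEmitInitialMemSet(uint64_t sizeInBytes, uint64_t nonZeroBytes) {
      if (sizeInBytes <= 16)
        return false;                       // small: individual stores win
      if (nonZeroBytes * 4 > sizeInBytes)
        return false;                       // less than 3/4 zero: skip memset
      return true;
    }

    int main() {
      std::cout << shouldEmitInitialMemSet(64, 8) << '\n';  // 1 (56/64 zero)
      std::cout << shouldEmitInitialMemSet(64, 40) << '\n'; // 0 (mostly nonzero)
      std::cout << shouldEmitInitialMemSet(12, 0) << '\n';  // 0 (<= 16 bytes)
      return 0;
    }
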
@@ -1403,7 +1400,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
assert(E && hasAggregateEvaluationKind(E->getType()) &&
"Invalid aggregate expression to emit");
- assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
+ assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
"slot has bits but no address");
// Optimize the slot if possible.
@@ -1414,7 +1411,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
- llvm::Value *Temp = CreateMemTemp(E->getType());
+ Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -1422,10 +1419,9 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
return LV;
}
-void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
- llvm::Value *SrcPtr, QualType Ty,
+void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
+ Address SrcPtr, QualType Ty,
bool isVolatile,
- CharUnits alignment,
bool isAssignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
@@ -1456,17 +1452,16 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- // Get data size and alignment info for this aggregate. If this is an
- // assignment don't copy the tail padding. Otherwise copying it is fine.
+ // Get data size info for this aggregate. If this is an assignment,
+ // don't copy the tail padding, because we might be assigning into a
+ // base subobject where the tail padding is claimed. Otherwise,
+ // copying it is fine.
std::pair<CharUnits, CharUnits> TypeInfo;
if (isAssignment)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
- if (alignment.isZero())
- alignment = TypeInfo.second;
-
llvm::Value *SizeVal = nullptr;
if (TypeInfo.first.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
@@ -1509,15 +1504,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DBP);
-
- llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+ SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
@@ -1540,11 +1528,11 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
+ auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
+
// Determine the metadata to describe the position of any padding in this
// memcpy, as well as the TBAA tags for the members of the struct, in case
// the optimizer wishes to expand it in to scalar memory operations.
- llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
-
- Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, alignment.getQuantity(),
- isVolatile, /*TBAATag=*/nullptr, TBAAStructTag);
+ if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
}
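
    [editor's note] The tail-padding comment added in EmitAggregateCopy above is
    worth a concrete picture: under the Itanium ABI, a derived class may place
    its own members in a non-POD base's tail padding, so an assignment to a base
    subobject must copy only the base's data size, never its full sizeof.
    Illustrative layout (the exact offsets are an assumption about a typical
    64-bit Itanium target):

    #include <iostream>

    struct Base {
      Base() {}        // non-POD, so its tail padding is reusable
      long long a;     // offset 0
      char b;          // offset 8; sizeof(Base) == 16, data size == 9
    };

    struct Derived : Base {
      char c;          // typically placed at offset 9, inside Base's padding
    };

    int main() {
      Derived d;
      d.c = 'x';
      static_cast<Base &>(d) = Base(); // must copy 9 bytes, not 16,
      std::cout << d.c << '\n';        // or this would no longer print 'x'
      return 0;
    }
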
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index c7adccaeeaea..604cde76a7b1 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -59,7 +59,7 @@ static RequiredArgs commonEmitCXXMemberOrOperatorCall(
if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- CGF.EmitCallArgs(Args, FPT, CE->arg_begin() + ArgsToSkip, CE->arg_end(),
+ CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
CE->getDirectCallee());
} else {
assert(
@@ -166,9 +166,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
- llvm::Value *This;
+ Address This = Address::invalid();
if (IsArrow)
- This = EmitScalarExpr(Base);
+ This = EmitPointerWithAlignment(Base);
else
This = EmitLValue(Base).getAddress();
@@ -185,19 +185,18 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// when it isn't necessary; just produce the proper effect here.
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- llvm::Value *RHS =
- EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
+ Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
EmitAggregateAssign(This, RHS, CE->getType());
- return RValue::get(This);
+ return RValue::get(This.getPointer());
}
if (isa<CXXConstructorDecl>(MD) &&
cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
// Trivial move and copy ctor are the same.
assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
- return RValue::get(This);
+ Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
+ return RValue::get(This.getPointer());
}
llvm_unreachable("unknown trivial member function");
}
@@ -245,7 +244,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
- EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
/*ImplicitParam=*/nullptr, QualType(), CE);
}
return RValue::get(nullptr);
@@ -259,7 +258,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
- llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy);
+ llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent());
EmitVTablePtrCheckForCall(MD, VTable, CFITCK_NVCall, CE->getLocStart());
}
@@ -277,7 +276,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, MD, This, UseVirtualCall);
}
- return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
/*ImplicitParam=*/nullptr, QualType(), CE);
}
@@ -301,19 +300,20 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
// Emit the 'this' pointer.
- llvm::Value *This;
-
+ Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
- This = EmitScalarExpr(BaseExpr);
+ This = EmitPointerWithAlignment(BaseExpr);
else
This = EmitLValue(BaseExpr).getAddress();
- EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
// Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *ThisPtrForCall = nullptr;
llvm::Value *Callee =
- CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, MemFnPtr, MPT);
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
+ ThisPtrForCall, MemFnPtr, MPT);
CallArgList Args;
@@ -321,12 +321,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
getContext().getPointerType(getContext().getTagDeclType(RD));
// Push the this ptr.
- Args.add(RValue::get(This), ThisType);
+ Args.add(RValue::get(ThisPtrForCall), ThisType);
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
+ EmitCallArgs(Args, FPT, E->arguments(), E->getDirectCallee());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args);
}
@@ -348,18 +348,43 @@ RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
}
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
- llvm::Value *DestPtr,
+ Address DestPtr,
const CXXRecordDecl *Base) {
if (Base->isEmpty())
return;
- DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
- CharUnits Size = Layout.getNonVirtualSize();
- CharUnits Align = Layout.getNonVirtualAlignment();
-
- llvm::Value *SizeVal = CGF.CGM.getSize(Size);
+ CharUnits NVSize = Layout.getNonVirtualSize();
+
+ // We cannot simply zero-initialize the entire base sub-object if vbptrs are
+ // present; they are initialized by the most derived class before the
+ // constructor is called.
+ SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
+ Stores.emplace_back(CharUnits::Zero(), NVSize);
+
+ // Each store is split by the existence of a vbptr.
+ CharUnits VBPtrWidth = CGF.getPointerSize();
+ std::vector<CharUnits> VBPtrOffsets =
+ CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
+ for (CharUnits VBPtrOffset : VBPtrOffsets) {
+ std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
+ CharUnits LastStoreOffset = LastStore.first;
+ CharUnits LastStoreSize = LastStore.second;
+
+ CharUnits SplitBeforeOffset = LastStoreOffset;
+ CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
+ assert(!SplitBeforeSize.isNegative() && "negative store size!");
+ if (!SplitBeforeSize.isZero())
+ Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
+
+ CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
+ CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
+ assert(!SplitAfterSize.isNegative() && "negative store size!");
+ if (!SplitAfterSize.isZero())
+ Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
+ }
// If the type contains a pointer to data member, we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
@@ -367,27 +392,43 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
// like -1, which happens to be the pattern used by member-pointers.
// TODO: isZeroInitializable can be over-conservative in the case where a
// virtual base contains a member pointer.
- if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
- llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
-
- llvm::GlobalVariable *NullVariable =
- new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
- /*isConstant=*/true,
- llvm::GlobalVariable::PrivateLinkage,
- NullConstant, Twine());
+ llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
+ if (!NullConstantForBase->isNullValue()) {
+ llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
+ CGF.CGM.getModule(), NullConstantForBase->getType(),
+ /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
+ NullConstantForBase, Twine());
+
+ CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
+ DestPtr.getAlignment());
NullVariable->setAlignment(Align.getQuantity());
- llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
+
+ Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
// Get and call the appropriate llvm.memcpy overload.
- CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
- return;
- }
-
+ for (std::pair<CharUnits, CharUnits> Store : Stores) {
+ CharUnits StoreOffset = Store.first;
+ CharUnits StoreSize = Store.second;
+ llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
+ CGF.Builder.CreateMemCpy(
+ CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
+ CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
+ StoreSizeVal);
+ }
+
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
- CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
- Align.getQuantity());
+ } else {
+ for (std::pair<CharUnits, CharUnits> Store : Stores) {
+ CharUnits StoreOffset = Store.first;
+ CharUnits StoreSize = Store.second;
+ llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
+ CGF.Builder.CreateMemSet(
+ CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
+ CGF.Builder.getInt8(0), StoreSizeVal);
+ }
+ }
}
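
    [editor's note] The splitting loop above is easier to see on concrete
    numbers. Starting from one store covering [0, NVSize), each vbptr punches a
    pointer-sized hole so that the slots written by the most derived class are
    left alone. A simplified standalone model (intervals as offset/size pairs;
    a sketch of the intent, not a line-for-line copy of the CharUnits code):

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    using Interval = std::pair<uint64_t, uint64_t>; // (offset, size)

    std::vector<Interval> splitStores(uint64_t NVSize,
                                      const std::vector<uint64_t> &VBPtrOffsets,
                                      uint64_t PtrWidth) {
      std::vector<Interval> Stores{{0, NVSize}};
      for (uint64_t Off : VBPtrOffsets) {
        Interval Last = Stores.back();
        Stores.pop_back();
        uint64_t LastEnd = Last.first + Last.second;
        if (Off > Last.first)                 // bytes before the vbptr
          Stores.push_back({Last.first, Off - Last.first});
        if (LastEnd > Off + PtrWidth)         // bytes after the vbptr
          Stores.push_back({Off + PtrWidth, LastEnd - (Off + PtrWidth)});
      }
      return Stores;
    }

    int main() {
      // A 24-byte base with a vbptr at offset 8 on a 64-bit target:
      for (Interval S : splitStores(24, {8}, 8))
        std::cout << "store at " << S.first << ", size " << S.second << '\n';
      // store at 0, size 8
      // store at 16, size 8
      return 0;
    }
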
void
@@ -404,11 +445,12 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
case CXXConstructExpr::CK_Complete:
- EmitNullInitialization(Dest.getAddr(), E->getType());
+ EmitNullInitialization(Dest.getAddress(), E->getType());
break;
case CXXConstructExpr::CK_VirtualBase:
case CXXConstructExpr::CK_NonVirtualBase:
- EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
+ EmitNullBaseClassInitialization(*this, Dest.getAddress(),
+ CD->getParent());
break;
}
}
@@ -431,7 +473,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (const ConstantArrayType *arrayType
= getContext().getAsConstantArrayType(E->getType())) {
- EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
} else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -457,15 +499,13 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
}
// Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
- E);
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
+ Dest.getAddress(), E);
}
}
-void
-CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
- llvm::Value *Src,
- const Expr *Exp) {
+void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
+ const Expr *Exp) {
if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
Exp = E->getSubExpr();
assert(isa<CXXConstructExpr>(Exp) &&
@@ -759,22 +799,20 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
- QualType AllocType, llvm::Value *NewPtr) {
+ QualType AllocType, Address NewPtr) {
// FIXME: Refactor with EmitExprAsInit.
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
CGF.EmitScalarInit(Init, nullptr,
- CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
+ CGF.MakeAddrLValue(NewPtr, AllocType), false);
return;
case TEK_Complex:
- CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
- Alignment),
+ CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
/*isInit*/ true);
return;
case TEK_Aggregate: {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -787,23 +825,27 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
void CodeGenFunction::EmitNewArrayInitializer(
const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
- llvm::Value *BeginPtr, llvm::Value *NumElements,
+ Address BeginPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
// If we have a type with trivial initialization and no initializer,
// there's nothing to do.
if (!E->hasInitializer())
return;
- llvm::Value *CurPtr = BeginPtr;
+ Address CurPtr = BeginPtr;
unsigned InitListElements = 0;
const Expr *Init = E->getInitializer();
- llvm::AllocaInst *EndOfInit = nullptr;
+ Address EndOfInit = Address::invalid();
QualType::DestructionKind DtorKind = ElementType.isDestructedType();
EHScopeStack::stable_iterator Cleanup;
llvm::Instruction *CleanupDominator = nullptr;
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
+ CharUnits ElementAlign =
+ BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
InitListElements = ILE->getNumInits();
@@ -813,10 +855,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
QualType AllocType = E->getAllocatedType();
if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
AllocType->getAsArrayTypeUnsafe())) {
- unsigned AS = CurPtr->getType()->getPointerAddressSpace();
ElementTy = ConvertTypeForMem(AllocType);
- llvm::Type *AllocPtrTy = ElementTy->getPointerTo(AS);
- CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
+ CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
InitListElements *= getContext().getConstantArrayElementCount(CAT);
}
@@ -826,27 +866,34 @@ void CodeGenFunction::EmitNewArrayInitializer(
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
- EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
- CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
- pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
+ EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
+ "array.init.end");
+ CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
+ pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
+ ElementType, ElementAlign,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
}
+ CharUnits StartAlign = CurPtr.getAlignment();
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (EndOfInit)
- Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
- EndOfInit);
+ if (EndOfInit.isValid()) {
+ auto FinishedPtr =
+ Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
+ Builder.CreateStore(FinishedPtr, EndOfInit);
+ }
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
ILE->getInit(i)->getType(), CurPtr);
- CurPtr = Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1,
- "array.exp.next");
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(1),
+ "array.exp.next"),
+ StartAlign.alignmentAtOffset((i + 1) * ElementSize));
}
// The remaining elements are filled with the array filler expression.
@@ -864,7 +911,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
+ CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
}
// Attempt to perform zero-initialization using memset.
@@ -889,9 +936,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Create the memset.
- CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
- Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
- Alignment.getQuantity(), false);
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
return true;
};
@@ -925,7 +970,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
//
// FIXME: Share this cleanup with the constructor call emission rather than
// having it create a cleanup of its own.
- if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
// Emit a constructor call loop to initialize the remaining elements.
if (InitListElements)
@@ -985,13 +1031,13 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Find the end of the array, hoisted out of the loop.
llvm::Value *EndPtr =
- Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");
+ Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
if (!ConstNum) {
- llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
- "array.isempty");
+ llvm::Value *IsEmpty =
+ Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
}
@@ -1000,16 +1046,19 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
- Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
- CurPtrPhi->addIncoming(CurPtr, EntryBB);
- CurPtr = CurPtrPhi;
+ Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
+ CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
+
+ CurPtr = Address(CurPtrPhi, ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
- if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
// Enter a partial-destruction Cleanup if necessary.
if (!CleanupDominator && needsEHCleanup(DtorKind)) {
- pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
+ pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
+ ElementType, ElementAlign,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
CleanupDominator = Builder.CreateUnreachable();
@@ -1026,7 +1075,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Advance to the next element by adjusting the pointer type as necessary.
llvm::Value *NextPtr =
- Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1, "array.next");
+ Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
+ "array.next");
// Check whether we've gotten to the end of the array and, if so,
// exit the loop.
@@ -1039,7 +1089,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
QualType ElementType, llvm::Type *ElementTy,
- llvm::Value *NewPtr, llvm::Value *NumElements,
+ Address NewPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
ApplyDebugLocation DL(CGF, E);
if (E->isArray())
@@ -1089,8 +1139,7 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
bool IsDelete) {
CallArgList Args;
const Stmt *ArgS = Arg;
- EmitCallArgs(Args, *Type->param_type_begin(),
- ConstExprIterator(&ArgS), ConstExprIterator(&ArgS + 1));
+ EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
// Find the allocation or deallocation function that we're calling.
ASTContext &Ctx = getContext();
DeclarationName Name = Ctx.DeclarationNames
@@ -1105,7 +1154,7 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
namespace {
/// A cleanup to call the given 'operator delete' function upon
/// abnormal exit from a new expression.
- class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
size_t NumPlacementArgs;
const FunctionDecl *OperatorDelete;
llvm::Value *Ptr;
@@ -1158,7 +1207,7 @@ namespace {
/// A cleanup to call the given 'operator delete' function upon
/// abnormal exit from a new expression when the new expression is
/// conditional.
- class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
+ class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
size_t NumPlacementArgs;
const FunctionDecl *OperatorDelete;
DominatingValue<RValue>::saved_type Ptr;
@@ -1219,7 +1268,7 @@ namespace {
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
- llvm::Value *NewPtr,
+ Address NewPtr,
llvm::Value *AllocSize,
const CallArgList &NewArgs) {
// If we're not inside a conditional branch, then the cleanup will
@@ -1229,7 +1278,8 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
.pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
- NewPtr, AllocSize);
+ NewPtr.getPointer(),
+ AllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
@@ -1238,7 +1288,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
// Otherwise, we need to save all this stuff.
DominatingValue<RValue>::saved_type SavedNewPtr =
- DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
@@ -1261,13 +1311,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// 1. Build a call to the allocation function.
FunctionDecl *allocator = E->getOperatorNew();
- const FunctionProtoType *allocatorType =
- allocator->getType()->castAs<FunctionProtoType>();
-
- CallArgList allocatorArgs;
-
- // The allocation size is the first argument.
- QualType sizeType = getContext().getSizeType();
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
@@ -1282,24 +1325,61 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
- allocatorArgs.add(RValue::get(allocSize), sizeType);
-
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
- EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
- E->placement_arg_end(), /* CalleeDecl */ nullptr,
- /*ParamsToSkip*/ 1);
-
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
- RValue RV;
+ Address allocation = Address::invalid();
+ CallArgList allocatorArgs;
if (allocator->isReservedGlobalPlacementOperator()) {
- assert(allocatorArgs.size() == 2);
- RV = allocatorArgs[1].RV;
- // TODO: kill any unnecessary computations done for the size
- // argument.
+ assert(E->getNumPlacementArgs() == 1);
+ const Expr *arg = *E->placement_arguments().begin();
+
+ AlignmentSource alignSource;
+ allocation = EmitPointerWithAlignment(arg, &alignSource);
+
+ // The pointer expression will, in many cases, be an opaque void*.
+ // In these cases, discard the computed alignment and use the
+ // formal alignment of the allocated type.
+ if (alignSource != AlignmentSource::Decl) {
+ allocation = Address(allocation.getPointer(),
+ getContext().getTypeAlignInChars(allocType));
+ }
+
+ // Set up allocatorArgs for the call to operator delete if it's not
+ // the reserved global operator.
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+ allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
+ allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
+ }
+
} else {
- RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
+
+ // The allocation size is the first argument.
+ QualType sizeType = getContext().getSizeType();
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
+
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
+ /* CalleeDecl */ nullptr,
+ /*ParamsToSkip*/ 1);
+
+ RValue RV =
+ EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+ // For now, only assume that the allocation function returns
+ // something satisfactorily aligned for the element type, plus
+ // the cookie if we have one.
+ CharUnits allocationAlign =
+ getContext().getTypeAlignInChars(allocType);
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME?
+ allocationAlign = std::max(allocationAlign, cookieAlign);
+ }
+
+ allocation = Address(RV.getScalarVal(), allocationAlign);
}
// Emit a null check on the allocation result if the allocation
@@ -1312,9 +1392,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *nullCheckBB = nullptr;
llvm::BasicBlock *contBB = nullptr;
- llvm::Value *allocation = RV.getScalarVal();
- unsigned AS = allocation->getType()->getPointerAddressSpace();
-
// The null-check means that the initializer is conditionally
// evaluated.
ConditionalEvaluation conditional(*this);
@@ -1326,7 +1403,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
contBB = createBasicBlock("new.cont");
- llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
+ llvm::Value *isNull =
+ Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -1352,8 +1430,15 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
llvm::Type *elementTy = ConvertTypeForMem(allocType);
- llvm::Type *elementPtrTy = elementTy->getPointerTo(AS);
- llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
+ Address result = Builder.CreateElementBitCast(allocation, elementTy);
+
+ // Pass the pointer through invariant.group.barrier to avoid propagating
+ // vptr information that may be associated with the previous type.
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ allocator->isReservedGlobalPlacementOperator())
+ result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
+ result.getAlignment());
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
@@ -1362,7 +1447,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
llvm::Type *resultType = ConvertTypeForMem(E->getType());
- if (result->getType() != resultType)
+ if (result.getType() != resultType)
result = Builder.CreateBitCast(result, resultType);
}
@@ -1373,21 +1458,22 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
cleanupDominator->eraseFromParent();
}
+ llvm::Value *resultPtr = result.getPointer();
if (nullCheck) {
conditional.end(*this);
llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
EmitBlock(contBB);
- llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
- PHI->addIncoming(result, notNullBB);
- PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
+ llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
+ PHI->addIncoming(resultPtr, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
nullCheckBB);
- result = PHI;
+ resultPtr = PHI;
}
- return result;
+ return resultPtr;
}
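
    [editor's note] The invariant.group.barrier added for placement new under
    -fstrict-vtable-pointers guards against a concrete miscompile: placement new
    may legally change the dynamic type living in the same storage, so vptr
    facts learned before the new-expression must not flow past it. Illustrative
    C++ (the devirtualization comments describe what the optimizer must not do,
    not guaranteed compiler behavior):

    #include <new>

    struct A { virtual int f() { return 1; } virtual ~A() {} };
    struct B : A { int f() override { return 2; } };

    int reuseStorage(void *buf) {
      A *a = new (buf) A;
      int first = a->f();   // vptr known: may devirtualize to A::f
      a->~A();
      A *b = new (buf) B;   // same bytes, new dynamic type
      int second = b->f();  // must call B::f; stale vptr info would be wrong
      b->~A();
      return first + second; // 3
    }

    int main() {
      alignas(B) unsigned char buf[sizeof(B)];
      return reuseStorage(buf) == 3 ? 0 : 1;
    }
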
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
@@ -1423,7 +1509,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
namespace {
/// Calls the given 'operator delete' on a single object.
- struct CallObjectDelete : EHScopeStack::Cleanup {
+ struct CallObjectDelete final : EHScopeStack::Cleanup {
llvm::Value *Ptr;
const FunctionDecl *OperatorDelete;
QualType ElementType;
@@ -1450,7 +1536,7 @@ CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
@@ -1473,29 +1559,24 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
// to pop it off in a second.
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete, ElementType);
+ Ptr.getPointer(),
+ OperatorDelete, ElementType);
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false,
Ptr);
- else if (CGF.getLangOpts().ObjCAutoRefCount &&
- ElementType->isObjCLifetimeType()) {
- switch (ElementType.getObjCLifetime()) {
+ else if (auto Lifetime = ElementType.getObjCLifetime()) {
+ switch (Lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
break;
- case Qualifiers::OCL_Strong: {
- // Load the pointer value.
- llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
- ElementType.isVolatileQualified());
-
- CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
+ case Qualifiers::OCL_Strong:
+ CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
break;
- }
case Qualifiers::OCL_Weak:
CGF.EmitARCDestroyWeak(Ptr);
@@ -1508,7 +1589,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
namespace {
/// Calls the given 'operator delete' on an array of objects.
- struct CallArrayDelete : EHScopeStack::Cleanup {
+ struct CallArrayDelete final : EHScopeStack::Cleanup {
llvm::Value *Ptr;
const FunctionDecl *OperatorDelete;
llvm::Value *NumElements;
@@ -1570,7 +1651,7 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *E,
- llvm::Value *deletedPtr,
+ Address deletedPtr,
QualType elementType) {
llvm::Value *numElements = nullptr;
llvm::Value *allocatedPtr = nullptr;
@@ -1591,13 +1672,18 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
assert(numElements && "no element count for a type with a destructor!");
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ llvm::Value *arrayBegin = deletedPtr.getPointer();
llvm::Value *arrayEnd =
- CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+ CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
// Note that it is legal to allocate a zero-length array, and we
// can never fold the check away because the length should always
// come from a cookie.
- CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
CGF.getDestroyer(dtorKind),
/*checkZeroLength*/ true,
CGF.needsEHCleanup(dtorKind));
@@ -1609,13 +1695,13 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
const Expr *Arg = E->getArgument();
- llvm::Value *Ptr = EmitScalarExpr(Arg);
+ Address Ptr = EmitPointerWithAlignment(Arg);
// Null check the pointer.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
@@ -1640,11 +1726,11 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
+ Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
+ Ptr.getAlignment());
}
- assert(ConvertTypeForMem(DeleteTy) ==
- cast<llvm::PointerType>(Ptr->getType())->getElementType());
+ assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
@@ -1690,7 +1776,7 @@ static bool isGLValueFromPointerDeref(const Expr *E) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
- llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
// C++ [expr.typeid]p2:
// If the glvalue expression is obtained by applying the unary * operator to
@@ -1707,7 +1793,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
CGF.createBasicBlock("typeid.bad_typeid");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
- llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
CGF.EmitBlock(BadTypeidBlock);
@@ -1758,8 +1844,9 @@ static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
return llvm::UndefValue::get(DestLTy);
}
-llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
+llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
const CXXDynamicCastExpr *DCE) {
+ CGM.EmitExplicitCastExprType(DCE, this);
QualType DestTy = DCE->getTypeAsWritten();
if (DCE->isAlwaysNull())
@@ -1803,19 +1890,21 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
CastNull = createBasicBlock("dynamic_cast.null");
CastNotNull = createBasicBlock("dynamic_cast.notnull");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
+ llvm::Value *Value;
if (isDynamicCastToVoid) {
- Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
+ Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
DestTy);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
- Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
+ Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
DestTy, DestRecordTy, CastEnd);
+ CastNotNull = Builder.GetInsertBlock();
}
if (ShouldNullCheckSrcValue) {
@@ -1840,12 +1929,11 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- LValue SlotLV =
- MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
+ LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
- for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
- e = E->capture_init_end();
+ for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
i != e; ++i, ++CurField) {
// Emit initialization
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 27d1c689966b..ccdb53287e9f 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -83,12 +83,12 @@ public:
/// specified value pointer.
void EmitStoreOfComplex(ComplexPairTy Val, LValue LV, bool isInit);
- /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ /// Emit a cast from complex value Val to DestType.
ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
- QualType DestType);
- /// EmitComplexToComplexCast - Emit a cast from scalar value Val to DestType.
+ QualType DestType, SourceLocation Loc);
+ /// Emit a cast from scalar value Val to DestType.
ComplexPairTy EmitScalarToComplexCast(llvm::Value *Val, QualType SrcType,
- QualType DestType);
+ QualType DestType, SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Visitor Methods
@@ -154,6 +154,8 @@ public:
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
@@ -298,6 +300,19 @@ public:
// Utilities
//===----------------------------------------------------------------------===//
+Address CodeGenFunction::emitAddrOfRealComponent(Address addr,
+ QualType complexType) {
+ CharUnits offset = CharUnits::Zero();
+ return Builder.CreateStructGEP(addr, 0, offset, addr.getName() + ".realp");
+}
+
+Address CodeGenFunction::emitAddrOfImagComponent(Address addr,
+ QualType complexType) {
+ QualType eltType = complexType->castAs<ComplexType>()->getElementType();
+ CharUnits offset = getContext().getTypeSizeInChars(eltType);
+ return Builder.CreateStructGEP(addr, 1, offset, addr.getName() + ".imagp");
+}
+
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
@@ -306,29 +321,21 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
- llvm::Value *SrcPtr = lvalue.getAddress();
+ Address SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
- unsigned AlignR = lvalue.getAlignment().getQuantity();
- ASTContext &C = CGF.getContext();
- QualType ComplexTy = lvalue.getType();
- unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
- unsigned AlignI = std::min(AlignR, ComplexAlign);
- llvm::Value *Real=nullptr, *Imag=nullptr;
+ llvm::Value *Real = nullptr, *Imag = nullptr;
if (!IgnoreReal || isVolatile) {
- llvm::Value *RealP = Builder.CreateStructGEP(nullptr, SrcPtr, 0,
- SrcPtr->getName() + ".realp");
- Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
- SrcPtr->getName() + ".real");
+ Address RealP = CGF.emitAddrOfRealComponent(SrcPtr, lvalue.getType());
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr.getName() + ".real");
}
if (!IgnoreImag || isVolatile) {
- llvm::Value *ImagP = Builder.CreateStructGEP(nullptr, SrcPtr, 1,
- SrcPtr->getName() + ".imagp");
- Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
- SrcPtr->getName() + ".imag");
+ Address ImagP = CGF.emitAddrOfImagComponent(SrcPtr, lvalue.getType());
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr.getName() + ".imag");
}
+
return ComplexPairTy(Real, Imag);
}
@@ -340,19 +347,12 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
- llvm::Value *Ptr = lvalue.getAddress();
- llvm::Value *RealPtr = Builder.CreateStructGEP(nullptr, Ptr, 0, "real");
- llvm::Value *ImagPtr = Builder.CreateStructGEP(nullptr, Ptr, 1, "imag");
- unsigned AlignR = lvalue.getAlignment().getQuantity();
- ASTContext &C = CGF.getContext();
- QualType ComplexTy = lvalue.getType();
- unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
- unsigned AlignI = std::min(AlignR, ComplexAlign);
-
- Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
- lvalue.isVolatileQualified());
- Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
- lvalue.isVolatileQualified());
+ Address Ptr = lvalue.getAddress();
+ Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
+ Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
+
+ Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
+ Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
}
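Both the load and the store path above now derive component addresses from the shared emitAddrOfRealComponent/emitAddrOfImagComponent helpers, whose offsets (zero for the real part, one element size for the imaginary part) encode the usual layout of _Complex. A compilable illustration of that layout assumption:

    // A _Complex T is laid out like { T real; T imag; }, for example:
    struct ComplexDouble {
      double real;   // offset 0            -> ".realp" GEP index 0
      double imag;   // offset sizeof(double)-> ".imagp" GEP index 1
    };
    static_assert(sizeof(ComplexDouble) == 2 * sizeof(double),
                  "no padding between the real and imaginary parts");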
@@ -385,16 +385,17 @@ ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
- assert(RetAlloca && "Expected complex return value");
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
+ assert(RetAlloca.isValid() && "Expected complex return value");
return EmitLoadOfLValue(CGF.MakeAddrLValue(RetAlloca, E->getType()),
E->getExprLoc());
}
-/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+/// Emit a cast from complex value Val to DestType.
ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
QualType SrcType,
- QualType DestType) {
+ QualType DestType,
+ SourceLocation Loc) {
// Get the src/dest element type.
SrcType = SrcType->castAs<ComplexType>()->getElementType();
DestType = DestType->castAs<ComplexType>()->getElementType();
@@ -402,17 +403,18 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
// C99 6.3.1.6: When a value of complex type is converted to another
// complex type, both the real and imaginary parts follow the conversion
// rules for the corresponding real types.
- Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
- Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
return Val;
}
ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
QualType SrcType,
- QualType DestType) {
+ QualType DestType,
+ SourceLocation Loc) {
// Convert the input element to the element type of the complex.
DestType = DestType->castAs<ComplexType>()->getElementType();
- Val = CGF.EmitScalarConversion(Val, SrcType, DestType);
+ Val = CGF.EmitScalarConversion(Val, SrcType, DestType, Loc);
// Return (realval, 0).
return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
@@ -434,12 +436,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- llvm::Value *V = origLV.getAddress();
- V = Builder.CreateBitCast(V,
- CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
- origLV.getAlignment()),
- Op->getExprLoc());
+ Address V = origLV.getAddress();
+ V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy));
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_BitCast:
@@ -488,14 +487,15 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_FloatingRealToComplex:
case CK_IntegralRealToComplex:
- return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op),
- Op->getType(), DestTy);
+ return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op), Op->getType(),
+ DestTy, Op->getExprLoc());
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
- return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy,
+ Op->getExprLoc());
}
llvm_unreachable("unknown cast resulting in complex value");
@@ -585,19 +585,25 @@ ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
// We *must* use the full CG function call building logic here because the
// complex type has special ABI handling. We also should not forget about
// special calling convention which may be used for compiler builtins.
- const CGFunctionInfo &FuncInfo =
- CGF.CGM.getTypes().arrangeFreeFunctionCall(
- Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
- RequiredArgs::All);
+
+ // We build a function prototype with a noexcept exception specification
+ // to state that this call does not throw.
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI = EPI.withExceptionSpec(
+ FunctionProtoType::ExceptionSpecInfo(EST_BasicNoexcept));
+ SmallVector<QualType, 4> ArgsQTys(
+ 4, Op.Ty->castAs<ComplexType>()->getElementType());
+ QualType FQTy = CGF.getContext().getFunctionType(Op.Ty, ArgsQTys, EPI);
+ const CGFunctionInfo &FuncInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, cast<FunctionType>(FQTy.getTypePtr()), false);
+
llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
llvm::Instruction *Call;
RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
- nullptr, &Call);
+ FQTy->getAs<FunctionProtoType>(), &Call);
cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
- cast<llvm::CallInst>(Call)->setDoesNotThrow();
-
return Res.getComplexVal();
}
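Marking the libcall prototype noexcept up front, rather than bolting setDoesNotThrow onto the finished call, lets the regular call-emission machinery attach the nounwind attribute. The SmallVector of four element-typed arguments models runtime helpers like compiler-rt's __mulsc3 (named here only for illustration; the diff itself just passes LibCallName through). Roughly the declaration being modeled, for a float complex multiply:

    // Sketch of the runtime helper shape the noexcept prototype describes
    // (uses the _Complex extension, which Clang also accepts in C++ mode).
    extern "C" float _Complex __mulsc3(float a, float b,
                                       float c, float d) noexcept;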
@@ -846,19 +852,20 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
LValue LHS = CGF.EmitLValue(E->getLHS());
// Load from the l-value and convert it.
+ SourceLocation Loc = E->getExprLoc();
if (LHSTy->isAnyComplexType()) {
- ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
- OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, Loc);
+ OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
} else {
- llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
+ llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, Loc);
// For floating point real operands we can directly pass the scalar form
// to the binary operator emission and potentially get more efficient code.
if (LHSTy->isRealFloatingType()) {
if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
- LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
+ LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc);
OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
} else {
- OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
}
}
@@ -867,12 +874,13 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
// Truncate the result and store it into the LHS lvalue.
if (LHSTy->isAnyComplexType()) {
- ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ ComplexPairTy ResVal =
+ EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc);
EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
Val = RValue::getComplex(ResVal);
} else {
llvm::Value *ResVal =
- CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
+ CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc);
CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
Val = RValue::get(ResVal);
}
@@ -1011,10 +1019,10 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
- llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(E, ArgValue);
- if (!ArgPtr) {
+ if (!ArgPtr.isValid()) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
llvm::Type *EltTy =
CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
@@ -1022,7 +1030,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
return ComplexPairTy(U, U);
}
- return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(ArgPtr, E->getType()),
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(ArgPtr, E->getType()),
E->getExprLoc());
}
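For reference, the path changed in VisitVAArgExpr is reached by reading a complex value out of a va_list; a minimal example (again relying on the _Complex extension in C++ mode):

    #include <stdarg.h>
    // Returns the first variadic argument, read as a complex float; the
    // va_arg below is what lands in ComplexExprEmitter::VisitVAArgExpr.
    float _Complex first_complex(int n, ...) {
      va_list ap;
      va_start(ap, n);
      float _Complex c = va_arg(ap, float _Complex);
      va_end(ap);
      return c;
    }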
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index a15c151d6f9d..3839ab718fa3 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -636,6 +636,8 @@ public:
}
llvm::Constant *VisitCastExpr(CastExpr* E) {
+ if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
+ CGM.EmitExplicitCastExprType(ECE, CGF);
Expr *subExpr = E->getSubExpr();
llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
if (!C) return nullptr;
@@ -977,23 +979,26 @@ public:
}
public:
- llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ ConstantAddress EmitLValue(APValue::LValueBase LVBase) {
if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
if (Decl->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(Decl);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
- return CGM.GetAddrOfFunction(FD);
+ return ConstantAddress(CGM.GetAddrOfFunction(FD), CharUnits::One());
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
// We can never refer to a variable with local storage.
if (!VD->hasLocalStorage()) {
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (VD->isFileVarDecl() || VD->hasExternalStorage())
- return CGM.GetAddrOfGlobalVar(VD);
- else if (VD->isLocalVarDecl())
- return CGM.getOrCreateStaticVarDecl(
+ return ConstantAddress(CGM.GetAddrOfGlobalVar(VD), Align);
+ else if (VD->isLocalVarDecl()) {
+ auto Ptr = CGM.getOrCreateStaticVarDecl(
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ return ConstantAddress(Ptr, Align);
+ }
}
}
- return nullptr;
+ return ConstantAddress::invalid();
}
Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
@@ -1006,14 +1011,18 @@ public:
llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
CLE->getType(), CGF);
// FIXME: "Leaked" on failure.
- if (C)
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ if (!C) return ConstantAddress::invalid();
+
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
+
+ auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
C, ".compoundliteral", nullptr,
llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
- return C;
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
}
case Expr::StringLiteralClass:
return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
@@ -1021,15 +1030,15 @@ public:
return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
case Expr::ObjCStringLiteralClass: {
ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
- llvm::Constant *C =
+ ConstantAddress C =
CGM.getObjCRuntime().GenerateConstantString(SL->getString());
- return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ return C.getElementBitCast(ConvertType(E->getType()));
}
case Expr::PredefinedExprClass: {
unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
if (CGF) {
LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
- return cast<llvm::Constant>(Res.getAddress());
+ return cast<ConstantAddress>(Res.getAddress());
} else if (Type == PredefinedExpr::PrettyFunction) {
return CGM.GetAddrOfConstantCString("top level", ".tmp");
}
@@ -1040,7 +1049,8 @@ public:
assert(CGF && "Invalid address of label expression outside function.");
llvm::Constant *Ptr =
CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
- return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ Ptr = llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ return ConstantAddress(Ptr, CharUnits::One());
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
@@ -1066,7 +1076,10 @@ public:
else
FunctionName = "global";
- return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ // This is not really an l-value.
+ llvm::Constant *Ptr =
+ CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ return ConstantAddress(Ptr, CGM.getPointerAlign());
}
case Expr::CXXTypeidExprClass: {
CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
@@ -1075,7 +1088,8 @@ public:
T = Typeid->getTypeOperand(CGM.getContext());
else
T = Typeid->getExprOperand()->getType();
- return CGM.GetAddrOfRTTIDescriptor(T);
+ return ConstantAddress(CGM.GetAddrOfRTTIDescriptor(T),
+ CGM.getPointerAlign());
}
case Expr::CXXUuidofExprClass: {
return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
@@ -1091,7 +1105,7 @@ public:
}
}
- return nullptr;
+ return ConstantAddress::invalid();
}
};
@@ -1255,7 +1269,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
llvm::Constant *Offset =
llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
- llvm::Constant *C;
+ llvm::Constant *C = nullptr;
if (APValue::LValueBase LVBase = Value.getLValueBase()) {
// An array can be represented as an lvalue referring to the base.
if (isa<llvm::ArrayType>(DestTy)) {
@@ -1264,7 +1278,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
const_cast<Expr*>(LVBase.get<const Expr*>()));
}
- C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase).getPointer();
// Apply offset if necessary.
if (!Offset->isNullValue()) {
@@ -1336,15 +1350,17 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
- SmallVector<llvm::Constant *, 4> Inits;
unsigned NumElts = Value.getVectorLength();
+ SmallVector<llvm::Constant *, 4> Inits(NumElts);
- for (unsigned i = 0; i != NumElts; ++i) {
- const APValue &Elt = Value.getVectorElt(i);
+ for (unsigned I = 0; I != NumElts; ++I) {
+ const APValue &Elt = Value.getVectorElt(I);
if (Elt.isInt())
- Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ Inits[I] = llvm::ConstantInt::get(VMContext, Elt.getInt());
+ else if (Elt.isFloat())
+ Inits[I] = llvm::ConstantFP::get(VMContext, Elt.getFloat());
else
- Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ llvm_unreachable("unsupported vector element type");
}
return llvm::ConstantVector::get(Inits);
}
@@ -1438,7 +1454,7 @@ CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
return C;
}
-llvm::Constant *
+ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
assert(E->isFileScope() && "not a file-scope compound literal expr");
return ConstExprEmitter(*this, nullptr).EmitLValue(E);
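ConstantAddress plays the same role for constants that Address plays for runtime values: the emitted llvm::Constant travels with its alignment, and ConstantAddress::invalid() replaces the old nullptr return. A standalone sketch (in-tree the class derives from Address in lib/CodeGen/Address.h):

    namespace llvm { class Constant; }        // stand-in: opaque LLVM type

    // Sketch: a constant pointer plus its known alignment, so constant
    // l-values can feed initializers without losing alignment information.
    class ConstantAddress {
      llvm::Constant *Pointer;
      long AlignQuantity;                     // stand-in for CharUnits
    public:
      ConstantAddress(llvm::Constant *P, long A)
          : Pointer(P), AlignQuantity(A) {}
      static ConstantAddress invalid() { return ConstantAddress(nullptr, 0); }
      bool isValid() const { return Pointer != nullptr; }
      llvm::Constant *getPointer() const { return Pointer; }
    };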
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 74f6019b1a2c..725d96f29877 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -140,21 +140,25 @@ public:
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
- /// \brief Emit a check that a conversion to or from a floating-point type
- /// does not overflow.
+ /// Emit a check that a conversion to or from a floating-point type does not
+ /// overflow.
void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
- Value *Src, QualType SrcType,
- QualType DstType, llvm::Type *DstTy);
+ Value *Src, QualType SrcType, QualType DstType,
+ llvm::Type *DstTy, SourceLocation Loc);
- /// EmitScalarConversion - Emit a conversion from the specified type to the
- /// specified destination type, both of which are LLVM scalar types.
- Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+ /// Emit a conversion from the specified type to the specified destination
+ /// type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
- /// EmitComplexToScalarConversion - Emit a conversion from the specified
- /// complex type to the specified destination type, where the destination type
- /// is an LLVM scalar type.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc, bool TreatBooleanAsSigned);
+
+ /// Emit a conversion from the specified complex type to the specified
+ /// destination type, where the destination type is an LLVM scalar type.
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
- QualType SrcTy, QualType DstTy);
+ QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
/// EmitNullValue - Emit a value that corresponds to null for the given type.
Value *EmitNullValue(QualType Ty);
@@ -310,12 +314,7 @@ public:
return EmitNullValue(E->getType());
}
Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
- if (E->getType()->isVariablyModifiedType())
- CGF.EmitVariablyModifiedType(E->getType());
-
- if (CGDebugInfo *DI = CGF.getDebugInfo())
- DI->EmitExplicitCastType(E->getType());
-
+ CGF.CGM.EmitExplicitCastExprType(E, &CGF);
return VisitCastExpr(E);
}
Value *VisitCastExpr(CastExpr *E);
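The two statements deleted from VisitExplicitCastExpr are what CodeGenModule::EmitExplicitCastExprType now performs for every emitter that sees an explicit cast (scalar, complex, and constant, per the other hunks in this diff). Reconstructed from the deleted lines, the helper plausibly looks like this (a sketch; the actual definition may differ in detail):

    // Reconstructed sketch of the hoisted helper: evaluate any variably
    // modified type named by the cast, then record the type for debug info.
    void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                                 CodeGenFunction *CGF) {
      if (CGF && E->getType()->isVariablyModifiedType())
        CGF->EmitVariablyModifiedType(E->getType());
      if (CGDebugInfo *DI = getModuleDebugInfo())
        DI->EmitExplicitCastType(E->getType());
    }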
@@ -362,7 +361,7 @@ public:
if (isa<MemberPointerType>(E->getType())) // never sugared
return CGF.CGM.getMemberPointerConstant(E);
- return EmitLValue(E->getSubExpr()).getAddress();
+ return EmitLValue(E->getSubExpr()).getPointer();
}
Value *VisitUnaryDeref(const UnaryOperator *E) {
if (E->getType()->isVoidType())
@@ -524,8 +523,9 @@ public:
#undef HANDLEBINOP
// Comparisons.
- Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
- unsigned SICmpOpc, unsigned FCmpOpc);
+ Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
+ llvm::CmpInst::Predicate SICmpOpc,
+ llvm::CmpInst::Predicate FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
Value *VisitBin##CODE(const BinaryOperator *E) { \
return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
@@ -594,11 +594,9 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitPointerToBoolConversion(Src);
}
-void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
- QualType OrigSrcType,
- Value *Src, QualType SrcType,
- QualType DstType,
- llvm::Type *DstTy) {
+void ScalarExprEmitter::EmitFloatConversionCheck(
+ Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
using llvm::APFloat;
using llvm::APSInt;
@@ -722,19 +720,25 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
}
}
- // FIXME: Provide a SourceLocation.
- llvm::Constant *StaticArgs[] = {
- CGF.EmitCheckTypeDescriptor(OrigSrcType),
- CGF.EmitCheckTypeDescriptor(DstType)
- };
+ llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
+ CGF.EmitCheckTypeDescriptor(OrigSrcType),
+ CGF.EmitCheckTypeDescriptor(DstType)};
CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
"float_cast_overflow", StaticArgs, OrigSrc);
}
-/// EmitScalarConversion - Emit a conversion from the specified type to the
-/// specified destination type, both of which are LLVM scalar types.
+/// Emit a conversion from the specified type to the specified destination type,
+/// both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType,
+ SourceLocation Loc) {
+ return EmitScalarConversion(Src, SrcType, DstType, Loc, false);
+}
+
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
- QualType DstType) {
+ QualType DstType,
+ SourceLocation Loc,
+ bool TreatBooleanAsSigned) {
SrcType = CGF.getContext().getCanonicalType(SrcType);
DstType = CGF.getContext().getCanonicalType(DstType);
if (SrcType == DstType) return Src;
@@ -809,7 +813,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
// Cast the scalar to element type
QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
- llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+ llvm::Value *Elt = EmitScalarConversion(
+ Src, SrcType, EltTy, Loc, CGF.getContext().getLangOpts().OpenCL);
// Splat the element across to all elements
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
@@ -829,8 +834,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// or the destination type is a floating-point type.
if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
(OrigSrcType->isFloatingType() || DstType->isFloatingType()))
- EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
- DstTy);
+ EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
+ Loc);
// Cast to half through float if half isn't a native type.
if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
@@ -849,6 +854,9 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (isa<llvm::IntegerType>(SrcTy)) {
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ if (SrcType->isBooleanType() && TreatBooleanAsSigned) {
+ InputSigned = true;
+ }
if (isa<llvm::IntegerType>(DstTy))
Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
else if (InputSigned)
@@ -884,20 +892,19 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
return Res;
}
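The TreatBooleanAsSigned parameter exists for the vector-splat path: under OpenCL semantics a true boolean splatted into an integer vector is expected to produce all-ones lanes, which requires sign- rather than zero-extension. A standalone model of the difference for one lane:

    #include <cstdint>
    // Models EmitScalarConversion's integer cast of a boolean for one lane:
    // zero-extension yields 1, sign-extension yields all-ones (-1).
    int32_t splat_lane(bool b, bool treatAsSigned) {
      return treatAsSigned ? -static_cast<int32_t>(b)  // true -> 0xFFFFFFFF
                           : static_cast<int32_t>(b);  // true -> 0x00000001
    }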
-/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
-/// type to the specified destination type, where the destination type is an
-/// LLVM scalar type.
-Value *ScalarExprEmitter::
-EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
- QualType SrcTy, QualType DstTy) {
+/// Emit a conversion from the specified complex type to the specified
+/// destination type, where the destination type is an LLVM scalar type.
+Value *ScalarExprEmitter::EmitComplexToScalarConversion(
+ CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc) {
// Get the source element type.
SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstTy->isBooleanType()) {
// Complex != 0 -> (Real != 0) | (Imag != 0)
- Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
- Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
return Builder.CreateOr(Src.first, Src.second, "tobool");
}
@@ -905,7 +912,7 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
// the imaginary part of the complex value is discarded and the value of the
// real part is converted according to the conversion rules for the
// corresponding real type.
- return EmitScalarConversion(Src.first, SrcTy, DstTy);
+ return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
@@ -1003,14 +1010,10 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
}
llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
- llvm::Constant* EltMask;
-
- EltMask = llvm::ConstantInt::get(MTy->getElementType(),
- llvm::NextPowerOf2(LHSElts-1)-1);
// Mask off the high bits of each shuffle index.
- Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
- EltMask);
+ Value *MaskBits =
+ llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
// newv = undef
@@ -1334,13 +1337,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return V;
}
-static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
if (CE->getCastKind() == CK_UncheckedDerivedToBase)
return false;
- if (isa<CXXThisExpr>(E)) {
+ if (isa<CXXThisExpr>(E->IgnoreParens())) {
// We always assume that 'this' is never null.
return false;
}
@@ -1375,11 +1378,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
- Value *V = EmitLValue(E).getAddress();
- V = Builder.CreateBitCast(V,
- ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
- CE->getExprLoc());
+ Address Addr = EmitLValue(E).getAddress();
+ Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
+ return EmitLoadOfLValue(LV, CE->getExprLoc());
}
case CK_CPointerToObjCPointerCast:
@@ -1419,68 +1421,44 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
- llvm::Value *V = Visit(E);
-
- llvm::Value *Derived =
- CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
+ Address Base = CGF.EmitPointerWithAlignment(E);
+ Address Derived =
+ CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ CGF.ShouldNullCheckClassCastValue(CE));
// C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (CGF.sanitizePerformTypeCheck())
CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
- Derived, DestTy->getPointeeType());
+ Derived.getPointer(), DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
- CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
+ CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(),
+ Derived.getPointer(),
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_DerivedCast,
CE->getLocStart());
- return Derived;
+ return Derived.getPointer();
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const CXXRecordDecl *DerivedClassDecl =
- E->getType()->getPointeeCXXRecordDecl();
- assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
-
- return CGF.GetAddressOfBaseClass(
- Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
+ // The EmitPointerWithAlignment path does this fine; just discard
+ // the alignment.
+ return CGF.EmitPointerWithAlignment(CE).getPointer();
}
+
case CK_Dynamic: {
- Value *V = Visit(const_cast<Expr*>(E));
+ Address V = CGF.EmitPointerWithAlignment(E);
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
return CGF.EmitDynamicCast(V, DCE);
}
- case CK_ArrayToPointerDecay: {
- assert(E->getType()->isArrayType() &&
- "Array to pointer decay must have array source type!");
-
- Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
-
- // Note that VLA pointers are always decayed, so we don't need to do
- // anything here.
- if (!E->getType()->isVariableArrayType()) {
- assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
- llvm::Type *NewTy = ConvertType(E->getType());
- V = CGF.Builder.CreatePointerCast(
- V, NewTy->getPointerTo(V->getType()->getPointerAddressSpace()));
-
- assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
- "Expected pointer to array");
- V = Builder.CreateStructGEP(NewTy, V, 0, "arraydecay");
- }
-
- // Make sure the array decay ends up being the right type. This matters if
- // the array type was of an incomplete type.
- return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
- }
+ case CK_ArrayToPointerDecay:
+ return CGF.EmitArrayToPointerDecay(E).getPointer();
case CK_FunctionToPointerDecay:
- return EmitLValue(E).getAddress();
+ return EmitLValue(E).getPointer();
case CK_NullToPointer:
if (MustVisitNullValue(E))
@@ -1563,9 +1541,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
- Value *Elt = Visit(const_cast<Expr*>(E));
- Elt = EmitScalarConversion(Elt, E->getType(),
- DestTy->getAs<VectorType>()->getElementType());
+ // We need an IgnoreImpCasts here: by default a boolean is promoted to an
+ // int, and that promotion does not perform the sign extension we need, so
+ // when we know we are casting to a vector we strip the implicit cast off.
+ Value *Elt = Visit(const_cast<Expr*>(E->IgnoreImpCasts()));
+ Elt = EmitScalarConversion(Elt, E->IgnoreImpCasts()->getType(),
+ DestTy->getAs<VectorType>()->getElementType(),
+ CE->getExprLoc(),
+ CGF.getContext().getLangOpts().OpenCL);
// Splat the element across to all elements
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
@@ -1576,7 +1559,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
- return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
case CK_IntegralToBoolean:
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
@@ -1598,7 +1582,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
// TODO: kill this function off, inline appropriate case here
- return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy,
+ CE->getExprLoc());
}
case CK_ZeroToOCLEvent: {
@@ -1613,9 +1598,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
- !E->getType()->isVoidType());
- if (!RetAlloca)
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType());
+ if (!RetAlloca.isValid())
return nullptr;
return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
E->getExprLoc());
@@ -1671,16 +1656,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (isInc && type->isBooleanType()) {
llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
if (isPre) {
- Builder.Insert(new llvm::StoreInst(True,
- LV.getAddress(), LV.isVolatileQualified(),
- LV.getAlignment().getQuantity(),
- llvm::SequentiallyConsistent));
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
+ ->setAtomic(llvm::SequentiallyConsistent);
return Builder.getTrue();
}
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
- LV.getAddress(), True, llvm::SequentiallyConsistent);
+ LV.getPointer(), True, llvm::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
// atomicrmw instructions. We skip this if we want to be doing overflow
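As the comment above notes, incrementing an atomic _Bool needs no compare loop: pre-increment stores true and yields true, while post-increment must return the prior value, hence the atomic exchange. The post-increment case behaves like this user-level equivalent (an illustration of the semantics, not the lowering itself):

    #include <atomic>
    std::atomic<bool> flag{false};
    // Post-increment semantics on an atomic boolean: swap in 'true' and hand
    // back the prior value, matching the AtomicRMW Xchg emitted above.
    bool was_set() {
      return flag.exchange(true, std::memory_order_seq_cst);
    }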
@@ -1697,7 +1680,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old = Builder.CreateAtomicRMW(aop,
- LV.getAddress(), amt, llvm::SequentiallyConsistent);
+ LV.getPointer(), amt, llvm::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
value = EmitLoadOfLValue(LV, E->getExprLoc());
@@ -1941,10 +1924,10 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
QualType CurrentType = E->getTypeSourceInfo()->getType();
for (unsigned i = 0; i != n; ++i) {
- OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
+ OffsetOfNode ON = E->getComponent(i);
llvm::Value *Offset = nullptr;
switch (ON.getKind()) {
- case OffsetOfExpr::OffsetOfNode::Array: {
+ case OffsetOfNode::Array: {
// Compute the index
Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
@@ -1964,7 +1947,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
break;
}
- case OffsetOfExpr::OffsetOfNode::Field: {
+ case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
@@ -1990,10 +1973,10 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
break;
}
- case OffsetOfExpr::OffsetOfNode::Identifier:
+ case OffsetOfNode::Identifier:
llvm_unreachable("dependent __builtin_offsetof");
- case OffsetOfExpr::OffsetOfNode::Base: {
+ case OffsetOfNode::Base: {
if (ON.getBase()->isVirtual()) {
CGF.ErrorUnsupported(E, "virtual base in offsetof");
continue;
@@ -2134,7 +2117,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
- OpInfo.FPContractable = false;
+ OpInfo.FPContractable = E->isFPContractable();
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
@@ -2174,9 +2157,11 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
llvm_unreachable("Invalid compound assignment type");
}
if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
- llvm::Value *amt = CGF.EmitToMemory(EmitScalarConversion(OpInfo.RHS,
- E->getRHS()->getType(), LHSTy), LHSTy);
- Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt,
+ llvm::Value *amt = CGF.EmitToMemory(
+ EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
+ E->getExprLoc()),
+ LHSTy);
+ Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
llvm::SequentiallyConsistent);
return LHSLV;
}
@@ -2196,14 +2181,16 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
else
OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
- OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
- E->getComputationLHSType());
+ SourceLocation Loc = E->getExprLoc();
+ OpInfo.LHS =
+ EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
- Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+ Result =
+ EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
@@ -2389,9 +2376,9 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
- llvm::Function::iterator insertPt = initialBB;
+ llvm::Function::iterator insertPt = initialBB->getIterator();
llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
- std::next(insertPt));
+ &*std::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -2578,19 +2565,17 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
return nullptr;
// We have a potentially fusable op. Look for a mul on one of the operands.
- if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
- if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
- assert(LHSBinOp->getNumUses() == 0 &&
- "Operations with multiple uses shouldn't be contracted.");
+ // Also, make sure that the mul result has no other uses; fusing a mul
+ // that must still be materialized would gain nothing.
+ if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
+ LHSBinOp->use_empty())
return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
- }
- } else if (llvm::BinaryOperator* RHSBinOp =
- dyn_cast<llvm::BinaryOperator>(op.RHS)) {
- if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
- assert(RHSBinOp->getNumUses() == 0 &&
- "Operations with multiple uses shouldn't be contracted.");
+ }
+ if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
+ RHSBinOp->use_empty())
return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
- }
}
return nullptr;
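Replacing the assertion with a use_empty() test makes contraction conservative: if the fmul's result is needed elsewhere, the multiply must stay materialized, and folding it into an fmuladd saves nothing. With FP contraction enabled, the intent is roughly:

    // With FP contraction on (e.g. -ffp-contract=fast):
    double fused(double a, double b, double c) {
      return a * b + c;      // mul has no other use -> a single fmuladd
    }
    double not_fused(double a, double b, double c, double *out) {
      double p = a * b;      // result is used twice...
      *out = p;
      return p + c;          // ...so the mul stays; no fmuladd is formed
    }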
@@ -2848,8 +2833,10 @@ static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
}
}
-Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
- unsigned SICmpOpc, unsigned FCmpOpc) {
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
+ llvm::CmpInst::Predicate UICmpOpc,
+ llvm::CmpInst::Predicate SICmpOpc,
+ llvm::CmpInst::Predicate FCmpOpc) {
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
@@ -2927,19 +2914,17 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *CR6Param = Builder.getInt32(CR6);
llvm::Function *F = CGF.CGM.getIntrinsic(ID);
Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
- return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
+ E->getExprLoc());
}
if (LHS->getType()->isFPOrFPVectorTy()) {
- Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
- LHS, RHS, "cmp");
+ Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
} else if (LHSTy->hasSignedIntegerRepresentation()) {
- Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
- LHS, RHS, "cmp");
+ Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
} else {
// Unsigned integers and pointers.
- Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHS, RHS, "cmp");
+ Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
}
// If this is a vector comparison, sign extend the result to the appropriate
@@ -2974,17 +2959,13 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *ResultR, *ResultI;
if (CETy->isRealFloatingType()) {
- ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
- LHS.first, RHS.first, "cmp.r");
- ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
- LHS.second, RHS.second, "cmp.i");
+ ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
} else {
// Complex comparisons can only be equality comparisons. As such, signed
// and unsigned opcodes are the same.
- ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHS.first, RHS.first, "cmp.r");
- ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHS.second, RHS.second, "cmp.i");
+ ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
}
if (E->getOpcode() == BO_EQ) {
@@ -2996,7 +2977,8 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
}
}
- return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
+ E->getExprLoc());
}
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -3382,13 +3364,14 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
if (Ty->isVariablyModifiedType())
CGF.EmitVariablyModifiedType(Ty);
- llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+ Address ArgValue = Address::invalid();
+ Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+
llvm::Type *ArgTy = ConvertType(VE->getType());
// If EmitVAArg fails, we fall back to the LLVM instruction.
- if (!ArgPtr)
- return Builder.CreateVAArg(ArgValue, ArgTy);
+ if (!ArgPtr.isValid())
+ return Builder.CreateVAArg(ArgValue.getPointer(), ArgTy);
// FIXME Volatility.
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
@@ -3465,8 +3448,8 @@ Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
// Entry Point into this File
//===----------------------------------------------------------------------===//
-/// EmitScalarExpr - Emit the computation of the specified expression of scalar
-/// type, ignoring the result.
+/// Emit the computation of the specified expression of scalar type, ignoring
+/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
assert(E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit");
@@ -3475,25 +3458,26 @@ Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
.Visit(const_cast<Expr *>(E));
}
-/// EmitScalarConversion - Emit a conversion from the specified type to the
-/// specified destination type, both of which are LLVM scalar types.
+/// Emit a conversion from the specified type to the specified destination type,
+/// both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
- QualType DstTy) {
+ QualType DstTy,
+ SourceLocation Loc) {
assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
"Invalid scalar expression to emit");
- return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}
-/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
-/// type to the specified destination type, where the destination type is an
-/// LLVM scalar type.
+/// Emit a conversion from the specified complex type to the specified
+/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
QualType SrcTy,
- QualType DstTy) {
+ QualType DstTy,
+ SourceLocation Loc) {
assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
"Invalid complex -> scalar conversion");
- return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
- DstTy);
+ return ScalarExprEmitter(*this)
+ .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
@@ -3504,30 +3488,20 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
- llvm::Value *V;
// object->isa or (*object).isa
// Generate code as for: *(Class*)object
- // build Class* type
- llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
+ Address Addr = Address::invalid();
if (BaseExpr->isRValue()) {
- V = CreateMemTemp(E->getType(), "resval");
- llvm::Value *Src = EmitScalarExpr(BaseExpr);
- Builder.CreateStore(Src, V);
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc());
+ Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
} else {
- if (E->isArrow())
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
- else
- V = EmitLValue(BaseExpr).getAddress();
+ Addr = EmitLValue(BaseExpr).getAddress();
}
- // build Class* type
- ClassPtrTy = ClassPtrTy->getPointerTo();
- V = Builder.CreateBitCast(V, ClassPtrTy);
- return MakeNaturalAlignAddrLValue(V, E->getType());
+ // Cast the address to Class*.
+ Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ return MakeAddrLValue(Addr, E->getType());
}
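CreateElementBitCast, used here and in the CK_LValueBitCast hunks above, is the Address-aware replacement for hand-written pointer bitcasts: only the pointee type changes, and the alignment rides along unchanged. Its contract, sketched with the diff's own names and ignoring address spaces:

    // Sketch only: what CreateElementBitCast is expected to preserve.
    Address createElementBitCast(CGBuilderTy &Builder, Address Addr,
                                 llvm::Type *NewEltTy) {
      llvm::Value *Cast =
          Builder.CreateBitCast(Addr.getPointer(), NewEltTy->getPointerTo());
      return Address(Cast, Addr.getAlignment());  // same alignment carried over
    }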
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index 1163d63b4a20..0afe7dbb9f1d 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "CGLoopInfo.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/Sema/LoopHint.h"
#include "llvm/IR/BasicBlock.h"
@@ -20,9 +21,10 @@ using namespace llvm;
static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
- if (!Attrs.IsParallel && Attrs.VectorizerWidth == 0 &&
- Attrs.VectorizerUnroll == 0 &&
- Attrs.VectorizerEnable == LoopAttributes::VecUnspecified)
+ if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
+ Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
+ Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
+ Attrs.UnrollEnable == LoopAttributes::Unspecified)
return nullptr;
SmallVector<Metadata *, 4> Args;
@@ -30,29 +32,49 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
auto TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode.get());
- // Setting vectorizer.width
- if (Attrs.VectorizerWidth > 0) {
+ // Setting vectorize.width
+ if (Attrs.VectorizeWidth > 0) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"),
ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.VectorizerWidth))};
+ Type::getInt32Ty(Ctx), Attrs.VectorizeWidth))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting vectorizer.unroll
- if (Attrs.VectorizerUnroll > 0) {
+ // Setting interleave.count
+ if (Attrs.InterleaveCount > 0) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"),
ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt32Ty(Ctx), Attrs.VectorizerUnroll))};
+ Type::getInt32Ty(Ctx), Attrs.InterleaveCount))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting vectorizer.enable
- if (Attrs.VectorizerEnable != LoopAttributes::VecUnspecified) {
- Metadata *Vals[] = {
- MDString::get(Ctx, "llvm.loop.vectorize.enable"),
- ConstantAsMetadata::get(ConstantInt::get(
- Type::getInt1Ty(Ctx),
- (Attrs.VectorizerEnable == LoopAttributes::VecEnable)))};
+ // Setting unroll.count
+ if (Attrs.UnrollCount > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.UnrollCount))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting vectorize.enable
+ if (Attrs.VectorizeEnable != LoopAttributes::Unspecified) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt1Ty(Ctx), (Attrs.VectorizeEnable ==
+ LoopAttributes::Enable)))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ // Setting unroll.full or unroll.disable
+ if (Attrs.UnrollEnable != LoopAttributes::Unspecified) {
+ std::string Name;
+ if (Attrs.UnrollEnable == LoopAttributes::Enable)
+ Name = "llvm.loop.unroll.enable";
+ else if (Attrs.UnrollEnable == LoopAttributes::Full)
+ Name = "llvm.loop.unroll.full";
+ else
+ Name = "llvm.loop.unroll.disable";
+ Metadata *Vals[] = {MDString::get(Ctx, Name)};
Args.push_back(MDNode::get(Ctx, Vals));
}
@@ -63,14 +85,17 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
}
LoopAttributes::LoopAttributes(bool IsParallel)
- : IsParallel(IsParallel), VectorizerEnable(LoopAttributes::VecUnspecified),
- VectorizerWidth(0), VectorizerUnroll(0) {}
+ : IsParallel(IsParallel), VectorizeEnable(LoopAttributes::Unspecified),
+ UnrollEnable(LoopAttributes::Unspecified), VectorizeWidth(0),
+ InterleaveCount(0), UnrollCount(0) {}
void LoopAttributes::clear() {
IsParallel = false;
- VectorizerWidth = 0;
- VectorizerUnroll = 0;
- VectorizerEnable = LoopAttributes::VecUnspecified;
+ VectorizeWidth = 0;
+ InterleaveCount = 0;
+ UnrollCount = 0;
+ VectorizeEnable = LoopAttributes::Unspecified;
+ UnrollEnable = LoopAttributes::Unspecified;
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs)
@@ -78,8 +103,16 @@ LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs)
LoopID = createMetadata(Header->getContext(), Attrs);
}
-void LoopInfoStack::push(BasicBlock *Header,
+void LoopInfoStack::push(BasicBlock *Header) {
+ Active.push_back(LoopInfo(Header, StagedAttrs));
+ // Clear the attributes so nested loops do not inherit them.
+ StagedAttrs.clear();
+}
+
+void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
ArrayRef<const clang::Attr *> Attrs) {
+
+ // Identify loop hint attributes from Attrs.
for (const auto *Attr : Attrs) {
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
@@ -87,28 +120,105 @@ void LoopInfoStack::push(BasicBlock *Header,
if (!LH)
continue;
+ auto *ValueExpr = LH->getValue();
+ unsigned ValueInt = 1;
+ if (ValueExpr) {
+ llvm::APSInt ValueAPS = ValueExpr->EvaluateKnownConstInt(Ctx);
+ ValueInt = ValueAPS.getSExtValue();
+ }
+
LoopHintAttr::OptionType Option = LH->getOption();
LoopHintAttr::LoopHintState State = LH->getState();
- switch (Option) {
- case LoopHintAttr::Vectorize:
- case LoopHintAttr::Interleave:
- if (State == LoopHintAttr::AssumeSafety) {
+ switch (State) {
+ case LoopHintAttr::Disable:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ // Disable vectorization by specifying a width of 1.
+ setVectorizeWidth(1);
+ break;
+ case LoopHintAttr::Interleave:
+ // Disable interleaving by specifying a count of 1.
+ setInterleaveCount(1);
+ break;
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Disable);
+ break;
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be disabled.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Enable:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ setVectorizeEnable(true);
+ break;
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Enable);
+ break;
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot enabled.");
+ break;
+ }
+ break;
+ case LoopHintAttr::AssumeSafety:
+ switch (Option) {
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
// Apply "llvm.mem.parallel_loop_access" metadata to load/stores.
setParallel(true);
+ setVectorizeEnable(true);
+ break;
+ case LoopHintAttr::Unroll:
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be used to assume mem safety.");
+ break;
+ }
+ break;
+ case LoopHintAttr::Full:
+ switch (Option) {
+ case LoopHintAttr::Unroll:
+ setUnrollState(LoopAttributes::Full);
+ break;
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::VectorizeWidth:
+ case LoopHintAttr::InterleaveCount:
+ llvm_unreachable("Options cannot be used with 'full' hint.");
+ break;
}
break;
- case LoopHintAttr::VectorizeWidth:
- case LoopHintAttr::InterleaveCount:
- case LoopHintAttr::Unroll:
- case LoopHintAttr::UnrollCount:
- // Nothing to do here for these loop hints.
+ case LoopHintAttr::Numeric:
+ switch (Option) {
+ case LoopHintAttr::VectorizeWidth:
+ setVectorizeWidth(ValueInt);
+ break;
+ case LoopHintAttr::InterleaveCount:
+ setInterleaveCount(ValueInt);
+ break;
+ case LoopHintAttr::UnrollCount:
+ setUnrollCount(ValueInt);
+ break;
+ case LoopHintAttr::Unroll:
+ case LoopHintAttr::Vectorize:
+ case LoopHintAttr::Interleave:
+ llvm_unreachable("Options cannot be assigned a value.");
+ break;
+ }
break;
}
}
- Active.push_back(LoopInfo(Header, StagedAttrs));
- // Clear the attributes so nested loops do not inherit them.
- StagedAttrs.clear();
+ // Apply the staged attributes.
+ push(Header);
}
void LoopInfoStack::pop() {
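With the state-first switch above, each #pragma clang loop option now lands in a dedicated staged attribute, which createMetadata serializes into the corresponding llvm.loop.* operand. For example (pragma spellings as accepted by Clang's LoopHint attribute):

    // The pragma below stages, via the switch above:
    //   setVectorizeWidth(4)    -> !{"llvm.loop.vectorize.width", i32 4}
    //   setInterleaveCount(2)   -> !{"llvm.loop.interleave.count", i32 2}
    //   setUnrollState(Disable) -> !{"llvm.loop.unroll.disable"}
    void scale(float *a, int n) {
    #pragma clang loop vectorize_width(4) interleave_count(2) unroll(disable)
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }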
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 2249937cd0d0..ec3390677fa9 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -29,6 +29,7 @@ class MDNode;
namespace clang {
class Attr;
+class ASTContext;
namespace CodeGen {
/// \brief Attributes that may be specified on loops.
@@ -39,17 +40,23 @@ struct LoopAttributes {
/// \brief Generate llvm.loop.parallel metadata for loads and stores.
bool IsParallel;
- /// \brief Values of llvm.loop.vectorize.enable metadata.
- enum LVEnableState { VecUnspecified, VecEnable, VecDisable };
+ /// \brief State of loop vectorization or unrolling.
+ enum LVEnableState { Unspecified, Enable, Disable, Full };
- /// \brief llvm.loop.vectorize.enable
- LVEnableState VectorizerEnable;
+ /// \brief Value for llvm.loop.vectorize.enable metadata.
+ LVEnableState VectorizeEnable;
- /// \brief llvm.loop.vectorize.width
- unsigned VectorizerWidth;
+ /// \brief Value for llvm.loop.unroll.* metadata (enable, disable, or full).
+ LVEnableState UnrollEnable;
- /// \brief llvm.loop.interleave.count
- unsigned VectorizerUnroll;
+ /// \brief Value for llvm.loop.vectorize.width metadata.
+ unsigned VectorizeWidth;
+
+ /// \brief Value for llvm.loop.interleave.count metadata.
+ unsigned InterleaveCount;
+
+ /// \brief Value for llvm.loop.unroll.count metadata.
+ unsigned UnrollCount;
};
/// \brief Information used when generating a structured loop.
@@ -88,8 +95,12 @@ public:
/// \brief Begin a new structured loop. The set of staged attributes will be
/// applied to the loop and then cleared.
- void push(llvm::BasicBlock *Header,
- llvm::ArrayRef<const Attr *> Attrs = llvm::None);
+ void push(llvm::BasicBlock *Header);
+
+ /// \brief Begin a new structured loop. Stage attributes from the Attrs list.
+ /// The staged attributes are applied to the loop and then cleared.
+ void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
+ llvm::ArrayRef<const Attr *> Attrs);
/// \brief End the current loop.
void pop();
@@ -109,17 +120,25 @@ public:
/// \brief Set the next pushed loop as parallel.
void setParallel(bool Enable = true) { StagedAttrs.IsParallel = Enable; }
- /// \brief Set the next pushed loop 'vectorizer.enable'
- void setVectorizerEnable(bool Enable = true) {
- StagedAttrs.VectorizerEnable =
- Enable ? LoopAttributes::VecEnable : LoopAttributes::VecDisable;
+ /// \brief Set the next pushed loop 'vectorize.enable'
+ void setVectorizeEnable(bool Enable = true) {
+ StagedAttrs.VectorizeEnable =
+ Enable ? LoopAttributes::Enable : LoopAttributes::Disable;
}
- /// \brief Set the vectorizer width for the next loop pushed.
- void setVectorizerWidth(unsigned W) { StagedAttrs.VectorizerWidth = W; }
+ /// \brief Set the next pushed loop unroll state.
+ void setUnrollState(const LoopAttributes::LVEnableState &State) {
+ StagedAttrs.UnrollEnable = State;
+ }
+
+ /// \brief Set the vectorize width for the next loop pushed.
+ void setVectorizeWidth(unsigned W) { StagedAttrs.VectorizeWidth = W; }
+
+ /// \brief Set the interleave count for the next loop pushed.
+ void setInterleaveCount(unsigned C) { StagedAttrs.InterleaveCount = C; }
- /// \brief Set the vectorizer unroll for the next loop pushed.
- void setVectorizerUnroll(unsigned U) { StagedAttrs.VectorizerUnroll = U; }
+ /// \brief Set the unroll count for the next loop pushed.
+ void setUnrollCount(unsigned C) { StagedAttrs.UnrollCount = C; }
private:
/// \brief Returns true if there is LoopInfo on the stack.
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index eb76ad1ce1f7..2d5991b71fca 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -37,9 +37,8 @@ static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
-static llvm::Constant *getNullForVariable(llvm::Value *addr) {
- llvm::Type *type =
- cast<llvm::PointerType>(addr->getType())->getElementType();
+static llvm::Constant *getNullForVariable(Address addr) {
+ llvm::Type *type = addr.getElementType();
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}
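
The migration running through the rest of this file replaces raw llvm::Value* addresses with the Address type added in lib/CodeGen/Address.h. A minimal sketch of that type, paraphrased rather than quoted:

    // Paraphrase of clang::CodeGen::Address (not verbatim): a pointer
    // bundled with its known alignment, so loads and stores no longer
    // need a separate setAlignment call.
    class Address {
      llvm::Value *Pointer;
      CharUnits Alignment;
    public:
      Address(llvm::Value *P, CharUnits A) : Pointer(P), Alignment(A) {}
      static Address invalid() { return Address(nullptr, CharUnits()); }
      llvm::Value *getPointer() const { return Pointer; }
      CharUnits getAlignment() const { return Alignment; }
      llvm::Type *getElementType() const {
        return llvm::cast<llvm::PointerType>(Pointer->getType())
            ->getElementType();
      }
    };
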
@@ -47,7 +46,7 @@ static llvm::Constant *getNullForVariable(llvm::Value *addr) {
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C =
- CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
// FIXME: This bitcast should just be made an invariant on the Runtime.
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
@@ -84,16 +83,15 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
if (ValueType->isObjCBoxableRecordType()) {
// Emit CodeGen for first parameter
// and cast value to correct type
- llvm::Value *Temporary = CreateMemTemp(SubExpr->getType());
+ Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- llvm::Value *BitCast = Builder.CreateBitCast(Temporary,
- ConvertType(ArgQT));
- Args.add(RValue::get(BitCast), ArgQT);
+ Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
+ Args.add(RValue::get(BitCast.getPointer()), ArgQT);
// Create char array to store type encoding
std::string Str;
getContext().getObjCEncodingForType(ValueType, Str);
- llvm::GlobalVariable *GV = CGM.GetAddrOfConstantCString(Str);
+ llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
// Cast type encoding to correct type
const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
@@ -131,8 +129,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ArrayType::Normal, /*IndexTypeQuals=*/0);
// Allocate the temporary array(s).
- llvm::AllocaInst *Objects = CreateMemTemp(ElementArrayType, "objects");
- llvm::AllocaInst *Keys = nullptr;
+ Address Objects = CreateMemTemp(ElementArrayType, "objects");
+ Address Keys = Address::invalid();
if (DLE)
Keys = CreateMemTemp(ElementArrayType, "keys");
@@ -148,9 +146,9 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
if (ALE) {
// Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
- LValue LV = LValue::MakeAddr(
- Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
- ElementType, Context.getTypeAlignInChars(Rhs->getType()), Context);
+ LValue LV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *value = EmitScalarExpr(Rhs);
EmitStoreThroughLValue(RValue::get(value), LV, true);
@@ -160,17 +158,17 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
} else {
// Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
- LValue KeyLV = LValue::MakeAddr(
- Builder.CreateStructGEP(Keys->getAllocatedType(), Keys, i),
- ElementType, Context.getTypeAlignInChars(Key->getType()), Context);
+ LValue KeyLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *keyValue = EmitScalarExpr(Key);
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
// Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
- LValue ValueLV = LValue::MakeAddr(
- Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
- ElementType, Context.getTypeAlignInChars(Value->getType()), Context);
+ LValue ValueLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *valueValue = EmitScalarExpr(Value);
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
if (TrackNeededObjects) {
@@ -185,11 +183,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
const ParmVarDecl *argDecl = *PI++;
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Objects), ArgQT);
+ Args.add(RValue::get(Objects.getPointer()), ArgQT);
if (DLE) {
argDecl = *PI++;
ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Keys), ArgQT);
+ Args.add(RValue::get(Keys.getPointer()), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
@@ -275,10 +273,23 @@ shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
// receiver is loaded from a variable with precise lifetime.
case ObjCMessageExpr::Instance: {
const Expr *receiver = message->getInstanceReceiver();
+
+ // Look through OVEs.
+ if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
+ if (opaque->getSourceExpr())
+ receiver = opaque->getSourceExpr()->IgnoreParens();
+ }
+
const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
receiver = ice->getSubExpr()->IgnoreParens();
+ // Look through OVEs.
+ if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
+ if (opaque->getSourceExpr())
+ receiver = opaque->getSourceExpr()->IgnoreParens();
+ }
+
// Only __strong variables.
if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
return true;
@@ -312,6 +323,21 @@ shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
llvm_unreachable("invalid receiver kind");
}
+/// Given an expression of ObjC pointer type, check whether it was
+/// immediately loaded from an ARC __weak l-value.
+static const Expr *findWeakLValue(const Expr *E) {
+ assert(E->getType()->isObjCRetainableType());
+ E = E->IgnoreParens();
+ if (auto CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_LValueToRValue) {
+ if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return CE->getSubExpr();
+ }
+ }
+
+ return nullptr;
+}
+
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -322,6 +348,17 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
const ObjCMethodDecl *method = E->getMethodDecl();
+ // If the method is -retain, and the receiver's being loaded from
+ // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
+ if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ method->getMethodFamily() == OMF_retain) {
+ if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
+ LValue lvalue = EmitLValue(lvalueExpr);
+ llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
+ return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
+ }
+ }
+
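+ // Sketch of what this peephole buys (the IR below is illustrative,
+ // not taken from a test). For a __weak variable w, [w retain] would
+ // otherwise load the weak reference and then retain it:
+ //   %1 = call i8* @objc_loadWeak(i8** %w)   ; load + autorelease
+ //   %2 = call i8* @objc_retain(i8* %1)
+ // The peephole collapses both into a single runtime call:
+ //   %1 = call i8* @objc_loadWeakRetained(i8** %w)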
// We don't retain the receiver in delegate init calls, and this is
// safe because the receiver value is always loaded from 'self',
// which we zero out. We don't want to Block_copy block receivers,
@@ -390,7 +427,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
QualType ResultType = method ? method->getReturnType() : E->getType();
CallArgList Args;
- EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
+ EmitCallArgs(Args, method, E->arguments());
// For delegate init calls in ARC, do an unsafe store of null into
// self. This represents the call taking direct ownership of that
@@ -404,10 +441,8 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
"delegate init calls should only be marked in ARC");
// Do an unsafe store of null into self.
- llvm::Value *selfAddr =
- LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
- assert(selfAddr && "no self entry for a delegate init call?");
-
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
}
@@ -434,14 +469,13 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// For delegate init calls in ARC, implicitly store the result of
// the call back into self. This takes ownership of the value.
if (isDelegateInit) {
- llvm::Value *selfAddr =
- LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
llvm::Value *newSelf = result.getScalarVal();
// The delegate return type isn't necessarily a matching type; in
// fact, it's quite likely to be 'id'.
- llvm::Type *selfTy =
- cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ llvm::Type *selfTy = selfAddr.getElementType();
newSelf = Builder.CreateBitCast(newSelf, selfTy);
Builder.CreateStore(newSelf, selfAddr);
@@ -451,7 +485,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
}
namespace {
-struct FinishARCDealloc : EHScopeStack::Cleanup {
+struct FinishARCDealloc final : EHScopeStack::Cleanup {
void Emit(CodeGenFunction &CGF, Flags flags) override {
const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
@@ -523,7 +557,7 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
StartObjCMethod(OMD, OMD->getClassInterface());
- PGO.assignRegionCounters(OMD, CurFn);
+ PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
assert(isa<CompoundStmt>(OMD->getBody()));
incrementProfileCounter(OMD->getBody());
EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
@@ -536,19 +570,19 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
bool isAtomic, bool hasStrong) {
ASTContext &Context = CGF.getContext();
- llvm::Value *src =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
- ivar, 0).getAddress();
+ Address src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
- args.add(RValue::get(dest), Context.VoidPtrTy);
+ Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
- args.add(RValue::get(src), Context.VoidPtrTy);
+ args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
@@ -812,8 +846,8 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// The 2nd argument is the address of the ivar.
llvm::Value *ivarAddr =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
- CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -843,7 +877,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
ivar, AtomicHelperFn);
}
return;
@@ -873,10 +907,9 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Perform an atomic load. This does not impose ordering constraints.
- llvm::Value *ivarAddr = LV.getAddress();
+ Address ivarAddr = LV.getAddress();
ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
- load->setAlignment(strategy.getIvarAlignment().getQuantity());
load->setAtomic(llvm::Unordered);
// Store that value into the return address. Doing this with a
@@ -901,7 +934,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
llvm::Value *cmd =
- Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
+ Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
@@ -916,11 +949,11 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
llvm::Instruction *CallInstruction;
- RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
- getPropertyFn, ReturnValueSlot(), args, nullptr,
- &CallInstruction);
+ RValue RV = EmitCall(
+ getTypes().arrangeFreeFunctionCall(
+ propType, args, FunctionType::ExtInfo(), RequiredArgs::All),
+ getPropertyFn, ReturnValueSlot(), args, CGCalleeInfo(),
+ &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
@@ -952,8 +985,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
switch (getEvaluationKind(ivarType)) {
case TEK_Complex: {
ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
- EmitStoreOfComplex(pair,
- MakeNaturalAlignAddrLValue(ReturnValue, ivarType),
+ EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
/*init*/ true);
return;
}
@@ -966,11 +998,15 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress();
+ value = LV.getAddress().getPointer();
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
- value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+ if (getLangOpts().ObjCAutoRefCount) {
+ value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+ } else {
+ value = EmitARCLoadWeak(LV.getAddress());
+ }
// Otherwise we want to do a simple load, suppressing the
// final autorelease.
@@ -1006,7 +1042,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// The first argument is the address of the ivar.
llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
CGF.LoadObjCSelf(), ivar, 0)
- .getAddress();
+ .getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -1014,7 +1050,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
- llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1052,7 +1088,7 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// The first argument is the address of the ivar.
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
- CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -1060,7 +1096,7 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
- llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1135,29 +1171,27 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (strategy.getIvarSize().isZero())
return;
- llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
- llvm::Value *ivarAddr = ivarLValue.getAddress();
+ Address ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
llvm::Type *bitcastType =
llvm::Type::getIntNTy(getLLVMContext(),
getContext().toBits(strategy.getIvarSize()));
- bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Cast both arguments to the chosen operation type.
- argAddr = Builder.CreateBitCast(argAddr, bitcastType);
- ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
// This bitcast load is likely to cause some nasty IR.
llvm::Value *load = Builder.CreateLoad(argAddr);
// Perform an atomic store. There are no memory ordering requirements.
llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
- store->setAlignment(strategy.getIvarAlignment().getQuantity());
store->setAtomic(llvm::Unordered);
return;
}
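
The dropped setAlignment calls above are not a behavior change under this patch's model; the alignment now travels with the Address. A sketch of the equivalence, assuming the CGBuilderTy overloads used elsewhere in this patch:

    // Before: raw pointer, alignment patched onto the instruction.
    //   llvm::StoreInst *S = Builder.CreateStore(V, RawPtr);
    //   S->setAlignment(Align.getQuantity());
    // After: the builder reads the alignment from the Address itself.
    //   llvm::StoreInst *S = Builder.CreateStore(V, Addr); // Addr : Address
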
@@ -1189,13 +1223,14 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
llvm::Value *cmd =
- Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
+ Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
llvm::Value *self =
Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
- llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
- arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
+ llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
+ arg = Builder.CreateBitCast(arg, VoidPtrTy);
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
@@ -1304,7 +1339,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
}
namespace {
- struct DestroyIvar : EHScopeStack::Cleanup {
+ struct DestroyIvar final : EHScopeStack::Cleanup {
private:
llvm::Value *addr;
const ObjCIvarDecl *ivar;
@@ -1328,7 +1363,7 @@ namespace {
/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
llvm::Value *null = getNullForVariable(addr);
CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
@@ -1405,22 +1440,6 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
FinishFunction();
}
-bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
- CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
- it++; it++;
- const ABIArgInfo &AI = it->info;
- // FIXME. Is this sufficient check?
- return (AI.getKind() == ABIArgInfo::Indirect);
-}
-
-bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
- if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
- return false;
- if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
- return FDTTy->getDecl()->hasObjectMember();
- return false;
-}
-
llvm::Value *CodeGenFunction::LoadObjCSelf() {
VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
@@ -1458,7 +1477,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fast enumeration state.
QualType StateTy = CGM.getObjCFastEnumerationStateType();
- llvm::AllocaInst *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
EmitNullInitialization(StatePtr, StateTy);
// Number of elements in the items array.
@@ -1477,7 +1496,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
getContext().getConstantArrayType(getContext().getObjCIdType(),
llvm::APInt(32, NumItems),
ArrayType::Normal, 0);
- llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+ Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
@@ -1498,14 +1517,16 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
- Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
+ Args.add(RValue::get(StatePtr.getPointer()),
+ getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
- Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
+ Args.add(RValue::get(ItemsPtr.getPointer()),
+ getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
@@ -1542,13 +1563,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Save the initial mutations value. This is the value at an
// address that was written into the state object by
// countByEnumeratingWithState:objects:count:.
- llvm::Value *StateMutationsPtrPtr = Builder.CreateStructGEP(
- StatePtr->getAllocatedType(), StatePtr, 2, "mutationsptr.ptr");
- llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
- "mutationsptr");
+ Address StateMutationsPtrPtr = Builder.CreateStructGEP(
+ StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr
+ = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *initialMutations =
- Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
+ Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "forcoll.initial-mutations");
// Start looping. This is the point we return to whenever we have a
// fresh, non-empty batch of objects.
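
For orientation, the state object being indexed here, assuming the conventional LP64 layout, which is what makes the byte offsets one and two pointer sizes:

    // typedef struct {
    //   unsigned long state;           // struct GEP index 0
    //   id *itemsPtr;                  // index 1, offset 1 * pointer size
    //   unsigned long *mutationsPtr;   // index 2, offset 2 * pointer size
    //   unsigned long extra[5];
    // } NSFastEnumerationState;
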
@@ -1570,7 +1592,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *currentMutations
- = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+ = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "statemutations");
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
@@ -1623,15 +1646,16 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
// refreshes, which would help us do certain loop optimizations.
- llvm::Value *StateItemsPtr = Builder.CreateStructGEP(
- StatePtr->getAllocatedType(), StatePtr, 1, "stateitems.ptr");
+ Address StateItemsPtr = Builder.CreateStructGEP(
+ StatePtr, 1, getPointerSize(), "stateitems.ptr");
llvm::Value *EnumStateItems =
Builder.CreateLoad(StateItemsPtr, "stateitems");
// Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr =
Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
- llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
+ llvm::Value *CurrentItem =
+ Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
// Cast that value to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
@@ -1735,15 +1759,8 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
-/// Produce the code for a CK_ARCProduceObject. Just does a
-/// primitive retain.
-llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
- llvm::Value *value) {
- return EmitARCRetain(type, value);
-}
-
namespace {
- struct CallObjCRelease : EHScopeStack::Cleanup {
+ struct CallObjCRelease final : EHScopeStack::Cleanup {
CallObjCRelease(llvm::Value *object) : object(object) {}
llvm::Value *object;
@@ -1772,7 +1789,7 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
- llvm::Constant *&fn = CGM.getARCEntrypoints().clang_arc_use;
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(CGM.VoidTy, None, true);
@@ -1838,7 +1855,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
llvm::Constant *&fn,
StringRef fnName) {
if (!fn) {
@@ -1848,16 +1865,15 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
}
// Cast the argument to 'id*'.
- llvm::Type *origType = addr->getType();
+ llvm::Type *origType = addr.getElementType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
// Call the function.
- llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr);
+ llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
// Cast the result back to a dereference of the original type.
- if (origType != CGF.Int8PtrPtrTy)
- result = CGF.Builder.CreateBitCast(result,
- cast<llvm::PointerType>(origType)->getElementType());
+ if (origType != CGF.Int8PtrTy)
+ result = CGF.Builder.CreateBitCast(result, origType);
return result;
}
@@ -1865,13 +1881,12 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
llvm::Value *value,
llvm::Constant *&fn,
StringRef fnName,
bool ignored) {
- assert(cast<llvm::PointerType>(addr->getType())->getElementType()
- == value->getType());
+ assert(addr.getElementType() == value->getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
@@ -1884,7 +1899,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
llvm::Type *origType = value->getType();
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
};
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
@@ -1897,11 +1912,11 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF,
- llvm::Value *dst,
- llvm::Value *src,
+ Address dst,
+ Address src,
llvm::Constant *&fn,
StringRef fnName) {
- assert(dst->getType() == src->getType());
+ assert(dst.getType() == src.getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
@@ -1912,8 +1927,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
}
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy)
+ CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -1932,7 +1947,7 @@ llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retain,
+ CGM.getObjCEntrypoints().objc_retain,
"objc_retain");
}
@@ -1946,7 +1961,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
bool mandatory) {
llvm::Value *result
= emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retainBlock,
+ CGM.getObjCEntrypoints().objc_retainBlock,
"objc_retainBlock");
// If the copy isn't mandatory, add !clang.arc.copy_on_escape to
@@ -1956,7 +1971,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
if (!mandatory && isa<llvm::Instruction>(result)) {
llvm::CallInst *call
= cast<llvm::CallInst>(result->stripPointerCasts());
- assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
+ assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
llvm::MDNode::get(Builder.getContext(), None));
@@ -1975,7 +1990,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
// Fetch the void(void) inline asm which marks that we're going to
// retain the autoreleased return value.
llvm::InlineAsm *&marker
- = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ = CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
if (!marker) {
StringRef assembly
= CGM.getTargetCodeGenInfo()
@@ -2012,7 +2027,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
Builder.CreateCall(marker);
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
"objc_retainAutoreleasedReturnValue");
}
@@ -2022,7 +2037,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
- llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
@@ -2050,12 +2065,10 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
/// At -O1 and above, just load and call objc_release.
///
/// call void \@objc_storeStrong(i8** %addr, i8* null)
-void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
+void CodeGenFunction::EmitARCDestroyStrong(Address addr,
ARCPreciseLifetime_t precise) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
- llvm::Value *null = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(addrTy->getElementType()));
+ llvm::Value *null = getNullForVariable(addr);
EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
return;
}
@@ -2066,13 +2079,12 @@ void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
-llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
llvm::Value *value,
bool ignored) {
- assert(cast<llvm::PointerType>(addr->getType())->getElementType()
- == value->getType());
+ assert(addr.getElementType() == value->getType());
- llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
if (!fn) {
llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
llvm::FunctionType *fnType
@@ -2081,7 +2093,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
}
llvm::Value *args[] = {
- Builder.CreateBitCast(addr, Int8PtrPtrTy),
+ Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
Builder.CreateBitCast(value, Int8PtrTy)
};
EmitNounwindRuntimeCall(fn, args);
@@ -2130,7 +2142,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_autorelease,
+ CGM.getObjCEntrypoints().objc_autorelease,
"objc_autorelease");
}
@@ -2139,7 +2151,7 @@ llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
"objc_autoreleaseReturnValue",
/*isTailCall*/ true);
}
@@ -2149,7 +2161,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
"objc_retainAutoreleaseReturnValue",
/*isTailCall*/ true);
}
@@ -2178,32 +2190,32 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retainAutorelease,
+ CGM.getObjCEntrypoints().objc_retainAutorelease,
"objc_retainAutorelease");
}
/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
-llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
return emitARCLoadOperation(*this, addr,
- CGM.getARCEntrypoints().objc_loadWeak,
+ CGM.getObjCEntrypoints().objc_loadWeak,
"objc_loadWeak");
}
/// i8* \@objc_loadWeakRetained(i8** %addr)
-llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
return emitARCLoadOperation(*this, addr,
- CGM.getARCEntrypoints().objc_loadWeakRetained,
+ CGM.getObjCEntrypoints().objc_loadWeakRetained,
"objc_loadWeakRetained");
}
/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
-llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
llvm::Value *value,
bool ignored) {
return emitARCStoreOperation(*this, addr, value,
- CGM.getARCEntrypoints().objc_storeWeak,
+ CGM.getObjCEntrypoints().objc_storeWeak,
"objc_storeWeak", ignored);
}
@@ -2211,7 +2223,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
-void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
// If we're initializing to null, just write null to memory; no need
// to get the runtime involved. But don't do this if optimization
// is enabled, because accounting for this would make the optimizer
@@ -2223,14 +2235,14 @@ void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
}
emitARCStoreOperation(*this, addr, value,
- CGM.getARCEntrypoints().objc_initWeak,
+ CGM.getObjCEntrypoints().objc_initWeak,
"objc_initWeak", /*ignored*/ true);
}
/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
-void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
- llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
@@ -2240,31 +2252,31 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
// Cast the argument to 'id*'.
addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
- EmitNounwindRuntimeCall(fn, addr);
+ EmitNounwindRuntimeCall(fn, addr.getPointer());
}
/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
-void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
- CGM.getARCEntrypoints().objc_moveWeak,
+ CGM.getObjCEntrypoints().objc_moveWeak,
"objc_moveWeak");
}
/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
-void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
- CGM.getARCEntrypoints().objc_copyWeak,
+ CGM.getObjCEntrypoints().objc_copyWeak,
"objc_copyWeak");
}
/// Produce the code to do an objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
- llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Int8PtrTy, false);
@@ -2279,7 +2291,7 @@ llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
assert(value->getType() == Int8PtrTy);
- llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
@@ -2332,25 +2344,25 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
}
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyWeak(addr);
}
namespace {
- struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
llvm::Value *Token;
CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
@@ -2359,7 +2371,7 @@ namespace {
CGF.EmitObjCAutoreleasePoolPop(Token);
}
};
- struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
llvm::Value *Token;
CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
@@ -2932,7 +2944,9 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__assign_helper_atomic_property_",
&CGM.getModule());
-
+
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
StartFunction(FD, C.VoidTy, Fn, FI, args);
DeclRefExpr DstExpr(&dstDecl, false, DestTy,
@@ -3011,6 +3025,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
StartFunction(FD, C.VoidTy, Fn, FI, args);
DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
@@ -3046,7 +3062,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CharUnits Alignment
= getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
EmitAggExpr(TheCXXConstructExpr,
- AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
+ Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index b52d623b948b..f0af3e924c09 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -166,9 +166,9 @@ protected:
/// where the C code specifies const char*.
llvm::Constant *MakeConstantString(const std::string &Str,
const std::string &Name="") {
- auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
+ Array.getPointer(), Zeros);
}
/// Emits a linkonce_odr string, whose name is the prefix followed by the
/// string value. This allows the linker to combine the strings between
@@ -191,34 +191,41 @@ protected:
/// first argument.
llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
}
/// Generates a global array. The vector must contain the same number of
/// elements as the array type declares, of the type specified as the array
/// element type.
llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
}
/// Generates a global array, inferring the array type from the specified
/// element type and the size of the initialiser.
llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
- return MakeGlobal(ArrayTy, V, Name, linkage);
+ return MakeGlobal(ArrayTy, V, Align, Name, linkage);
}
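
A hypothetical call site, showing the new mandatory alignment argument threaded through; the symbol name here is made up for illustration:

    llvm::GlobalVariable *GV =
        MakeGlobalArray(PtrToInt8Ty, Elements, CGM.getPointerAlign(),
                        ".objc_example_list"); // hypothetical name
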
/// Returns a property name and encoding string.
llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
@@ -234,9 +241,7 @@ protected:
NameAndAttributes += TypeStr;
NameAndAttributes += '\0';
NameAndAttributes += PD->getNameAsString();
- auto *ConstStr = CGM.GetAddrOfConstantCString(NameAndAttributes);
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ return MakeConstantString(NameAndAttributes);
}
return MakeConstantString(PD->getNameAsString());
}
@@ -275,6 +280,10 @@ protected:
if (V->getType() == Ty) return V;
return B.CreateBitCast(V, Ty);
}
+ Address EnforceType(CGBuilderTy &B, Address V, llvm::Type *Ty) {
+ if (V.getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
// Some zeros used for GEPs in lots of places.
llvm::Constant *Zeros[2];
/// Null pointer value. Mainly used as a terminator in various arrays.
@@ -435,7 +444,7 @@ private:
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding, bool lval);
+ const std::string &TypeEncoding);
/// Returns the variable used to store the offset of an instance variable.
llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar);
@@ -458,7 +467,7 @@ protected:
/// mechanism differs between the GCC and GNU runtimes, so this method must
/// be overridden in subclasses.
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
- llvm::Value *ObjCSuper,
+ Address ObjCSuper,
llvm::Value *cmd,
MessageSendInfo &MSI) = 0;
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
@@ -477,7 +486,7 @@ public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion);
- llvm::Constant *GenerateConstantString(const StringLiteral *) override;
+ ConstantAddress GenerateConstantString(const StringLiteral *) override;
RValue
GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return,
@@ -494,8 +503,8 @@ public:
const ObjCMethodDecl *Method) override;
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval = false) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
llvm::Value *GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) override;
llvm::Constant *GetEHType(QualType T) override;
@@ -527,18 +536,18 @@ public:
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal=false) override;
void EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *src,
- llvm::Value *dest, llvm::Value *ivarOffset) override;
+ Address dest, llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
- void EmitGCMemmoveCollectable(CodeGenFunction &CGF, llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ llvm::Value *src, Address dest) override;
+ void EmitGCMemmoveCollectable(CodeGenFunction &CGF, Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) override;
LValue EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy,
llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
@@ -593,11 +602,11 @@ protected:
imp->setMetadata(msgSendMDKind, node);
return imp.getInstruction();
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy), cmd};
+ PtrToObjCSuperTy).getPointer(), cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
public:
@@ -647,7 +656,8 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::Function *LookupFn = SlotLookupFn;
// Store the receiver on the stack so that we can reload it later
- llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Address ReceiverPtr =
+ CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
Builder.CreateStore(Receiver, ReceiverPtr);
llvm::Value *self;
@@ -662,7 +672,7 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn->setDoesNotCapture(1);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
EnforceType(Builder, cmd, SelectorTy),
EnforceType(Builder, self, IdTy) };
llvm::CallSite slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
@@ -670,25 +680,27 @@ class CGObjCGNUstep : public CGObjCGNU {
slot->setMetadata(msgSendMDKind, node);
// Load the imp from the slot
- llvm::Value *imp = Builder.CreateLoad(
- Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4));
+ llvm::Value *imp = Builder.CreateAlignedLoad(
+ Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4),
+ CGF.getPointerAlign());
// The lookup function may have changed the receiver, so make sure we use
// the new one.
Receiver = Builder.CreateLoad(ReceiverPtr, true);
return imp;
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd,
MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd};
llvm::CallInst *slot =
CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
slot->setOnlyReadsMemory();
- return Builder.CreateLoad(Builder.CreateStructGEP(nullptr, slot, 4));
+ return Builder.CreateAlignedLoad(Builder.CreateStructGEP(nullptr, slot, 4),
+ CGF.getPointerAlign());
}
public:
CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
@@ -807,10 +819,10 @@ protected:
return imp.getInstruction();
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper.getPointer(),
PtrToObjCSuperTy), cmd};
if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
@@ -1011,7 +1023,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
const std::string &Name,
bool isWeak) {
- llvm::GlobalVariable *ClassNameGV = CGM.GetAddrOfConstantCString(Name);
+ llvm::Constant *ClassName = MakeConstantString(Name);
// With the incompatible ABI, this will need to be replaced with a direct
// reference to the class symbol. For the compatible nonfragile ABI we are
// still performing this lookup at run time but emitting the symbol for the
@@ -1021,8 +1033,6 @@ llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
// with memoized versions or with static references if it's safe to do so.
if (!isWeak)
EmitClassRef(Name);
- llvm::Value *ClassName =
- CGF.Builder.CreateStructGEP(ClassNameGV->getValueType(), ClassNameGV, 0);
llvm::Constant *ClassLookupFn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
@@ -1041,7 +1051,7 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding, bool lval) {
+ const std::string &TypeEncoding) {
SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = nullptr;
@@ -1055,29 +1065,34 @@ llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
}
if (!SelValue) {
SelValue = llvm::GlobalAlias::create(
- SelectorTy, llvm::GlobalValue::PrivateLinkage,
+ SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage,
".objc_selector_" + Sel.getAsString(), &TheModule);
Types.emplace_back(TypeEncoding, SelValue);
}
- if (lval) {
- llvm::Value *tmp = CGF.CreateTempAlloca(SelValue->getType());
- CGF.Builder.CreateStore(SelValue, tmp);
- return tmp;
- }
return SelValue;
}
-llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval) {
- return GetSelector(CGF, Sel, std::string(), lval);
+Address CGObjCGNU::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ llvm::Value *SelValue = GetSelector(CGF, Sel);
+
+ // Store it to a temporary. Does this satisfy the semantics of
+ // GetAddrOfSelector? Hopefully.
+ Address tmp = CGF.CreateTempAlloca(SelValue->getType(),
+ CGF.getPointerAlign());
+ CGF.Builder.CreateStore(SelValue, tmp);
+ return tmp;
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return GetSelector(CGF, Sel, std::string());
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
std::string SelTypes;
CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
- return GetSelector(CGF, Method->getSelector(), SelTypes, false);
+ return GetSelector(CGF, Method->getSelector(), SelTypes);
}
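
Hypothetical call sites for the split interface (the Runtime reference is assumed): value contexts use GetSelector, while l-value contexts go through GetAddrOfSelector and get a stack temporary.

    llvm::Value *SelVal = Runtime.GetSelector(CGF, Sel);       // r-value use
    Address SelAddr     = Runtime.GetAddrOfSelector(CGF, Sel); // address use
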
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
@@ -1160,21 +1175,23 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
fields.push_back(BVtable);
fields.push_back(typeName);
llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- nullptr), fields, "__objc_eh_typeinfo_" + className,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr),
+ fields, CGM.getPointerAlign(),
+ "__objc_eh_typeinfo_" + className,
llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
}
/// Generate an NSConstantString object.
-llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
std::string Str = SL->getString().str();
+ CharUnits Align = CGM.getPointerAlign();
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return old->getValue();
+ return ConstantAddress(old->getValue(), Align);
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1197,11 +1214,11 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr),
- Ivars, ".objc_str");
+ Ivars, Align, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ObjCStr;
+ return ConstantAddress(ObjCStr, Align);
}
///Generates a message send where the super is the receiver. This is a message
@@ -1261,14 +1278,14 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy, llvm::GlobalValue::InternalLinkage,
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = MetaClassPtrAlias;
} else {
if (!ClassPtrAlias) {
ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy, llvm::GlobalValue::InternalLinkage,
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
".objc_class_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = ClassPtrAlias;
@@ -1281,16 +1298,20 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
// Get the superclass pointer
ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
// Load the superclass pointer
- ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ ReceiverClass =
+ Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy = llvm::StructType::get(
Receiver->getType(), IdTy, nullptr);
- llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ // FIXME: Is this really supposed to be a dynamic alloca?
+ Address ObjCSuper = Address(Builder.CreateAlloca(ObjCSuperTy),
+ CGF.getPointerAlign());
Builder.CreateStore(Receiver,
- Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 0));
+ Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
Builder.CreateStore(ReceiverClass,
- Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 1));
+ Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
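
The structure being populated above, for reference; this is assumed to match the runtime's declaration, with the conventional field names:

    // struct objc_super {
    //   id receiver;        // struct GEP index 0, offset 0
    //   Class super_class;  // index 1, offset = one pointer size
    // };
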
@@ -1306,8 +1327,8 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, nullptr,
- &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ CGCalleeInfo(), &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
}
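
The CharUnits offset passed to the new CreateStructGEP calls is what lets the builder compute the alignment of each field address from the base alignment; for power-of-two alignments the result is the greatest common divisor of the two. A sketch of that arithmetic (illustrative, not the CGBuilderTy implementation):

#include <cstdint>

constexpr uint64_t gcd(uint64_t a, uint64_t b) { return b ? gcd(b, a % b) : a; }

// Alignment known for (base + fieldOffset) when base is aligned to baseAlign.
constexpr uint64_t fieldAlignment(uint64_t baseAlign, uint64_t fieldOffset) {
  return fieldOffset == 0 ? baseAlign : gcd(baseAlign, fieldOffset);
}

static_assert(fieldAlignment(8, 0) == 8, "field 0 keeps the base alignment");
static_assert(fieldAlignment(8, 8) == 8, "a pointer-sized offset stays 8-aligned");
static_assert(fieldAlignment(8, 4) == 4, "a 4-byte offset drops to 4");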
@@ -1419,8 +1440,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, nullptr,
- &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ CGCalleeInfo(), &call);
call->setMetadata(msgSendMDKind, node);
@@ -1435,16 +1456,14 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
msgRet = RValue::get(phi);
} else if (msgRet.isAggregate()) {
- llvm::Value *v = msgRet.getAggregateAddr();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
- llvm::AllocaInst *NullVal =
- CGF.CreateTempAlloca(RetTy->getElementType(), "null");
- CGF.InitTempAlloca(NullVal,
- llvm::Constant::getNullValue(RetTy->getElementType()));
- phi->addIncoming(v, messageBB);
- phi->addIncoming(NullVal, startBB);
- msgRet = RValue::getAggregate(phi);
+ Address v = msgRet.getAggregateAddress();
+ llvm::PHINode *phi = Builder.CreatePHI(v.getType(), 2);
+ llvm::Type *RetTy = v.getElementType();
+ Address NullVal = CGF.CreateTempAlloca(RetTy, v.getAlignment(), "null");
+ CGF.InitTempAlloca(NullVal, llvm::Constant::getNullValue(RetTy));
+ phi->addIncoming(v.getPointer(), messageBB);
+ phi->addIncoming(NullVal.getPointer(), startBB);
+ msgRet = RValue::getAggregate(Address(phi, v.getAlignment()));
} else /* isComplex() */ {
std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
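
In plain terms, the aggregate branch above chooses between the callee's result slot and a zero-initialized temporary of the same type and alignment, then treats the chosen pointer as the result. A simplified analogue, assuming the caller supplies both slots:

#include <cstring>

// receiverWasNil selects the zeroed slot, mirroring the phi over the two
// incoming pointers; both slots must have the layout of the return type.
void *mergeAggregateResult(bool receiverWasNil, void *resultSlot,
                           void *zeroSlot, std::size_t size) {
  if (receiverWasNil) {
    std::memset(zeroSlot, 0, size);   // InitTempAlloca with a null value
    return zeroSlot;
  }
  return resultSlot;
}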
@@ -1517,7 +1536,8 @@ GenerateMethodList(StringRef ClassName,
Methods.push_back(MethodArray);
// Create an instance of the structure
- return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+ return MakeGlobal(ObjCMethodListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
}
/// Generates an IvarList. Used in construction of an objc_class.
@@ -1557,7 +1577,8 @@ GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
nullptr);
// Create an instance of the structure
- return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+ return MakeGlobal(ObjCIvarListTy, Elements, CGM.getPointerAlign(),
+ ".objc_ivar_list");
}
/// Generate a class structure
@@ -1640,8 +1661,9 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
std::string(Name));
llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
- llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
- llvm::GlobalValue::ExternalLinkage);
+ llvm::Constant *Class =
+ MakeGlobal(ClassTy, Elements, CGM.getPointerAlign(), ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
if (ClassRef) {
ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
ClassRef->getType()));
@@ -1676,7 +1698,8 @@ GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
Methods.push_back(Array);
- return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+ return MakeGlobal(ObjCMethodDescListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
}
// Create the protocol list structure used in classes, categories and so on
@@ -1709,7 +1732,8 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
Elements.push_back(NULLPtr);
Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
Elements.push_back(ProtocolArray);
- return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+ return MakeGlobal(ProtocolListTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol_list");
}
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
@@ -1749,7 +1773,8 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
Elements.push_back(MethodList);
Elements.push_back(MethodList);
Elements.push_back(MethodList);
- return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+ return MakeGlobal(ProtocolTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol");
}
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1910,7 +1935,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
Elements.push_back(OptionalPropertyList);
ExistingProtocols[ProtocolName] =
llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
- ".objc_protocol"), IdTy);
+ CGM.getPointerAlign(), ".objc_protocol"), IdTy);
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
@@ -1952,10 +1977,12 @@ void CGObjCGNU::GenerateProtocolHolderCategory() {
ExistingProtocols.size()));
ProtocolElements.push_back(ProtocolArray);
Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
- ProtocolElements, ".objc_protocol_list"), PtrTy));
+ ProtocolElements, CGM.getPointerAlign(),
+ ".objc_protocol_list"), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy));
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
}
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
@@ -1995,7 +2022,7 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
llvm::ConstantInt::get(Int32Ty, values.size()),
array };
llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
- nullptr), fields);
+ nullptr), fields, CharUnits::fromQuantity(4));
llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
return ptr;
}
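
For context on MakeBitField: libobjc2 lets a bitfield small enough to fit in a pointer be stored inline, shifted left one bit with the low bit set as a tag, while larger bitfields become the out-of-line {count, words...} global built above and are passed through getPtrToInt. A hypothetical encoder/decoder pair for the inline form (a sketch of the convention, not the runtime's API):

#include <cassert>
#include <cstdint>

// A real pointer is always even, so the runtime can tell the two
// representations apart by testing the low bit.
uintptr_t encodeInline(uintptr_t bits) { return (bits << 1) | 1; }
bool isInline(uintptr_t v) { return (v & 1) != 0; }
uintptr_t decodeInline(uintptr_t v) { return v >> 1; }

int main() {
  uintptr_t v = encodeInline(0b1011);
  assert(isInline(v) && decodeInline(v) == 0b1011);
  return 0;
}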
@@ -2047,7 +2074,8 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy));
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
}
llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
@@ -2225,7 +2253,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
llvm::GlobalVariable *IvarOffsetArray =
- MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, CGM.getPointerAlign(),
+ ".ivar.offsets");
// Collect information about instance methods
@@ -2385,13 +2414,15 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr);
llvm::Type *StaticsListPtrTy =
llvm::PointerType::getUnqual(StaticsListTy);
- Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ Statics = MakeGlobal(StaticsListTy, Elements, CGM.getPointerAlign(),
+ ".objc_statics");
llvm::ArrayType *StaticsListArrayTy =
llvm::ArrayType::get(StaticsListPtrTy, 2);
Elements.clear();
Elements.push_back(Statics);
Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
- Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = MakeGlobal(StaticsListArrayTy, Elements,
+ CGM.getPointerAlign(), ".objc_statics_ptr");
Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
}
// Array of classes, categories, and constant objects
@@ -2442,7 +2473,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Number of static selectors
Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
llvm::GlobalVariable *SelectorList =
- MakeGlobalArray(SelStructTy, Selectors, ".objc_selector_list");
+ MakeGlobalArray(SelStructTy, Selectors, CGM.getPointerAlign(),
+ ".objc_selector_list");
Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
SelStructPtrTy));
@@ -2475,7 +2507,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
Elements.push_back(ClassList);
// Construct the symbol table
- llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+ llvm::Constant *SymTab =
+ MakeGlobal(SymTabTy, Elements, CGM.getPointerAlign());
// The symbol table is contained in a module which has some version-checking
// constants
@@ -2516,7 +2549,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
break;
}
- llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements, CGM.getPointerAlign());
// Create the load function calling the runtime entry point with the module
// structure
@@ -2526,7 +2559,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
&TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
- CGBuilderTy Builder(VMContext);
+ CGBuilderTy Builder(CGM, VMContext);
Builder.SetInsertPoint(EntryBB);
llvm::FunctionType *FT =
@@ -2678,57 +2711,63 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
}
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
+ Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
- return B.CreateCall(WeakReadFn.getType(), WeakReadFn, AddrWeakObj);
+ return B.CreateCall(WeakReadFn.getType(), WeakReadFn,
+ AddrWeakObj.getPointer());
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(WeakAssignFn.getType(), WeakAssignFn, {src, dst});
+ B.CreateCall(WeakAssignFn.getType(), WeakAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
// FIXME: Add a thread-local assign API.
assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
- B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn, {src, dst});
+ B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, IdTy);
- B.CreateCall(IvarAssignFn.getType(), IvarAssignFn, {src, dst, ivarOffset});
+ B.CreateCall(IvarAssignFn.getType(), IvarAssignFn,
+ {src, dst.getPointer(), ivarOffset});
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn, {src, dst});
+ B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
DestPtr = EnforceType(B, DestPtr, PtrTy);
SrcPtr = EnforceType(B, SrcPtr, PtrTy);
- B.CreateCall(MemMoveFn.getType(), MemMoveFn, {DestPtr, SrcPtr, Size});
+ B.CreateCall(MemMoveFn.getType(), MemMoveFn,
+ {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
@@ -2811,17 +2850,22 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
- ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ CGF.Builder.CreateDefaultAlignedLoad(CGF.Builder.CreateAlignedLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar")),
PtrDiffTy);
std::string name = "__objc_ivar_offset_value_" +
Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ CharUnits Align = CGM.getIntAlign();
llvm::Value *Offset = TheModule.getGlobalVariable(name);
- if (!Offset)
- Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ if (!Offset) {
+ auto GV = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::LinkOnceAnyLinkage,
llvm::Constant::getNullValue(IntTy), name);
- Offset = CGF.Builder.CreateLoad(Offset);
+ GV->setAlignment(Align.getQuantity());
+ Offset = GV;
+ }
+ Offset = CGF.Builder.CreateAlignedLoad(Offset, Align);
if (Offset->getType() != PtrDiffTy)
Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
return Offset;
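
What the version >= 10 path compiles down to: a linkonce global that starts at zero, is patched by the runtime at class-load time, and is loaded at its natural int alignment before indexing off self. A plain-C++ analogue (symbol name illustrative):

#include <cstddef>

// The runtime overwrites this with the real offset when the class is loaded;
// linkonce semantics let every translation unit emit its own copy.
std::size_t objc_ivar_offset_value_MyClass_ivar = 0;

char *ivarAddress(char *self) {
  return self + objc_ivar_offset_value_MyClass_ivar;  // load offset, then index
}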
@@ -2845,6 +2889,7 @@ clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
case ObjCRuntime::FragileMacOSX:
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
llvm_unreachable("these runtimes are not GNU runtimes");
}
llvm_unreachable("bad runtime");
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index a45446a7065a..5f3ebbd75765 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -659,9 +659,6 @@ public:
// MessageRefCPtrTy - clang type for struct _message_ref_t*
QualType MessageRefCPtrTy;
- // MessengerTy - Type of the messenger (shown as IMP above)
- llvm::FunctionType *MessengerTy;
-
// SuperMessageRefTy - LLVM for:
// struct _super_message_ref_t {
// SUPER_IMP messenger;
@@ -735,20 +732,6 @@ public:
class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
public:
- // FIXME - accessibility
- class GC_IVAR {
- public:
- unsigned ivar_bytepos;
- unsigned ivar_size;
- GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
- : ivar_bytepos(bytepos), ivar_size(size) {}
-
- // Allow sorting based on byte pos.
- bool operator<(const GC_IVAR &b) const {
- return ivar_bytepos < b.ivar_bytepos;
- }
- };
-
class SKIP_SCAN {
public:
unsigned skip;
@@ -830,10 +813,6 @@ protected:
// FIXME! May not be needing this after all.
unsigned ObjCABI;
- // gc ivar layout bitmap calculation helper caches.
- SmallVector<GC_IVAR, 16> SkipIvars;
- SmallVector<GC_IVAR, 16> IvarsInfo;
-
// arc/mrr layout of captured block literal variables.
SmallVector<RUN_SKIP, 16> RunSkipBlockVars;
@@ -854,7 +833,7 @@ protected:
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
/// DefinedCategoryNames - list of category names in form Class_Category.
- llvm::SetVector<std::string> DefinedCategoryNames;
+ llvm::SmallSetVector<std::string, 16> DefinedCategoryNames;
/// MethodVarTypes - uniqued method type signatures. We have to use
/// a StringMap here because have no other unique reference.
@@ -934,20 +913,28 @@ protected:
/// BuildIvarLayout - Builds ivar layout bitmap for the class
/// implementation for the __strong or __weak case.
///
+ /// \param hasMRCWeakIvars - Whether we are compiling in MRC and there
+ /// are any weak ivars defined directly in the class. Meaningless unless
+ /// building a weak layout. Does not guarantee that the layout will
+ /// actually have any entries, because the ivar might be under-aligned.
llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
- bool ForStrongLayout);
-
- llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap);
+ CharUnits beginOffset,
+ CharUnits endOffset,
+ bool forStrongLayout,
+ bool hasMRCWeakIvars);
- void BuildAggrIvarRecordLayout(const RecordType *RT,
- unsigned int BytePos, bool ForStrongLayout,
- bool &HasUnion);
- void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
- const llvm::StructLayout *Layout,
- const RecordDecl *RD,
- ArrayRef<const FieldDecl*> RecFields,
- unsigned int BytePos, bool ForStrongLayout,
- bool &HasUnion);
+ llvm::Constant *BuildStrongIvarLayout(const ObjCImplementationDecl *OI,
+ CharUnits beginOffset,
+ CharUnits endOffset) {
+ return BuildIvarLayout(OI, beginOffset, endOffset, true, false);
+ }
+
+ llvm::Constant *BuildWeakIvarLayout(const ObjCImplementationDecl *OI,
+ CharUnits beginOffset,
+ CharUnits endOffset,
+ bool hasMRCWeakIvars) {
+ return BuildIvarLayout(OI, beginOffset, endOffset, false, hasMRCWeakIvars);
+ }
Qualifiers::ObjCLifetime getBlockCaptureLifetime(QualType QT, bool ByrefLayout);
@@ -970,7 +957,6 @@ protected:
llvm::Constant *getBitmapBlockLayout(bool ComputeByrefLayout);
-
/// GetIvarLayoutName - Returns a unique constant for the given
/// ivar layout bitmap.
llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
@@ -1002,6 +988,7 @@ protected:
/// defined. The return value has type ProtocolPtrTy.
llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+public:
/// CreateMetadataVar - Create a global variable with internal
/// linkage for use by the Objective-C runtime.
///
@@ -1017,9 +1004,10 @@ protected:
/// \param AddToUsed - Whether the variable should be added to
/// "llvm.used".
llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init,
- StringRef Section, unsigned Align,
+ StringRef Section, CharUnits Align,
bool AddToUsed);
+protected:
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ReturnValueSlot Return,
QualType ResultType,
@@ -1029,6 +1017,7 @@ protected:
bool IsSuper,
const CallArgList &CallArgs,
const ObjCMethodDecl *OMD,
+ const ObjCInterfaceDecl *ClassReceiver,
const ObjCCommonTypesHelper &ObjCTypes);
/// EmitImageInfo - Emit the image info marker used to encode some module
@@ -1039,7 +1028,11 @@ public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
- llvm::Constant *GenerateConstantString(const StringLiteral *SL) override;
+ bool isNonFragileABI() const {
+ return ObjCABI == 2;
+ }
+
+ ConstantAddress GenerateConstantString(const StringLiteral *SL) override;
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=nullptr) override;
@@ -1084,7 +1077,9 @@ private:
/// EmitClassExtension - Generate the class extension structure used
/// to store the weak ivar layout and properties. The return value
/// has type ClassExtensionPtrTy.
- llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID,
+ CharUnits instanceSize,
+ bool hasMRCWeakIvars);
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class.
@@ -1172,8 +1167,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval=false);
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1199,8 +1194,8 @@ public:
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval = false) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1236,19 +1231,19 @@ public:
void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal = false) override;
void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
+ llvm::Value *src, Address dest) override;
void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *size) override;
LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
@@ -1395,8 +1390,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval=false);
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -1474,9 +1469,10 @@ public:
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lvalue = false) override
- { return EmitSelector(CGF, Sel, lvalue); }
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelector(CGF, Sel); }
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelectorAddr(CGF, Sel); }
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1531,19 +1527,19 @@ public:
void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal = false) override;
void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
+ llvm::Value *src, Address dest) override;
void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *size) override;
LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
@@ -1645,7 +1641,7 @@ struct NullReturnState {
// memory or (2) agg values in registers.
if (result.isAggregate()) {
assert(result.isAggregate() && "null init of non-aggregate result?");
- CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ CGF.EmitNullInitialization(result.getAggregateAddress(), resultType);
if (contBB) CGF.EmitBlock(contBB);
return result;
}
@@ -1711,9 +1707,11 @@ llvm::Value *CGObjCMac::GetClass(CodeGenFunction &CGF,
}
/// GetSelector - Return the pointer to the unique'd string for this selector.
-llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval) {
- return EmitSelector(CGF, Sel, lval);
+llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelector(CGF, Sel);
+}
+Address CGObjCMac::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelectorAddr(CGF, Sel);
}
llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl
*Method) {
@@ -1756,7 +1754,7 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
};
*/
-llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ConstantAddress CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
@@ -1783,13 +1781,14 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- llvm::Value *ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(
ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0));
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -1803,12 +1802,13 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// isa" is the first ivar in a class (which it must be).
Target = EmitClassRef(CGF, Class->getSuperClass());
Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0);
- Target = CGF.Builder.CreateLoad(Target);
+ Target = CGF.Builder.CreateAlignedLoad(Target, CGF.getPointerAlign());
} else {
llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class);
llvm::Value *SuperPtr =
CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1);
- llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ llvm::Value *Super =
+ CGF.Builder.CreateAlignedLoad(SuperPtr, CGF.getPointerAlign());
Target = Super;
}
} else if (isCategoryImpl)
@@ -1816,19 +1816,19 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
else {
llvm::Value *ClassPtr = EmitSuperClassRef(Class);
ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1);
- Target = CGF.Builder.CreateLoad(ClassPtr);
+ Target = CGF.Builder.CreateAlignedLoad(ClassPtr, CGF.getPointerAlign());
}
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
- CGF.Builder.CreateStore(
- Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1));
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
return EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
- ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs, Method, ObjCTypes);
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, Class, ObjCTypes);
}
/// Generate code for a message send expression.
@@ -1843,7 +1843,16 @@ CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
return EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs, Method, ObjCTypes);
+ false, CallArgs, Method, Class, ObjCTypes);
+}
+
+static bool isWeakLinkedClass(const ObjCInterfaceDecl *ID) {
+ do {
+ if (ID->isWeakImported())
+ return true;
+ } while ((ID = ID->getSuperClass()));
+
+ return false;
}
CodeGen::RValue
@@ -1856,6 +1865,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
bool IsSuper,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method,
+ const ObjCInterfaceDecl *ClassReceiver,
const ObjCCommonTypesHelper &ObjCTypes) {
CallArgList ActualArgs;
if (!IsSuper)
@@ -1872,11 +1882,38 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
CGM.getContext().getCanonicalType(ResultType) &&
"Result type mismatch!");
+ bool ReceiverCanBeNull = true;
+
+ // Super dispatch assumes that self is non-null; even the messenger
+ // doesn't have a null check internally.
+ if (IsSuper) {
+ ReceiverCanBeNull = false;
+
+ // If this is a direct dispatch of a class method, check whether the class,
+ // or anything in its hierarchy, was weak-linked.
+ } else if (ClassReceiver && Method && Method->isClassMethod()) {
+ ReceiverCanBeNull = isWeakLinkedClass(ClassReceiver);
+
+ // If we're emitting a method, and self is const (meaning just ARC, for now),
+ // and the receiver is a load of self, then self is a valid object.
+ } else if (auto CurMethod =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ auto Self = CurMethod->getSelfDecl();
+ if (Self->getType().isConstQualified()) {
+ if (auto LI = dyn_cast<llvm::LoadInst>(Arg0->stripPointerCasts())) {
+ llvm::Value *SelfAddr = CGF.GetAddrOfLocalVar(Self).getPointer();
+ if (SelfAddr == LI->getPointerOperand()) {
+ ReceiverCanBeNull = false;
+ }
+ }
+ }
+ }
+
NullReturnState nullReturn;
llvm::Constant *Fn = nullptr;
if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
- if (!IsSuper) nullReturn.init(CGF, Arg0);
+ if (ReceiverCanBeNull) nullReturn.init(CGF, Arg0);
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
} else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
@@ -1888,76 +1925,182 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
} else {
// arm64 uses objc_msgSend for stret methods and yet null receiver check
// must be made for it.
- if (!IsSuper && CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ if (ReceiverCanBeNull && CGM.ReturnTypeUsesSRet(MSI.CallInfo))
nullReturn.init(CGF, Arg0);
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
-
- bool requiresnullCheck = false;
- if (CGM.getLangOpts().ObjCAutoRefCount && Method)
+
+ // Emit a null-check if there's a consumed argument other than the receiver.
+ bool RequiresNullCheck = false;
+ if (ReceiverCanBeNull && CGM.getLangOpts().ObjCAutoRefCount && Method) {
for (const auto *ParamDecl : Method->params()) {
if (ParamDecl->hasAttr<NSConsumedAttr>()) {
if (!nullReturn.NullBB)
nullReturn.init(CGF, Arg0);
- requiresnullCheck = true;
+ RequiresNullCheck = true;
break;
}
}
+ }
+ llvm::Instruction *CallSite;
Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
- RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs,
+ CGCalleeInfo(), &CallSite);
+
+ // Mark the call as noreturn if the method is marked noreturn and the
+ // receiver cannot be null.
+ if (Method && Method->hasAttr<NoReturnAttr>() && !ReceiverCanBeNull) {
+ llvm::CallSite(CallSite).setDoesNotReturn();
+ }
+
return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
- requiresnullCheck ? Method : nullptr);
+ RequiresNullCheck ? Method : nullptr);
}
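
Condensing the nullability logic the hunk above introduces: super dispatch is always non-null, a class message needs a null check only if the class hierarchy is weak-linked, and a load of an immutable (ARC) self is known valid. A toy model with simplified stand-ins for the AST types:

struct ObjCClass {
  const ObjCClass *Super;
  bool WeakImported;
};

static bool isWeakLinked(const ObjCClass *C) {
  for (; C; C = C->Super)
    if (C->WeakImported) return true;
  return false;
}

bool receiverCanBeNull(bool isSuper, const ObjCClass *classReceiver,
                       bool isClassMethod, bool receiverIsLoadOfConstSelf) {
  if (isSuper)
    return false;                        // messenger assumes non-null self
  if (classReceiver && isClassMethod)
    return isWeakLinked(classReceiver);  // only weak-linked classes can be null
  if (receiverIsLoadOfConstSelf)
    return false;                        // ARC: self can't have been reassigned
  return true;
}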
-static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT,
+ bool pointee = false) {
+ // Note that GC qualification applies recursively to C pointer types
+ // that aren't otherwise decorated. This is weird, but it's probably
+ // an intentional workaround to the unreliable placement of GC qualifiers.
if (FQT.isObjCGCStrong())
return Qualifiers::Strong;
-
- if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
+
+ if (FQT.isObjCGCWeak())
return Qualifiers::Weak;
+
+ if (auto ownership = FQT.getObjCLifetime()) {
+ // Ownership does not apply recursively to C pointer types.
+ if (pointee) return Qualifiers::GCNone;
+ switch (ownership) {
+ case Qualifiers::OCL_Weak: return Qualifiers::Weak;
+ case Qualifiers::OCL_Strong: return Qualifiers::Strong;
+ case Qualifiers::OCL_ExplicitNone: return Qualifiers::GCNone;
+ case Qualifiers::OCL_Autoreleasing: llvm_unreachable("autoreleasing ivar?");
+ case Qualifiers::OCL_None: llvm_unreachable("known nonzero");
+ }
+ llvm_unreachable("bad objc ownership");
+ }
- // check for __unsafe_unretained
- if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
- return Qualifiers::GCNone;
-
+ // Treat unqualified retainable pointers as strong.
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
return Qualifiers::Strong;
- if (const PointerType *PT = FQT->getAs<PointerType>())
- return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+ // Walk into C pointer types, but only in GC.
+ if (Ctx.getLangOpts().getGC() != LangOptions::NonGC) {
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType(), /*pointee*/ true);
+ }
return Qualifiers::GCNone;
}
+namespace {
+ struct IvarInfo {
+ CharUnits Offset;
+ uint64_t SizeInWords;
+ IvarInfo(CharUnits offset, uint64_t sizeInWords)
+ : Offset(offset), SizeInWords(sizeInWords) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const IvarInfo &other) const {
+ return Offset < other.Offset;
+ }
+ };
+
+ /// A helper class for building GC layout strings.
+ class IvarLayoutBuilder {
+ CodeGenModule &CGM;
+
+ /// The start of the layout. Offsets will be relative to this value,
+ /// and entries less than this value will be silently discarded.
+ CharUnits InstanceBegin;
+
+ /// The end of the layout. Offsets will never exceed this value.
+ CharUnits InstanceEnd;
+
+ /// Whether we're generating the strong layout or the weak layout.
+ bool ForStrongLayout;
+
+ /// Whether the offsets in IvarsInfo might be out-of-order.
+ bool IsDisordered = false;
+
+ llvm::SmallVector<IvarInfo, 8> IvarsInfo;
+ public:
+ IvarLayoutBuilder(CodeGenModule &CGM, CharUnits instanceBegin,
+ CharUnits instanceEnd, bool forStrongLayout)
+ : CGM(CGM), InstanceBegin(instanceBegin), InstanceEnd(instanceEnd),
+ ForStrongLayout(forStrongLayout) {
+ }
+
+ void visitRecord(const RecordType *RT, CharUnits offset);
+
+ template <class Iterator, class GetOffsetFn>
+ void visitAggregate(Iterator begin, Iterator end,
+ CharUnits aggrOffset,
+ const GetOffsetFn &getOffset);
+
+ void visitField(const FieldDecl *field, CharUnits offset);
+
+ /// Add the layout of a block implementation.
+ void visitBlock(const CGBlockInfo &blockInfo);
+
+ /// Is there any information for an interesting bitmap?
+ bool hasBitmapData() const { return !IvarsInfo.empty(); }
+
+ llvm::Constant *buildBitmap(CGObjCCommonMac &CGObjC,
+ llvm::SmallVectorImpl<unsigned char> &buffer);
+
+ static void dump(ArrayRef<unsigned char> buffer) {
+ const unsigned char *s = buffer.data();
+ for (unsigned i = 0, e = buffer.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ };
+}
+
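
The buffer the builder eventually produces uses the runtime's skip/scan encoding: each byte carries a skip count in the high nibble and a scan count in the low nibble, counted in pointer-sized words, with a 0x00 terminator. A hedged sketch of that format (an illustration, not the buildBitmap implementation):

#include <cstdint>
#include <utility>
#include <vector>

std::vector<uint8_t> encodeSkipScan(
    const std::vector<std::pair<unsigned, unsigned>> &runs) {
  std::vector<uint8_t> buffer;
  for (auto [skip, scan] : runs) {
    while (skip > 0xF) { buffer.push_back(0xF0); skip -= 0xF; }  // long skips
    while (scan > 0xF) {                                         // long scans
      buffer.push_back(uint8_t((skip << 4) | 0xF));
      skip = 0;
      scan -= 0xF;
    }
    if (skip || scan)
      buffer.push_back(uint8_t((skip << 4) | scan));
  }
  buffer.push_back(0x00);  // terminator
  return buffer;
}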
llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
- if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
- !CGM.getLangOpts().ObjCAutoRefCount)
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
return nullPtr;
- bool hasUnion = false;
- SkipIvars.clear();
- IvarsInfo.clear();
- unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
+ IvarLayoutBuilder builder(CGM, CharUnits::Zero(), blockInfo.BlockSize,
+ /*for strong layout*/ true);
+
+ builder.visitBlock(blockInfo);
+
+ if (!builder.hasBitmapData())
+ return nullPtr;
+
+ llvm::SmallVector<unsigned char, 32> buffer;
+ llvm::Constant *C = builder.buildBitmap(*this, buffer);
+ if (CGM.getLangOpts().ObjCGCBitmapPrint && !buffer.empty()) {
+ printf("\n block variable layout for block: ");
+ builder.dump(buffer);
+ }
+ return C;
+}
+
+void IvarLayoutBuilder::visitBlock(const CGBlockInfo &blockInfo) {
// __isa is the first field in the block descriptor and, by the runtime's
// convention, must be assumed to be GC'able.
- IvarsInfo.push_back(GC_IVAR(0, 1));
+ IvarsInfo.push_back(IvarInfo(CharUnits::Zero(), 1));
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
- // Calculate the basic layout of the block structure.
- const llvm::StructLayout *layout =
- CGM.getDataLayout().getStructLayout(blockInfo.StructureType);
-
// Ignore the optional 'this' capture: C++ objects are not assumed
// to be GC'ed.
+ CharUnits lastFieldOffset;
+
// Walk the captured variables.
for (const auto &CI : blockDecl->captures()) {
const VarDecl *variable = CI.getVariable();
@@ -1968,64 +2111,51 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
// Ignore constant captures.
if (capture.isConstant()) continue;
- uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
+ CharUnits fieldOffset = capture.getOffset();
+
+ // Block fields are not necessarily ordered; if we detect that we're
+ // adding them out-of-order, make sure we sort later.
+ if (fieldOffset < lastFieldOffset)
+ IsDisordered = true;
+ lastFieldOffset = fieldOffset;
// __block variables are passed by their descriptor address.
if (CI.isByRef()) {
- IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
+ IvarsInfo.push_back(IvarInfo(fieldOffset, /*size in words*/ 1));
continue;
}
assert(!type->isArrayType() && "array variable should not be captured");
if (const RecordType *record = type->getAs<RecordType>()) {
- BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion);
+ visitRecord(record, fieldOffset);
continue;
}
Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
- unsigned fieldSize = CGM.getContext().getTypeSize(type);
-
- if (GCAttr == Qualifiers::Strong)
- IvarsInfo.push_back(GC_IVAR(fieldOffset,
- fieldSize / WordSizeInBits));
- else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
- SkipIvars.push_back(GC_IVAR(fieldOffset,
- fieldSize / ByteSizeInBits));
- }
-
- if (IvarsInfo.empty())
- return nullPtr;
- // Sort on byte position; captures might not be allocated in order,
- // and unions can do funny things.
- llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
- llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end());
-
- std::string BitMap;
- llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
- if (CGM.getLangOpts().ObjCGCBitmapPrint) {
- printf("\n block variable layout for block: ");
- const unsigned char *s = (const unsigned char*)BitMap.c_str();
- for (unsigned i = 0, e = BitMap.size(); i < e; i++)
- if (!(s[i] & 0xf0))
- printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
- else
- printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
- printf("\n");
+ if (GCAttr == Qualifiers::Strong) {
+ assert(CGM.getContext().getTypeSize(type)
+ == CGM.getTarget().getPointerWidth(0));
+ IvarsInfo.push_back(IvarInfo(fieldOffset, /*size in words*/ 1));
+ }
}
-
- return C;
}
+
/// getBlockCaptureLifetime - This routine returns the lifetime of the captured
/// block variable for the purpose of block layout metadata generation. FQT is
/// the type of the variable captured in the block.
Qualifiers::ObjCLifetime CGObjCCommonMac::getBlockCaptureLifetime(QualType FQT,
bool ByrefLayout) {
+ // If it has an ownership qualifier, we're done.
+ if (auto lifetime = FQT.getObjCLifetime())
+ return lifetime;
+
+ // If it doesn't, and this is ARC, it has no ownership.
if (CGM.getLangOpts().ObjCAutoRefCount)
- return FQT.getObjCLifetime();
+ return Qualifiers::OCL_None;
- // MRR.
+ // In MRC, retainable pointers are owned by non-__block variables.
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
return ByrefLayout ? Qualifiers::OCL_ExplicitNone : Qualifiers::OCL_Strong;
@@ -2361,9 +2491,8 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
}
}
- int e = Layout.size()-1;
- while (e >= 0) {
- unsigned char inst = Layout[e--];
+ while (!Layout.empty()) {
+ unsigned char inst = Layout.back();
enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS)
Layout.pop_back();
@@ -2376,19 +2505,19 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
// Block variable layout instruction has been inlined.
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
if (ComputeByrefLayout)
- printf("\n Inline instruction for BYREF variable layout: ");
+ printf("\n Inline BYREF variable layout: ");
else
- printf("\n Inline instruction for block variable layout: ");
- printf("0x0%" PRIx64 "\n", Result);
- }
- if (WordSizeInBytes == 8) {
- const llvm::APInt Instruction(64, Result);
- return llvm::Constant::getIntegerValue(CGM.Int64Ty, Instruction);
- }
- else {
- const llvm::APInt Instruction(32, Result);
- return llvm::Constant::getIntegerValue(CGM.Int32Ty, Instruction);
+ printf("\n Inline block variable layout: ");
+ printf("0x0%" PRIx64 "", Result);
+ if (auto numStrong = (Result & 0xF00) >> 8)
+ printf(", BL_STRONG:%d", (int) numStrong);
+ if (auto numByref = (Result & 0x0F0) >> 4)
+ printf(", BL_BYREF:%d", (int) numByref);
+ if (auto numWeak = (Result & 0x00F) >> 0)
+ printf(", BL_WEAK:%d", (int) numWeak);
+ printf(", BL_OPERATOR:0\n");
}
+ return llvm::ConstantInt::get(CGM.IntPtrTy, Result);
}
unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0;
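
The printf additions above pick the inline layout word apart nibble by nibble; extracted as a hypothetical helper:

#include <cstdint>
#include <cstdio>

// Bits 8-11 count strong captures, 4-7 byref captures, 0-3 weak captures;
// the "operator" position is always zero for the inline form.
void dumpInlineBlockLayout(uint64_t result) {
  std::printf("BL_STRONG:%u BL_BYREF:%u BL_WEAK:%u BL_OPERATOR:0\n",
              unsigned((result & 0xF00) >> 8),
              unsigned((result & 0x0F0) >> 4),
              unsigned(result & 0x00F));
}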
@@ -2399,9 +2528,9 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
if (ComputeByrefLayout)
- printf("\n BYREF variable layout: ");
+ printf("\n Byref variable layout: ");
else
- printf("\n block variable layout: ");
+ printf("\n Block variable layout: ");
for (unsigned i = 0, e = BitMap.size(); i != e; i++) {
unsigned char inst = BitMap[i];
enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
@@ -2443,7 +2572,7 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
llvm::GlobalVariable *Entry = CreateMetadataVar(
"OBJC_CLASS_NAME_",
llvm::ConstantDataArray::getString(VMContext, BitMap, false),
- "__TEXT,__objc_classname,cstring_literals", 1, true);
+ "__TEXT,__objc_classname,cstring_literals", CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2511,6 +2640,8 @@ llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
if (const RecordType *record = T->getAs<RecordType>()) {
BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion, true /*ByrefLayout */);
llvm::Constant *Result = getBitmapBlockLayout(true);
+ if (isa<llvm::ConstantInt>(Result))
+ Result = llvm::ConstantExpr::getIntToPtr(Result, CGM.Int8PtrTy);
return Result;
}
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
@@ -2699,7 +2830,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
// No special section, but goes in llvm.used
return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init,
- StringRef(), 0, true);
+ StringRef(), CGM.getPointerAlign(), true);
}
/*
@@ -2738,7 +2869,7 @@ CGObjCMac::EmitProtocolList(Twine Name,
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- 4, false);
+ CGM.getPointerAlign(), false);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
@@ -2779,15 +2910,26 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
const ObjCCommonTypesHelper &ObjCTypes) {
SmallVector<llvm::Constant *, 16> Properties;
llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+
+ auto AddProperty = [&](const ObjCPropertyDecl *PD) {
+ llvm::Constant *Prop[] = {GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)};
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ };
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ for (const ObjCCategoryDecl *ClassExt : OID->known_extensions())
+ for (auto *PD : ClassExt->properties()) {
+ PropertySet.insert(PD->getIdentifier());
+ AddProperty(PD);
+ }
for (const auto *PD : OCD->properties()) {
- PropertySet.insert(PD->getIdentifier());
- llvm::Constant *Prop[] = {
- GetPropertyName(PD->getIdentifier()),
- GetPropertyTypeString(PD, Container)
- };
- Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
- Prop));
+ // Don't emit duplicate metadata for properties that were already in a
+ // class extension.
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ AddProperty(PD);
}
+
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
for (const auto *P : OID->all_referenced_protocols())
PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
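
The class-extension handling above leans on the standard insert-returns-pair idiom: insert().second is false when the key was already seen, so extension properties win and later duplicates are skipped. The same idiom in isolation:

#include <set>
#include <string>
#include <vector>

std::vector<std::string> firstOccurrences(const std::vector<std::string> &names) {
  std::set<std::string> seen;
  std::vector<std::string> out;
  for (const auto &n : names)
    if (seen.insert(n).second)   // false => duplicate, skip it
      out.push_back(n);
  return out;
}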
@@ -2815,7 +2957,7 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
CreateMetadataVar(Name, Init,
(ObjCABI == 2) ? "__DATA, __objc_const" :
"__OBJC,__property,regular,no_dead_strip",
- (ObjCABI == 2) ? 8 : 4,
+ CGM.getPointerAlign(),
true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
@@ -2834,7 +2976,7 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
llvm::GlobalVariable *GV = CreateMetadataVar(
Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(),
- (ObjCABI == 2) ? 8 : 4, true);
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
}
@@ -2872,7 +3014,8 @@ CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
Values[1] = llvm::ConstantArray::get(AT, Methods);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.MethodDescriptionListPtrTy);
}
@@ -2944,7 +3087,8 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::GlobalVariable *GV =
CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
- "__OBJC,__category,regular,no_dead_strip", 4, true);
+ "__OBJC,__category,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
DefinedCategories.push_back(GV);
DefinedCategoryNames.insert(ExtName.str());
// Method definition entries must be cleared for the next implementation.
@@ -2952,10 +3096,24 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
}
enum FragileClassFlags {
+ /// Apparently: is not a meta-class.
FragileABI_Class_Factory = 0x00001,
+
+ /// Is a meta-class.
FragileABI_Class_Meta = 0x00002,
+
+ /// Has a non-trivial constructor or destructor.
FragileABI_Class_HasCXXStructors = 0x02000,
- FragileABI_Class_Hidden = 0x20000
+
+ /// Has hidden visibility.
+ FragileABI_Class_Hidden = 0x20000,
+
+ /// Class implementation was compiled under ARC.
+ FragileABI_Class_CompiledByARC = 0x04000000,
+
+ /// Class implementation was compiled under MRC and has MRC weak ivars.
+ /// Exclusive with CompiledByARC.
+ FragileABI_Class_HasMRCWeakIvars = 0x08000000,
};
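
Putting the fragile-ABI flag values together the way GenerateClass does further down (a simplified model; constants copied from the enum above):

#include <cstdint>

uint32_t fragileClassFlags(bool isMeta, bool hasCXXStructors, bool hidden,
                           bool compiledByARC, bool hasMRCWeakIvars) {
  uint32_t flags = isMeta ? 0x00002u : 0x00001u;   // Meta vs. Factory
  if (hasCXXStructors) flags |= 0x02000u;
  if (hidden)          flags |= 0x20000u;
  if (compiledByARC)   flags |= 0x04000000u;       // exclusive with the next flag
  else if (hasMRCWeakIvars) flags |= 0x08000000u;
  return flags;
}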
enum NonFragileClassFlags {
@@ -2965,7 +3123,7 @@ enum NonFragileClassFlags {
/// Is a root class.
NonFragileABI_Class_Root = 0x00002,
- /// Has a C++ constructor and destructor.
+ /// Has a non-trivial constructor or destructor.
NonFragileABI_Class_HasCXXStructors = 0x00004,
/// Has hidden visibility.
@@ -2981,9 +3139,46 @@ enum NonFragileClassFlags {
NonFragileABI_Class_CompiledByARC = 0x00080,
/// Class has non-trivial destructors, but zero-initialization is okay.
- NonFragileABI_Class_HasCXXDestructorOnly = 0x00100
+ NonFragileABI_Class_HasCXXDestructorOnly = 0x00100,
+
+ /// Class implementation was compiled under MRC and has MRC weak ivars.
+ /// Exclusive with CompiledByARC.
+ NonFragileABI_Class_HasMRCWeakIvars = 0x00200,
};
+static bool hasWeakMember(QualType type) {
+ if (type.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ return true;
+ }
+
+ if (auto recType = type->getAs<RecordType>()) {
+ for (auto field : recType->getDecl()->fields()) {
+ if (hasWeakMember(field->getType()))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// For compatibility, we only want to set the "HasMRCWeakIvars" flag
+/// (and actually fill in a layout string) if we really do have any
+/// __weak ivars.
+static bool hasMRCWeakIvars(CodeGenModule &CGM,
+ const ObjCImplementationDecl *ID) {
+ if (!CGM.getLangOpts().ObjCWeak) return false;
+ assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
+
+ for (const ObjCIvarDecl *ivar =
+ ID->getClassInterface()->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ if (hasWeakMember(ivar->getType()))
+ return true;
+ }
+
+ return false;
+}
+
/*
struct _objc_class {
Class isa;
@@ -3017,8 +3212,16 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
unsigned Flags = FragileABI_Class_Factory;
if (ID->hasNonZeroConstructors() || ID->hasDestructors())
Flags |= FragileABI_Class_HasCXXStructors;
- unsigned Size =
- CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
+
+ bool hasMRCWeak = false;
+
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ Flags |= FragileABI_Class_CompiledByARC;
+ else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
+ Flags |= FragileABI_Class_HasMRCWeakIvars;
+
+ CharUnits Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize();
// FIXME: Set CXX-structors flag.
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
@@ -3062,7 +3265,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Version is always 0.
Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
- Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size.getQuantity());
Values[ 6] = EmitIvarList(ID, false);
Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
"__OBJC,__inst_meth,regular,no_dead_strip",
@@ -3070,8 +3273,8 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
// cache is always NULL.
Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
Values[ 9] = Protocols;
- Values[10] = BuildIvarLayout(ID, true);
- Values[11] = EmitClassExtension(ID);
+ Values[10] = BuildStrongIvarLayout(ID, CharUnits::Zero(), Size);
+ Values[11] = EmitClassExtension(ID, Size, hasMRCWeak);
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
Values);
std::string Name("OBJC_CLASS_");
@@ -3084,10 +3287,10 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
"Forward metaclass reference has incorrect type.");
GV->setInitializer(Init);
GV->setSection(Section);
- GV->setAlignment(4);
+ GV->setAlignment(CGM.getPointerAlign().getQuantity());
CGM.addCompilerUsedGlobal(GV);
} else
- GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ GV = CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
// Method definition entries must be cleared for the next implementation.
@@ -3198,6 +3401,10 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
}
/*
+ Emit a "class extension", which in this specific context means extra
+ data that doesn't fit in the normal fragile-ABI class structure, and
+ has nothing to do with the language concept of a class extension.
+
struct objc_class_ext {
uint32_t size;
const char *weak_ivar_layout;
@@ -3205,13 +3412,15 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
};
*/
llvm::Constant *
-CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
+ CharUnits InstanceSize, bool hasMRCWeakIvars) {
uint64_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[1] = BuildIvarLayout(ID, false);
+ Values[1] = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
+ hasMRCWeakIvars);
Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
ID, ID->getClassInterface(), ObjCTypes);
@@ -3222,7 +3431,8 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
- "__OBJC,__class_ext,regular,no_dead_strip", 4, true);
+ "__OBJC,__class_ext,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
/*
@@ -3280,11 +3490,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
if (ForClass)
GV =
CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
- "__OBJC,__class_vars,regular,no_dead_strip", 4, true);
+ "__OBJC,__class_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
else
GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
- "__OBJC,__instance_vars,regular,no_dead_strip", 4,
- true);
+ "__OBJC,__instance_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
@@ -3334,7 +3545,8 @@ llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
Values[2] = llvm::ConstantArray::get(AT, Methods);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
@@ -3359,7 +3571,7 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::Constant *Init,
StringRef Section,
- unsigned Align,
+ CharUnits Align,
bool AddToUsed) {
llvm::Type *Ty = Init->getType();
llvm::GlobalVariable *GV =
@@ -3367,8 +3579,7 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::GlobalValue::PrivateLinkage, Init, Name);
if (!Section.empty())
GV->setSection(Section);
- if (Align)
- GV->setAlignment(Align);
+ GV->setAlignment(Align.getQuantity());
if (AddToUsed)
CGM.addCompilerUsedGlobal(GV);
return GV;
@@ -3421,16 +3632,16 @@ void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
}
namespace {
- struct PerformFragileFinally : EHScopeStack::Cleanup {
+ struct PerformFragileFinally final : EHScopeStack::Cleanup {
const Stmt &S;
- llvm::Value *SyncArgSlot;
- llvm::Value *CallTryExitVar;
- llvm::Value *ExceptionData;
+ Address SyncArgSlot;
+ Address CallTryExitVar;
+ Address ExceptionData;
ObjCTypesHelper &ObjCTypes;
PerformFragileFinally(const Stmt *S,
- llvm::Value *SyncArgSlot,
- llvm::Value *CallTryExitVar,
- llvm::Value *ExceptionData,
+ Address SyncArgSlot,
+ Address CallTryExitVar,
+ Address ExceptionData,
ObjCTypesHelper *ObjCTypes)
: S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
@@ -3447,7 +3658,7 @@ namespace {
CGF.EmitBlock(FinallyCallExit);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(),
- ExceptionData);
+ ExceptionData.getPointer());
CGF.EmitBlock(FinallyNoCallExit);
@@ -3568,7 +3779,7 @@ void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
void FragileHazards::emitHazardsInNewBlocks() {
if (Locals.empty()) return;
- CGBuilderTy Builder(CGF.getLLVMContext());
+ CGBuilderTy Builder(CGF, CGF.getLLVMContext());
// Iterate through all blocks, skipping those prior to the try.
for (llvm::Function::iterator
@@ -3607,6 +3818,10 @@ static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
if (V) S.insert(V);
}
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
+ if (V.isValid()) S.insert(V.getPointer());
+}
+
void FragileHazards::collectLocals() {
// Compute a set of allocas to ignore.
llvm::DenseSet<llvm::Value*> AllocasToIgnore;
@@ -3760,21 +3975,23 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// @synchronized. We can't avoid a temp here because we need the
// value to be preserved. If the backend ever does liveness
// correctly after setjmp, this will be unnecessary.
- llvm::Value *SyncArgSlot = nullptr;
+ Address SyncArgSlot = Address::invalid();
if (!isTry) {
llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncEnterFn(), SyncArg);
- SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(),
+ CGF.getPointerAlign(), "sync.arg");
CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
}
// Allocate memory for the setjmp buffer. This needs to be kept
// live throughout the try and catch blocks.
- llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
- "exceptiondata.ptr");
+ Address ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ CGF.getPointerAlign(),
+ "exceptiondata.ptr");
// Create the fragile hazards. Note that this will not capture any
// of the allocas required for exception processing, but will
@@ -3790,12 +4007,13 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// The setjmp-safety rule here is that we should always store to this
// variable in a place that dominates the branch through the cleanup
// without passing through any setjmps.
- llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
- "_call_try_exit");
+ Address CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ CharUnits::One(),
+ "_call_try_exit");
// A slot containing the exception to rethrow. Only needed when we
// have both a @catch and a @finally.
- llvm::Value *PropagatingExnVar = nullptr;
+ Address PropagatingExnVar = Address::invalid();
// Push a normal cleanup to leave the try scope.
CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalAndEHCleanup, &S,
@@ -3808,13 +4026,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// - Call objc_exception_try_enter to push ExceptionData on top of
// the EH stack.
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData);
+ ExceptionData.getPointer());
// - Call setjmp on the exception data buffer.
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP(
- ObjCTypes.ExceptionDataTy, ExceptionData, GEPIndexes, "setjmp_buffer");
+ ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes,
+ "setjmp_buffer");
llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(
ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
SetJmpResult->setCanReturnTwice();
@@ -3854,7 +4073,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// nothing can cross this so the value is already in SSA form.
llvm::CallInst *Caught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData, "caught");
+ ExceptionData.getPointer(), "caught");
// Push the exception to rethrow onto the EH value stack for the
// benefit of any @throws in the handlers.
@@ -3870,13 +4089,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Save the currently-propagating exception before
// objc_exception_try_enter clears the exception slot.
PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ CGF.getPointerAlign(),
"propagating_exception");
CGF.Builder.CreateStore(Caught, PropagatingExnVar);
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData);
+ ExceptionData.getPointer());
llvm::CallInst *SetJmpResult =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(),
@@ -3928,7 +4148,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
// These types work out because ConvertType(id) == i8*.
- CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ EmitInitOfCatchParam(CGF, Caught, CatchParam);
}
CGF.EmitStmt(CatchStmt->getCatchBody());
@@ -3975,7 +4195,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *Tmp =
CGF.Builder.CreateBitCast(Caught,
CGF.ConvertType(CatchParam->getType()));
- CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+ EmitInitOfCatchParam(CGF, Tmp, CatchParam);
CGF.EmitStmt(CatchStmt->getCatchBody());
@@ -4008,10 +4228,10 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Extract the new exception and save it to the
// propagating-exception slot.
- assert(PropagatingExnVar);
+ assert(PropagatingExnVar.isValid());
llvm::CallInst *NewCaught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData, "caught");
+ ExceptionData.getPointer(), "caught");
CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
// Don't pop the catch handler; the throw already did.
@@ -4036,14 +4256,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (CGF.HaveInsertPoint()) {
// If we have a propagating-exception variable, check it.
llvm::Value *PropagatingExn;
- if (PropagatingExnVar) {
+ if (PropagatingExnVar.isValid()) {
PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
// Otherwise, just look in the buffer for the exception to throw.
} else {
llvm::CallInst *Caught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData);
+ ExceptionData.getPointer());
PropagatingExn = Caught;
}
@@ -4083,14 +4303,13 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
/// object: objc_read_weak (id *src)
///
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
- llvm::Type* DestTy =
- cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ Address AddrWeakObj) {
+ llvm::Type* DestTy = AddrWeakObj.getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj, "weakread");
+ AddrWeakObj.getPointer(), "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -4099,7 +4318,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
/// objc_assign_weak (id src, id *dst)
///
void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -4110,7 +4329,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
return;
@@ -4120,7 +4339,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -4132,7 +4351,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
@@ -4146,7 +4365,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
///
void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
llvm::Value *ivarOffset) {
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
llvm::Type * SrcTy = src->getType();
@@ -4159,7 +4378,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst, ivarOffset };
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
return;
}
@@ -4168,7 +4387,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_strongCast (id src, id *dst)
///
void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -4179,19 +4398,19 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
- args, "weakassign");
+ args, "strongassign");
return;
}
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *args[] = { DestPtr, SrcPtr, size };
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -4283,7 +4502,7 @@ void CGObjCCommonMac::EmitImageInfo() {
// Indicate whether we're compiling this to run on a simulator.
const llvm::Triple &Triple = CGM.getTarget().getTriple();
- if (Triple.isiOS() &&
+ if ((Triple.isiOS() || Triple.isWatchOS()) &&
(Triple.getArch() == llvm::Triple::x86 ||
Triple.getArch() == llvm::Triple::x86_64))
Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated",
@@ -4312,7 +4531,8 @@ void CGObjCMac::EmitModuleInfo() {
};
CreateMetadataVar("OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
- "__OBJC,__module_info,regular,no_dead_strip", 4, true);
+ "__OBJC,__module_info,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
llvm::Constant *CGObjCMac::EmitModuleSymbols() {
@@ -4356,7 +4576,8 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(
- "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip", 4, true);
+ "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
@@ -4372,10 +4593,11 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
ObjCTypes.ClassPtrTy);
Entry = CreateMetadataVar(
"OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip", 4, true);
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, CGF.getPointerAlign());
}
llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF,
@@ -4388,23 +4610,25 @@ llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
return EmitClassRefFromId(CGF, II);
}
-llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lvalue) {
- llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) {
+ return CGF.Builder.CreateLoad(EmitSelectorAddr(CGF, Sel));
+}
+
+Address CGObjCMac::EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel) {
+ CharUnits Align = CGF.getPointerAlign();
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
Entry = CreateMetadataVar(
"OBJC_SELECTOR_REFERENCES_", Casted,
- "__OBJC,__message_refs,literal_pointers,no_dead_strip", 4, true);
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip", Align, true);
Entry->setExternallyInitialized(true);
}
- if (lvalue)
- return Entry;
- return CGF.Builder.CreateLoad(Entry);
+ return Address(Entry, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
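
The boolean `lvalue` parameter is retired in favor of two entry points, so lvalue uses of a selector reference keep the slot's alignment. The resulting call-site contrast, as a fragment mirroring the hunk above:

```cpp
// rvalue use: the SEL value itself, loaded through an aligned Address.
llvm::Value *SelValue = EmitSelector(CGF, Sel);
// lvalue use: the slot itself, now carrying its alignment as well.
Address SelSlot = EmitSelectorAddr(CGF, Sel);
// Previously both paths funneled through EmitSelector(CGF, Sel, /*lvalue=*/b),
// which could only hand back a bare llvm::Value*.
```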
@@ -4415,7 +4639,7 @@ llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::ConstantDataArray::getString(VMContext, RuntimeName),
((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4435,308 +4659,247 @@ llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
}
-void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
- unsigned int BytePos,
- bool ForStrongLayout,
- bool &HasUnion) {
+void IvarLayoutBuilder::visitRecord(const RecordType *RT,
+ CharUnits offset) {
const RecordDecl *RD = RT->getDecl();
- // FIXME - Use iterator.
- SmallVector<const FieldDecl*, 16> Fields(RD->fields());
- llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
- const llvm::StructLayout *RecLayout =
- CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty));
- BuildAggrIvarLayout(nullptr, RecLayout, RD, Fields, BytePos, ForStrongLayout,
- HasUnion);
-}
+ // If this is a union, remember that we had one, because it might mess
+ // up the ordering of layout entries.
+ if (RD->isUnion())
+ IsDisordered = true;
+
+ const ASTRecordLayout *recLayout = nullptr;
+ visitAggregate(RD->field_begin(), RD->field_end(), offset,
+ [&](const FieldDecl *field) -> CharUnits {
+ if (!recLayout)
+ recLayout = &CGM.getContext().getASTRecordLayout(RD);
+ auto offsetInBits = recLayout->getFieldOffset(field->getFieldIndex());
+ return CGM.getContext().toCharUnitsFromBits(offsetInBits);
+ });
+}
+
+template <class Iterator, class GetOffsetFn>
+void IvarLayoutBuilder::visitAggregate(Iterator begin, Iterator end,
+ CharUnits aggregateOffset,
+ const GetOffsetFn &getOffset) {
+ for (; begin != end; ++begin) {
+ auto field = *begin;
+
+ // Skip over bitfields.
+ if (field->isBitField()) {
+ continue;
+ }
-void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
- const llvm::StructLayout *Layout,
- const RecordDecl *RD,
- ArrayRef<const FieldDecl*> RecFields,
- unsigned int BytePos, bool ForStrongLayout,
- bool &HasUnion) {
- bool IsUnion = (RD && RD->isUnion());
- uint64_t MaxUnionIvarSize = 0;
- uint64_t MaxSkippedUnionIvarSize = 0;
- const FieldDecl *MaxField = nullptr;
- const FieldDecl *MaxSkippedField = nullptr;
- const FieldDecl *LastFieldBitfieldOrUnnamed = nullptr;
- uint64_t MaxFieldOffset = 0;
- uint64_t MaxSkippedFieldOffset = 0;
- uint64_t LastBitfieldOrUnnamedOffset = 0;
- uint64_t FirstFieldDelta = 0;
+ // Compute the offset of the field within the aggregate.
+ CharUnits fieldOffset = aggregateOffset + getOffset(field);
- if (RecFields.empty())
- return;
- unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
- if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
- const FieldDecl *FirstField = RecFields[0];
- FirstFieldDelta =
- ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ visitField(field, fieldOffset);
}
-
- for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
- const FieldDecl *Field = RecFields[i];
- uint64_t FieldOffset;
- if (RD) {
- // Note that 'i' here is actually the field index inside RD of Field,
- // although this dependency is hidden.
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
- } else
- FieldOffset =
- ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
+}
- // Skip over unnamed or bitfields
- if (!Field->getIdentifier() || Field->isBitField()) {
- LastFieldBitfieldOrUnnamed = Field;
- LastBitfieldOrUnnamedOffset = FieldOffset;
- continue;
- }
+/// Collect layout information for the given field into IvarsInfo.
+void IvarLayoutBuilder::visitField(const FieldDecl *field,
+ CharUnits fieldOffset) {
+ QualType fieldType = field->getType();
- LastFieldBitfieldOrUnnamed = nullptr;
- QualType FQT = Field->getType();
- if (FQT->isRecordType() || FQT->isUnionType()) {
- if (FQT->isUnionType())
- HasUnion = true;
+ // Drill down into arrays.
+ uint64_t numElts = 1;
+ while (auto arrayType = CGM.getContext().getAsConstantArrayType(fieldType)) {
+ numElts *= arrayType->getSize().getZExtValue();
+ fieldType = arrayType->getElementType();
+ }
- BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
- BytePos + FieldOffset,
- ForStrongLayout, HasUnion);
- continue;
- }
+ assert(!fieldType->isArrayType() && "ivar of non-constant array type?");
- if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
- const ConstantArrayType *CArray =
- dyn_cast_or_null<ConstantArrayType>(Array);
- uint64_t ElCount = CArray->getSize().getZExtValue();
- assert(CArray && "only array with known element size is supported");
- FQT = CArray->getElementType();
- while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
- const ConstantArrayType *CArray =
- dyn_cast_or_null<ConstantArrayType>(Array);
- ElCount *= CArray->getSize().getZExtValue();
- FQT = CArray->getElementType();
- }
- if (FQT->isRecordType() && ElCount) {
- int OldIndex = IvarsInfo.size() - 1;
- int OldSkIndex = SkipIvars.size() -1;
+ // If we ended up with a zero-sized array, we've done what we can do within
+ // the limits of this layout encoding.
+ if (numElts == 0) return;
- const RecordType *RT = FQT->getAs<RecordType>();
- BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
- ForStrongLayout, HasUnion);
+ // Recurse if the base element type is a record type.
+ if (auto recType = fieldType->getAs<RecordType>()) {
+ size_t oldEnd = IvarsInfo.size();
- // Replicate layout information for each array element. Note that
- // one element is already done.
- uint64_t ElIx = 1;
- for (int FirstIndex = IvarsInfo.size() - 1,
- FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
- uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
- for (int i = OldIndex+1; i <= FirstIndex; ++i)
- IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
- IvarsInfo[i].ivar_size));
- for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
- SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
- SkipIvars[i].ivar_size));
- }
- continue;
- }
- }
- // At this point, we are done with Record/Union and array there of.
- // For other arrays we are down to its element type.
- Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
-
- unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
- if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
- || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
- if (IsUnion) {
- uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
- if (UnionIvarSize > MaxUnionIvarSize) {
- MaxUnionIvarSize = UnionIvarSize;
- MaxField = Field;
- MaxFieldOffset = FieldOffset;
- }
- } else {
- IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
- FieldSize / WordSizeInBits));
- }
- } else if ((ForStrongLayout &&
- (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
- || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
- if (IsUnion) {
- // FIXME: Why the asymmetry? We divide by word size in bits on other
- // side.
- uint64_t UnionIvarSize = FieldSize / ByteSizeInBits;
- if (UnionIvarSize > MaxSkippedUnionIvarSize) {
- MaxSkippedUnionIvarSize = UnionIvarSize;
- MaxSkippedField = Field;
- MaxSkippedFieldOffset = FieldOffset;
+ visitRecord(recType, fieldOffset);
+
+ // If this is an array, replicate the first element's layout entries.
+ auto numEltEntries = IvarsInfo.size() - oldEnd;
+ if (numElts != 1 && numEltEntries != 0) {
+ CharUnits eltSize = CGM.getContext().getTypeSizeInChars(recType);
+ for (uint64_t eltIndex = 1; eltIndex != numElts; ++eltIndex) {
+ // Copy the first element's entries onto the end of the array,
+ // adjusting each offset by the element size times the element index.
+ for (size_t i = 0; i != numEltEntries; ++i) {
+ auto firstEntry = IvarsInfo[oldEnd + i];
+ IvarsInfo.push_back(IvarInfo(firstEntry.Offset + eltIndex * eltSize,
+ firstEntry.SizeInWords));
}
- } else {
- // FIXME: Why the asymmetry, we divide by byte size in bits here?
- SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
- FieldSize / ByteSizeInBits));
}
}
+
+ return;
}
- if (LastFieldBitfieldOrUnnamed) {
- if (LastFieldBitfieldOrUnnamed->isBitField()) {
- // Last field was a bitfield. Must update skip info.
- uint64_t BitFieldSize
- = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
- GC_IVAR skivar;
- skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
- skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
- + ((BitFieldSize % ByteSizeInBits) != 0);
- SkipIvars.push_back(skivar);
- } else {
- assert(!LastFieldBitfieldOrUnnamed->getIdentifier() &&"Expected unnamed");
- // Last field was unnamed. Must update skip info.
- unsigned FieldSize
- = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType());
- SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset,
- FieldSize / ByteSizeInBits));
- }
+ // Classify the element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), fieldType);
+
+ // If it matches what we're looking for, add an entry.
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ assert(CGM.getContext().getTypeSizeInChars(fieldType)
+ == CGM.getPointerSize());
+ IvarsInfo.push_back(IvarInfo(fieldOffset, numElts));
}
+}
- if (MaxField)
- IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
- MaxUnionIvarSize));
- if (MaxSkippedField)
- SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
- MaxSkippedUnionIvarSize));
-}
-
-/// BuildIvarLayoutBitmap - This routine is the horsework for doing all
-/// the computations and returning the layout bitmap (for ivar or blocks) in
-/// the given argument BitMap string container. Routine reads
-/// two containers, IvarsInfo and SkipIvars which are assumed to be
-/// filled already by the caller.
-llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
- unsigned int WordsToScan, WordsToSkip;
- llvm::Type *PtrTy = CGM.Int8PtrTy;
-
- // Build the string of skip/scan nibbles
- SmallVector<SKIP_SCAN, 32> SkipScanIvars;
- unsigned int WordSize =
- CGM.getTypes().getDataLayout().getTypeAllocSize(PtrTy);
- if (IvarsInfo[0].ivar_bytepos == 0) {
- WordsToSkip = 0;
- WordsToScan = IvarsInfo[0].ivar_size;
+/// buildBitmap - This routine does the heavy lifting of taking the offsets of
+/// strong/weak references and creating a bitmap. The bitmap is also
+/// returned in the given buffer, suitable for being passed to \c dump().
+llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
+ llvm::SmallVectorImpl<unsigned char> &buffer) {
+ // The bitmap is a series of skip/scan instructions, aligned to word
+ // boundaries. The skip is performed first.
+ const unsigned char MaxNibble = 0xF;
+ const unsigned char SkipMask = 0xF0, SkipShift = 4;
+ const unsigned char ScanMask = 0x0F, ScanShift = 0;
+
+ assert(!IvarsInfo.empty() && "generating bitmap for no data");
+
+ // Sort the ivar info on byte position in case we encountered a
+ // union nested in the ivar list.
+ if (IsDisordered) {
+ // This isn't a stable sort, but our algorithm should handle it fine.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
} else {
- WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize;
- WordsToScan = IvarsInfo[0].ivar_size;
- }
- for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
- unsigned int TailPrevGCObjC =
- IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
- if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
- // consecutive 'scanned' object pointers.
- WordsToScan += IvarsInfo[i].ivar_size;
- } else {
- // Skip over 'gc'able object pointer which lay over each other.
- if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
- continue;
- // Must skip over 1 or more words. We save current skip/scan values
- // and start a new pair.
- SKIP_SCAN SkScan;
- SkScan.skip = WordsToSkip;
- SkScan.scan = WordsToScan;
- SkipScanIvars.push_back(SkScan);
-
- // Skip the hole.
- SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
- SkScan.scan = 0;
- SkipScanIvars.push_back(SkScan);
- WordsToSkip = 0;
- WordsToScan = IvarsInfo[i].ivar_size;
+#ifndef NDEBUG
+ for (unsigned i = 1; i != IvarsInfo.size(); ++i) {
+ assert(IvarsInfo[i - 1].Offset <= IvarsInfo[i].Offset);
}
+#endif
}
- if (WordsToScan > 0) {
- SKIP_SCAN SkScan;
- SkScan.skip = WordsToSkip;
- SkScan.scan = WordsToScan;
- SkipScanIvars.push_back(SkScan);
- }
-
- if (!SkipIvars.empty()) {
- unsigned int LastIndex = SkipIvars.size()-1;
- int LastByteSkipped =
- SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
- LastIndex = IvarsInfo.size()-1;
- int LastByteScanned =
- IvarsInfo[LastIndex].ivar_bytepos +
- IvarsInfo[LastIndex].ivar_size * WordSize;
- // Compute number of bytes to skip at the tail end of the last ivar scanned.
- if (LastByteSkipped > LastByteScanned) {
- unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
- SKIP_SCAN SkScan;
- SkScan.skip = TotalWords - (LastByteScanned/WordSize);
- SkScan.scan = 0;
- SkipScanIvars.push_back(SkScan);
+ assert(IvarsInfo.back().Offset < InstanceEnd);
+
+ assert(buffer.empty());
+
+ // Skip the next N words.
+ auto skip = [&](unsigned numWords) {
+ assert(numWords > 0);
+
+ // Try to merge into the previous byte. Since scans happen second, we
+ // can't do this if it includes a scan.
+ if (!buffer.empty() && !(buffer.back() & ScanMask)) {
+ unsigned lastSkip = buffer.back() >> SkipShift;
+ if (lastSkip < MaxNibble) {
+ unsigned claimed = std::min(MaxNibble - lastSkip, numWords);
+ numWords -= claimed;
+ lastSkip += claimed;
+ buffer.back() = (lastSkip << SkipShift);
+ }
}
- }
- // Mini optimization of nibbles such that an 0xM0 followed by 0x0N is produced
- // as 0xMN.
- int SkipScan = SkipScanIvars.size()-1;
- for (int i = 0; i <= SkipScan; i++) {
- if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
- && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
- // 0xM0 followed by 0x0N detected.
- SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
- for (int j = i+1; j < SkipScan; j++)
- SkipScanIvars[j] = SkipScanIvars[j+1];
- --SkipScan;
+
+ while (numWords >= MaxNibble) {
+ buffer.push_back(MaxNibble << SkipShift);
+ numWords -= MaxNibble;
}
- }
-
- // Generate the string.
- for (int i = 0; i <= SkipScan; i++) {
- unsigned char byte;
- unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
- unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
- unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
- unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
-
- // first skip big.
- for (unsigned int ix = 0; ix < skip_big; ix++)
- BitMap += (unsigned char)(0xf0);
-
- // next (skip small, scan)
- if (skip_small) {
- byte = skip_small << 4;
- if (scan_big > 0) {
- byte |= 0xf;
- --scan_big;
- } else if (scan_small) {
- byte |= scan_small;
- scan_small = 0;
+ if (numWords) {
+ buffer.push_back(numWords << SkipShift);
+ }
+ };
+
+ // Scan the next N words.
+ auto scan = [&](unsigned numWords) {
+ assert(numWords > 0);
+
+ // Try to merge into the previous byte. Since scans happen second, we can
+ // do this even if it includes a skip.
+ if (!buffer.empty()) {
+ unsigned lastScan = (buffer.back() & ScanMask) >> ScanShift;
+ if (lastScan < MaxNibble) {
+ unsigned claimed = std::min(MaxNibble - lastScan, numWords);
+ numWords -= claimed;
+ lastScan += claimed;
+ buffer.back() = (buffer.back() & SkipMask) | (lastScan << ScanShift);
}
- BitMap += byte;
}
- // next scan big
- for (unsigned int ix = 0; ix < scan_big; ix++)
- BitMap += (unsigned char)(0x0f);
- // last scan small
- if (scan_small) {
- byte = scan_small;
- BitMap += byte;
+
+ while (numWords >= MaxNibble) {
+ buffer.push_back(MaxNibble << ScanShift);
+ numWords -= MaxNibble;
+ }
+ if (numWords) {
+ buffer.push_back(numWords << ScanShift);
+ }
+ };
+
+ // One past the end of the last scan.
+ unsigned endOfLastScanInWords = 0;
+ const CharUnits WordSize = CGM.getPointerSize();
+
+ // Consider all the scan requests.
+ for (auto &request : IvarsInfo) {
+ CharUnits beginOfScan = request.Offset - InstanceBegin;
+
+ // Ignore scan requests that don't start at an even multiple of the
+ // word size. We can't encode them.
+ if ((beginOfScan % WordSize) != 0) continue;
+
+ // Ignore scan requests that start before the instance start.
+ // This assumes that scans never span that boundary. The boundary
+ // isn't the true start of the ivars, because in the fragile-ARC case
+ // it's rounded up to word alignment, but the test above should leave
+ // us ignoring that possibility.
+ if (beginOfScan.isNegative()) {
+ assert(request.Offset + request.SizeInWords * WordSize <= InstanceBegin);
+ continue;
}
+
+ unsigned beginOfScanInWords = beginOfScan / WordSize;
+ unsigned endOfScanInWords = beginOfScanInWords + request.SizeInWords;
+
+ // If the scan starts some number of words after the last one ended,
+ // skip forward.
+ if (beginOfScanInWords > endOfLastScanInWords) {
+ skip(beginOfScanInWords - endOfLastScanInWords);
+
+ // Otherwise, start scanning where the last left off.
+ } else {
+ beginOfScanInWords = endOfLastScanInWords;
+
+ // If that leaves us with nothing to scan, ignore this request.
+ if (beginOfScanInWords >= endOfScanInWords) continue;
+ }
+
+ // Scan to the end of the request.
+ assert(beginOfScanInWords < endOfScanInWords);
+ scan(endOfScanInWords - beginOfScanInWords);
+ endOfLastScanInWords = endOfScanInWords;
}
- // null terminate string.
- unsigned char zero = 0;
- BitMap += zero;
- llvm::GlobalVariable *Entry = CreateMetadataVar(
+ if (buffer.empty())
+ return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+
+ // For GC layouts, emit a skip to the end of the allocation so that we
+ // have precise information about the entire thing. This isn't useful
+ // or necessary for the ARC-style layout strings.
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
+ unsigned lastOffsetInWords =
+ (InstanceEnd - InstanceBegin + WordSize - CharUnits::One()) / WordSize;
+ if (lastOffsetInWords > endOfLastScanInWords) {
+ skip(lastOffsetInWords - endOfLastScanInWords);
+ }
+ }
+
+ // Null terminate the string.
+ buffer.push_back(0);
+
+ bool isNonFragileABI = CGObjC.isNonFragileABI();
+
+ llvm::GlobalVariable *Entry = CGObjC.CreateMetadataVar(
"OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap, false),
- ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- 1, true);
- return getConstantGEP(VMContext, Entry, 0, 0);
+ llvm::ConstantDataArray::get(CGM.getLLVMContext(), buffer),
+ (isNonFragileABI ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ CharUnits::One(), true);
+ return getConstantGEP(CGM.getLLVMContext(), Entry, 0, 0);
}
/// BuildIvarLayout - Builds ivar layout bitmap for the class
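
The nibble encoding that the `skip()` and `scan()` lambdas produce is easy to check by hand: each byte holds a 4-bit "skip N words" count in the high nibble and a 4-bit "scan N words" count in the low nibble, with 0xF nibbles chaining for longer runs. A standalone, runnable model of the two lambdas; it prints the bytes for "skip 18 words, then scan 2 words" — a full skip nibble, a merged skip-3/scan-2 byte, and the terminator:

```cpp
// Standalone model of buildBitmap()'s skip/scan byte encoding.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const unsigned MaxNibble = 0xF;
  std::vector<unsigned char> buf;

  auto skip = [&](unsigned n) {
    // Merge into the previous byte only if it is scan-free (skips run first).
    if (!buf.empty() && !(buf.back() & 0x0F)) {
      unsigned last = buf.back() >> 4;
      unsigned claimed = std::min(MaxNibble - last, n);
      n -= claimed;
      buf.back() = (unsigned char)((last + claimed) << 4);
    }
    for (; n >= MaxNibble; n -= MaxNibble) buf.push_back(0xF0);
    if (n) buf.push_back((unsigned char)(n << 4));
  };
  auto scan = [&](unsigned n) {
    // Scans run second, so they can merge even into a byte that skips.
    if (!buf.empty()) {
      unsigned last = buf.back() & 0x0F;
      unsigned claimed = std::min(MaxNibble - last, n);
      n -= claimed;
      buf.back() = (unsigned char)((buf.back() & 0xF0) | (last + claimed));
    }
    for (; n >= MaxNibble; n -= MaxNibble) buf.push_back(0x0F);
    if (n) buf.push_back((unsigned char)n);
  };

  skip(18);          // 18 = 15 + 3  ->  0xF0 0x30
  scan(2);           // merges into the 0x30 byte  ->  0x32
  buf.push_back(0);  // null terminator, as in buildBitmap()

  for (unsigned char b : buf) printf("0x%02X ", b);  // 0xF0 0x32 0x00
  printf("\n");
  return 0;
}
```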
@@ -4755,62 +4918,75 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
/// 2. When ForStrongLayout is false, following ivars are scanned:
/// - __weak anything
///
-llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
- const ObjCImplementationDecl *OMD,
- bool ForStrongLayout) {
- bool hasUnion = false;
-
+llvm::Constant *
+CGObjCCommonMac::BuildIvarLayout(const ObjCImplementationDecl *OMD,
+ CharUnits beginOffset, CharUnits endOffset,
+ bool ForStrongLayout, bool HasMRCWeakIvars) {
+ // If this is MRC, and we're either building a strong layout or there
+ // are no weak ivars, bail out early.
llvm::Type *PtrTy = CGM.Int8PtrTy;
if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
- !CGM.getLangOpts().ObjCAutoRefCount)
+ !CGM.getLangOpts().ObjCAutoRefCount &&
+ (ForStrongLayout || !HasMRCWeakIvars))
return llvm::Constant::getNullValue(PtrTy);
const ObjCInterfaceDecl *OI = OMD->getClassInterface();
- SmallVector<const FieldDecl*, 32> RecFields;
- if (CGM.getLangOpts().ObjCAutoRefCount) {
+ SmallVector<const ObjCIvarDecl*, 32> ivars;
+
+ // GC layout strings include the complete object layout, possibly
+ // inaccurately in the non-fragile ABI; the runtime knows how to fix this
+ // up.
+ //
+ // ARC layout strings only include the class's ivars. In non-fragile
+ // runtimes, that means starting at InstanceStart, rounded up to word
+ // alignment. In fragile runtimes, there's no InstanceStart, so it means
+ // starting at the offset of the first ivar, rounded up to word alignment.
+ //
+ // MRC weak layout strings follow the ARC style.
+ CharUnits baseOffset;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
- RecFields.push_back(cast<FieldDecl>(IVD));
+ ivars.push_back(IVD);
+
+ if (isNonFragileABI()) {
+ baseOffset = beginOffset; // InstanceStart
+ } else if (!ivars.empty()) {
+ baseOffset =
+ CharUnits::fromQuantity(ComputeIvarBaseOffset(CGM, OMD, ivars[0]));
+ } else {
+ baseOffset = CharUnits::Zero();
+ }
+
+ baseOffset = baseOffset.RoundUpToAlignment(CGM.getPointerAlign());
}
else {
- SmallVector<const ObjCIvarDecl*, 32> Ivars;
- CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+ CGM.getContext().DeepCollectObjCIvars(OI, true, ivars);
- // FIXME: This is not ideal; we shouldn't have to do this copy.
- RecFields.append(Ivars.begin(), Ivars.end());
+ baseOffset = CharUnits::Zero();
}
- if (RecFields.empty())
+ if (ivars.empty())
return llvm::Constant::getNullValue(PtrTy);
- SkipIvars.clear();
- IvarsInfo.clear();
+ IvarLayoutBuilder builder(CGM, baseOffset, endOffset, ForStrongLayout);
+
+ builder.visitAggregate(ivars.begin(), ivars.end(), CharUnits::Zero(),
+ [&](const ObjCIvarDecl *ivar) -> CharUnits {
+ return CharUnits::fromQuantity(ComputeIvarBaseOffset(CGM, OMD, ivar));
+ });
- BuildAggrIvarLayout(OMD, nullptr, nullptr, RecFields, 0, ForStrongLayout,
- hasUnion);
- if (IvarsInfo.empty())
+ if (!builder.hasBitmapData())
return llvm::Constant::getNullValue(PtrTy);
- // Sort on byte position in case we encounterred a union nested in
- // the ivar list.
- if (hasUnion && !IvarsInfo.empty())
- std::sort(IvarsInfo.begin(), IvarsInfo.end());
- if (hasUnion && !SkipIvars.empty())
- std::sort(SkipIvars.begin(), SkipIvars.end());
-
- std::string BitMap;
- llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+
+ llvm::SmallVector<unsigned char, 4> buffer;
+ llvm::Constant *C = builder.buildBitmap(*this, buffer);
- if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ if (CGM.getLangOpts().ObjCGCBitmapPrint && !buffer.empty()) {
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
OMD->getClassInterface()->getName().str().c_str());
- const unsigned char *s = (const unsigned char*)BitMap.c_str();
- for (unsigned i = 0, e = BitMap.size(); i < e; i++)
- if (!(s[i] & 0xf0))
- printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
- else
- printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
- printf("\n");
+ builder.dump(buffer);
}
return C;
}
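
Putting BuildIvarLayout and buildBitmap together on a hypothetical class makes the output concrete. Assume three pointer-sized ivars at word offsets 0, 1, and 2, where only the first and third are object pointers; the derivation below is worked by hand from the algorithm above:

```cpp
// Hand-derived example (hypothetical class, ARC, word size = pointer size):
//
//   @interface Sample : NSObject { id a; intptr_t b; id c; }
//
// IvarsInfo for the strong layout: (word 0, 1 word), (word 2, 1 word).
//   scan(1)  -> 0x01         skip 0, scan 1
//   skip(1)  -> 0x01 0x10    no merge: the previous byte already scans
//   scan(1)  -> 0x01 0x11    merges into the skip byte
//   null     -> 0x01 0x11 0x00
//
// Under GC, a trailing skip out to InstanceEnd would be appended as well;
// ARC-style strings stop after the last scan, as buildBitmap() notes.
```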
@@ -4825,7 +5001,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4847,7 +5023,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4866,7 +5042,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4879,7 +5055,7 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
Entry = CreateMetadataVar(
"OBJC_PROP_NAME_ATTR_",
llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
- "__TEXT,__cstring,cstring_literals", 1, true);
+ "__TEXT,__cstring,cstring_literals", CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5583,8 +5759,14 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
std::string ClassName = ID->getObjCRuntimeNameAsString();
llvm::Constant *Values[10]; // 11 for 64bit targets!
+ CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
+ CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
+
+ bool hasMRCWeak = false;
if (CGM.getLangOpts().ObjCAutoRefCount)
flags |= NonFragileABI_Class_CompiledByARC;
+ else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
+ flags |= NonFragileABI_Class_HasMRCWeakIvars;
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
@@ -5592,7 +5774,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
// FIXME. For 64bit targets add 0 here.
Values[ 3] = (flags & NonFragileABI_Class_Meta)
? GetIvarLayoutName(nullptr, ObjCTypes)
- : BuildIvarLayout(ID, true);
+ : BuildStrongIvarLayout(ID, beginInstance, endInstance);
Values[ 4] = GetClassName(ID->getObjCRuntimeNameAsString());
// const struct _method_list_t * const baseMethods;
std::vector<llvm::Constant*> Methods;
@@ -5639,7 +5821,8 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
} else {
Values[ 7] = EmitIvarList(ID);
- Values[ 8] = BuildIvarLayout(ID, false);
+ Values[ 8] = BuildWeakIvarLayout(ID, beginInstance, endInstance,
+ hasMRCWeak);
Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
ID, ID->getClassInterface(), ObjCTypes);
}
@@ -5731,7 +5914,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Make this entry NULL for any iOS device target, any iOS simulator target,
// OS X with deployment target 10.9 or later.
const llvm::Triple &Triple = CGM.getTarget().getTriple();
- if (Triple.isiOS() || (Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 9)))
+ if (Triple.isiOS() || Triple.isWatchOS() ||
+ (Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 9)))
// This entry will be null.
ObjCEmptyVtableVar = nullptr;
else
@@ -5887,9 +6071,11 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getObjCRuntimeNameAsString();
+ CharUnits Align = CGF.getPointerAlign();
+
llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
if (PTGV)
- return CGF.Builder.CreateLoad(PTGV);
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
PTGV = new llvm::GlobalVariable(
CGM.getModule(),
Init->getType(), false,
@@ -5898,8 +6084,9 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
ProtocolName);
PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ PTGV->setAlignment(Align.getQuantity());
CGM.addCompilerUsedGlobal(PTGV);
- return CGF.Builder.CreateLoad(PTGV);
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
}
/// GenerateCategory - Build metadata for a category implementation.
@@ -6428,7 +6615,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
- IvarOffsetValue = CGF.Builder.CreateLoad(IvarOffsetValue, "ivar");
+ IvarOffsetValue = CGF.Builder.CreateAlignedLoad(IvarOffsetValue,
+ CGF.getSizeAlign(), "ivar");
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
@@ -6559,16 +6747,17 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
}
}
- llvm::Value *mref =
- CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
+ Address mref =
+ Address(CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy),
+ CGF.getPointerAlign());
// Update the message ref argument.
- args[1].RV = RValue::get(mref);
+ args[1].RV = RValue::get(mref.getPointer());
// Load the function to call from the message ref table.
- llvm::Value *callee =
- CGF.Builder.CreateStructGEP(ObjCTypes.MessageRefTy, mref, 0);
- callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
+ Address calleeAddr =
+ CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
+ llvm::Value *callee = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
@@ -6594,7 +6783,7 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
: EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
Receiver, CGF.getContext().getObjCIdType(),
- false, CallArgs, Method, ObjCTypes);
+ false, CallArgs, Method, Class, ObjCTypes);
}
llvm::GlobalVariable *
@@ -6617,6 +6806,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
IdentifierInfo *II,
bool Weak,
const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
@@ -6627,13 +6817,11 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
@@ -6644,12 +6832,13 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
CodeGenFunction &CGF) {
IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
- return EmitClassRefFromId(CGF, II, false, 0);
+ return EmitClassRefFromId(CGF, II, false, nullptr);
}
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
if (!Entry) {
@@ -6660,13 +6849,11 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
/// EmitMetaClassRef - Return a Value * of the address of _class_t
@@ -6675,6 +6862,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID,
bool Weak) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (!Entry) {
llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix());
@@ -6685,14 +6873,13 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
/// GetClass - Return a reference to the class for the given interface
@@ -6727,14 +6914,15 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// ...
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- llvm::Value *ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(
ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0));
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -6749,22 +6937,33 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(
- Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1));
+ Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
return (isVTableDispatchedSelector(Sel))
? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
- ObjCSuper, ObjCTypes.SuperPtrCTy,
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
true, CallArgs, Method)
: EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
- ObjCSuper, ObjCTypes.SuperPtrCTy,
- true, CallArgs, Method, ObjCTypes);
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, Class, ObjCTypes);
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
- Selector Sel, bool lval) {
+ Selector Sel) {
+ Address Addr = EmitSelectorAddr(CGF, Sel);
+
+ llvm::LoadInst* LI = CGF.Builder.CreateLoad(Addr);
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext, None));
+ return LI;
+}
+
+Address CGObjCNonFragileABIMac::EmitSelectorAddr(CodeGenFunction &CGF,
+ Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+ CharUnits Align = CGF.getPointerAlign();
if (!Entry) {
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
@@ -6774,23 +6973,19 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
Casted, "OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ Entry->setAlignment(Align.getQuantity());
CGM.addCompilerUsedGlobal(Entry);
}
- if (lval)
- return Entry;
- llvm::LoadInst* LI = CGF.Builder.CreateLoad(Entry);
-
- LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, None));
- return LI;
+ return Address(Entry, Align);
}
+
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
///
void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src,
- llvm::Value *dst,
+ Address dst,
llvm::Value *ivarOffset) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -6802,7 +6997,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst, ivarOffset };
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -6811,7 +7006,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
///
void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -6822,19 +7017,19 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
}
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *args[] = { DestPtr, SrcPtr, Size };
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -6843,13 +7038,12 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
///
llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
- llvm::Type* DestTy =
- cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ Address AddrWeakObj) {
+ llvm::Type *DestTy = AddrWeakObj.getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj, "weakread");
+ AddrWeakObj.getPointer(), "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -6858,7 +7052,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
/// objc_assign_weak (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -6869,7 +7063,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -6878,7 +7072,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -6890,7 +7084,7 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
@@ -7043,6 +7237,7 @@ CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
return new CGObjCNonFragileABIMac(CGM);
case ObjCRuntime::GNUstep:
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 2ac6bb2e8a93..7be9ae996040 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -136,12 +136,13 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
- V = CGF.Builder.CreateBitCast(V,
- llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
+ Address Addr(V, Alignment);
+ Addr = CGF.Builder.CreateElementBitCast(Addr,
+ llvm::Type::getIntNTy(CGF.getLLVMContext(),
Info->StorageSize));
- return LValue::MakeBitfield(V, *Info,
+ return LValue::MakeBitfield(Addr, *Info,
IvarTy.withCVRQualifiers(CVRQualifiers),
- Alignment);
+ AlignmentSource::Decl);
}
namespace {
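
The bitfield path above shows the general migration recipe: wrap the raw pointer in an `Address` as soon as its alignment is known, then use the Address-aware cast so the alignment survives. A fragment mirroring the hunk:

```cpp
// Old: bitcast the raw llvm::Value* and lose track of its alignment.
//   V = CGF.Builder.CreateBitCast(V, llvm::Type::getIntNPtrTy(...));
// New: carry the alignment along and change only the element type.
Address Addr(V, Alignment);
Addr = CGF.Builder.CreateElementBitCast(
    Addr, llvm::Type::getIntNTy(CGF.getLLVMContext(), Info->StorageSize));
// Addr still knows `Alignment`, so MakeBitfield can emit aligned accesses.
```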
@@ -152,7 +153,7 @@ namespace {
llvm::Constant *TypeInfo;
};
- struct CallObjCEndCatch : EHScopeStack::Cleanup {
+ struct CallObjCEndCatch final : EHScopeStack::Cleanup {
CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
@@ -255,24 +256,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
-
- llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
-
- switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
- case Qualifiers::OCL_Strong:
- CastExn = CGF.EmitARCRetainNonBlock(CastExn);
- // fallthrough
-
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- CGF.Builder.CreateStore(CastExn, CatchParamAddr);
- break;
-
- case Qualifiers::OCL_Weak:
- CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
- break;
- }
+ EmitInitOfCatchParam(CGF, CastExn, CatchParam);
}
CGF.ObjCEHValueStack.push_back(Exn);
@@ -296,8 +280,32 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitBlock(Cont.getBlock());
}
+void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
+ llvm::Value *exn,
+ const VarDecl *paramDecl) {
+
+ Address paramAddr = CGF.GetAddrOfLocalVar(paramDecl);
+
+ switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ exn = CGF.EmitARCRetainNonBlock(exn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(exn, paramAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(paramAddr, exn);
+ return;
+ }
+ llvm_unreachable("invalid ownership qualifier");
+}
+
namespace {
- struct CallSyncExit : EHScopeStack::Cleanup {
+ struct CallSyncExit final : EHScopeStack::Cleanup {
llvm::Value *SyncExitFn;
llvm::Value *SyncArg;
CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
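
Before the header changes: the new `EmitInitOfCatchParam` centralizes logic that was previously inlined in `EmitTryCatchStmt` and duplicated, less completely, in the fragile-runtime catch paths. Its dispatch on the @catch parameter's ownership, summarized as a fragment grounded in the switch above:

```cpp
// How the helper initializes a @catch parameter, by ObjC lifetime qualifier:
//   __strong (ARC)                       -> retain the exception, then store
//   none / explicit none / __autoreleasing -> plain store of the pointer
//   __weak                               -> objc_initWeak into the catch slot
// Both CGObjCMac catch paths earlier in this patch now call it the same way:
EmitInitOfCatchParam(CGF, CastExn, CatchParam);
```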
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 475254649866..28d88dd10be9 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -100,6 +100,10 @@ protected:
llvm::Constant *beginCatchFn,
llvm::Constant *endCatchFn,
llvm::Constant *exceptionRethrowFn);
+
+ void EmitInitOfCatchParam(CodeGenFunction &CGF, llvm::Value *exn,
+ const VarDecl *paramDecl);
+
/// Emits an \@synchronize() statement, using the \p syncEnterFn and
/// \p syncExitFn arguments as the functions called to lock and unlock
/// the object. This function can be called by subclasses that use
@@ -116,11 +120,16 @@ public:
/// this compilation unit with the runtime library.
virtual llvm::Function *ModuleInitFunction() = 0;
- /// Get a selector for the specified name and type values. The
- /// return value should have the LLVM type for pointer-to
+ /// Get a selector for the specified name and type values.
+ /// The result should have the LLVM type for ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) = 0;
+
+ /// Get the address of a selector for the specified name and type values.
+ /// This is a rarely-used language extension, but sadly it exists.
+ ///
+ /// The result should have the LLVM type for a pointer to
/// ASTContext::getObjCSelType().
- virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
- Selector Sel, bool lval=false) = 0;
+ virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) = 0;
/// Get a typed selector.
virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
@@ -133,7 +142,7 @@ public:
virtual llvm::Constant *GetEHType(QualType T) = 0;
/// Generate a constant string object.
- virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+ virtual ConstantAddress GenerateConstantString(const StringLiteral *) = 0;
/// Generate a category. A category contains a list of methods (and
/// accompanying metadata) and a list of protocols.
@@ -238,17 +247,17 @@ public:
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) = 0;
virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) = 0;
+ Address AddrWeakObj) = 0;
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) = 0;
+ llvm::Value *src, Address dest) = 0;
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal=false) = 0;
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) = 0;
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) = 0;
+ llvm::Value *src, Address dest) = 0;
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
@@ -259,15 +268,18 @@ public:
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) = 0;
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) = 0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
+
+ /// Returns an i8* which points to the byref layout information.
virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
QualType T) = 0;
+
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name,
bool Weak = false) = 0;
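Most of the signature churn in this header is one mechanical migration: bare
llvm::Value* pointers become Address, which carries the pointer together with
its known alignment so emitted loads and stores never have to guess. A toy
model of the abstraction, assuming only that the real class (declared in
lib/CodeGen/Address.h) exposes the members this patch uses:

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for CodeGen's Address: a pointer plus its alignment,
    // with an explicit invalid state replacing the old nullptr convention.
    class Address {
      void *Ptr = nullptr;
      uint64_t Align = 0; // bytes; zero only in the invalid state
      Address() = default;
    public:
      Address(void *P, uint64_t A) : Ptr(P), Align(A) { assert(P && A); }
      static Address invalid() { return Address(); }
      bool isValid() const { return Ptr != nullptr; }
      void *getPointer() const { assert(isValid()); return Ptr; }
      uint64_t getAlignment() const { assert(isValid()); return Align; }
    };

This is why later hunks test Address::invalid()/isValid() where they used to
compare a pointer against nullptr, and call getPointer() only at the actual
runtime-call boundaries.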
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 079ef7234d9c..8af39ceecdfe 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -55,6 +55,28 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
case BuiltinType::OCLImage2dArray:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image2d_array_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dDepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_depth_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayDepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_depth_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dMSAA:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_msaa_t"), ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayMSAA:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_msaa_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dMSAADepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_msaa_depth_t"),
+ ImgAddrSpc);
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.image2d_array_msaa_depth_t"),
+ ImgAddrSpc);
case BuiltinType::OCLImage3d:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image3d_t"), ImgAddrSpc);
@@ -63,5 +85,17 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
case BuiltinType::OCLEvent:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.event_t"), 0);
+ case BuiltinType::OCLClkEvent:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.clk_event_t"), 0);
+ case BuiltinType::OCLQueue:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.queue_t"), 0);
+ case BuiltinType::OCLNDRange:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.ndrange_t"), 0);
+ case BuiltinType::OCLReserveID:
+ return llvm::PointerType::get(
+ llvm::StructType::create(Ctx, "opencl.reserve_id_t"), 0);
}
}
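Each new case follows the pattern already established for image2d_t: the
OpenCL builtin type lowers to a pointer to a named opaque struct, so two
distinct image types can never unify in the IR type system. One case written
out as a compilable sketch (assumes LLVM headers; ImgAddrSpc is whatever
address space the target assigns to images):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // opencl.image2d_depth_t: an opaque struct whose identity is its name;
    // values of the type are just pointers in the image address space.
    llvm::Type *lowerImage2dDepth(llvm::LLVMContext &Ctx, unsigned ImgAddrSpc) {
      auto *ST = llvm::StructType::create(Ctx, "opencl.image2d_depth_t");
      return llvm::PointerType::get(ST, ImgAddrSpc);
    }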
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 81488398bb86..0ba7e0639acc 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -41,25 +41,29 @@ public:
/// \brief Region for constructs that do not require function outlining,
/// like 'for', 'sections', 'atomic' etc. directives.
InlinedRegion,
+ /// \brief Region with outlined function for standalone 'target' directive.
+ TargetRegion,
};
CGOpenMPRegionInfo(const CapturedStmt &CS,
const CGOpenMPRegionKind RegionKind,
- const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind)
+ const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
+ bool HasCancel)
: CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
- CodeGen(CodeGen), Kind(Kind) {}
+ CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
- const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind)
+ const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
+ bool HasCancel)
: CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
- Kind(Kind) {}
+ Kind(Kind), HasCancel(HasCancel) {}
/// \brief Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
virtual const VarDecl *getThreadIDVariable() const = 0;
/// \brief Emit the captured statement body.
- virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
+ void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
/// \brief Get an LValue for the current ThreadID variable.
/// \return LValue for thread id variable. This LValue always has type int32*.
@@ -69,6 +73,8 @@ public:
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
+ bool hasCancel() const { return HasCancel; }
+
static bool classof(const CGCapturedStmtInfo *Info) {
return Info->getKind() == CR_OpenMP;
}
@@ -77,6 +83,7 @@ protected:
CGOpenMPRegionKind RegionKind;
const RegionCodeGenTy &CodeGen;
OpenMPDirectiveKind Kind;
+ bool HasCancel;
};
/// \brief API for captured statement code generation in OpenMP constructs.
@@ -84,8 +91,9 @@ class CGOpenMPOutlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind)
- : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind),
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
+ HasCancel),
ThreadIDVar(ThreadIDVar) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
@@ -114,8 +122,8 @@ public:
CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind)
- : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind),
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
ThreadIDVar(ThreadIDVar) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
@@ -147,8 +155,9 @@ class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind)
- : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind), OldCSI(OldCSI),
+ OpenMPDirectiveKind Kind, bool HasCancel)
+ : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
+ OldCSI(OldCSI),
OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
// \brief Retrieve the value of the context parameter.
llvm::Value *getContextValue() const override {
@@ -156,7 +165,7 @@ public:
return OuterRegionInfo->getContextValue();
llvm_unreachable("No context value for inlined OpenMP region");
}
- virtual void setContextValue(llvm::Value *V) override {
+ void setContextValue(llvm::Value *V) override {
if (OuterRegionInfo) {
OuterRegionInfo->setContextValue(V);
return;
@@ -204,6 +213,29 @@ private:
CGOpenMPRegionInfo *OuterRegionInfo;
};
+/// \brief API for captured statement code generation in OpenMP target
+/// constructs. For these captures, implicit parameters are used instead of the
+/// captured fields.
+class CGOpenMPTargetRegionInfo : public CGOpenMPRegionInfo {
+public:
+ CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
+ const RegionCodeGenTy &CodeGen)
+ : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
+ /*HasCancel = */ false) {}
+
+ /// \brief This is unused for target regions because each starts executing
+ /// with a single thread.
+ const VarDecl *getThreadIDVariable() const override { return nullptr; }
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return ".omp_offloading."; }
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return CGOpenMPRegionInfo::classof(Info) &&
+ cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
+ }
+};
+
/// \brief RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
CodeGenFunction &CGF;
@@ -214,11 +246,11 @@ public:
/// a list of functions used for code generation of implicitly inlined
/// regions.
InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind)
+ OpenMPDirectiveKind Kind, bool HasCancel)
: CGF(CGF) {
// Start emission for the construct.
- CGF.CapturedStmtInfo =
- new CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, CodeGen, Kind);
+ CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
+ CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
}
~InlinedOpenMPRegionRAII() {
// Restore original CapturedStmtInfo only if we're done with code emission.
@@ -229,20 +261,25 @@ public:
}
};
-} // namespace
+} // anonymous namespace
+
+static LValue emitLoadOfPointerLValue(CodeGenFunction &CGF, Address PtrAddr,
+ QualType Ty) {
+ AlignmentSource Source;
+ CharUnits Align = CGF.getNaturalPointeeTypeAlignment(Ty, &Source);
+ return CGF.MakeAddrLValue(Address(CGF.Builder.CreateLoad(PtrAddr), Align),
+ Ty->getPointeeType(), Source);
+}
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
- return CGF.MakeNaturalAlignAddrLValue(
- CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(getThreadIDVariable()),
- CGF.PointerAlignInBytes),
- getThreadIDVariable()
- ->getType()
- ->castAs<PointerType>()
- ->getPointeeType());
+ return emitLoadOfPointerLValue(CGF,
+ CGF.GetAddrOfLocalVar(getThreadIDVariable()),
+ getThreadIDVariable()->getType());
}
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
+ if (!CGF.HaveInsertPoint())
+ return;
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -258,9 +295,9 @@ void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
CodeGenFunction &CGF) {
- return CGF.MakeNaturalAlignAddrLValue(
- CGF.GetAddrOfLocalVar(getThreadIDVariable()),
- getThreadIDVariable()->getType());
+ return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
+ getThreadIDVariable()->getType(),
+ AlignmentSource::Decl);
}
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
@@ -280,6 +317,25 @@ void CGOpenMPRuntime::clear() {
InternalVars.clear();
}
+// Layout information for ident_t.
+static CharUnits getIdentAlign(CodeGenModule &CGM) {
+ return CGM.getPointerAlign();
+}
+static CharUnits getIdentSize(CodeGenModule &CGM) {
+ assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
+ return CharUnits::fromQuantity(16) + CGM.getPointerSize();
+}
+static CharUnits getOffsetOfIdentField(CGOpenMPRuntime::IdentFieldIndex Field) {
+ // All the fields except the last are i32, so this works beautifully.
+ return unsigned(Field) * CharUnits::fromQuantity(4);
+}
+static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
+ CGOpenMPRuntime::IdentFieldIndex Field,
+ const llvm::Twine &Name = "") {
+ auto Offset = getOffsetOfIdentField(Field);
+ return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
+}
+
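The three helpers above hard-code the libomp ident_t layout: four i32 fields
followed by the psource pointer. A stand-alone model of the struct being
described (field names per the OpenMP runtime's kmp.h; the offsets are what
getOffsetOfIdentField computes on any target with 4-byte i32):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct ident_t {
      int32_t reserved_1;  // offset 0
      int32_t flags;       // offset 4: OMP_IDENT_KMPC and friends
      int32_t reserved_2;  // offset 8
      int32_t reserved_3;  // offset 12
      const char *psource; // offset 16: ";file;function;line;column;;"
    };

    int main() {
      // getIdentSize(): 16 bytes of i32 fields plus one pointer.
      std::printf("sizeof(ident_t) = %zu, psource at offset %zu\n",
                  sizeof(ident_t), offsetof(ident_t, psource));
    }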
llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
@@ -287,9 +343,17 @@ llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
"thread id variable must be of type kmp_int32 *");
const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt());
CodeGenFunction CGF(CGM, true);
- CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind);
+ bool HasCancel = false;
+ if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
+ HasCancel = OPD->hasCancel();
+ else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
+ HasCancel = OPSD->hasCancel();
+ else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
+ HasCancel = OPFD->hasCancel();
+ CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
+ HasCancel);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- return CGF.GenerateCapturedStmtFunction(*CS);
+ return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}
llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
@@ -300,13 +364,14 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
auto *CS = cast<CapturedStmt>(D.getAssociatedStmt());
CodeGenFunction CGF(CGM, true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
- InnermostKind);
+ InnermostKind,
+ cast<OMPTaskDirective>(D).hasCancel());
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
return CGF.GenerateCapturedStmtFunction(*CS);
}
-llvm::Value *
-CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
+Address CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
+ CharUnits Align = getIdentAlign(CGM);
llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
if (!Entry) {
if (!DefaultOpenMPPSource) {
@@ -315,7 +380,7 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
// Taken from
// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
DefaultOpenMPPSource =
- CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;");
+ CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
@@ -323,6 +388,7 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
CGM.getModule(), IdentTy, /*isConstant*/ true,
llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
DefaultOpenMPLocation->setUnnamedAddr(true);
+ DefaultOpenMPLocation->setAlignment(Align.getQuantity());
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
llvm::Constant *Values[] = {Zero,
@@ -330,10 +396,9 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
Zero, Zero, DefaultOpenMPPSource};
llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
DefaultOpenMPLocation->setInitializer(Init);
- OpenMPDefaultLocMap[Flags] = DefaultOpenMPLocation;
- return DefaultOpenMPLocation;
+ OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
}
- return Entry;
+ return Address(Entry, Align);
}
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
@@ -342,34 +407,33 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
// If no debug info is generated - return global default location.
if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo ||
Loc.isInvalid())
- return getOrCreateDefaultLocation(Flags);
+ return getOrCreateDefaultLocation(Flags).getPointer();
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- llvm::Value *LocValue = nullptr;
+ Address LocValue = Address::invalid();
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end())
- LocValue = I->second.DebugLoc;
+ LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
+
// OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
// GetOpenMPThreadID was called before this routine.
- if (LocValue == nullptr) {
+ if (!LocValue.isValid()) {
// Generate "ident_t .kmpc_loc.addr;"
- llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr");
- AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy));
+ Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
+ ".kmpc_loc.addr");
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
- Elem.second.DebugLoc = AI;
+ Elem.second.DebugLoc = AI.getPointer();
LocValue = AI;
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
- llvm::ConstantExpr::getSizeOf(IdentTy),
- CGM.PointerAlignInBytes);
+ CGM.getSize(getIdentSize(CGF.CGM)));
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
- auto *PSource = CGF.Builder.CreateConstInBoundsGEP2_32(IdentTy, LocValue, 0,
- IdentField_PSource);
+ Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
if (OMPDebugLoc == nullptr) {
@@ -389,7 +453,9 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
// *psource = ";<File>;<Function>;<Line>;<Column>;;";
CGF.Builder.CreateStore(OMPDebugLoc, PSource);
- return LocValue;
+ // Our callers always pass this to a runtime function, so for
+ // convenience, go ahead and return a naked pointer.
+ return LocValue.getPointer();
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
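For reference, the string stored through PSource follows the libomp
convention spelled out in the comment above, ";<File>;<Function>;<Line>;<Column>;;",
with ";unknown;unknown;0;0;;" as the default when no debug info is
available. A trivial formatter matching that shape (a hypothetical helper,
not in the patch):

    #include <cstdio>
    #include <string>

    // Builds a psource string: ";file;function;line;column;;".
    std::string formatPSource(const char *File, const char *Func,
                              unsigned Line, unsigned Col) {
      char Buf[512];
      std::snprintf(Buf, sizeof(Buf), ";%s;%s;%u;%u;;", File, Func, Line, Col);
      return Buf;
    }

    // formatPSource("t.c", "main", 3, 5) yields ";t.c;main;3;5;;".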
@@ -493,6 +559,17 @@ CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
break;
}
+ case OMPRTL__kmpc_critical_with_hint: {
+ // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit, uintptr_t hint);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy),
+ CGM.IntPtrTy};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
+ break;
+ }
case OMPRTL__kmpc_threadprivate_register: {
// Build void __kmpc_threadprivate_register(ident_t *, void *data,
// kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
@@ -838,10 +915,46 @@ CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
break;
}
+ case OMPRTL__tgt_target: {
+ // Build int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
+ // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
+ // *arg_types);
+ llvm::Type *TypeParams[] = {CGM.Int32Ty,
+ CGM.VoidPtrTy,
+ CGM.Int32Ty,
+ CGM.VoidPtrPtrTy,
+ CGM.VoidPtrPtrTy,
+ CGM.SizeTy->getPointerTo(),
+ CGM.Int32Ty->getPointerTo()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
+ break;
+ }
}
return RTLFn;
}
+static llvm::Value *getTypeSize(CodeGenFunction &CGF, QualType Ty) {
+ auto &C = CGF.getContext();
+ llvm::Value *Size = nullptr;
+ auto SizeInChars = C.getTypeSizeInChars(Ty);
+ if (SizeInChars.isZero()) {
+ // getTypeSizeInChars() returns 0 for a VLA.
+ while (auto *VAT = C.getAsVariableArrayType(Ty)) {
+ llvm::Value *ArraySize;
+ std::tie(ArraySize, Ty) = CGF.getVLASize(VAT);
+ Size = Size ? CGF.Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
+ }
+ SizeInChars = C.getTypeSizeInChars(Ty);
+ assert(!SizeInChars.isZero());
+ Size = CGF.Builder.CreateNUWMul(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, SizeInChars.getQuantity()));
+ } else
+ Size = llvm::ConstantInt::get(CGF.SizeTy, SizeInChars.getQuantity());
+ return Size;
+}
+
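getTypeSize's VLA branch is the interesting one: getTypeSizeInChars returns
zero for a variable-length array, so the size is rebuilt as the product of
the runtime dimension values times the size of the remaining element type,
using non-wrapping multiplies. The same computation in plain C++ (a model of
the value being computed, not of the IR-building code):

    #include <cstddef>

    // For double a[n][m]: ElemSize = sizeof(double), Dims = {n, m}, Rank = 2.
    // Each multiply corresponds to a CreateNUWMul in the emitted IR.
    size_t vlaSizeInBytes(size_t ElemSize, const size_t *Dims, unsigned Rank) {
      size_t Size = ElemSize;
      for (unsigned I = 0; I != Rank; ++I)
        Size *= Dims[I];
      return Size;
    }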
llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
@@ -939,25 +1052,27 @@ CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
Twine(CGM.getMangledName(VD)) + ".cache.");
}
-llvm::Value *CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- llvm::Value *VDAddr,
- SourceLocation Loc) {
+Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return VDAddr;
- auto VarTy = VDAddr->getType()->getPointerElementType();
+ auto VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
- return CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args);
+ return Address(CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ VDAddr.getAlignment());
}
void CGOpenMPRuntime::emitThreadPrivateVarInit(
- CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor,
+ CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
@@ -967,14 +1082,15 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
llvm::Value *Args[] = {OMPLoc,
- CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
- const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
+ const VarDecl *VD, Address VDAddr, SourceLocation Loc,
bool PerformInit, CodeGenFunction *CGF) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
@@ -1001,21 +1117,19 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
/*isVariadic=*/false);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto Fn = CGM.CreateGlobalInitOrDestructFunction(
- FTy, ".__kmpc_global_ctor_.", Loc);
+ FTy, ".__kmpc_global_ctor_.", FI, Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, SourceLocation());
auto ArgVal = CtorCGF.EmitLoadOfScalar(
- CtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
- auto Arg = CtorCGF.Builder.CreatePointerCast(
- ArgVal,
- CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
+ Address Arg = Address(ArgVal, VDAddr.getAlignment());
+ Arg = CtorCGF.Builder.CreateElementBitCast(Arg,
+ CtorCGF.ConvertTypeForMem(ASTTy));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
- CtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
CtorCGF.FinishFunction();
@@ -1035,14 +1149,13 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
/*isVariadic=*/false);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto Fn = CGM.CreateGlobalInitOrDestructFunction(
- FTy, ".__kmpc_global_dtor_.", Loc);
+ FTy, ".__kmpc_global_dtor_.", FI, Loc);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
SourceLocation());
auto ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
- CGM.getContext().VoidPtrTy, Dst.getLocation());
- DtorCGF.emitDestroy(ArgVal, ASTTy,
+ /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
+ DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
@@ -1074,7 +1187,8 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
auto InitFunctionTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
- InitFunctionTy, ".__omp_threadprivate_init_.");
+ InitFunctionTy, ".__omp_threadprivate_init_.",
+ CGM.getTypes().arrangeNullaryFunction());
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
@@ -1149,25 +1263,27 @@ static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
- llvm::Value *CapturedStruct,
+ ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) {
+ if (!CGF.HaveInsertPoint())
+ return;
auto *RTLoc = emitUpdateLocation(CGF, Loc);
- auto &&ThenGen =
- [this, OutlinedFn, CapturedStruct, RTLoc](CodeGenFunction &CGF) {
- // Build call __kmpc_fork_call(loc, 1, microtask,
- // captured_struct/*context*/)
- llvm::Value *Args[] = {
- RTLoc,
- CGF.Builder.getInt32(
- 1), // Number of arguments after 'microtask' argument
- // (there is only one additional argument - 'context')
- CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()),
- CGF.EmitCastToVoidPtr(CapturedStruct)};
- auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call);
- CGF.EmitRuntimeCall(RTLFn, Args);
- };
- auto &&ElseGen = [this, OutlinedFn, CapturedStruct, RTLoc, Loc](
- CodeGenFunction &CGF) {
+ auto &&ThenGen = [this, OutlinedFn, CapturedVars,
+ RTLoc](CodeGenFunction &CGF) {
+ // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
+ llvm::Value *Args[] = {
+ RTLoc,
+ CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
+ CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
+ llvm::SmallVector<llvm::Value *, 16> RealArgs;
+ RealArgs.append(std::begin(Args), std::end(Args));
+ RealArgs.append(CapturedVars.begin(), CapturedVars.end());
+
+ auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ CGF.EmitRuntimeCall(RTLFn, RealArgs);
+ };
+ auto &&ElseGen = [this, OutlinedFn, CapturedVars, RTLoc,
+ Loc](CodeGenFunction &CGF) {
auto ThreadID = getThreadID(CGF, Loc);
// Build calls:
// __kmpc_serialized_parallel(&Loc, GTid);
@@ -1177,11 +1293,14 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// OutlinedFn(&GTid, &zero, CapturedStruct);
auto ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- auto Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32,
- /*Signed*/ true);
- auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
+ Address ZeroAddr =
+ CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
+ /*Name*/ ".zero.addr");
CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct};
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
// __kmpc_end_serialized_parallel(&Loc, GTid);
@@ -1203,8 +1322,8 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// regular serial code region, get thread ID by calling kmp_int32
// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
// return the address of that temp.
-llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
- SourceLocation Loc) {
+Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc) {
if (auto OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
@@ -1215,7 +1334,7 @@ llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
CGF.EmitStoreOfScalar(ThreadID,
- CGF.MakeNaturalAlignAddrLValue(ThreadIDTemp, Int32Ty));
+ CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
return ThreadIDTemp;
}
@@ -1246,7 +1365,7 @@ llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
}
namespace {
-template <size_t N> class CallEndCleanup : public EHScopeStack::Cleanup {
+template <size_t N> class CallEndCleanup final : public EHScopeStack::Cleanup {
llvm::Value *Callee;
llvm::Value *Args[N];
@@ -1257,39 +1376,50 @@ public:
std::copy(CleanupArgs.begin(), CleanupArgs.end(), std::begin(Args));
}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
CGF.EmitRuntimeCall(Callee, Args);
}
};
-} // namespace
+} // anonymous namespace
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
- SourceLocation Loc) {
- // __kmpc_critical(ident_t *, gtid, Lock);
+ SourceLocation Loc, const Expr *Hint) {
+ // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
// CriticalOpGen();
// __kmpc_end_critical(ident_t *, gtid, Lock);
// Prepare arguments and build a call to __kmpc_critical
- {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- getCriticalRegionLock(CriticalName)};
+ if (!CGF.HaveInsertPoint())
+ return;
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ getCriticalRegionLock(CriticalName)};
+ if (Hint) {
+ llvm::SmallVector<llvm::Value *, 8> ArgsWithHint(std::begin(Args),
+ std::end(Args));
+ auto *HintVal = CGF.EmitScalarExpr(Hint);
+ ArgsWithHint.push_back(
+ CGF.Builder.CreateIntCast(HintVal, CGM.IntPtrTy, /*isSigned=*/false));
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical_with_hint),
+ ArgsWithHint);
+ } else
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical), Args);
- // Build a call to __kmpc_end_critical
- CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
- NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_critical),
- llvm::makeArrayRef(Args));
- emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
- }
+ // Build a call to __kmpc_end_critical
+ CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_critical),
+ llvm::makeArrayRef(Args));
+ emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
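The restructured emitCriticalRegion now selects between two runtime entry
points; written out as ordinary calls, the emitted sequence amounts to the
sketch below (runtime parameter types collapsed to void*/int for brevity;
the real cleanup also runs __kmpc_end_critical on exceptional exits):

    #include <cstdint>

    extern "C" {
    void __kmpc_critical(void *Loc, int GTid, void *Lock);
    void __kmpc_critical_with_hint(void *Loc, int GTid, void *Lock,
                                   uintptr_t Hint);
    void __kmpc_end_critical(void *Loc, int GTid, void *Lock);
    }

    // #pragma omp critical [(name)] [hint(h)] lowers to roughly this.
    void criticalRegion(void *Loc, int GTid, void *Lock,
                        const uintptr_t *Hint, void (*Body)()) {
      if (Hint)
        __kmpc_critical_with_hint(Loc, GTid, Lock, *Hint); // new entry point
      else
        __kmpc_critical(Loc, GTid, Lock);
      Body();
      __kmpc_end_critical(Loc, GTid, Lock);
    }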
static void emitIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
- OpenMPDirectiveKind Kind,
+ OpenMPDirectiveKind Kind, SourceLocation Loc,
const RegionCodeGenTy &BodyOpGen) {
llvm::Value *CallBool = CGF.EmitScalarConversion(
IfCond,
CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true),
- CGF.getContext().BoolTy);
+ CGF.getContext().BoolTy, Loc);
auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
auto *ContBlock = CGF.createBasicBlock("omp_if.end");
@@ -1305,6 +1435,8 @@ static void emitIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// if(__kmpc_master(ident_t *, gtid)) {
// MasterOpGen();
// __kmpc_end_master(ident_t *, gtid);
@@ -1315,17 +1447,20 @@ void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_master), Args);
typedef CallEndCleanup<std::extent<decltype(Args)>::value>
MasterCallEndCleanup;
- emitIfStmt(CGF, IsMaster, OMPD_master, [&](CodeGenFunction &CGF) -> void {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- CGF.EHStack.pushCleanup<MasterCallEndCleanup>(
- NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_master),
- llvm::makeArrayRef(Args));
- MasterOpGen(CGF);
- });
+ emitIfStmt(
+ CGF, IsMaster, OMPD_master, Loc, [&](CodeGenFunction &CGF) -> void {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CGF.EHStack.pushCleanup<MasterCallEndCleanup>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_master),
+ llvm::makeArrayRef(Args));
+ MasterOpGen(CGF);
+ });
}
void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call __kmpc_omp_taskyield(loc, thread_id, 0);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
@@ -1336,6 +1471,8 @@ void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// __kmpc_taskgroup(ident_t *, gtid);
// TaskgroupOpGen();
// __kmpc_end_taskgroup(ident_t *, gtid);
@@ -1352,6 +1489,21 @@ void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
}
}
+/// Given an array of pointers to variables, project the address of a
+/// given variable.
+static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
+ unsigned Index, const VarDecl *Var) {
+ // Pull out the pointer to the variable.
+ Address PtrAddr =
+ CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
+
+ Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
+ Addr = CGF.Builder.CreateElementBitCast(
+ Addr, CGF.ConvertTypeForMem(Var->getType()));
+ return Addr;
+}
+
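emitAddrOfVarFromArray is the read side of the void*[n] copyprivate list
that emitSingleRegion builds further down: index the array, load the slot,
and re-type it as a pointer to the variable's own type (the IR version also
records the variable's declared alignment on the result). The C++-level
equivalent is just:

    // Slot Index of a void*[n] list, recovered as a typed pointer.
    template <typename VarT>
    VarT *addrOfVarFromArray(void **Array, unsigned Index) {
      return static_cast<VarT *>(Array[Index]);
    }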
static llvm::Value *emitCopyprivateCopyFunction(
CodeGenModule &CGM, llvm::Type *ArgsType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
@@ -1371,40 +1523,31 @@ static llvm::Value *emitCopyprivateCopyFunction(
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
".omp.copyprivate.copy_func", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn);
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
- auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
- auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
// *(Type0*)Dst[0] = *(Type0*)Src[0];
// *(Type1*)Dst[1] = *(Type1*)Src[1];
// ...
// *(Typen*)Dst[n] = *(Typen*)Src[n];
for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
- auto *DestAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(nullptr, LHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
- auto *SrcAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(nullptr, RHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
+ auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
+ Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
+
+ auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
+ Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
+
auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
QualType Type = VD->getType();
- CGF.EmitOMPCopy(CGF, Type, DestAddr, SrcAddr,
- cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl()),
- AssignmentOps[I]);
+ CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
}
CGF.FinishFunction();
return Fn;
@@ -1417,6 +1560,8 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> DstExprs,
ArrayRef<const Expr *> AssignmentOps) {
+ if (!CGF.HaveInsertPoint())
+ return;
assert(CopyprivateVars.size() == SrcExprs.size() &&
CopyprivateVars.size() == DstExprs.size() &&
CopyprivateVars.size() == AssignmentOps.size());
@@ -1430,13 +1575,12 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
- llvm::AllocaInst *DidIt = nullptr;
+ Address DidIt = Address::invalid();
if (!CopyprivateVars.empty()) {
// int32 did_it = 0;
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
- CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(0), DidIt,
- DidIt->getAlignment());
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
}
// Prepare arguments and build a call to __kmpc_single
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
@@ -1444,52 +1588,51 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_single), Args);
typedef CallEndCleanup<std::extent<decltype(Args)>::value>
SingleCallEndCleanup;
- emitIfStmt(CGF, IsSingle, OMPD_single, [&](CodeGenFunction &CGF) -> void {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- CGF.EHStack.pushCleanup<SingleCallEndCleanup>(
- NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single),
- llvm::makeArrayRef(Args));
- SingleOpGen(CGF);
- if (DidIt) {
- // did_it = 1;
- CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(1), DidIt,
- DidIt->getAlignment());
- }
- });
+ emitIfStmt(
+ CGF, IsSingle, OMPD_single, Loc, [&](CodeGenFunction &CGF) -> void {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CGF.EHStack.pushCleanup<SingleCallEndCleanup>(
+ NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single),
+ llvm::makeArrayRef(Args));
+ SingleOpGen(CGF);
+ if (DidIt.isValid()) {
+ // did_it = 1;
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
+ }
+ });
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
- if (DidIt) {
+ if (DidIt.isValid()) {
llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
auto CopyprivateArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
// Create a list of all private variables for copyprivate.
- auto *CopyprivateList =
+ Address CopyprivateList =
CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
- auto *Elem = CGF.Builder.CreateStructGEP(
- CopyprivateList->getAllocatedType(), CopyprivateList, I);
- CGF.Builder.CreateAlignedStore(
+ Address Elem = CGF.Builder.CreateConstArrayGEP(
+ CopyprivateList, I, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(CopyprivateVars[I]).getAddress(), CGF.VoidPtrTy),
- Elem, CGM.PointerAlignInBytes);
+ CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
}
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
auto *CpyFn = emitCopyprivateCopyFunction(
CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
- auto *BufSize = llvm::ConstantInt::get(
- CGM.SizeTy, C.getTypeSizeInChars(CopyprivateArrayTy).getQuantity());
- auto *CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
- CGF.VoidPtrTy);
- auto *DidItVal =
- CGF.Builder.CreateAlignedLoad(DidIt, CGF.PointerAlignInBytes);
+ auto *BufSize = getTypeSize(CGF, CopyprivateArrayTy);
+ Address CL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
+ CGF.VoidPtrTy);
+ auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
BufSize, // size_t <buf_size>
- CL, // void *<copyprivate list>
+ CL.getPointer(), // void *<copyprivate list>
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
@@ -1499,26 +1642,30 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
- SourceLocation Loc) {
+ SourceLocation Loc, bool IsThreads) {
+ if (!CGF.HaveInsertPoint())
+ return;
// __kmpc_ordered(ident_t *, gtid);
// OrderedOpGen();
// __kmpc_end_ordered(ident_t *, gtid);
// Prepare arguments and build a call to __kmpc_ordered
- {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ if (IsThreads) {
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_ordered), Args);
// Build a call to __kmpc_end_ordered
CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_ordered),
llvm::makeArrayRef(Args));
- emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
+ emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPDirectiveKind Kind,
- bool CheckForCancel) {
+ OpenMPDirectiveKind Kind, bool EmitChecks,
+ bool ForceSimpleCall) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call __kmpc_cancel_barrier(loc, thread_id);
// Build call __kmpc_barrier(loc, thread_id);
OpenMPLocationFlags Flags = OMP_IDENT_KMPC;
@@ -1538,16 +1685,19 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
}
// Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
// thread_id);
+ auto *OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
+  // Do not emit a barrier call inside the single directive that is emitted in
+  // some rare cases for sections directives.
+ if (OMPRegionInfo && OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
getThreadID(CGF, Loc)};
- if (auto *OMPRegionInfo =
- dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- auto CancelDestination =
- CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
- if (CancelDestination.isValid()) {
+ if (OMPRegionInfo) {
+ if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
auto *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
- if (CheckForCancel) {
+ if (EmitChecks) {
// if (__kmpc_cancel_barrier()) {
// exit from construct;
// }
@@ -1557,6 +1707,8 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// exit from construct;
+ auto CancelDestination =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDestination);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
}
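With hasCancel() threaded through the region-info classes, the barrier
emission reduces to the following decision; "return true" stands for the
EmitBranchThroughCleanup jump to the construct's cancellation exit (a plain
restatement, runtime types elided):

    extern "C" {
    void __kmpc_barrier(void *Loc, int GTid);
    int __kmpc_cancel_barrier(void *Loc, int GTid);
    }

    // Returns true when the caller must branch to the cancellation exit.
    bool emitBarrier(void *Loc, int GTid, bool RegionHasCancel,
                     bool ForceSimpleCall, bool EmitChecks) {
      if (!ForceSimpleCall && RegionHasCancel) {
        int Res = __kmpc_cancel_barrier(Loc, GTid);
        return EmitChecks && Res != 0; // nonzero means cancelled
      }
      __kmpc_barrier(Loc, GTid);
      return false;
    }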
@@ -1623,65 +1775,87 @@ bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
return Schedule != OMP_sch_static;
}
-void CGOpenMPRuntime::emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPScheduleClauseKind ScheduleKind,
- unsigned IVSize, bool IVSigned, bool Ordered,
- llvm::Value *IL, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST,
- llvm::Value *Chunk) {
+void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk) {
+ if (!CGF.HaveInsertPoint())
+ return;
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
- if (Ordered ||
- (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
- Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked)) {
- // Call __kmpc_dispatch_init(
- // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
- // kmp_int[32|64] lower, kmp_int[32|64] upper,
- // kmp_int[32|64] stride, kmp_int[32|64] chunk);
+ assert(Ordered ||
+ (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
+ Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked));
+ // Call __kmpc_dispatch_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
+ // kmp_int[32|64] lower, kmp_int[32|64] upper,
+ // kmp_int[32|64] stride, kmp_int[32|64] chunk);
+
+ // If the Chunk was not specified in the clause - use default value 1.
+ if (Chunk == nullptr)
+ Chunk = CGF.Builder.getIntN(IVSize, 1);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ CGF.Builder.getIntN(IVSize, 0), // Lower
+ UB, // Upper
+ CGF.Builder.getIntN(IVSize, 1), // Stride
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
+}
+void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
+ assert(!Ordered);
+ assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked);
+
+ // Call __kmpc_for_static_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ if (Chunk == nullptr) {
+ assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
+ "expected static non-chunked schedule");
// If the Chunk was not specified in the clause - use default value 1.
- if (Chunk == nullptr)
Chunk = CGF.Builder.getIntN(IVSize, 1);
- llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
- getThreadID(CGF, Loc),
- CGF.Builder.getInt32(Schedule), // Schedule type
- CGF.Builder.getIntN(IVSize, 0), // Lower
- UB, // Upper
- CGF.Builder.getIntN(IVSize, 1), // Stride
- Chunk // Chunk
- };
- CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
} else {
- // Call __kmpc_for_static_init(
- // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
- // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
- // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
- // kmp_int[32|64] incr, kmp_int[32|64] chunk);
- if (Chunk == nullptr) {
- assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
- "expected static non-chunked schedule");
- // If the Chunk was not specified in the clause - use default value 1.
- Chunk = CGF.Builder.getIntN(IVSize, 1);
- } else
- assert((Schedule == OMP_sch_static_chunked ||
- Schedule == OMP_ord_static_chunked) &&
- "expected static chunked schedule");
- llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
- getThreadID(CGF, Loc),
- CGF.Builder.getInt32(Schedule), // Schedule type
- IL, // &isLastIter
- LB, // &LB
- UB, // &UB
- ST, // &Stride
- CGF.Builder.getIntN(IVSize, 1), // Incr
- Chunk // Chunk
- };
- CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
+ assert((Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static_chunked) &&
+ "expected static chunked schedule");
}
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &LB
+ UB.getPointer(), // &UB
+ ST.getPointer(), // &Stride
+ CGF.Builder.getIntN(IVSize, 1), // Incr
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
}
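What was one function with an if/else is now two emitters whose assertions
partition the schedule space. Roughly (schedule enumerators abbreviated; the
ord_static variants count as static here because they share
__kmpc_for_static_init):

    enum Sched { SchStatic, SchStaticChunked, OrdStatic, OrdStaticChunked,
                 SchDynamic, SchGuided, SchRuntime };

    // emitForDispatchInit asserts this predicate; emitForStaticInit asserts
    // its negation (plus !Ordered), matching the old combined condition.
    bool usesDispatchInit(Sched S, bool Ordered) {
      bool IsStatic = S == SchStatic || S == SchStaticChunked ||
                      S == OrdStatic || S == OrdStaticChunked;
      return Ordered || !IsStatic;
    }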
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc)};
@@ -1693,6 +1867,8 @@ void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned IVSize,
bool IVSigned) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc)};
@@ -1701,30 +1877,32 @@ void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
- bool IVSigned, llvm::Value *IL,
- llvm::Value *LB, llvm::Value *UB,
- llvm::Value *ST) {
+ bool IVSigned, Address IL,
+ Address LB, Address UB,
+ Address ST) {
// Call __kmpc_dispatch_next(
// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
// kmp_int[32|64] *p_stride);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), getThreadID(CGF, Loc),
- IL, // &isLastIter
- LB, // &Lower
- UB, // &Upper
- ST // &Stride
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &Lower
+ UB.getPointer(), // &Upper
+ ST.getPointer() // &Stride
};
llvm::Value *Call =
CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
return CGF.EmitScalarConversion(
Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
- CGF.getContext().BoolTy);
+ CGF.getContext().BoolTy, Loc);
}
void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
@@ -1736,6 +1914,8 @@ void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Constants for proc bind value accepted by the runtime.
enum ProcBindTy {
ProcBindFalse = 0,
@@ -1768,6 +1948,8 @@ void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call void __kmpc_flush(ident_t *loc)
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
emitUpdateLocation(CGF, Loc));
@@ -1785,7 +1967,7 @@ enum KmpTaskTFields {
/// \brief Function with call of destructors for private variables.
KmpTaskTDestructors,
};
-} // namespace
+} // anonymous namespace
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
if (!KmpRoutineEntryPtrTy) {
@@ -1799,14 +1981,15 @@ void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
}
}
-static void addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
- QualType FieldTy) {
+static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
+ QualType FieldTy) {
auto *Field = FieldDecl::Create(
C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
/*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
Field->setAccess(AS_public);
DC->addDecl(Field);
+ return Field;
}
namespace {
@@ -1820,11 +2003,10 @@ struct PrivateHelpersTy {
const VarDecl *PrivateElemInit;
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
-} // namespace
+} // anonymous namespace
static RecordDecl *
-createPrivatesRecordDecl(CodeGenModule &CGM,
- const ArrayRef<PrivateDataTy> Privates) {
+createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
if (!Privates.empty()) {
auto &C = CGM.getContext();
// Build struct .kmp_privates_t. {
@@ -1833,9 +2015,16 @@ createPrivatesRecordDecl(CodeGenModule &CGM,
auto *RD = C.buildImplicitRecord(".kmp_privates.t");
RD->startDefinition();
for (auto &&Pair : Privates) {
- auto Type = Pair.second.Original->getType();
+ auto *VD = Pair.second.Original;
+ auto Type = VD->getType();
Type = Type.getNonReferenceType();
- addFieldToRecordDecl(C, RD, Type);
+ auto *FD = addFieldToRecordDecl(C, RD, Type);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ FD->addAttr(*I);
+ }
}
RD->completeDefinition();
return RD;
@@ -1865,7 +2054,7 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, QualType KmpInt32Ty,
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
- const ArrayRef<PrivateDataTy> Privates) {
+ ArrayRef<PrivateDataTy> Privates) {
auto &C = CGM.getContext();
// Build struct kmp_task_t_with_privates {
// kmp_task_t task_data;
@@ -1900,7 +2089,8 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
- /*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy);
+ /*Id=*/nullptr,
+ KmpTaskTWithPrivatesPtrQTy.withRestrict());
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
FunctionType::ExtInfo Info;
@@ -1911,7 +2101,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
auto *TaskEntry =
llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
".omp_task_entry.", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskEntryFnInfo, TaskEntry);
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args);
@@ -1919,12 +2109,9 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
// TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
// tt->task_data.shareds);
auto *GtidParam = CGF.EmitLoadOfScalar(
- CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false,
- C.getTypeAlignInChars(KmpInt32Ty).getQuantity(), KmpInt32Ty, Loc);
- auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
- LValue TDBase =
- CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
+ CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
+ LValue TDBase = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskTypeArg), KmpTaskTWithPrivatesPtrQTy);
auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
LValue Base =
@@ -1945,7 +2132,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivatesLVal.getAddress(), CGF.VoidPtrTy);
+ PrivatesLVal.getPointer(), CGF.VoidPtrTy);
} else {
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
@@ -1955,7 +2142,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
CGF.EmitCallOrInvoke(TaskFunction, CallArgs);
CGF.EmitStoreThroughLValue(
RValue::get(CGF.Builder.getInt32(/*C=*/0)),
- CGF.MakeNaturalAlignAddrLValue(CGF.ReturnValue, KmpInt32Ty));
+ CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
CGF.FinishFunction();
return TaskEntry;
}
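For orientation, the ".omp_task_entry." proxy assembled above has roughly
this shape once lowered (only the kmp_task_t fields used here are spelled
out; names on this side are stand-ins, and the body's signature follows the
comment in the hunk above):

    #include <cstdint>

    struct kmp_task_t {  // leading fields of the runtime task descriptor
      void *shareds;     // captured shared variables
      void *routine;     // the entry point itself
      int32_t part_id;
      // destructors and the privates block follow in the real layout
    };

    typedef void (*TaskBodyFn)(int32_t GTid, int32_t PartId, void *Privates,
                               void *PrivatesMap, void *Shareds);

    // The generated proxy: unpack the descriptor, call the outlined body
    // with the privates block and mapping function, and always return 0.
    int32_t taskEntry(int32_t GTid, kmp_task_t *TT, TaskBodyFn Body,
                      void *Privates, void *PrivatesMap) {
      Body(GTid, TT->part_id, Privates, PrivatesMap, TT->shareds);
      return 0;
    }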
@@ -1969,7 +2156,8 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
- /*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy);
+ /*Id=*/nullptr,
+ KmpTaskTWithPrivatesPtrQTy.withRestrict());
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
FunctionType::ExtInfo Info;
@@ -1980,16 +2168,15 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
auto *DestructorFn =
llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
".omp_task_destructor.", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, DestructorFnInfo, DestructorFn);
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
+ DestructorFnInfo);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
Args);
- auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
- LValue Base =
- CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
+ LValue Base = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskTypeArg), KmpTaskTWithPrivatesPtrQTy);
auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
@@ -2017,10 +2204,10 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
/// \endcode
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
- const ArrayRef<const Expr *> PrivateVars,
- const ArrayRef<const Expr *> FirstprivateVars,
+ ArrayRef<const Expr *> PrivateVars,
+ ArrayRef<const Expr *> FirstprivateVars,
QualType PrivatesQTy,
- const ArrayRef<PrivateDataTy> Privates) {
+ ArrayRef<PrivateDataTy> Privates) {
auto &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl TaskPrivatesArg(
@@ -2058,8 +2245,8 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
auto *TaskPrivatesMap = llvm::Function::Create(
TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
".omp_task_privates_map.", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskPrivatesMapFnInfo,
- TaskPrivatesMap);
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
+ TaskPrivatesMapFnInfo);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
@@ -2067,22 +2254,17 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
TaskPrivatesMapFnInfo, Args);
// *privi = &.privates.privi;
- auto *TaskPrivatesArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskPrivatesArg), CGM.PointerAlignInBytes);
- LValue Base =
- CGF.MakeNaturalAlignAddrLValue(TaskPrivatesArgAddr, PrivatesQTy);
+ LValue Base = emitLoadOfPointerLValue(
+ CGF, CGF.GetAddrOfLocalVar(&TaskPrivatesArg), TaskPrivatesArg.getType());
auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
Counter = 0;
for (auto *Field : PrivatesQTyRD->fields()) {
auto FieldLVal = CGF.EmitLValueForField(Base, Field);
auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
- auto RefLVal = CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(VD),
- VD->getType());
- auto RefLoadRVal = CGF.EmitLoadOfLValue(RefLVal, Loc);
- CGF.EmitStoreOfScalar(
- FieldLVal.getAddress(),
- CGF.MakeNaturalAlignAddrLValue(RefLoadRVal.getScalarVal(),
- RefLVal.getType()->getPointeeType()));
+ auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ auto RefLoadLVal =
+ emitLoadOfPointerLValue(CGF, RefLVal.getAddress(), RefLVal.getType());
+ CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
++Counter;
}
CGF.FinishFunction();
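// Editor's sketch: per the "*privi = &.privates.privi;" comment above, the
// generated mapper simply publishes the address of each private copy through
// the matching out-parameter (parameter names illustrative only):
//
//   void .omp_task_privates_map.(.privates. *privs,
//                                T1 **priv1, ..., Tn **privn) {
//     *priv1 = &privs->priv1;
//     ...
//     *privn = &privs->privn;
//   }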
@@ -2097,13 +2279,15 @@ static int array_pod_sort_comparator(const PrivateDataTy *P1,
void CGOpenMPRuntime::emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
- llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
ArrayRef<const Expr *> FirstprivateCopies,
ArrayRef<const Expr *> FirstprivateInits,
ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependences) {
+ if (!CGF.HaveInsertPoint())
+ return;
auto &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 8> Privates;
// Aggregate privates and sort them by the alignment.
@@ -2111,7 +2295,7 @@ void CGOpenMPRuntime::emitTaskCall(
for (auto *E : PrivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.push_back(std::make_pair(
- C.getTypeAlignInChars(VD->getType()),
+ C.getDeclAlign(VD),
PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr)));
++I;
@@ -2121,7 +2305,7 @@ void CGOpenMPRuntime::emitTaskCall(
for (auto *E : FirstprivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.push_back(std::make_pair(
- C.getTypeAlignInChars(VD->getType()),
+ C.getDeclAlign(VD),
PrivateHelpersTy(
VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
@@ -2146,8 +2330,7 @@ void CGOpenMPRuntime::emitTaskCall(
C.getPointerType(KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
- auto KmpTaskTWithPrivatesTySize =
- CGM.getSize(C.getTypeSizeInChars(KmpTaskTWithPrivatesQTy));
+ auto *KmpTaskTWithPrivatesTySize = getTypeSize(CGF, KmpTaskTWithPrivatesQTy);
QualType SharedsPtrTy = C.getPointerType(SharedsTy);
// Emit initial values for private copies (if any).
@@ -2188,12 +2371,12 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.Builder.getInt32(/*C=*/0))
: CGF.Builder.getInt32(Final.getInt() ? FinalFlag : 0);
TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
- auto SharedsSize = C.getTypeSizeInChars(SharedsTy);
- llvm::Value *AllocArgs[] = {
- emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), TaskFlags,
- KmpTaskTWithPrivatesTySize, CGM.getSize(SharedsSize),
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskEntry,
- KmpRoutineEntryPtrTy)};
+ auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
+ llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
+ getThreadID(CGF, Loc), TaskFlags,
+ KmpTaskTWithPrivatesTySize, SharedsSize,
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskEntry, KmpRoutineEntryPtrTy)};
auto *NewTask = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -2204,12 +2387,15 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
- llvm::Value *KmpTaskSharedsPtr = nullptr;
+ Address KmpTaskSharedsPtr = Address::invalid();
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
- KmpTaskSharedsPtr = CGF.EmitLoadOfScalar(
- CGF.EmitLValueForField(
- TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
- Loc);
+ KmpTaskSharedsPtr =
+ Address(CGF.EmitLoadOfScalar(
+ CGF.EmitLValueForField(
+ TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
+ KmpTaskTShareds)),
+ Loc),
+ CGF.getNaturalTypeAlignment(SharedsTy));
CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
}
// Emit initial values for private copies (if any).
@@ -2220,7 +2406,7 @@ void CGOpenMPRuntime::emitTaskCall(
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
LValue SharedsBase;
if (!FirstprivateVars.empty()) {
- SharedsBase = CGF.MakeNaturalAlignAddrLValue(
+ SharedsBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
SharedsTy);
@@ -2237,6 +2423,9 @@ void CGOpenMPRuntime::emitTaskCall(
auto *SharedField = CapturesInfo.lookup(OriginalVD);
auto SharedRefLValue =
CGF.EmitLValueForField(SharedsBase, SharedField);
+ SharedRefLValue = CGF.MakeAddrLValue(
+ Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
+ SharedRefLValue.getType(), AlignmentSource::Decl);
QualType Type = OriginalVD->getType();
if (Type->isArrayType()) {
// Initialize firstprivate array.
@@ -2251,10 +2440,10 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitOMPAggregateAssign(
PrivateLValue.getAddress(), SharedRefLValue.getAddress(),
Type, [&CGF, Elem, Init, &CapturesInfo](
- llvm::Value *DestElement, llvm::Value *SrcElement) {
+ Address DestElement, Address SrcElement) {
// Clean up any temporaries needed by the initialization.
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SrcElement]() -> llvm::Value *{
+ InitScope.addPrivate(Elem, [SrcElement]() -> Address {
return SrcElement;
});
(void)InitScope.Privatize();
@@ -2268,7 +2457,7 @@ void CGOpenMPRuntime::emitTaskCall(
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SharedRefLValue]() -> llvm::Value *{
+ InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
return SharedRefLValue.getAddress();
});
(void)InitScope.Privatize();
@@ -2298,15 +2487,15 @@ void CGOpenMPRuntime::emitTaskCall(
Destructor);
// Process list of dependences.
- llvm::Value *DependInfo = nullptr;
- unsigned DependencesNumber = Dependences.size();
- if (!Dependences.empty()) {
+ Address DependenciesArray = Address::invalid();
+ unsigned NumDependencies = Dependences.size();
+ if (NumDependencies) {
// Dependence kind for RTL.
- enum RTLDependenceKindTy { DepIn = 1, DepOut = 2, DepInOut = 3 };
+ enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
RecordDecl *KmpDependInfoRD;
- QualType FlagsTy = C.getIntTypeForBitwidth(
- C.toBits(C.getTypeSizeInChars(C.BoolTy)), /*Signed=*/false);
+ QualType FlagsTy =
+ C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
if (KmpDependInfoTy.isNull()) {
KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
@@ -2319,25 +2508,37 @@ void CGOpenMPRuntime::emitTaskCall(
} else {
KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
}
+ CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
// Define type kmp_depend_info[<Dependences.size()>];
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
- KmpDependInfoTy, llvm::APInt(/*numBits=*/64, Dependences.size()),
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
ArrayType::Normal, /*IndexTypeQuals=*/0);
// kmp_depend_info[<Dependences.size()>] deps;
- DependInfo = CGF.CreateMemTemp(KmpDependInfoArrayTy);
- for (unsigned i = 0; i < DependencesNumber; ++i) {
- auto Addr = CGF.EmitLValue(Dependences[i].second);
- auto *Size = llvm::ConstantInt::get(
- CGF.SizeTy,
- C.getTypeSizeInChars(Dependences[i].second->getType()).getQuantity());
- auto Base = CGF.MakeNaturalAlignAddrLValue(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, i),
+ DependenciesArray = CGF.CreateMemTemp(KmpDependInfoArrayTy);
+ for (unsigned i = 0; i < NumDependencies; ++i) {
+ const Expr *E = Dependences[i].second;
+ auto Addr = CGF.EmitLValue(E);
+ llvm::Value *Size;
+ QualType Ty = E->getType();
+ if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ LValue UpAddrLVal =
+ CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
+ llvm::Value *UpAddr =
+ CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
+ llvm::Value *LowIntPtr =
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
+ llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
+ Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
+ } else
+ Size = getTypeSize(CGF, Ty);
+ auto Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
KmpDependInfoTy);
// deps[i].base_addr = &<Dependences[i].second>;
auto BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(
- CGF.Builder.CreatePtrToInt(Addr.getAddress(), CGF.IntPtrTy),
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
BaseAddrLVal);
// deps[i].len = sizeof(<Dependences[i].second>);
auto LenLVal = CGF.EmitLValueForField(
@@ -2349,12 +2550,13 @@ void CGOpenMPRuntime::emitTaskCall(
case OMPC_DEPEND_in:
DepKind = DepIn;
break;
+ // Out and InOut dependencies must use the same code.
case OMPC_DEPEND_out:
- DepKind = DepOut;
- break;
case OMPC_DEPEND_inout:
DepKind = DepInOut;
break;
+ case OMPC_DEPEND_source:
+ case OMPC_DEPEND_sink:
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
@@ -2363,8 +2565,8 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
}
- DependInfo = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, 0),
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
CGF.VoidPtrTy);
}
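// Editor's sketch: the record populated above mirrors the runtime's
// kmp_depend_info (field order from RTLDependInfoFieldsTy; the authoritative
// definition lives in the OpenMP runtime's kmp.h, so treat this as an
// assumption):
//
//   struct kmp_depend_info {
//     intptr_t base_addr; // address of the dependence object
//     size_t   len;       // size in bytes (array-section aware, see above)
//     kmp_uint8 flags;    // DepIn = 0x01, DepInOut = 0x03
//   };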
@@ -2378,40 +2580,48 @@ void CGOpenMPRuntime::emitTaskCall(
// list is not empty
auto *ThreadID = getThreadID(CGF, Loc);
auto *UpLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *TaskArgs[] = {UpLoc, ThreadID, NewTask};
- llvm::Value *DepTaskArgs[] = {
- UpLoc,
- ThreadID,
- NewTask,
- DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
- DependInfo,
- DependInfo ? CGF.Builder.getInt32(0) : nullptr,
- DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
- auto &&ThenCodeGen = [this, DependInfo, &TaskArgs,
- &DepTaskArgs](CodeGenFunction &CGF) {
- // TODO: add check for untied tasks.
- CGF.EmitRuntimeCall(
- createRuntimeFunction(DependInfo ? OMPRTL__kmpc_omp_task_with_deps
- : OMPRTL__kmpc_omp_task),
- DependInfo ? makeArrayRef(DepTaskArgs) : makeArrayRef(TaskArgs));
+ llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
+ llvm::Value *DepTaskArgs[7];
+ if (NumDependencies) {
+ DepTaskArgs[0] = UpLoc;
+ DepTaskArgs[1] = ThreadID;
+ DepTaskArgs[2] = NewTask;
+ DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
+ DepTaskArgs[4] = DependenciesArray.getPointer();
+ DepTaskArgs[5] = CGF.Builder.getInt32(0);
+ DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ auto &&ThenCodeGen = [this, NumDependencies,
+ &TaskArgs, &DepTaskArgs](CodeGenFunction &CGF) {
+ // TODO: add check for untied tasks.
+ if (NumDependencies) {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps),
+ DepTaskArgs);
+ } else {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
+ TaskArgs);
+ }
};
typedef CallEndCleanup<std::extent<decltype(TaskArgs)>::value>
IfCallEndCleanup;
- llvm::Value *DepWaitTaskArgs[] = {
- UpLoc,
- ThreadID,
- DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
- DependInfo,
- DependInfo ? CGF.Builder.getInt32(0) : nullptr,
- DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
+
+ llvm::Value *DepWaitTaskArgs[6];
+ if (NumDependencies) {
+ DepWaitTaskArgs[0] = UpLoc;
+ DepWaitTaskArgs[1] = ThreadID;
+ DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
+ DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
+ DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
auto &&ElseCodeGen = [this, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
- DependInfo, &DepWaitTaskArgs](CodeGenFunction &CGF) {
+ NumDependencies, &DepWaitTaskArgs](CodeGenFunction &CGF) {
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
- if (DependInfo)
+ if (NumDependencies)
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
DepWaitTaskArgs);
// Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
@@ -2429,6 +2639,7 @@ void CGOpenMPRuntime::emitTaskCall(
llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
CGF.EmitCallOrInvoke(TaskEntry, OutlinedFnArgs);
};
+
if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
} else {
@@ -2437,8 +2648,89 @@ void CGOpenMPRuntime::emitTaskCall(
}
}
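// Editor's sketch: the runtime call sequence emitted by emitTaskCall, using
// the entry points referenced above (privates setup and the if-clause
// handling elided):
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags,
//                                         sizeof_kmp_task_t_with_privates,
//                                         sizeof_shareds, &.omp_task_entry.);
//   // ... copy shareds, emit initial values for private copies ...
//   if (ndeps)
//     __kmpc_omp_task_with_deps(loc, gtid, t, ndeps, dep_list,
//                               /*ndeps_noalias=*/0,
//                               /*noalias_dep_list=*/NULL);
//   else
//     __kmpc_omp_task(loc, gtid, t);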
+/// \brief Emit a reduction operation, LHS op= RHS, for each element of an
+/// array (required for array sections).
+/// \param Type Type of array.
+/// \param LHSVar Variable on the left side of the reduction operation
+/// (references element of array in original variable).
+/// \param RHSVar Variable on the right side of the reduction operation
+/// (references element of array in original variable).
+/// \param RedOpGen Generator of reduction operation with use of LHSVar and
+/// RHSVar.
+static void EmitOMPAggregateReduction(
+ CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
+ const VarDecl *RHSVar,
+ const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
+ const Expr *, const Expr *)> &RedOpGen,
+ const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
+ const Expr *UpExpr = nullptr) {
+ // Perform element-by-element initialization.
+ QualType ElementTy;
+ Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
+ Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
+
+ // Drill down to the base element type on both arrays.
+ auto ArrayTy = Type->getAsArrayTypeUnsafe();
+ auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
+
+ auto RHSBegin = RHSAddr.getPointer();
+ auto LHSBegin = LHSAddr.getPointer();
+ // Cast from pointer to array type to pointer to single element.
+ auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
+ // The basic structure here is a while-do loop.
+ auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
+ auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
+ auto IsEmpty =
+ CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+
+ CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
+ RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ RHSElementPHI->addIncoming(RHSBegin, EntryBB);
+ Address RHSElementCurrent =
+ Address(RHSElementPHI,
+ RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
+ LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ LHSElementPHI->addIncoming(LHSBegin, EntryBB);
+ Address LHSElementCurrent =
+ Address(LHSElementPHI,
+ LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ // Emit copy.
+ CodeGenFunction::OMPPrivateScope Scope(CGF);
+ Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
+ Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
+ Scope.Privatize();
+ RedOpGen(CGF, XExpr, EExpr, UpExpr);
+ Scope.ForceCleanup();
+
+ // Shift the address forward by one element.
+ auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
+ // Check whether we've reached the end.
+ auto Done =
+ CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
+ CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
+ RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
+
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
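// Editor's sketch: for an array-section reduction such as
//   #pragma omp parallel for reduction(+ : a[0:n])
// the helper above amounts to the element loop below, with LHSVar/RHSVar
// rebound to the current element addresses before each RedOpGen invocation:
//
//   for (T *lhs = lhs_begin, *rhs = rhs_begin; lhs != lhs_end; ++lhs, ++rhs)
//     *lhs = *lhs op *rhs; // RedOpGen body, privatized per element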
+
static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
llvm::Type *ArgsType,
+ ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps) {
@@ -2458,48 +2750,66 @@ static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
".omp.reduction.reduction_func", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn);
+ CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
- auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
- auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
// ...
// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// ...
CodeGenFunction::OMPPrivateScope Scope(CGF);
- for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I) {
- Scope.addPrivate(
- cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl()),
- [&]() -> llvm::Value *{
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, RHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(RHSExprs[I]->getType())));
- });
- Scope.addPrivate(
- cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl()),
- [&]() -> llvm::Value *{
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, LHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(LHSExprs[I]->getType())));
- });
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
+ auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
+ Scope.addPrivate(RHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
+ });
+ auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
+ Scope.addPrivate(LHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
+ });
+ QualType PrivTy = (*IPriv)->getType();
+ if (PrivTy->isArrayType()) {
+ // Get array size and emit VLA type.
+ ++Idx;
+ Address Elem =
+ CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(
+ CGF,
+ cast<OpaqueValueExpr>(
+ CGF.getContext().getAsVariableArrayType(PrivTy)->getSizeExpr()),
+ RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
+ CGF.EmitVariablyModifiedType(PrivTy);
+ }
}
Scope.Privatize();
+ IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
for (auto *E : ReductionOps) {
- CGF.EmitIgnoredExpr(E);
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit reduction for array section.
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *,
+ const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ // Emit reduction for array subscript or single variable.
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
}
Scope.ForceCleanup();
CGF.FinishFunction();
@@ -2507,10 +2817,13 @@ static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
}
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
bool WithNowait, bool SimpleReduction) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Next code should be emitted for reduction:
//
// static kmp_critical_name lock = { 0 };
@@ -2550,32 +2863,68 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
+ auto IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
for (auto *E : ReductionOps) {
- CGF.EmitIgnoredExpr(E);
+ if ((*IPriv)->getType()->isArrayType()) {
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(
+ CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *, const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
}
return;
}
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, RHSExprs.size());
+ auto Size = RHSExprs.size();
+ for (auto *E : Privates) {
+ if (E->getType()->isArrayType())
+      // Reserve a slot for the array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
QualType ReductionArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- auto *ReductionList =
+ Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I) {
- auto *Elem = CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, ReductionList, I);
- CGF.Builder.CreateAlignedStore(
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem =
+ CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getAddress(), CGF.VoidPtrTy),
- Elem, CGM.PointerAlignInBytes);
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateIntCast(
+ CGF.getVLASize(CGF.getContext().getAsVariableArrayType(
+ (*IPriv)->getType()))
+ .first,
+ CGF.SizeTy, /*isSigned=*/false),
+ CGF.VoidPtrTy),
+ Elem);
+ }
}
// 2. Emit reduce_func().
auto *ReductionFn = emitReductionFunction(
- CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), LHSExprs,
- RHSExprs, ReductionOps);
+ CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
+ LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
auto *Lock = getCriticalRegionLock(".reduction");
@@ -2586,10 +2935,10 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
CGF, Loc,
static_cast<OpenMPLocationFlags>(OMP_IDENT_KMPC | OMP_ATOMIC_REDUCE));
auto *ThreadId = getThreadID(CGF, Loc);
- auto *ReductionArrayTySize = llvm::ConstantInt::get(
- CGM.SizeTy, C.getTypeSizeInChars(ReductionArrayTy).getQuantity());
- auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList,
- CGF.VoidPtrTy);
+ auto *ReductionArrayTySize = getTypeSize(CGF, ReductionArrayTy);
+ auto *RL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList.getPointer(),
+ CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
@@ -2632,8 +2981,22 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
: OMPRTL__kmpc_end_reduce),
llvm::makeArrayRef(EndArgs));
+ auto IPriv = Privates.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
for (auto *E : ReductionOps) {
- CGF.EmitIgnoredExpr(E);
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit reduction for array section.
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(
+ CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ [=](CodeGenFunction &CGF, const Expr *, const Expr *,
+ const Expr *) { CGF.EmitIgnoredExpr(E); });
+ } else
+ // Emit reduction for array subscript or single variable.
+ CGF.EmitIgnoredExpr(E);
+ ++IPriv, ++ILHS, ++IRHS;
}
}
@@ -2663,62 +3026,84 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
createRuntimeFunction(OMPRTL__kmpc_end_reduce),
llvm::makeArrayRef(EndArgs));
}
- auto I = LHSExprs.begin();
+ auto ILHS = LHSExprs.begin();
+ auto IRHS = RHSExprs.begin();
+ auto IPriv = Privates.begin();
for (auto *E : ReductionOps) {
- const Expr *XExpr = nullptr;
- const Expr *EExpr = nullptr;
- const Expr *UpExpr = nullptr;
- BinaryOperatorKind BO = BO_Comma;
- if (auto *BO = dyn_cast<BinaryOperator>(E)) {
- if (BO->getOpcode() == BO_Assign) {
- XExpr = BO->getLHS();
- UpExpr = BO->getRHS();
- }
- }
- // Try to emit update expression as a simple atomic.
- auto *RHSExpr = UpExpr;
- if (RHSExpr) {
- // Analyze RHS part of the whole expression.
- if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
- RHSExpr->IgnoreParenImpCasts())) {
- // If this is a conditional operator, analyze its condition for
- // min/max reduction operator.
- RHSExpr = ACO->getCond();
+ const Expr *XExpr = nullptr;
+ const Expr *EExpr = nullptr;
+ const Expr *UpExpr = nullptr;
+ BinaryOperatorKind BO = BO_Comma;
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Assign) {
+ XExpr = BO->getLHS();
+ UpExpr = BO->getRHS();
+ }
}
- if (auto *BORHS =
- dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
- EExpr = BORHS->getRHS();
- BO = BORHS->getOpcode();
+ // Try to emit update expression as a simple atomic.
+ auto *RHSExpr = UpExpr;
+ if (RHSExpr) {
+ // Analyze RHS part of the whole expression.
+ if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
+ RHSExpr->IgnoreParenImpCasts())) {
+ // If this is a conditional operator, analyze its condition for
+ // min/max reduction operator.
+ RHSExpr = ACO->getCond();
+ }
+ if (auto *BORHS =
+ dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
+ EExpr = BORHS->getRHS();
+ BO = BORHS->getOpcode();
+ }
}
- }
- if (XExpr) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
- LValue X = CGF.EmitLValue(XExpr);
- RValue E;
- if (EExpr)
- E = CGF.EmitAnyExpr(EExpr);
- CGF.EmitOMPAtomicSimpleUpdateExpr(
- X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
- [&CGF, UpExpr, VD](RValue XRValue) {
- CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(
- VD, [&CGF, VD, XRValue]() -> llvm::Value *{
- auto *LHSTemp = CGF.CreateMemTemp(VD->getType());
+ if (XExpr) {
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto &&AtomicRedGen = [this, BO, VD, IPriv,
+ Loc](CodeGenFunction &CGF, const Expr *XExpr,
+ const Expr *EExpr, const Expr *UpExpr) {
+ LValue X = CGF.EmitLValue(XExpr);
+ RValue E;
+ if (EExpr)
+ E = CGF.EmitAnyExpr(EExpr);
+ CGF.EmitOMPAtomicSimpleUpdateExpr(
+ X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
+ [&CGF, UpExpr, VD, IPriv](RValue XRValue) {
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ PrivateScope.addPrivate(VD, [&CGF, VD, XRValue]() -> Address {
+ Address LHSTemp = CGF.CreateMemTemp(VD->getType());
CGF.EmitStoreThroughLValue(
- XRValue,
- CGF.MakeNaturalAlignAddrLValue(LHSTemp, VD->getType()));
+ XRValue, CGF.MakeAddrLValue(LHSTemp, VD->getType()));
return LHSTemp;
});
- (void)PrivateScope.Privatize();
- return CGF.EmitAnyExpr(UpExpr);
- });
- } else {
- // Emit as a critical region.
- emitCriticalRegion(CGF, ".atomic_reduction", [E](CodeGenFunction &CGF) {
- CGF.EmitIgnoredExpr(E);
- }, Loc);
- }
- ++I;
+ (void)PrivateScope.Privatize();
+ return CGF.EmitAnyExpr(UpExpr);
+ });
+ };
+ if ((*IPriv)->getType()->isArrayType()) {
+ // Emit atomic reduction for array section.
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
+ AtomicRedGen, XExpr, EExpr, UpExpr);
+ } else
+ // Emit atomic reduction for array subscript or single variable.
+ AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
+ } else {
+ // Emit as a critical region.
+ auto &&CritRedGen = [this, E, Loc](CodeGenFunction &CGF, const Expr *,
+ const Expr *, const Expr *) {
+ emitCriticalRegion(
+ CGF, ".atomic_reduction",
+ [E](CodeGenFunction &CGF) { CGF.EmitIgnoredExpr(E); }, Loc);
+ };
+ if ((*IPriv)->getType()->isArrayType()) {
+ auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
+ CritRedGen);
+ } else
+ CritRedGen(CGF, nullptr, nullptr, nullptr);
+ }
+ ++ILHS, ++IRHS, ++IPriv;
}
}
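// Editor's sketch: the reduction protocol emitted above, in terms of the
// runtime entry points referenced in this function (the nowait variants are
// analogous; per-variable combining elided):
//
//   switch (__kmpc_reduce(loc, gtid, n, sizeof(RedList), RedList,
//                         reduce_func, &lock)) {
//   case 1: // fast path: combine directly, then release the lock
//     LHS[i] = LHS[i] op RHS[i]; // for each reduction variable
//     __kmpc_end_reduce(loc, gtid, &lock);
//     break;
//   case 2: // atomic path: combine via atomics or a critical region
//     Atomic(LHS[i] op= RHS[i]);
//     break;
//   default: // nothing to do
//     break;
//   }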
@@ -2728,6 +3113,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
// global_tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
@@ -2737,8 +3124,11 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnerKind,
- const RegionCodeGenTy &CodeGen) {
- InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind);
+ const RegionCodeGenTy &CodeGen,
+ bool HasCancel) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}
@@ -2770,13 +3160,15 @@ static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
void CGOpenMPRuntime::emitCancellationPointCall(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- auto CancelDest =
- CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
- if (CancelDest.isValid()) {
+ if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
+ if (OMPRegionInfo->hasCancel()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
@@ -2793,8 +3185,10 @@ void CGOpenMPRuntime::emitCancellationPointCall(
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// __kmpc_cancel_barrier();
- emitBarrierCall(CGF, Loc, OMPD_unknown, /*CheckForCancel=*/false);
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
// exit from construct;
+ auto CancelDest =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
}
@@ -2802,14 +3196,18 @@ void CGOpenMPRuntime::emitCancellationPointCall(
}
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- auto CancelDest =
- CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
- if (CancelDest.isValid()) {
+ if (OMPRegionInfo->getDirectiveKind() == OMPD_single)
+ return;
+ auto &&ThenGen = [this, Loc, CancelRegion,
+ OMPRegionInfo](CodeGenFunction &CGF) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
@@ -2826,11 +3224,332 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// __kmpc_cancel_barrier();
- emitBarrierCall(CGF, Loc, OMPD_unknown, /*CheckForCancel=*/false);
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
// exit from construct;
+ auto CancelDest =
+ CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
- }
+ };
+ if (IfCond)
+ emitOMPIfClause(CGF, IfCond, ThenGen, [](CodeGenFunction &) {});
+ else
+ ThenGen(CGF);
}
}
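// Editor's sketch: ThenGen above expands to roughly the following (the
// barrier itself goes through emitBarrierCall with EmitChecks disabled, so
// the exact runtime symbol is chosen there):
//
//   kmp_int32 res = __kmpc_cancel(loc, gtid, cncl_kind);
//   if (res) {
//     // barrier via emitBarrierCall(..., /*EmitChecks=*/false), then branch
//     // out of the construct through EmitBranchThroughCleanup(CancelDest).
//   }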
+llvm::Value *
+CGOpenMPRuntime::emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ const RegionCodeGenTy &CodeGen) {
+ const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+
+ CodeGenFunction CGF(CGM, true);
+ CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ return CGF.GenerateOpenMPCapturedStmtFunction(CS);
+}
+
+void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn,
+ const Expr *IfCond, const Expr *Device,
+ ArrayRef<llvm::Value *> CapturedVars) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ /// \brief Values for bit flags used to specify the mapping type for
+ /// offloading.
+ enum OpenMPOffloadMappingFlags {
+ /// \brief Allocate memory on the device and move data from host to device.
+ OMP_MAP_TO = 0x01,
+ /// \brief Allocate memory on the device and move data from device to host.
+ OMP_MAP_FROM = 0x02,
+ /// \brief The element passed to the device is a pointer.
+ OMP_MAP_PTR = 0x20,
+ /// \brief Pass the element to the device by value.
+ OMP_MAP_BYCOPY = 0x80,
+ };
+
+ enum OpenMPOffloadingReservedDeviceIDs {
+    /// \brief Device ID used when no device was defined; per the spec, the
+    /// runtime should then get it from environment variables.
+ OMP_DEVICEID_UNDEF = -1,
+ };
+
+ auto &Ctx = CGF.getContext();
+
+  // Fill up the arrays with all the captured variables.
+ SmallVector<llvm::Value *, 16> BasePointers;
+ SmallVector<llvm::Value *, 16> Pointers;
+ SmallVector<llvm::Value *, 16> Sizes;
+ SmallVector<unsigned, 16> MapTypes;
+
+ bool hasVLACaptures = false;
+
+ const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ auto RI = CS.getCapturedRecordDecl()->field_begin();
+ auto CV = CapturedVars.begin();
+ for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
+ CE = CS.capture_end();
+ CI != CE; ++CI, ++RI, ++CV) {
+ StringRef Name;
+ QualType Ty;
+ llvm::Value *BasePointer;
+ llvm::Value *Pointer;
+ llvm::Value *Size;
+ unsigned MapType;
+
+ // VLA sizes are passed to the outlined region by copy.
+ if (CI->capturesVariableArrayType()) {
+ BasePointer = Pointer = *CV;
+ Size = getTypeSize(CGF, RI->getType());
+ // Copy to the device as an argument. No need to retrieve it.
+ MapType = OMP_MAP_BYCOPY;
+ hasVLACaptures = true;
+ } else if (CI->capturesThis()) {
+ BasePointer = Pointer = *CV;
+ const PointerType *PtrTy = cast<PointerType>(RI->getType().getTypePtr());
+ Size = getTypeSize(CGF, PtrTy->getPointeeType());
+ // Default map type.
+ MapType = OMP_MAP_TO | OMP_MAP_FROM;
+ } else if (CI->capturesVariableByCopy()) {
+ MapType = OMP_MAP_BYCOPY;
+ if (!RI->getType()->isAnyPointerType()) {
+ // If the field is not a pointer, we need to save the actual value and
+ // load it as a void pointer.
+ auto DstAddr = CGF.CreateMemTemp(
+ Ctx.getUIntPtrType(),
+ Twine(CI->getCapturedVar()->getName()) + ".casted");
+ LValue DstLV = CGF.MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
+
+ auto *SrcAddrVal = CGF.EmitScalarConversion(
+ DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
+ Ctx.getPointerType(RI->getType()), SourceLocation());
+ LValue SrcLV =
+ CGF.MakeNaturalAlignAddrLValue(SrcAddrVal, RI->getType());
+
+ // Store the value using the source type pointer.
+ CGF.EmitStoreThroughLValue(RValue::get(*CV), SrcLV);
+
+ // Load the value using the destination type pointer.
+ BasePointer = Pointer =
+ CGF.EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
+ } else {
+ MapType |= OMP_MAP_PTR;
+ BasePointer = Pointer = *CV;
+ }
+ Size = getTypeSize(CGF, RI->getType());
+ } else {
+ assert(CI->capturesVariable() && "Expected captured reference.");
+ BasePointer = Pointer = *CV;
+
+ const ReferenceType *PtrTy =
+ cast<ReferenceType>(RI->getType().getTypePtr());
+ QualType ElementType = PtrTy->getPointeeType();
+ Size = getTypeSize(CGF, ElementType);
+ // The default map type for a scalar/complex type is 'to' because by
+ // default the value doesn't have to be retrieved. For an aggregate type,
+ // the default is 'tofrom'.
+ MapType = ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
+ : OMP_MAP_TO;
+ if (ElementType->isAnyPointerType())
+ MapType |= OMP_MAP_PTR;
+ }
+
+ BasePointers.push_back(BasePointer);
+ Pointers.push_back(Pointer);
+ Sizes.push_back(Size);
+ MapTypes.push_back(MapType);
+ }
+
+  // Keep track of whether the host function has to be executed.
+ auto OffloadErrorQType =
+ Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true);
+ auto OffloadError = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(OffloadErrorQType, ".run_host_version"),
+ OffloadErrorQType);
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty),
+ OffloadError);
+
+ // Fill up the pointer arrays and transfer execution to the device.
+ auto &&ThenGen = [this, &Ctx, &BasePointers, &Pointers, &Sizes, &MapTypes,
+ hasVLACaptures, Device, OffloadError,
+ OffloadErrorQType](CodeGenFunction &CGF) {
+ unsigned PointerNumVal = BasePointers.size();
+ llvm::Value *PointerNum = CGF.Builder.getInt32(PointerNumVal);
+ llvm::Value *BasePointersArray;
+ llvm::Value *PointersArray;
+ llvm::Value *SizesArray;
+ llvm::Value *MapTypesArray;
+
+ if (PointerNumVal) {
+ llvm::APInt PointerNumAP(32, PointerNumVal, /*isSigned=*/true);
+ QualType PointerArrayType = Ctx.getConstantArrayType(
+ Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+
+ BasePointersArray =
+ CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
+ PointersArray =
+ CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
+
+ // If we don't have any VLA types, we can use a constant array for the map
+ // sizes, otherwise we need to fill up the arrays as we do for the
+ // pointers.
+ if (hasVLACaptures) {
+ QualType SizeArrayType = Ctx.getConstantArrayType(
+ Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ SizesArray =
+ CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
+ } else {
+ // We expect all the sizes to be constant, so we collect them to create
+ // a constant array.
+ SmallVector<llvm::Constant *, 16> ConstSizes;
+ for (auto S : Sizes)
+ ConstSizes.push_back(cast<llvm::Constant>(S));
+
+ auto *SizesArrayInit = llvm::ConstantArray::get(
+ llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
+ auto *SizesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), SizesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ SizesArrayInit, ".offload_sizes");
+ SizesArrayGbl->setUnnamedAddr(true);
+ SizesArray = SizesArrayGbl;
+ }
+
+ // The map types are always constant so we don't need to generate code to
+ // fill arrays. Instead, we create an array constant.
+ llvm::Constant *MapTypesArrayInit =
+ llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
+ auto *MapTypesArrayGbl = new llvm::GlobalVariable(
+ CGM.getModule(), MapTypesArrayInit->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
+ MapTypesArrayInit, ".offload_maptypes");
+ MapTypesArrayGbl->setUnnamedAddr(true);
+ MapTypesArray = MapTypesArrayGbl;
+
+ for (unsigned i = 0; i < PointerNumVal; ++i) {
+
+ llvm::Value *BPVal = BasePointers[i];
+ if (BPVal->getType()->isPointerTy())
+ BPVal = CGF.Builder.CreateBitCast(BPVal, CGM.VoidPtrTy);
+ else {
+ assert(BPVal->getType()->isIntegerTy() &&
+ "If not a pointer, the value type must be an integer.");
+ BPVal = CGF.Builder.CreateIntToPtr(BPVal, CGM.VoidPtrTy);
+ }
+ llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal),
+ BasePointersArray, 0, i);
+ Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ CGF.Builder.CreateStore(BPVal, BPAddr);
+
+ llvm::Value *PVal = Pointers[i];
+ if (PVal->getType()->isPointerTy())
+ PVal = CGF.Builder.CreateBitCast(PVal, CGM.VoidPtrTy);
+ else {
+ assert(PVal->getType()->isIntegerTy() &&
+ "If not a pointer, the value type must be an integer.");
+ PVal = CGF.Builder.CreateIntToPtr(PVal, CGM.VoidPtrTy);
+ }
+ llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray,
+ 0, i);
+ Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ CGF.Builder.CreateStore(PVal, PAddr);
+
+ if (hasVLACaptures) {
+ llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ /*Idx0=*/0,
+ /*Idx1=*/i);
+ Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(
+ Sizes[i], CGM.SizeTy, /*isSigned=*/true),
+ SAddr);
+ }
+ }
+
+ BasePointersArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), BasePointersArray,
+ /*Idx0=*/0, /*Idx1=*/0);
+ PointersArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray,
+ /*Idx0=*/0,
+ /*Idx1=*/0);
+ SizesArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ /*Idx0=*/0, /*Idx1=*/0);
+ MapTypesArray = CGF.Builder.CreateConstInBoundsGEP2_32(
+ llvm::ArrayType::get(CGM.Int32Ty, PointerNumVal), MapTypesArray,
+ /*Idx0=*/0,
+ /*Idx1=*/0);
+
+ } else {
+ BasePointersArray = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ PointersArray = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
+ SizesArray = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
+ MapTypesArray =
+ llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo());
+ }
+
+ // On top of the arrays that were filled up, the target offloading call
+ // takes as arguments the device id as well as the host pointer. The host
+ // pointer is used by the runtime library to identify the current target
+ // region, so it only has to be unique and not necessarily point to
+    // anything. It could be the pointer to the outlined function that
+    // implements the target region, but we do not use it for that purpose,
+    // so the compiler does not need to keep the outlined function alive and
+    // can therefore inline the host function if that proves worthwhile
+    // during optimization.
+
+ llvm::Value *HostPtr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), ".offload_hstptr");
+
+ // Emit device ID if any.
+ llvm::Value *DeviceID;
+ if (Device)
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGM.Int32Ty, /*isSigned=*/true);
+ else
+ DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);
+
+ llvm::Value *OffloadingArgs[] = {
+ DeviceID, HostPtr, PointerNum, BasePointersArray,
+ PointersArray, SizesArray, MapTypesArray};
+ auto Return = CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target),
+ OffloadingArgs);
+
+ CGF.EmitStoreOfScalar(Return, OffloadError);
+ };
+
+ if (IfCond) {
+ // Notify that the host version must be executed.
+ auto &&ElseGen = [this, OffloadError,
+ OffloadErrorQType](CodeGenFunction &CGF) {
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
+ OffloadError);
+ };
+ emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
+ } else {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ ThenGen(CGF);
+ }
+
+ // Check the error code and execute the host version if required.
+ auto OffloadFailedBlock = CGF.createBasicBlock("omp_offload.failed");
+ auto OffloadContBlock = CGF.createBasicBlock("omp_offload.cont");
+ auto OffloadErrorVal = CGF.EmitLoadOfScalar(OffloadError, SourceLocation());
+ auto Failed = CGF.Builder.CreateIsNotNull(OffloadErrorVal);
+ CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
+
+ CGF.EmitBlock(OffloadFailedBlock);
+ CGF.Builder.CreateCall(OutlinedFn, BasePointers);
+ CGF.EmitBranch(OffloadContBlock);
+
+ CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
+ return;
+}
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 44bc8a139b15..992f9a8805e2 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -39,7 +39,7 @@ class OMPExecutableDirective;
class VarDecl;
namespace CodeGen {
-
+class Address;
class CodeGenFunction;
class CodeGenModule;
@@ -62,6 +62,9 @@ private:
// Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
OMPRTL__kmpc_critical,
+ // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *crit, uintptr_t hint);
+ OMPRTL__kmpc_critical_with_hint,
// Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
OMPRTL__kmpc_end_critical,
@@ -154,6 +157,14 @@ private:
// Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
OMPRTL__kmpc_cancel,
+
+ //
+ // Offloading related calls
+ //
+ // Call to int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
+ // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
+ // *arg_types);
+ OMPRTL__tgt_target,
};
/// \brief Values for bit flags used in the ident_t to describe the fields.
@@ -184,7 +195,9 @@ private:
/// \brief Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
- llvm::Value *getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
+ Address getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
+
+public:
/// \brief Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
@@ -225,6 +238,7 @@ private:
/// and a pair of line numbers that delimit the construct.
IdentField_PSource
};
+private:
llvm::StructType *IdentTy;
/// \brief Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
@@ -321,8 +335,7 @@ private:
/// \brief Emits address of the word in a memory where current thread id is
/// stored.
- virtual llvm::Value *emitThreadIDAddress(CodeGenFunction &CGF,
- SourceLocation Loc);
+ virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
/// \brief Gets thread id value for the current thread.
///
@@ -346,7 +359,7 @@ private:
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
- void emitThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
+ void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
@@ -396,23 +409,25 @@ public:
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
-  /// \param CapturedStruct A pointer to the record with the references to
-  /// variables used in \a OutlinedFn function.
+  /// \param CapturedVars A vector of the values captured in the current
+  /// region and referenced by the \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
- llvm::Value *CapturedStruct,
+ ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// \brief Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
+ /// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
- SourceLocation Loc);
+ SourceLocation Loc,
+ const Expr *Hint = nullptr);
/// \brief Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
@@ -447,17 +462,20 @@ public:
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
- SourceLocation Loc);
+ SourceLocation Loc, bool IsThreads);
/// \brief Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
- /// \param CheckForCancel true if check for possible cancellation must be
- /// performed, false otherwise.
+  /// \param EmitChecks true if checks for cancellation barriers must be
+  /// emitted.
+  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+  /// false if the runtime class decides which one to emit (simple or with
+  /// cancellation checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
- bool CheckForCancel = true);
+ bool EmitChecks = true,
+ bool ForceSimpleCall = false);
/// \brief Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
@@ -473,6 +491,12 @@ public:
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
+ virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk = nullptr);
+
/// \brief Call the appropriate runtime routine to initialize it before start
/// of loop.
///
@@ -497,11 +521,12 @@ public:
/// \param Chunk Value of the chunk for the static_chunked scheduled loop.
/// For the default (nullptr) value, the chunk 1 will be used.
///
- virtual void emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPScheduleClauseKind SchedKind, unsigned IVSize,
- bool IVSigned, bool Ordered, llvm::Value *IL,
- llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
- llvm::Value *Chunk = nullptr);
+ virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned, bool Ordered,
+ Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk = nullptr);
/// \brief Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
@@ -539,8 +564,8 @@ public:
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
- llvm::Value *IL, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST);
+ Address IL, Address LB,
+ Address UB, Address ST);
/// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
@@ -562,10 +587,10 @@ public:
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
- virtual llvm::Value *getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- llvm::Value *VDAddr,
- SourceLocation Loc);
+ virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc);
/// \brief Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
@@ -576,7 +601,7 @@ public:
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
- emitThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
+ emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
@@ -632,7 +657,7 @@ public:
virtual void emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
- llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
@@ -645,9 +670,12 @@ public:
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
+  /// \param HasCancel true if the region has an inner cancel directive,
+  /// false otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen);
+ const RegionCodeGenTy &CodeGen,
+ bool HasCancel = false);
/// \brief Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
@@ -679,6 +707,7 @@ public:
/// }
/// \endcode
///
+ /// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
@@ -686,6 +715,7 @@ public:
/// \param WithNowait true if the parent directive also has a nowait clause,
/// false otherwise.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
@@ -703,10 +733,36 @@ public:
OpenMPDirectiveKind CancelRegion);
/// \brief Emit code for 'cancel' construct.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
+
+ /// \brief Emit outlined function for 'target' directive.
+ /// \param D Directive to emit.
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ virtual llvm::Value *
+ emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ const RegionCodeGenTy &CodeGen);
+
+ /// \brief Emit the target offloading code associated with \a D. The emitted
+ /// code attempts to offload execution to the device; in the event of
+ /// a failure it executes the host version outlined in \a OutlinedFn.
+ /// \param D Directive to emit.
+ /// \param OutlinedFn Host version of the code to be offloaded.
+ /// \param IfCond Expression evaluated in the 'if' clause associated with the
+ /// target directive, or null if no 'if' clause is used.
+ /// \param Device Expression evaluated in the 'device' clause associated with
+ /// the target directive, or null if no 'device' clause is used.
+ /// \param CapturedVars Values captured in the current region.
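+ /// A sketch of the emitted control flow (the offload entry point and its
+ /// arguments are assumptions, not the exact lowering):
+ /// \code
+ /// if (IfCond evaluates to false)
+ ///   OutlinedFn(CapturedVars...);               // host version
+ /// else if (__tgt_target(DeviceID, ...) != 0)   // nonzero means failure
+ ///   OutlinedFn(CapturedVars...);               // host fallback
+ /// \endcode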
+ virtual void emitTargetCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn, const Expr *IfCond,
+ const Expr *Device,
+ ArrayRef<llvm::Value *> CapturedVars);
};
} // namespace CodeGen
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index f91ecebd0926..375b59c5cb33 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -454,7 +454,7 @@ void CGRecordLowering::accumulateBases() {
// contain only a trailing array member.
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
if (!BaseDecl->isEmpty() &&
- !Context.getASTRecordLayout(BaseDecl).getSize().isZero())
+ !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
}
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 7a0b8a35be01..cc4fa2ec5972 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -16,6 +16,7 @@
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/LoopHint.h"
@@ -25,6 +26,8 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
+
using namespace clang;
using namespace CodeGen;
@@ -138,6 +141,10 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
case Stmt::GCCAsmStmtClass: // Intentional fall-through.
case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+ case Stmt::CoroutineBodyStmtClass:
+ case Stmt::CoreturnStmtClass:
+ CGM.ErrorUnsupported(S, "coroutine");
+ break;
case Stmt::CapturedStmtClass: {
const CapturedStmt *CS = cast<CapturedStmt>(S);
EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
@@ -246,6 +253,18 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::OMPCancelDirectiveClass:
EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
break;
+ case Stmt::OMPTargetDataDirectiveClass:
+ EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
+ break;
+ case Stmt::OMPTaskLoopDirectiveClass:
+ EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
+ break;
+ case Stmt::OMPTaskLoopSimdDirectiveClass:
+ EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
+ break;
+  case Stmt::OMPDistributeDirectiveClass:
+ EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
+ break;
}
}
@@ -272,8 +291,8 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
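/// For example, in the GNU statement expression
/// \code
/// int y = ({ do_work(); 42; });
/// \endcode
/// the compound statement's result is the value of its last sub-statement (42).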
-llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- AggValueSlot AggSlot) {
+Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -283,7 +302,7 @@ llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLa
return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
-llvm::Value*
+Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
bool GetLast,
AggValueSlot AggSlot) {
@@ -292,7 +311,7 @@ CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- llvm::Value *RetAlloca = nullptr;
+ Address RetAlloca = Address::invalid();
if (GetLast) {
// We have to special case labels here. They are statements, but when put
// at the end of a statement expression, they yield the value of their
@@ -337,7 +356,7 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
return;
// Can only simplify empty blocks.
- if (BI != BB->begin())
+ if (BI->getIterator() != BB->begin())
return;
BB->replaceAllUsesWith(BI->getSuccessor(0));
@@ -359,7 +378,7 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
// Place the block after the current block, if possible, or else at
// the end of the function.
if (CurBB && CurBB->getParent())
- CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
else
CurFn->getBasicBlockList().push_back(BB);
Builder.SetInsertPoint(BB);
@@ -386,7 +405,8 @@ void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
bool inserted = false;
for (llvm::User *u : block->users()) {
if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
- CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
+ CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
+ block);
inserted = true;
break;
}
@@ -590,100 +610,6 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
EmitBlock(ContBlock, true);
}
-void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
- llvm::BranchInst *CondBr,
- ArrayRef<const Attr *> Attrs) {
- // Return if there are no hints.
- if (Attrs.empty())
- return;
-
- // Add vectorize and unroll hints to the metadata on the conditional branch.
- //
- // FIXME: Should this really start with a size of 1?
- SmallVector<llvm::Metadata *, 2> Metadata(1);
- for (const auto *Attr : Attrs) {
- const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
-
- // Skip non loop hint attributes
- if (!LH)
- continue;
-
- LoopHintAttr::OptionType Option = LH->getOption();
- LoopHintAttr::LoopHintState State = LH->getState();
- const char *MetadataName;
- switch (Option) {
- case LoopHintAttr::Vectorize:
- case LoopHintAttr::VectorizeWidth:
- MetadataName = "llvm.loop.vectorize.width";
- break;
- case LoopHintAttr::Interleave:
- case LoopHintAttr::InterleaveCount:
- MetadataName = "llvm.loop.interleave.count";
- break;
- case LoopHintAttr::Unroll:
- // With the unroll loop hint, a non-zero value indicates full unrolling.
- MetadataName = State == LoopHintAttr::Disable ? "llvm.loop.unroll.disable"
- : "llvm.loop.unroll.full";
- break;
- case LoopHintAttr::UnrollCount:
- MetadataName = "llvm.loop.unroll.count";
- break;
- }
-
- Expr *ValueExpr = LH->getValue();
- int ValueInt = 1;
- if (ValueExpr) {
- llvm::APSInt ValueAPS =
- ValueExpr->EvaluateKnownConstInt(CGM.getContext());
- ValueInt = static_cast<int>(ValueAPS.getSExtValue());
- }
-
- llvm::Constant *Value;
- llvm::MDString *Name;
- switch (Option) {
- case LoopHintAttr::Vectorize:
- case LoopHintAttr::Interleave:
- if (State != LoopHintAttr::Disable) {
- // FIXME: In the future I will modifiy the behavior of the metadata
- // so we can enable/disable vectorization and interleaving separately.
- Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
- Value = Builder.getTrue();
- break;
- }
- // Vectorization/interleaving is disabled, set width/count to 1.
- ValueInt = 1;
- // Fallthrough.
- case LoopHintAttr::VectorizeWidth:
- case LoopHintAttr::InterleaveCount:
- case LoopHintAttr::UnrollCount:
- Name = llvm::MDString::get(Context, MetadataName);
- Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
- break;
- case LoopHintAttr::Unroll:
- Name = llvm::MDString::get(Context, MetadataName);
- Value = nullptr;
- break;
- }
-
- SmallVector<llvm::Metadata *, 2> OpValues;
- OpValues.push_back(Name);
- if (Value)
- OpValues.push_back(llvm::ConstantAsMetadata::get(Value));
-
- // Set or overwrite metadata indicated by Name.
- Metadata.push_back(llvm::MDNode::get(Context, OpValues));
- }
-
- // FIXME: This condition is never false. Should it be an assert?
- if (!Metadata.empty()) {
- // Add llvm.loop MDNode to CondBr.
- llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
- LoopID->replaceOperandWith(0, LoopID); // First op points to itself.
-
- CondBr->setMetadata("llvm.loop", LoopID);
- }
-}
-
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
ArrayRef<const Attr *> WhileAttrs) {
// Emit the header for the loop, which will also become
@@ -691,7 +617,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
EmitBlock(LoopHeader.getBlock());
- LoopStack.push(LoopHeader.getBlock(), WhileAttrs);
+ LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs);
// Create an exit block for when the condition fails, which will
// also become the break target.
@@ -730,7 +656,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ConditionScope.requiresCleanups())
ExitBlock = createBasicBlock("while.exit");
- llvm::BranchInst *CondBr = Builder.CreateCondBr(
+ Builder.CreateCondBr(
BoolCondVal, LoopBody, ExitBlock,
createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
@@ -738,9 +664,6 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
-
- // Attach metadata to loop body conditional branch.
- EmitCondBrHints(LoopBody->getContext(), CondBr, WhileAttrs);
}
// Emit the loop body. We have to emit this in a cleanup scope
@@ -785,7 +708,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// Emit the body of the loop.
llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- LoopStack.push(LoopBody, DoAttrs);
+ LoopStack.push(LoopBody, CGM.getContext(), DoAttrs);
EmitBlockWithFallThrough(LoopBody, &S);
{
@@ -815,12 +738,9 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch) {
uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
- llvm::BranchInst *CondBr = Builder.CreateCondBr(
+ Builder.CreateCondBr(
BoolCondVal, LoopBody, LoopExit.getBlock(),
createProfileWeightsForLoop(S.getCond(), BackedgeCount));
-
- // Attach metadata to loop body conditional branch.
- EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
}
LoopStack.pop();
@@ -851,7 +771,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
llvm::BasicBlock *CondBlock = Continue.getBlock();
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, ForAttrs);
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
// If the for loop doesn't have an increment we can just use the
// condition as the continue block. Otherwise we'll need to create
@@ -885,13 +805,10 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- llvm::BranchInst *CondBr = Builder.CreateCondBr(
+ Builder.CreateCondBr(
BoolCondVal, ForBody, ExitBlock,
createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
- // Attach metadata to loop body conditional branch.
- EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
-
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
@@ -949,7 +866,7 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, ForAttrs);
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs);
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -963,13 +880,10 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
// The body is executed if the expression, contextually converted
// to bool, is true.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- llvm::BranchInst *CondBr = Builder.CreateCondBr(
+ Builder.CreateCondBr(
BoolCondVal, ForBody, ExitBlock,
createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
- // Attach metadata to loop body conditional branch.
- EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
-
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
@@ -1012,10 +926,9 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
if (RV.isScalar()) {
Builder.CreateStore(RV.getScalarVal(), ReturnValue);
} else if (RV.isAggregate()) {
- EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
} else {
- EmitStoreOfComplex(RV.getComplexVal(),
- MakeNaturalAlignAddrLValue(ReturnValue, Ty),
+ EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
/*init*/ true);
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -1056,8 +969,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
- Builder.CreateStore(Builder.getTrue(), NRVOFlag);
- } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
+ Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
+ } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
if (RV)
@@ -1075,20 +988,17 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
break;
case TEK_Complex:
- EmitComplexExprIntoLValue(RV,
- MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
+ EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
/*isInit*/ true);
break;
- case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
+ case TEK_Aggregate:
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
break;
}
- }
}
++NumReturnExprs;
@@ -1624,6 +1534,22 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
EmitBlock(SwitchExit.getBlock(), true);
incrementProfileCounter(&S);
+ // If the switch has a condition wrapped by __builtin_unpredictable,
+ // create metadata that specifies that the switch is unpredictable.
+ // Don't bother if not optimizing because that metadata would not be used.
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ if (const CallExpr *Call = dyn_cast<CallExpr>(S.getCond())) {
+ const Decl *TargetDecl = Call->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
+ MDHelper.createUnpredictable());
+ }
+ }
+ }
+ }
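+  // For example (illustrative only), source such as
+  //   switch (__builtin_unpredictable(x)) { ... }
+  // yields a switch instruction annotated with !unpredictable metadata.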
+
if (SwitchWeights) {
assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
"switch weights do not match switch cases");
@@ -1675,9 +1601,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
assert(OutCons &&
"Must pass output names to constraints with a symbolic name");
unsigned Index;
- bool result = Target.resolveSymbolicName(Constraint,
- &(*OutCons)[0],
- OutCons->size(), Index);
+ bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
assert(result && "Could not resolve symbolic name"); (void)result;
Result += llvm::utostr(Index);
break;
@@ -1743,12 +1667,12 @@ CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
Ty));
} else {
- Arg = InputValue.getAddress();
+ Arg = InputValue.getPointer();
ConstraintStr += '*';
}
}
} else {
- Arg = InputValue.getAddress();
+ Arg = InputValue.getPointer();
ConstraintStr += '*';
}
@@ -1772,7 +1696,8 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
if (Info.allowsRegister() || !Info.allowsMemory())
if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
return EmitScalarExpr(InputExpr);
-
+ if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
+ return EmitScalarExpr(InputExpr);
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
@@ -1793,13 +1718,15 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
const LangOptions &LangOpts = CGF.CGM.getLangOpts();
+ unsigned StartToken = 0;
+ unsigned ByteOffset = 0;
// Add the location of the start of each subsequent line of the asm to the
// MDNode.
- for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
if (StrVal[i] != '\n') continue;
- SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
- CGF.getTarget());
+ SourceLocation LineLoc = Str->getLocationOfByte(
+ i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
Locs.push_back(llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
}
@@ -1832,8 +1759,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Name = GAS->getInputName(i);
TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
bool IsValid =
- getTarget().validateInputConstraint(OutputConstraintInfos.data(),
- S.getNumOutputs(), Info);
+ getTarget().validateInputConstraint(OutputConstraintInfos, Info);
assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
InputConstraintInfos.push_back(Info);
}
@@ -1919,8 +1845,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
<< OutExpr->getType() << OutputConstraint;
}
} else {
- ArgTypes.push_back(Dest.getAddress()->getType());
- Args.push_back(Dest.getAddress());
+ ArgTypes.push_back(Dest.getAddress().getType());
+ Args.push_back(Dest.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2077,6 +2003,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Result->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoUnwind);
+ if (isa<MSAsmStmt>(&S)) {
+ // If the assembly contains any labels, mark the call noduplicate to prevent
+ // defining the same ASM label twice (PR23715). This is pretty hacky, but it
+ // works.
+ if (AsmString.find("__MSASMLABEL_") != std::string::npos)
+ Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoDuplicate);
+ }
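+  // For example (illustrative), if a function containing
+  //   __asm { mylabel: ... jmp mylabel }
+  // were inlined twice, the mangled '__MSASMLABEL_' label would otherwise be
+  // defined twice in the resulting object.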
+
// Attach readnone and readonly attributes.
if (!HasSideEffect) {
if (ReadNone)
@@ -2152,12 +2087,12 @@ LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
QualType RecordTy = getContext().getRecordType(RD);
// Initialize the captured struct.
- LValue SlotLV = MakeNaturalAlignAddrLValue(
- CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
+ LValue SlotLV =
+ MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
RecordDecl::field_iterator CurField = RD->field_begin();
- for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
- E = S.capture_init_end();
+ for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
+ E = S.capture_init_end();
I != E; ++I, ++CurField) {
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
if (CurField->hasCapturedVLAType()) {
@@ -2184,13 +2119,12 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
delete CGF.CapturedStmtInfo;
// Emit call to the helper function.
- EmitCallOrInvoke(F, CapStruct.getAddress());
+ EmitCallOrInvoke(F, CapStruct.getPointer());
return F;
}
-llvm::Value *
-CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
+Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
LValue CapStruct = InitCapturedStruct(S);
return CapStruct.getAddress();
}
@@ -2229,8 +2163,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CD->getLocation(),
CD->getBody()->getLocStart());
// Set the context parameter in CapturedStmtInfo.
- llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
- assert(DeclPtr && "missing context parameter for CapturedStmt");
+ Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
// Initialize variable-length arrays.
@@ -2252,7 +2185,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
}
- PGO.assignRegionCounters(CD, F);
+ PGO.assignRegionCounters(GlobalDecl(CD), F);
CapturedStmtInfo->EmitBody(*this, CD->getBody());
FinishFunction(CD->getBodyRBrace());
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index e5f507aa41bd..bcd2ac51929d 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -20,21 +20,195 @@
using namespace clang;
using namespace CodeGen;
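+/// \brief Collect the values captured by an OpenMP outlined region: VLA sizes
+/// as their computed size values, 'this' as the current CXXThisValue, by-copy
+/// captures as loaded scalar values, and by-reference captures as pointers.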
+void CodeGenFunction::GenerateOpenMPCapturedVars(
+ const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ auto CurField = RD->field_begin();
+ auto CurCap = S.captures().begin();
+ for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
+ E = S.capture_init_end();
+ I != E; ++I, ++CurField, ++CurCap) {
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ auto *Val = VLASizeMap[VAT->getSizeExpr()];
+ CapturedVars.push_back(Val);
+ } else if (CurCap->capturesThis())
+ CapturedVars.push_back(CXXThisValue);
+ else if (CurCap->capturesVariableByCopy())
+ CapturedVars.push_back(
+ EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal());
+ else {
+ assert(CurCap->capturesVariable() && "Expected capture by reference.");
+ CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
+ }
+ }
+}
+
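+/// \brief Cast a value that was passed as a uintptr back to its expected type
+/// \a DstType. For example (illustrative), an 'int' captured by copy travels
+/// as a uintptr holding its bits and is recovered here; for reference types a
+/// temporary of reference type is materialized so that the address of the
+/// reference, rather than of the value, is returned.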
+static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
+ StringRef Name, LValue AddrLV,
+ bool isReferenceType = false) {
+ ASTContext &Ctx = CGF.getContext();
+
+ auto *CastedPtr = CGF.EmitScalarConversion(
+ AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
+ Ctx.getPointerType(DstType), SourceLocation());
+ auto TmpAddr =
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
+ .getAddress();
+
+  // If we are dealing with references, we need to return the address of a
+  // reference to the value instead of the address of the value itself.
+ if (isReferenceType) {
+ QualType RefType = Ctx.getLValueReferenceType(DstType);
+ auto *RefVal = TmpAddr.getPointer();
+ TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
+ auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
+ CGF.EmitScalarInit(RefVal, TmpLVal);
+ }
+
+ return TmpAddr;
+}
+
+llvm::Function *
+CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
+ assert(
+ CapturedStmtInfo &&
+ "CapturedStmtInfo should be set when generating the captured function");
+ const CapturedDecl *CD = S.getCapturedDecl();
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ assert(CD->hasBody() && "missing CapturedDecl body");
+
+ // Build the argument list.
+ ASTContext &Ctx = CGM.getContext();
+ FunctionArgList Args;
+ Args.append(CD->param_begin(),
+ std::next(CD->param_begin(), CD->getContextParamPosition()));
+ auto I = S.captures().begin();
+ for (auto *FD : RD->fields()) {
+ QualType ArgType = FD->getType();
+ IdentifierInfo *II = nullptr;
+ VarDecl *CapVar = nullptr;
+
+    // If this is a capture by copy and the type is not a pointer, the outlined
+    // function argument type should be uintptr and the value properly cast to
+    // uintptr. This is necessary given that the runtime library is only able
+    // to deal with pointers. The VLA type sizes are passed to the outlined
+    // function in the same way.
+ if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
+ I->capturesVariableArrayType())
+ ArgType = Ctx.getUIntPtrType();
+
+ if (I->capturesVariable() || I->capturesVariableByCopy()) {
+ CapVar = I->getCapturedVar();
+ II = CapVar->getIdentifier();
+ } else if (I->capturesThis())
+ II = &getContext().Idents.get("this");
+ else {
+ assert(I->capturesVariableArrayType());
+ II = &getContext().Idents.get("vla");
+ }
+ if (ArgType->isVariablyModifiedType())
+ ArgType = getContext().getVariableArrayDecayedType(ArgType);
+ Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
+ FD->getLocation(), II, ArgType));
+ ++I;
+ }
+ Args.append(
+ std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
+ CD->param_end());
+
+ // Create the function declaration.
+ FunctionType::ExtInfo ExtInfo;
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
+ /*IsVariadic=*/false);
+ llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
+
+ llvm::Function *F = llvm::Function::Create(
+ FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
+ CapturedStmtInfo->getHelperName(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
+ if (CD->isNothrow())
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ // Generate the function.
+ StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
+ CD->getBody()->getLocStart());
+ unsigned Cnt = CD->getContextParamPosition();
+ I = S.captures().begin();
+ for (auto *FD : RD->fields()) {
+ // If we are capturing a pointer by copy we don't need to do anything, just
+ // use the value that we get from the arguments.
+ if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
+ setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
+ ++Cnt, ++I;
+ continue;
+ }
+
+ LValue ArgLVal =
+ MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
+ AlignmentSource::Decl);
+ if (FD->hasCapturedVLAType()) {
+ LValue CastedArgLVal =
+ MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
+ Args[Cnt]->getName(), ArgLVal),
+ FD->getType(), AlignmentSource::Decl);
+ auto *ExprArg =
+ EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
+ auto VAT = FD->getCapturedVLAType();
+ VLASizeMap[VAT->getSizeExpr()] = ExprArg;
+ } else if (I->capturesVariable()) {
+ auto *Var = I->getCapturedVar();
+ QualType VarTy = Var->getType();
+ Address ArgAddr = ArgLVal.getAddress();
+ if (!VarTy->isReferenceType()) {
+ ArgAddr = EmitLoadOfReference(
+ ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
+ }
+ setAddrOfLocalVar(
+ Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
+ } else if (I->capturesVariableByCopy()) {
+ assert(!FD->getType()->isAnyPointerType() &&
+ "Not expecting a captured pointer.");
+ auto *Var = I->getCapturedVar();
+ QualType VarTy = Var->getType();
+ setAddrOfLocalVar(I->getCapturedVar(),
+ castValueFromUintptr(*this, FD->getType(),
+ Args[Cnt]->getName(), ArgLVal,
+ VarTy->isReferenceType()));
+ } else {
+ // If 'this' is captured, load it into CXXThisValue.
+ assert(I->capturesThis());
+ CXXThisValue =
+ EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
+ }
+ ++Cnt, ++I;
+ }
+
+ PGO.assignRegionCounters(GlobalDecl(CD), F);
+ CapturedStmtInfo->EmitBody(*this, CD->getBody());
+ FinishFunction(CD->getBodyRBrace());
+
+ return F;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
- llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen) {
// Perform element-by-element initialization.
QualType ElementTy;
- auto SrcBegin = SrcAddr;
- auto DestBegin = DestAddr;
+
+ // Drill down to the base element type on both arrays.
auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
- auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
+ auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+
+ auto SrcBegin = SrcAddr.getPointer();
+ auto DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
- DestBegin->getType());
auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
// The basic structure here is a while-do loop.
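// Sketched as pseudo-code (structure only):
//   if (DestBegin == DestEnd) goto done;
//   body: dest = phi(DestBegin, destNext); src = phi(SrcBegin, srcNext);
//         CopyGen(dest, src); destNext = dest + 1; srcNext = src + 1;
//         br (destNext == DestEnd) ? done : body;
//   done: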
auto BodyBB = createBasicBlock("omp.arraycpy.body");
@@ -46,77 +220,144 @@ void CodeGenFunction::EmitOMPAggregateAssign(
// Enter the loop body, making that address the current address.
auto EntryBB = Builder.GetInsertBlock();
EmitBlock(BodyBB);
- auto SrcElementCurrent =
- Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
- SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
- auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
- "omp.arraycpy.destElementPast");
- DestElementCurrent->addIncoming(DestBegin, EntryBB);
+
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *SrcElementPHI =
+ Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ SrcElementPHI->addIncoming(SrcBegin, EntryBB);
+ Address SrcElementCurrent =
+ Address(SrcElementPHI,
+ SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ llvm::PHINode *DestElementPHI =
+ Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ DestElementPHI->addIncoming(DestBegin, EntryBB);
+ Address DestElementCurrent =
+ Address(DestElementPHI,
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
CopyGen(DestElementCurrent, SrcElementCurrent);
// Shift the address forward by one element.
auto DestElementNext = Builder.CreateConstGEP1_32(
- DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
auto SrcElementNext = Builder.CreateConstGEP1_32(
- SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
+ SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
auto Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
Builder.CreateCondBr(Done, DoneBB, BodyBB);
- DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
- SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());
+ DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
+ SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
// Done.
EmitBlock(DoneBB, /*IsFinished=*/true);
}
-void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
- QualType OriginalType, llvm::Value *DestAddr,
- llvm::Value *SrcAddr, const VarDecl *DestVD,
+/// \brief Emit initialization of arrays of complex types.
+/// \param DestAddr Address of the array.
+/// \param Type Type of the array.
+/// \param Init Initialization expression for each array element.
+static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
+ QualType Type, const Expr *Init) {
+ // Perform element-by-element initialization.
+ QualType ElementTy;
+
+ // Drill down to the base element type on both arrays.
+ auto ArrayTy = Type->getAsArrayTypeUnsafe();
+ auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ DestAddr =
+ CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
+
+ auto DestBegin = DestAddr.getPointer();
+ // Cast from pointer to array type to pointer to single element.
+ auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
+ // The basic structure here is a while-do loop.
+ auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
+ auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
+ auto IsEmpty =
+ CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+ // Enter the loop body, making that address the current address.
+ auto EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+
+ CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
+ DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ DestElementPHI->addIncoming(DestBegin, EntryBB);
+ Address DestElementCurrent =
+ Address(DestElementPHI,
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ // Emit copy.
+ {
+ CodeGenFunction::RunCleanupsScope InitScope(CGF);
+ CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
+ /*IsInitializer=*/false);
+ }
+
+ // Shift the address forward by one element.
+ auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
+ DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ // Check whether we've reached the end.
+ auto Done =
+ CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
+ CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
+ DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
+
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
+
+void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
+ Address SrcAddr, const VarDecl *DestVD,
const VarDecl *SrcVD, const Expr *Copy) {
if (OriginalType->isArrayType()) {
auto *BO = dyn_cast<BinaryOperator>(Copy);
if (BO && BO->getOpcode() == BO_Assign) {
// Perform simple memcpy for simple copying.
- CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
+ EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
} else {
// For arrays with complex element types perform element by element
// copying.
- CGF.EmitOMPAggregateAssign(
+ EmitOMPAggregateAssign(
DestAddr, SrcAddr, OriginalType,
- [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
- llvm::Value *SrcElement) {
+ [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
// Working with a single array element, so we have to remap the
// destination and source variables to the corresponding array
// elements.
- CodeGenFunction::OMPPrivateScope Remap(CGF);
- Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(DestVD, [DestElement]() -> Address {
return DestElement;
});
Remap.addPrivate(
- SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
+ SrcVD, [SrcElement]() -> Address { return SrcElement; });
(void)Remap.Privatize();
- CGF.EmitIgnoredExpr(Copy);
+ EmitIgnoredExpr(Copy);
});
}
} else {
// Remap pseudo source variable to private copy.
- CodeGenFunction::OMPPrivateScope Remap(CGF);
- Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
- Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
+ Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
(void)Remap.Privatize();
// Emit copying of the whole variable.
- CGF.EmitIgnoredExpr(Copy);
+ EmitIgnoredExpr(Copy);
}
}
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return false;
llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
- for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
- auto *C = cast<OMPFirstprivateClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
auto IRef = C->varlist_begin();
auto InitsRef = C->inits().begin();
for (auto IInit : C->private_copies()) {
@@ -131,13 +372,13 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- auto *OriginalAddr = EmitLValue(&DRE).getAddress();
+ Address OriginalAddr = EmitLValue(&DRE).getAddress();
QualType Type = OrigVD->getType();
if (Type->isArrayType()) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
// captured region.
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
auto Emission = EmitAutoVarAlloca(*VD);
auto *Init = VD->getInit();
if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
@@ -147,12 +388,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
} else {
EmitOMPAggregateAssign(
Emission.getAllocatedAddress(), OriginalAddr, Type,
- [this, VDInit, Init](llvm::Value *DestElement,
- llvm::Value *SrcElement) {
+ [this, VDInit, Init](Address DestElement,
+ Address SrcElement) {
// Clean up any temporaries needed by the initialization.
RunCleanupsScope InitScope(*this);
// Emit initialization for single element.
- LocalDeclMap[VDInit] = SrcElement;
+ setAddrOfLocalVar(VDInit, SrcElement);
EmitAnyExprToMem(Init, DestElement,
Init->getType().getQualifiers(),
/*IsInitializer*/ false);
@@ -163,12 +404,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
return Emission.getAllocatedAddress();
});
} else {
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable
// (for proper handling of captured global variables).
- LocalDeclMap[VDInit] = OriginalAddr;
+ setAddrOfLocalVar(VDInit, OriginalAddr);
EmitDecl(*VD);
LocalDeclMap.erase(VDInit);
return GetAddrOfLocalVar(VD);
@@ -188,16 +429,17 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
void CodeGenFunction::EmitOMPPrivateClause(
const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return;
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
- for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
- auto *C = cast<OMPPrivateClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
auto IRef = C->varlist_begin();
for (auto IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -212,14 +454,15 @@ void CodeGenFunction::EmitOMPPrivateClause(
}
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
+ if (!HaveInsertPoint())
+ return false;
// threadprivate_var1 = master_threadprivate_var1;
// operator=(threadprivate_var2, master_threadprivate_var2);
// ...
// __kmpc_barrier(&loc, global_tid);
llvm::DenseSet<const VarDecl *> CopiedVars;
llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
- for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
- auto *C = cast<OMPCopyinClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
@@ -231,7 +474,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
// Get the address of the master variable. If we are emitting code with
// TLS support, the address is passed from the master as a field in the
// captured declaration.
- llvm::Value *MasterAddr;
+ Address MasterAddr = Address::invalid();
if (getLangOpts().OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported()) {
assert(CapturedStmtInfo->lookup(VD) &&
@@ -239,12 +482,15 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
VK_LValue, (*IRef)->getExprLoc());
MasterAddr = EmitLValue(&DRE).getAddress();
+ LocalDeclMap.erase(VD);
} else {
- MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
- : CGM.GetAddrOfGlobal(VD);
+ MasterAddr =
+ Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD),
+ getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
- auto *PrivateAddr = EmitLValue(*IRef).getAddress();
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// At first check if current thread is a master thread. If it is, no
// need to copy data.
@@ -252,15 +498,14 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
CopyEnd = createBasicBlock("copyin.not.master.end");
Builder.CreateCondBr(
Builder.CreateICmpNE(
- Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
- Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
+ Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
+ Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
CopyBegin, CopyEnd);
EmitBlock(CopyBegin);
}
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
- AssignOp);
+ EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
}
++IRef;
++ISrcRef;
@@ -277,11 +522,12 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
+ if (!HaveInsertPoint())
+ return false;
bool HasAtLeastOneLastprivate = false;
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
- for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
+ for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
HasAtLeastOneLastprivate = true;
- auto *C = cast<OMPLastprivateClause>(*I);
auto IRef = C->varlist_begin();
auto IDestRef = C->destination_exprs().begin();
for (auto *IInit : C->private_copies()) {
@@ -290,7 +536,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
+ PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
@@ -304,7 +550,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (IInit) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -322,6 +568,8 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
+ if (!HaveInsertPoint())
+ return;
// Emit the following code:
// if (<IsLastIterCond>) {
// orig_var1 = private_orig_var1;
@@ -359,8 +607,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
{
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
bool FirstLCV = true;
- for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
- auto *C = cast<OMPLastprivateClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
@@ -385,11 +632,14 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// Get the address of the original variable.
- auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
+ Address OriginalAddr = GetAddrOfLocalVar(DestVD);
// Get the address of the private variable.
- auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
- EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
- AssignOp);
+ Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
+ if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
+ PrivateAddr =
+ Address(Builder.CreateLoad(PrivateAddr),
+ getNaturalTypeAlignment(RefTy->getPointeeType()));
+ EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
}
++IRef;
++ISrcRef;
@@ -405,46 +655,174 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
void CodeGenFunction::EmitOMPReductionClauseInit(
const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
- for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
- auto *C = cast<OMPReductionClause>(*I);
+ if (!HaveInsertPoint())
+ return;
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
auto ILHS = C->lhs_exprs().begin();
auto IRHS = C->rhs_exprs().begin();
+ auto IPriv = C->privates().begin();
for (auto IRef : C->varlists()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
- // Store the address of the original variable associated with the LHS
- // implicit variable.
- PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
- DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
- CapturedStmtInfo->lookup(OrigVD) != nullptr,
- IRef->getType(), VK_LValue, IRef->getExprLoc());
- return EmitLValue(&DRE).getAddress();
- });
- // Emit reduction copy.
- bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
- // Emit private VarDecl with reduction init.
- EmitDecl(*PrivateVD);
- return GetAddrOfLocalVar(PrivateVD);
- });
- assert(IsRegistered && "private var already registered as private");
- // Silence the warning about unused variable.
- (void)IsRegistered;
- ++ILHS, ++IRHS;
+ auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
+ if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
+ auto *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ auto *DE = cast<DeclRefExpr>(Base);
+ auto *OrigVD = cast<VarDecl>(DE->getDecl());
+ auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
+ auto OASELValueUB =
+ EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
+ auto OriginalBaseLValue = EmitLValue(DE);
+ auto BaseLValue = OriginalBaseLValue;
+ auto *Zero = Builder.getInt64(/*C=*/0);
+ llvm::SmallVector<llvm::Value *, 4> Indexes;
+ Indexes.push_back(Zero);
+ auto *ItemTy =
+ OASELValueLB.getPointer()->getType()->getPointerElementType();
+ auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
+ while (Ty != ItemTy) {
+ Indexes.push_back(Zero);
+ Ty = Ty->getPointerElementType();
+ }
+ BaseLValue = MakeAddrLValue(
+ Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
+ OASELValueLB.getAlignment()),
+ OASELValueLB.getType(), OASELValueLB.getAlignmentSource());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
+ return OASELValueLB.getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, PrivateVD, BaseLValue, OASELValueLB, OASELValueUB,
+ OriginalBaseLValue]() -> Address {
+ // Emit VarDecl with copy init for arrays.
+ // Get the address of the original variable captured in current
+ // captured region.
+ auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
+ OASELValueLB.getPointer());
+ Size = Builder.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(
+ *this, cast<OpaqueValueExpr>(
+ getContext()
+ .getAsVariableArrayType(PrivateVD->getType())
+ ->getSizeExpr()),
+ RValue::get(Size));
+ EmitVariablyModifiedType(PrivateVD->getType());
+ auto Emission = EmitAutoVarAlloca(*PrivateVD);
+ auto Addr = Emission.getAllocatedAddress();
+ auto *Init = PrivateVD->getInit();
+ EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(), Init);
+ EmitAutoVarCleanups(Emission);
+ // Emit private VarDecl with reduction init.
+ auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
+ OASELValueLB.getPointer());
+ auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
+ Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr, OriginalBaseLValue.getPointer()->getType());
+ return Address(Ptr, OriginalBaseLValue.getAlignment());
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
+ auto *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ auto *DE = cast<DeclRefExpr>(Base);
+ auto *OrigVD = cast<VarDecl>(DE->getDecl());
+ auto ASELValue = EmitLValue(ASE);
+ auto OriginalBaseLValue = EmitLValue(DE);
+ auto BaseLValue = OriginalBaseLValue;
+ auto *Zero = Builder.getInt64(/*C=*/0);
+ llvm::SmallVector<llvm::Value *, 4> Indexes;
+ Indexes.push_back(Zero);
+ auto *ItemTy =
+ ASELValue.getPointer()->getType()->getPointerElementType();
+ auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
+ while (Ty != ItemTy) {
+ Indexes.push_back(Zero);
+ Ty = Ty->getPointerElementType();
+ }
+ BaseLValue = MakeAddrLValue(
+ Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
+ ASELValue.getAlignment()),
+ ASELValue.getType(), ASELValue.getAlignmentSource());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
+ return ASELValue.getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, PrivateVD, BaseLValue, ASELValue,
+ OriginalBaseLValue]() -> Address {
+ // Emit private VarDecl with reduction init.
+ EmitDecl(*PrivateVD);
+ auto Addr = GetAddrOfLocalVar(PrivateVD);
+ auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
+ ASELValue.getPointer());
+ auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
+ Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr, OriginalBaseLValue.getPointer()->getType());
+ return Address(Ptr, OriginalBaseLValue.getAlignment());
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ } else {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+ // Store the address of the original variable associated with the LHS
+ // implicit variable.
+ PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
+ DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ IRef->getType(), VK_LValue, IRef->getExprLoc());
+ return EmitLValue(&DRE).getAddress();
+ });
+ // Emit reduction copy.
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
+ // Emit private VarDecl with reduction init.
+ EmitDecl(*PrivateVD);
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ assert(IsRegistered && "private var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
+ return GetAddrOfLocalVar(PrivateVD);
+ });
+ }
+ ++ILHS, ++IRHS, ++IPriv;
}
}
}
void CodeGenFunction::EmitOMPReductionClauseFinal(
const OMPExecutableDirective &D) {
+ if (!HaveInsertPoint())
+ return;
+ llvm::SmallVector<const Expr *, 8> Privates;
llvm::SmallVector<const Expr *, 8> LHSExprs;
llvm::SmallVector<const Expr *, 8> RHSExprs;
llvm::SmallVector<const Expr *, 8> ReductionOps;
bool HasAtLeastOneReduction = false;
- for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
HasAtLeastOneReduction = true;
- auto *C = cast<OMPReductionClause>(*I);
+ Privates.append(C->privates().begin(), C->privates().end());
LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
@@ -453,8 +831,8 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
// Emit nowait reduction if nowait clause is present or directive is a
// parallel directive (it always has implicit barrier).
CGM.getOpenMPRuntime().emitReduction(
- *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
- D.getSingleClause(OMPC_nowait) ||
+ *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
+ D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
D.getDirectiveKind() == OMPD_simd,
D.getDirectiveKind() == OMPD_simd);
@@ -466,29 +844,32 @@ static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) {
auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
- auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
- if (auto C = S.getSingleClause(OMPC_num_threads)) {
+ if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
/*IgnoreResultAssign*/ true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getLocStart());
}
- if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
+ if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- auto *ProcBindClause = cast<OMPProcBindClause>(C);
CGF.CGM.getOpenMPRuntime().emitProcBindClause(
CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
}
const Expr *IfCond = nullptr;
- if (auto C = S.getSingleClause(OMPC_if)) {
- IfCond = cast<OMPIfClause>(C)->getCondition();
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_parallel) {
+ IfCond = C->getCondition();
+ break;
+ }
}
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
- CapturedStruct, IfCond);
+ CapturedVars, IfCond);
}
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
@@ -503,17 +884,15 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
// initialization of firstprivate variables or propagation of the master
// thread's values of threadprivate variables to the local instances of those
// variables in all other implicit threads.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_unknown);
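+ // A plain barrier (no cancellation checks) is enough here: it only
+ // synchronizes the initialization of firstprivate/threadprivate copies.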
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
}
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S);
- // Emit implicit barrier at the end of the 'parallel' directive.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_unknown);
};
emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}
@@ -526,8 +905,7 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
EmitIgnoredExpr(I);
}
// Update the linear variables.
- for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
- auto *C = cast<OMPLinearClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
for (auto U : C->updates()) {
EmitIgnoredExpr(U);
}
@@ -595,9 +973,10 @@ void CodeGenFunction::EmitOMPInnerLoop(
}
void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
+ if (!HaveInsertPoint())
+ return;
// Emit inits for the linear variables.
- for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
- auto *C = cast<OMPLinearClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
for (auto Init : C->inits()) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
auto *OrigVD = cast<VarDecl>(
@@ -608,8 +987,7 @@ void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
VD->getInit()->getExprLoc());
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
EmitExprAsInit(&DRE, VD,
- MakeAddrLValue(Emission.getAllocatedAddress(),
- VD->getType(), Emission.Alignment),
+ MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
/*capturedByInit=*/false);
EmitAutoVarCleanups(Emission);
}
@@ -626,19 +1004,20 @@ void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
static void emitLinearClauseFinal(CodeGenFunction &CGF,
const OMPLoopDirective &D) {
+ if (!CGF.HaveInsertPoint())
+ return;
// Emit the final values of the linear variables.
- for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
- auto *C = cast<OMPLinearClause>(*I);
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
auto IC = C->varlist_begin();
for (auto F : C->finals()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
+ Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(CGF);
VarScope.addPrivate(OrigVD,
- [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
+ [OrigAddr]() -> Address { return OrigAddr; });
(void)VarScope.Privatize();
CGF.EmitIgnoredExpr(F);
++IC;
@@ -648,8 +1027,9 @@ static void emitLinearClauseFinal(CodeGenFunction &CGF,
static void emitAlignedClause(CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
- for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
- auto *Clause = cast<OMPAlignedClause>(*I);
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
unsigned ClauseAlignment = 0;
if (auto AlignmentExpr = Clause->getAlignment()) {
auto AlignmentCI =
@@ -680,24 +1060,36 @@ static void emitAlignedClause(CodeGenFunction &CGF,
static void emitPrivateLoopCounters(CodeGenFunction &CGF,
CodeGenFunction::OMPPrivateScope &LoopScope,
- ArrayRef<Expr *> Counters) {
+ ArrayRef<Expr *> Counters,
+ ArrayRef<Expr *> PrivateCounters) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ auto I = PrivateCounters.begin();
for (auto *E : Counters) {
- auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ Address Addr = Address::invalid();
+ (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
// Emit var without initialization.
- auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
+ auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
CGF.EmitAutoVarCleanups(VarEmission);
- return VarEmission.getAllocatedAddress();
+ Addr = VarEmission.getAllocatedAddress();
+ return Addr;
});
+ (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
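+ // Map the original loop counter to the same storage as its private copy,
+ // so the loop emission updates the private variable.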
+ ++I;
}
}
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
+ if (!CGF.HaveInsertPoint())
+ return;
{
CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
- emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
+ emitPrivateLoopCounters(CGF, PreCondScope, S.counters(),
+ S.private_counters());
(void)PreCondScope.Privatize();
// Get initial values of real counters.
for (auto I : S.inits()) {
@@ -711,31 +1103,45 @@ static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
- for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
- auto *C = cast<OMPLinearClause>(*I);
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
+ auto CurPrivate = C->privates().begin();
for (auto *E : C->varlists()) {
- auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * {
- // Emit var without initialization.
- auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
- CGF.EmitAutoVarCleanups(VarEmission);
- return VarEmission.getAllocatedAddress();
+ auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ auto *PrivateVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
+ bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
+ // Emit private VarDecl with copy init.
+ CGF.EmitVarDecl(*PrivateVD);
+ return CGF.GetAddrOfLocalVar(PrivateVD);
});
assert(IsRegistered && "linear var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
+ ++CurPrivate;
}
}
}
-static void emitSafelenClause(CodeGenFunction &CGF,
- const OMPExecutableDirective &D) {
- if (auto *C =
- cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
+static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
+ RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
+ // In the presence of a finite 'safelen', it may be unsafe to mark all
+ // the memory instructions parallel, because loop-carried
+ // dependences of 'safelen' iterations are possible.
+ CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
+ } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
- CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
+ CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
// In the presence of a finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
// dependences of 'safelen' iterations are possible.
@@ -746,22 +1152,24 @@ static void emitSafelenClause(CodeGenFunction &CGF,
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
// Walk clauses and process safelen/simdlen.
LoopStack.setParallel();
- LoopStack.setVectorizerEnable(true);
- emitSafelenClause(*this, D);
+ LoopStack.setVectorizeEnable(true);
+ emitSimdlenSafelenClause(*this, D);
}
void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
+ if (!HaveInsertPoint())
+ return;
auto IC = D.counters().begin();
for (auto F : D.finals()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
- if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
+ if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- auto *OrigAddr = EmitLValue(&DRE).getAddress();
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD,
- [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
+ [OrigAddr]() -> Address { return OrigAddr; });
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
@@ -817,7 +1225,8 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
bool HasLastprivateClause;
{
OMPPrivateScope LoopScope(CGF);
- emitPrivateLoopCounters(CGF, LoopScope, S.counters());
+ emitPrivateLoopCounters(CGF, LoopScope, S.counters(),
+ S.private_counters());
emitPrivateLinearVars(CGF, S, LoopScope);
CGF.EmitOMPPrivateClause(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
@@ -849,9 +1258,9 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope,
- bool Ordered, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST,
- llvm::Value *IL, llvm::Value *Chunk) {
+ bool Ordered, Address LB,
+ Address UB, Address ST,
+ Address IL, llvm::Value *Chunk) {
auto &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
@@ -915,11 +1324,14 @@ void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
- RT.emitForInit(
- *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
- (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
- : UB),
- ST, Chunk);
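+ // Dynamic and ordered schedules go through the dispatch runtime entry,
+ // which only needs the global upper bound; static schedules use the
+ // static init entry with the per-thread bounds.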
+ if (DynamicOrOrdered) {
+ llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
+ RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, UBVal, Chunk);
+ } else {
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
+ }
auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
@@ -1019,8 +1431,7 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
// Detect the loop schedule kind and chunk.
auto ScheduleKind = OMPC_SCHEDULE_unknown;
llvm::Value *Chunk = nullptr;
- if (auto *C =
- cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
+ if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
ScheduleKind = C->getScheduleKind();
if (const auto *Ch = C->getChunkSize()) {
if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
@@ -1029,8 +1440,8 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitVarDecl(*ImpVar);
CGF.EmitStoreThroughLValue(
CGF.EmitAnyExpr(Ch),
- CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
- ImpVar->getType()));
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
+ ImpVar->getType()));
} else {
Ch = ImpRef;
}
@@ -1038,7 +1449,8 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
if (!C->getHelperChunkSize() || !OuterRegion) {
Chunk = CGF.EmitScalarExpr(Ch);
Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
- S.getIterationVariable()->getType());
+ S.getIterationVariable()->getType(),
+ S.getLocStart());
}
}
}
@@ -1100,13 +1512,15 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
if (EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
- OMPD_unknown);
+ CGM.getOpenMPRuntime().emitBarrierCall(
+ *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
}
EmitOMPPrivateClause(S, LoopScope);
HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
EmitOMPReductionClauseInit(S, LoopScope);
- emitPrivateLoopCounters(*this, LoopScope, S.counters());
+ emitPrivateLoopCounters(*this, LoopScope, S.counters(),
+ S.private_counters());
emitPrivateLinearVars(*this, S, LoopScope);
(void)LoopScope.Privatize();
@@ -1119,7 +1533,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
ScheduleKind = ScheduleInfo.second;
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
- const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
+ const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
if (RT.isStaticNonchunked(ScheduleKind,
/* Chunked */ Chunk != nullptr) &&
!Ordered) {
@@ -1131,9 +1545,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
// chunks that are approximately equal in size, and at most one chunk is
// distributed to each thread. Note that the size of the chunks is
// unspecified in this case.
- RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
- Ordered, IL.getAddress(), LB.getAddress(),
- UB.getAddress(), ST.getAddress());
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered,
+ IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress());
auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
@@ -1181,10 +1596,11 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
};
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
+ S.hasCancel());
// Emit an implicit barrier at the end.
- if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
}
}
@@ -1198,7 +1614,7 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
// Emit an implicit barrier at the end.
- if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
}
}
@@ -1206,7 +1622,7 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
const Twine &Name,
llvm::Value *Init = nullptr) {
- auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
+ auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
if (Init)
CGF.EmitScalarInit(Init, LVal);
return LVal;
@@ -1276,8 +1692,9 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_unknown);
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
}
CGF.EmitOMPPrivateClause(S, LoopScope);
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
@@ -1285,7 +1702,7 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
(void)LoopScope.Privatize();
// Emit static non-chunked loop.
- CGF.CGM.getOpenMPRuntime().emitForInit(
+ CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
/*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
LB.getAddress(), UB.getAddress(), ST.getAddress());
@@ -1310,11 +1727,17 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.EmitLoadOfScalar(IL, S.getLocStart())));
};
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
+ bool HasCancel = false;
+ if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
+ HasCancel = OSD->hasCancel();
+ else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
+ HasCancel = OPSD->hasCancel();
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
+ HasCancel);
// Emit barrier for lastprivates only if 'sections' directive has 'nowait'
// clause. Otherwise the barrier will be generated by the codegen for the
// directive.
- if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
+ if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
@@ -1327,11 +1750,11 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
bool HasFirstprivates;
// No need to generate reductions for sections with a single section region;
// we can use the original shared variables for all operations.
- bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
+ bool HasReductions = S.hasClausesOfKind<OMPReductionClause>();
// No need to generate lastprivates for sections with a single section
// region; we can use the original shared variable for all calculations, with
// a barrier at the end of the sections.
- bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
+ bool HasLastprivates = S.hasClausesOfKind<OMPLastprivateClause>();
auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
CodeGenFunction::OMPPrivateScope SingleScope(CGF);
HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
@@ -1347,10 +1770,12 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// 'sections' directive has 'nowait' clause. Otherwise the barrier will be
// generated by the codegen for the directive.
if ((HasFirstprivates || HasLastprivates || HasReductions) &&
- S.getSingleClause(OMPC_nowait)) {
+ S.getSingleClause<OMPNowaitClause>()) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown);
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
}
return OMPD_single;
}
@@ -1359,7 +1784,7 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
OpenMPDirectiveKind EmittedAs = EmitSections(S);
// Emit an implicit barrier at the end.
- if (!S.getSingleClause(OMPC_nowait)) {
+ if (!S.getSingleClause<OMPNowaitClause>()) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
}
}
@@ -1368,9 +1793,9 @@ void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
};
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
+ S.hasCancel());
}
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
@@ -1383,8 +1808,7 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
// construct.
// Build a list of copyprivate variables along with helper expressions
// (<source>, <destination>, <destination>=<source> expressions)
- for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
- auto *C = cast<OMPCopyprivateClause>(*I);
+ for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
DestExprs.append(C->destination_exprs().begin(),
C->destination_exprs().end());
@@ -1402,18 +1826,17 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
(void)SingleScope.Privatize();
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
CopyprivateVars, DestExprs, SrcExprs,
AssignmentOps);
// Emit an implicit barrier at the end (to avoid data race on firstprivate
// init or if no 'nowait' clause was specified and no 'copyprivate' clause).
- if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
+ if ((!S.getSingleClause<OMPNowaitClause>() || HasFirstprivates) &&
CopyprivateVars.empty()) {
CGM.getOpenMPRuntime().emitBarrierCall(
*this, S.getLocStart(),
- S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
+ S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
}
}
@@ -1421,7 +1844,6 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}
@@ -1430,10 +1852,13 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
};
- CGM.getOpenMPRuntime().emitCriticalRegion(
- *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
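+ // Forward the 'hint' clause expression, if any, so the runtime can pick
+ // a suitable lock implementation for the critical region.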
+ Expr *Hint = nullptr;
+ if (auto *HintClause = S.getSingleClause<OMPHintClause>())
+ Hint = HintClause->getHint();
+ CGM.getOpenMPRuntime().emitCriticalRegion(*this,
+ S.getDirectiveName().getAsString(),
+ CodeGen, S.getLocStart(), Hint);
}
void CodeGenFunction::EmitOMPParallelForDirective(
@@ -1444,11 +1869,6 @@ void CodeGenFunction::EmitOMPParallelForDirective(
(void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitOMPWorksharingLoop(S);
- // Emit implicit barrier at the end of parallel region, but this barrier
- // is at the end of 'for' directive, so emit it as the implicit barrier for
- // this 'for' directive.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
}
@@ -1461,11 +1881,6 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
(void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitOMPWorksharingLoop(S);
- // Emit implicit barrier at the end of parallel region, but this barrier
- // is at the end of 'for' directive, so emit it as the implicit barrier for
- // this 'for' directive.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
}
@@ -1477,9 +1892,6 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
(void)CGF.EmitSections(S);
- // Emit implicit barrier at the end of parallel region.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
- OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}
@@ -1497,8 +1909,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Get list of private variables.
llvm::SmallVector<const Expr *, 8> PrivateVars;
llvm::SmallVector<const Expr *, 8> PrivateCopies;
- for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
- auto *C = cast<OMPPrivateClause>(*I);
+ for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
auto IRef = C->varlist_begin();
for (auto *IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
@@ -1514,8 +1925,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
llvm::SmallVector<const Expr *, 8> FirstprivateVars;
llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
llvm::SmallVector<const Expr *, 8> FirstprivateInits;
- for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
- auto *C = cast<OMPFirstprivateClause>(*I);
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
auto IRef = C->varlist_begin();
auto IElemInitRef = C->inits().begin();
for (auto *IInit : C->private_copies()) {
@@ -1531,8 +1941,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Build list of dependences.
llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
Dependences;
- for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
- auto *C = cast<OMPDependClause>(*I);
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
for (auto *IRef : C->varlists()) {
Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
}
@@ -1543,35 +1952,33 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
OMPPrivateScope Scope(CGF);
if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
- auto *CopyFn = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
- CGF.PointerAlignInBytes);
- auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
- CGF.PointerAlignInBytes);
+ auto *CopyFn = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
+ auto *PrivatesPtr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
// Map privates.
- llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
+ llvm::SmallVector<std::pair<const VarDecl *, Address>, 16>
PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
CallArgs.push_back(PrivatesPtr);
for (auto *E : PrivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivatePtr =
+ Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
- CallArgs.push_back(PrivatePtr);
+ CallArgs.push_back(PrivatePtr.getPointer());
}
for (auto *E : FirstprivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivatePtr =
+ Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
- CallArgs.push_back(PrivatePtr);
+ CallArgs.push_back(PrivatePtr.getPointer());
}
CGF.EmitRuntimeCall(CopyFn, CallArgs);
for (auto &&Pair : PrivatePtrs) {
- auto *Replacement =
- CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
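+ // The runtime copy function has filled in each pointer temporary; reload
+ // it and rebuild an Address with the declared alignment of the variable.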
+ Address Replacement(CGF.Builder.CreateLoad(Pair.second),
+ CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
}
}
@@ -1584,13 +1991,13 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, OMPD_task, CodeGen);
// Check if we should emit tied or untied task.
- bool Tied = !S.getSingleClause(OMPC_untied);
+ bool Tied = !S.getSingleClause<OMPUntiedClause>();
// Check if the task is final.
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
- if (auto *Clause = S.getSingleClause(OMPC_final)) {
+ if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
- auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
+ auto *Cond = Clause->getCondition();
bool CondConstant;
if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
Final.setInt(CondConstant);
@@ -1602,8 +2009,12 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
}
auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
- if (auto C = S.getSingleClause(OMPC_if)) {
- IfCond = cast<OMPIfClause>(C)->getCondition();
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_task) {
+ IfCond = C->getCondition();
+ break;
+ }
}
CGM.getOpenMPRuntime().emitTaskCall(
*this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
@@ -1629,15 +2040,13 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
- if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
- auto FlushClause = cast<OMPFlushClause>(C);
+ if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
return llvm::makeArrayRef(FlushClause->varlist_begin(),
FlushClause->varlist_end());
}
@@ -1645,37 +2054,65 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
}(), S.getLocStart());
}
+void CodeGenFunction::EmitOMPDistributeDirective(
+ const OMPDistributeDirective &S) {
+ llvm_unreachable("CodeGen for 'omp distribute' is not supported yet.");
+}
+
+static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
+ const CapturedStmt *S) {
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
+ CGF.CapturedStmtInfo = &CapStmtInfo;
+ auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
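+ // Mark the outlined body noinline so it stays behind a call and is not
+ // vectorized together with an enclosing simd loop.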
+ Fn->addFnAttr(llvm::Attribute::NoInline);
+ return Fn;
+}
+
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
+ if (!S.getAssociatedStmt())
+ return;
LexicalScope Scope(*this, S.getSourceRange());
- auto &&CodeGen = [&S](CodeGenFunction &CGF) {
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- CGF.EnsureInsertPoint();
+ auto *C = S.getSingleClause<OMPSIMDClause>();
+ auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF) {
+ if (C) {
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
+ auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
+ CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars);
+ } else {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ }
};
- CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
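+ // The trailing argument tells the runtime whether this is a 'threads'
+ // ordered region; with a 'simd' clause no runtime ordered region is used.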
+ CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
}
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
- QualType SrcType, QualType DestType) {
+ QualType SrcType, QualType DestType,
+ SourceLocation Loc) {
assert(CGF.hasScalarEvaluationKind(DestType) &&
"DestType must have scalar evaluation kind.");
assert(!Val.isAggregate() && "Must be a scalar or complex.");
return Val.isScalar()
- ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
+ ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
+ Loc)
: CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
- DestType);
+ DestType, Loc);
}
static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
- QualType DestType) {
+ QualType DestType, SourceLocation Loc) {
assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
"DestType must have complex evaluation kind.");
CodeGenFunction::ComplexPairTy ComplexVal;
if (Val.isScalar()) {
// Convert the input element to the element type of the complex.
auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
- auto ScalarVal =
- CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
+ auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
+ DestElementType, Loc);
ComplexVal = CodeGenFunction::ComplexPairTy(
ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
} else {
@@ -1683,9 +2120,9 @@ convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
ComplexVal.first = CGF.EmitScalarConversion(
- Val.getComplexVal().first, SrcElementType, DestElementType);
+ Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
ComplexVal.second = CGF.EmitScalarConversion(
- Val.getComplexVal().second, SrcElementType, DestElementType);
+ Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
}
return ComplexVal;
}
@@ -1702,16 +2139,16 @@ static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
}
static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
- QualType RValTy) {
+ QualType RValTy, SourceLocation Loc) {
switch (CGF.getEvaluationKind(LVal.getType())) {
case TEK_Scalar:
- CGF.EmitStoreThroughLValue(
- RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
- LVal);
+ CGF.EmitStoreThroughLValue(RValue::get(convertToScalarValue(
+ CGF, RVal, RValTy, LVal.getType(), Loc)),
+ LVal);
break;
case TEK_Complex:
CGF.EmitStoreOfComplex(
- convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
+ convertToComplexValue(CGF, RVal, RValTy, LVal.getType(), Loc), LVal,
/*isInit=*/false);
break;
case TEK_Aggregate:
@@ -1739,7 +2176,7 @@ static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
- emitSimpleStore(CGF,VLValue, Res, X->getType().getNonReferenceType());
+ emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType(), Loc);
}
static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
@@ -1769,8 +2206,8 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
!Update.getScalarVal()->getType()->isIntegerTy() ||
!X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
(Update.getScalarVal()->getType() !=
- X.getAddress()->getType()->getPointerElementType())) ||
- !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
+ X.getAddress().getElementType())) ||
+ !X.getAddress().getElementType()->isIntegerTy() ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
@@ -1841,10 +2278,10 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
auto *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress()->getType()->getPointerElementType(),
+ IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
}
- auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
+ auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
@@ -1910,12 +2347,14 @@ static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
}
static RValue convertToType(CodeGenFunction &CGF, RValue Value,
- QualType SourceType, QualType ResType) {
+ QualType SourceType, QualType ResType,
+ SourceLocation Loc) {
switch (CGF.getEvaluationKind(ResType)) {
case TEK_Scalar:
- return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
+ return RValue::get(
+ convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
case TEK_Complex: {
- auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
+ auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
return RValue::getComplex(Res.first, Res.second);
}
case TEK_Aggregate:
@@ -1980,7 +2419,7 @@ static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
// 'x' is simply rewritten with some 'expr'.
NewVValType = X->getType().getNonReferenceType();
ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
- X->getType().getNonReferenceType());
+ X->getType().getNonReferenceType(), Loc);
auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
NewVVal = XRValue;
return ExprRValue;
@@ -1995,7 +2434,7 @@ static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
}
}
// Emit post-update store to 'v' of old/new 'x' value.
- emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
+ emitSimpleStore(CGF, VLValue, NewVVal, NewVValType, Loc);
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
@@ -2032,6 +2471,7 @@ static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_lastprivate:
case OMPC_reduction:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_default:
case OMPC_seq_cst:
@@ -2049,12 +2489,23 @@ static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_threadprivate:
case OMPC_depend:
case OMPC_mergeable:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
- bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
+ bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
OpenMPClauseKind Kind = OMPC_unknown;
for (auto *C : S.clauses()) {
// Find first clause (skip seq_cst clause, if it is first).
@@ -2079,7 +2530,8 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
LexicalScope Scope(*this, S.getSourceRange());
- auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
+ auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF) {
+ CGF.EmitStopPoint(CS);
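+ // Emit a stop point so the atomic expansion is attributed to the
+ // directive's source location in debug info.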
EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
S.getV(), S.getExpr(), S.getUpdateExpr(),
S.isXLHSInRHSPart(), S.getLocStart());
@@ -2087,8 +2539,37 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}
-void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
- llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
+void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
+ LexicalScope Scope(*this, S.getSourceRange());
+ const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt());
+
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ GenerateOpenMPCapturedVars(CS, CapturedVars);
+
+ // Emit target region as a standalone region.
+ auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
+ CGF.EmitStmt(CS.getCapturedStmt());
+ };
+
+ // Obtain the target region outlined function.
+ llvm::Value *Fn =
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, CodeGen);
+
+ // Check if we have any if clause associated with the directive.
+ const Expr *IfCond = nullptr;
+
+ if (auto *C = S.getSingleClause<OMPIfClause>()) {
+ IfCond = C->getCondition();
+ }
+
+ // Check if we have any device clause associated with the directive.
+ const Expr *Device = nullptr;
+ if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
+ Device = C->getDevice();
+ }
+
+ CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, IfCond, Device,
+ CapturedVars);
}
void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
@@ -2102,7 +2583,15 @@ void CodeGenFunction::EmitOMPCancellationPointDirective(
}
void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
- CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(),
+ const Expr *IfCond = nullptr;
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_cancel) {
+ IfCond = C->getCondition();
+ break;
+ }
+ }
+ CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
S.getCancelRegion());
}
@@ -2110,8 +2599,35 @@ CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
if (Kind == OMPD_parallel || Kind == OMPD_task)
return ReturnBlock;
- else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
- return BreakContinueStack.empty() ? JumpDest()
- : BreakContinueStack.back().BreakBlock;
- return JumpDest();
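+ // For worksharing constructs the cancel destination is the break block
+ // of the loop that is currently being emitted.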
+ assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
+ Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
+ return BreakContinueStack.back().BreakBlock;
+}
+
+// Generate the instructions for '#pragma omp target data' directive.
+void CodeGenFunction::EmitOMPTargetDataDirective(
+ const OMPTargetDataDirective &S) {
+ // Emit the code inside the construct for now.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_data,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
}
+
+void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
+ // Emit the code inside the construct for now.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_taskloop,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
+}
+
+void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
+ const OMPTaskLoopSimdDirective &S) {
+ // Emit the code inside the construct for now.
+ auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_taskloop_simd,
+ [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
+}
+
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index e3df5a4c05e3..4fb76710d2ad 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -108,7 +108,6 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
llvm::raw_svector_ostream Out(OutName);
cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
.mangleCXXVTT(RD, Out);
- Out.flush();
StringRef Name = OutName.str();
// This will also defer the definition of the VTT.
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 1b7f1d76042d..c8f3add67762 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -44,7 +44,6 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
Thunk.This, Out);
else
getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
- Out.flush();
llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
@@ -103,8 +102,11 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
CGF.EmitBlock(AdjustNotNull);
}
- ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue,
- Thunk.Return);
+ auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
+ auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
+ Address(ReturnValue, ClassAlign),
+ Thunk.Return);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -172,27 +174,29 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- llvm::Value *ThisPtr = &*AI;
- llvm::BasicBlock *EntryBB = Fn->begin();
- llvm::Instruction *ThisStore =
+ Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
+ llvm::BasicBlock *EntryBB = &Fn->front();
+ llvm::BasicBlock::iterator ThisStore =
std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
- return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr;
- });
- assert(ThisStore && "Store of this should be in entry block?");
+ return isa<llvm::StoreInst>(I) &&
+ I.getOperand(0) == ThisPtr.getPointer();
+ });
+ assert(ThisStore != EntryBB->end() &&
+ "Store of this should be in entry block?");
// Adjust "this", if necessary.
- Builder.SetInsertPoint(ThisStore);
+ Builder.SetInsertPoint(&*ThisStore);
llvm::Value *AdjustedThisPtr =
CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
ThisStore->setOperand(0, AdjustedThisPtr);
if (!Thunk.Return.isEmpty()) {
// Fix up the returned value, if necessary.
- for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
- llvm::Instruction *T = I->getTerminator();
+ for (llvm::BasicBlock &BB : *Fn) {
+ llvm::Instruction *T = BB.getTerminator();
if (isa<llvm::ReturnInst>(T)) {
RValue RV = RValue::get(T->getOperand(0));
T->eraseFromParent();
- Builder.SetInsertPoint(&*I);
+ Builder.SetInsertPoint(&BB);
RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
Builder.CreateRet(RV.getScalarVal());
break;
@@ -236,6 +240,17 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
// Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
CXXThisValue = CXXABIThisValue;
+ CurCodeDecl = MD;
+ CurFuncDecl = MD;
+}
+
+void CodeGenFunction::FinishThunk() {
+ // Clear these to restore the invariants expected by
+ // StartFunction/FinishFunction.
+ CurCodeDecl = nullptr;
+ CurFuncDecl = nullptr;
+
+ FinishFunction();
}
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
@@ -245,9 +260,10 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
// Adjust the 'this' pointer if necessary
- llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
- *this, LoadCXXThis(), Thunk->This)
- : LoadCXXThis();
+ llvm::Value *AdjustedThisPtr =
+ Thunk ? CGM.getCXXABI().performThisAdjustment(
+ *this, LoadCXXThisAddress(), Thunk->This)
+ : LoadCXXThis();
if (CurFnInfo->usesInAlloca()) {
// We don't handle return adjusting thunks, because they require us to call
@@ -312,6 +328,8 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
// Consider return adjustment if we have ThunkInfo.
if (Thunk && !Thunk->Return.isEmpty())
RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
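+ // No return adjustment is needed, so the call may be marked as a tail
+ // call, allowing the thunk to compile down to a simple jump.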
+ else if (llvm::CallInst* Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
+ Call->setTailCallKind(llvm::CallInst::TCK_Tail);
// Emit return.
if (!ResultType->isVoidType() && Slot.isNull())
@@ -320,7 +338,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
// Disable the final ARC autorelease.
AutoreleaseResult = false;
- FinishFunction();
+ FinishThunk();
}
void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
@@ -345,9 +363,8 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
Args[ThisArgNo] = AdjustedThisPtr;
} else {
assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
- llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
- llvm::Type *ThisType =
- cast<llvm::PointerType>(ThisAddr->getType())->getElementType();
+ Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
+ llvm::Type *ThisType = ThisAddr.getElementType();
if (ThisType != AdjustedThisPtr->getType())
AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
Builder.CreateStore(AdjustedThisPtr, ThisAddr);
@@ -502,8 +519,8 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
if (!ThunkInfoVector)
return;
- for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
- emitThunk(GD, (*ThunkInfoVector)[I], /*ForVTable=*/false);
+ for (const ThunkInfo& Thunk : *ThunkInfoVector)
+ emitThunk(GD, Thunk, /*ForVTable=*/false);
}
llvm::Constant *CodeGenVTables::CreateVTableInitializer(
@@ -565,6 +582,24 @@ llvm::Constant *CodeGenVTables::CreateVTableInitializer(
break;
}
+ if (CGM.getLangOpts().CUDA) {
+ // Emit NULL for methods we can't codegen on this
+ // side. Otherwise we'd end up with a vtable with unresolved
+ // references.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ // OK on device side: functions w/ __device__ attribute
+ // OK on host side: anything except __device__-only functions.
+ bool CanEmitMethod = CGM.getLangOpts().CUDAIsDevice
+ ? MD->hasAttr<CUDADeviceAttr>()
+ : (MD->hasAttr<CUDAHostAttr>() ||
+ !MD->hasAttr<CUDADeviceAttr>());
+ if (!CanEmitMethod) {
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+ }
+ // Method is acceptable, continue processing as usual.
+ }
+
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
@@ -642,7 +677,6 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
.mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
Base.getBase(), Out);
- Out.flush();
StringRef Name = OutName.str();
llvm::ArrayType *ArrayType =
@@ -679,6 +713,12 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
return VTable;
}
+static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
+ const CXXRecordDecl *RD) {
+ return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCXXABI().canSpeculativelyEmitVTable(RD);
+}
+
/// Compute the required linkage of the v-table for the given class.
///
/// Note that we only call this at the end of the translation unit.
@@ -700,7 +740,12 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (keyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
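+ // With optimizations on, we may be asked about a vtable whose key
+ // function has no definition in this TU; emit it available_externally
+ // so the optimizer can still use it (e.g. for devirtualization).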
- assert(def && "Should not have been asked to emit this");
+ assert((def || CodeGenOpts.OptimizationLevel > 0) &&
+ "Shouldn't query vtable linkage without key function or "
+ "optimizations");
+ if (!def && CodeGenOpts.OptimizationLevel > 0)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
if (keyFunction->isInlined())
return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
@@ -742,16 +787,18 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
}
switch (RD->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- case TSK_ImplicitInstantiation:
- return DiscardableODRLinkage;
-
- case TSK_ExplicitInstantiationDeclaration:
- return llvm::GlobalVariable::ExternalLinkage;
-
- case TSK_ExplicitInstantiationDefinition:
- return NonDiscardableODRLinkage;
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ return DiscardableODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ return shouldEmitAvailableExternallyVTable(*this, RD)
+ ? llvm::GlobalVariable::AvailableExternallyLinkage
+ : llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return NonDiscardableODRLinkage;
}
llvm_unreachable("Invalid TemplateSpecializationKind!");
@@ -819,7 +866,12 @@ bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
/// we define that v-table?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
const CXXRecordDecl *RD) {
- return !CGM.getVTables().isVTableExternal(RD);
+ // If the vtable is internal then it has to be emitted.
+ if (!CGM.getVTables().isVTableExternal(RD))
+ return true;
+
+ // If it's external then maybe we will need it as available_externally.
+ return shouldEmitAvailableExternallyVTable(CGM, RD);
}
/// Given that at some point we emitted a reference to one or more
@@ -832,13 +884,9 @@ void CodeGenModule::EmitDeferredVTables() {
size_t savedSize = DeferredVTables.size();
#endif
- typedef std::vector<const CXXRecordDecl *>::const_iterator const_iterator;
- for (const_iterator i = DeferredVTables.begin(),
- e = DeferredVTables.end(); i != e; ++i) {
- const CXXRecordDecl *RD = *i;
+ for (const CXXRecordDecl *RD : DeferredVTables)
if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
VTables.GenerateClassData(RD);
- }
assert(savedSize == DeferredVTables.size() &&
"deferred extra v-tables during v-table emission?");
@@ -846,8 +894,12 @@ void CodeGenModule::EmitDeferredVTables() {
}
bool CodeGenModule::IsCFIBlacklistedRecord(const CXXRecordDecl *RD) {
- // FIXME: Make this user configurable.
- return RD->isInStdNamespace();
+ if (RD->hasAttr<UuidAttr>() &&
+ getContext().getSanitizerBlacklist().isBlacklistedType("attr:uuid"))
+ return true;
+
+ return getContext().getSanitizerBlacklist().isBlacklistedType(
+ RD->getQualifiedNameAsString());
}
void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
@@ -861,41 +913,46 @@ void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
CharUnits PointerWidth =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- std::vector<llvm::MDTuple *> BitsetEntries;
+ typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
+ std::vector<BSEntry> BitsetEntries;
// Create a bit set entry for each address point.
for (auto &&AP : VTLayout.getAddressPoints()) {
if (IsCFIBlacklistedRecord(AP.first.getBase()))
continue;
- BitsetEntries.push_back(CreateVTableBitSetEntry(
- VTable, PointerWidth * AP.second, AP.first.getBase()));
+ BitsetEntries.push_back(std::make_pair(AP.first.getBase(), AP.second));
}
// Sort the bit set entries for determinism.
- std::sort(BitsetEntries.begin(), BitsetEntries.end(), [](llvm::MDTuple *T1,
- llvm::MDTuple *T2) {
- if (T1 == T2)
+ std::sort(BitsetEntries.begin(), BitsetEntries.end(),
+ [this](const BSEntry &E1, const BSEntry &E2) {
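+ // Compare by mangled type name first, then by address point index.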
+ if (&E1 == &E2)
return false;
- StringRef S1 = cast<llvm::MDString>(T1->getOperand(0))->getString();
- StringRef S2 = cast<llvm::MDString>(T2->getOperand(0))->getString();
+ std::string S1;
+ llvm::raw_string_ostream O1(S1);
+ getCXXABI().getMangleContext().mangleTypeName(
+ QualType(E1.first->getTypeForDecl(), 0), O1);
+ O1.flush();
+
+ std::string S2;
+ llvm::raw_string_ostream O2(S2);
+ getCXXABI().getMangleContext().mangleTypeName(
+ QualType(E2.first->getTypeForDecl(), 0), O2);
+ O2.flush();
+
if (S1 < S2)
return true;
if (S1 != S2)
return false;
- uint64_t Offset1 = cast<llvm::ConstantInt>(
- cast<llvm::ConstantAsMetadata>(T1->getOperand(2))
- ->getValue())->getZExtValue();
- uint64_t Offset2 = cast<llvm::ConstantInt>(
- cast<llvm::ConstantAsMetadata>(T2->getOperand(2))
- ->getValue())->getZExtValue();
- assert(Offset1 != Offset2);
- return Offset1 < Offset2;
+ return E1.second < E2.second;
});
llvm::NamedMDNode *BitsetsMD =
getModule().getOrInsertNamedMetadata("llvm.bitsets");
for (auto BitsetEntry : BitsetEntries)
- BitsetsMD->addOperand(BitsetEntry);
+ CreateVTableBitSetEntry(BitsetsMD, VTable,
+ PointerWidth * BitsetEntry.second,
+ BitsetEntry.first);
}
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index e0195a22eb1e..c27e54af258d 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -85,10 +85,6 @@ public:
uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
BaseSubobject Base);
- /// getAddressPoint - Get the address point of the given subobject in the
- /// class decl.
- uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
-
/// GenerateConstructionVTable - Generate a construction vtable for the given
/// base subobject.
llvm::GlobalVariable *
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 92055917dba9..3ccc4cda89f9 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -16,10 +16,10 @@
#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
-#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Type.h"
+#include "Address.h"
namespace llvm {
class Constant;
@@ -38,6 +38,10 @@ namespace CodeGen {
class RValue {
enum Flavor { Scalar, Complex, Aggregate };
+ // The shift to apply to an aggregate's alignment to make it look
+ // like a pointer.
+ enum { AggAlignShift = 4 };
+
// Stores first value and flavor.
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
@@ -63,11 +67,21 @@ public:
}
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
- llvm::Value *getAggregateAddr() const {
+ Address getAggregateAddress() const {
+ assert(isAggregate() && "Not an aggregate!");
+ auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
+ return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ }
+ llvm::Value *getAggregatePointer() const {
assert(isAggregate() && "Not an aggregate!");
return V1.getPointer();
}
+ static RValue getIgnored() {
+ // FIXME: should we make this a more explicit state?
+ return get(nullptr);
+ }
+
static RValue get(llvm::Value *V) {
RValue ER;
ER.V1.setPointer(V);
@@ -89,11 +103,14 @@ public:
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
- static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ static RValue getAggregate(Address addr, bool isVolatile = false) {
RValue ER;
- ER.V1.setPointer(V);
+ ER.V1.setPointer(addr.getPointer());
ER.V1.setInt(Aggregate);
- ER.V2.setInt(Volatile);
+
+ auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
+ ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
+ ER.V2.setInt(isVolatile);
return ER;
}
};
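
The aggregate variant packs the byte alignment into V2's pointer slot: shifted left by AggAlignShift so the PointerIntPair's low bits stay free for the volatility flag, then reinterpreted as a pointer. A self-contained round-trip sketch of the trick, with void* standing in for llvm::Value*:

#include <cassert>
#include <cstdint>

enum { AggAlignShift = 4 };

// Pack a byte alignment into a pointer-sized slot, keeping the low
// AggAlignShift bits free for flag storage (as PointerIntPair does).
void *packAlignment(std::uintptr_t alignInBytes) {
  return reinterpret_cast<void *>(alignInBytes << AggAlignShift);
}

std::uintptr_t unpackAlignment(void *slot) {
  return reinterpret_cast<std::uintptr_t>(slot) >> AggAlignShift;
}

int main() {
  void *slot = packAlignment(16);
  assert(unpackAlignment(slot) == 16);
  return 0;
}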
@@ -103,6 +120,32 @@ enum ARCPreciseLifetime_t {
ARCImpreciseLifetime, ARCPreciseLifetime
};
+/// The source of the alignment of an l-value; an expression of
+/// confidence in the alignment actually matching the estimate.
+enum class AlignmentSource {
+ /// The l-value was an access to a declared entity or something
+ /// equivalently strong, like the address of an array allocated by a
+ /// language runtime.
+ Decl,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type, but that type was an explicitly-aligned
+ /// typedef.
+ AttributedType,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type.
+ Type
+};
+
+/// Given that the base address has the given alignment source, what's
+/// our confidence in the alignment of the field?
+static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) {
+ // For now, we don't distinguish fields of opaque pointers from
+ // top-level declarations, but maybe we should.
+ return AlignmentSource::Decl;
+}
+
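
The middle tier exists because an alignment attribute on a typedef is an explicit promise, unlike the default guess derived from a bare type. A short illustration of the case AttributedType describes, using the Clang/GCC aligned attribute (the names are invented for the example):

struct Packet { char payload[64]; };

// An over-aligned typedef: accesses through this alias may assume
// 16-byte alignment even though struct Packet alone only promises 1.
typedef Packet AlignedPacket __attribute__((aligned(16)));

static_assert(alignof(Packet) == 1, "natural alignment of the type");
static_assert(alignof(AlignedPacket) == 16, "alignment from the typedef");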
/// LValue - This represents an lvalue reference. Because C/C++ allow
/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
/// bitrange.
@@ -157,6 +200,12 @@ class LValue {
// to make the default bitfield pattern all-zeroes.
bool ImpreciseLifetime : 1;
+ unsigned AlignSource : 2;
+
+ // Whether nontemporal loads/stores should be used when accessing this
+ // lvalue.
+ bool Nontemporal : 1;
+
Expr *BaseIvarExp;
/// Used by struct-path-aware TBAA.
@@ -169,17 +218,21 @@ class LValue {
private:
void Initialize(QualType Type, Qualifiers Quals,
- CharUnits Alignment,
+ CharUnits Alignment, AlignmentSource AlignSource,
llvm::MDNode *TBAAInfo = nullptr) {
+ assert((!Alignment.isZero() || Type->isIncompleteType()) &&
+ "initializing l-value with zero alignment!");
this->Type = Type;
this->Quals = Quals;
this->Alignment = Alignment.getQuantity();
assert(this->Alignment == Alignment.getQuantity() &&
"Alignment exceeds allowed max!");
+ this->AlignSource = unsigned(AlignSource);
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
this->ImpreciseLifetime = false;
+ this->Nontemporal = false;
this->ThreadLocalRef = false;
this->BaseIvarExp = nullptr;
@@ -229,6 +282,8 @@ public:
void setARCPreciseLifetime(ARCPreciseLifetime_t value) {
ImpreciseLifetime = (value == ARCImpreciseLifetime);
}
+ bool isNontemporal() const { return Nontemporal; }
+ void setNontemporal(bool Value) { Nontemporal = Value; }
bool isObjCWeak() const {
return Quals.getObjCGCAttr() == Qualifiers::Weak;
@@ -261,29 +316,50 @@ public:
CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+ AlignmentSource getAlignmentSource() const {
+ return AlignmentSource(AlignSource);
+ }
+ void setAlignmentSource(AlignmentSource Source) {
+ AlignSource = unsigned(Source);
+ }
+
// simple lvalue
- llvm::Value *getAddress() const { assert(isSimple()); return V; }
- void setAddress(llvm::Value *address) {
+ llvm::Value *getPointer() const {
+ assert(isSimple());
+ return V;
+ }
+ Address getAddress() const { return Address(getPointer(), getAlignment()); }
+ void setAddress(Address address) {
assert(isSimple());
- V = address;
+ V = address.getPointer();
+ Alignment = address.getAlignment().getQuantity();
}
// vector elt lvalue
- llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ Address getVectorAddress() const {
+ return Address(getVectorPointer(), getAlignment());
+ }
+ llvm::Value *getVectorPointer() const { assert(isVectorElt()); return V; }
llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
// extended vector elements.
- llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ Address getExtVectorAddress() const {
+ return Address(getExtVectorPointer(), getAlignment());
+ }
+ llvm::Value *getExtVectorPointer() const {
+ assert(isExtVectorElt());
+ return V;
+ }
llvm::Constant *getExtVectorElts() const {
assert(isExtVectorElt());
return VectorElts;
}
// bitfield lvalue
- llvm::Value *getBitFieldAddr() const {
- assert(isBitField());
- return V;
+ Address getBitFieldAddress() const {
+ return Address(getBitFieldPointer(), getAlignment());
}
+ llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
assert(isBitField());
return *BitFieldInfo;
@@ -292,37 +368,40 @@ public:
// global register lvalue
llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
- static LValue MakeAddr(llvm::Value *address, QualType type,
- CharUnits alignment, ASTContext &Context,
+ static LValue MakeAddr(Address address, QualType type,
+ ASTContext &Context,
+ AlignmentSource alignSource,
llvm::MDNode *TBAAInfo = nullptr) {
Qualifiers qs = type.getQualifiers();
qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
LValue R;
R.LVType = Simple;
- assert(address->getType()->isPointerTy());
- R.V = address;
- R.Initialize(type, qs, alignment, TBAAInfo);
+ assert(address.getPointer()->getType()->isPointerTy());
+ R.V = address.getPointer();
+ R.Initialize(type, qs, address.getAlignment(), alignSource, TBAAInfo);
return R;
}
- static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- QualType type, CharUnits Alignment) {
+ static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx,
+ QualType type, AlignmentSource alignSource) {
LValue R;
R.LVType = VectorElt;
- R.V = Vec;
+ R.V = vecAddress.getPointer();
R.VectorIdx = Idx;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
return R;
}
- static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- QualType type, CharUnits Alignment) {
+ static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts,
+ QualType type, AlignmentSource alignSource) {
LValue R;
R.LVType = ExtVectorElt;
- R.V = Vec;
+ R.V = vecAddress.getPointer();
R.VectorElts = Elts;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
return R;
}
@@ -332,29 +411,28 @@ public:
/// bit-field refers to.
/// \param Info - The information describing how to perform the bit-field
/// access.
- static LValue MakeBitfield(llvm::Value *Addr,
+ static LValue MakeBitfield(Address Addr,
const CGBitFieldInfo &Info,
- QualType type, CharUnits Alignment) {
+ QualType type,
+ AlignmentSource alignSource) {
LValue R;
R.LVType = BitField;
- R.V = Addr;
+ R.V = Addr.getPointer();
R.BitFieldInfo = &Info;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), alignSource);
return R;
}
- static LValue MakeGlobalReg(llvm::Value *Reg,
- QualType type,
- CharUnits Alignment) {
+ static LValue MakeGlobalReg(Address Reg, QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = Reg;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.V = Reg.getPointer();
+ R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ AlignmentSource::Decl);
return R;
}
RValue asAggregateRValue() const {
- // FIMXE: Alignment
return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
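
Most of the churn in this class is mechanical: every bare llvm::Value* that used to travel with a separate CharUnits now travels as one Address. A stripped-down model of that abstraction, roughly what lib/CodeGen/Address.h provides, with void* and a plain byte count standing in for llvm::Value* and CharUnits:

#include <cassert>

// Minimal stand-in for CodeGen's Address: a pointer that always
// travels with its known alignment in bytes.
class Address {
  void *Ptr;
  unsigned Align;
public:
  Address(void *P, unsigned A) : Ptr(P), Align(A) {}
  static Address invalid() { return Address(nullptr, 0); }
  bool isValid() const { return Ptr != nullptr; }
  void *getPointer() const { assert(isValid()); return Ptr; }
  unsigned getAlignment() const { assert(isValid()); return Align; }
};

int main() {
  int x = 0;
  Address A(&x, alignof(int));
  assert(A.isValid() && A.getAlignment() == alignof(int));
  return 0;
}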
@@ -407,7 +485,7 @@ public:
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
- return forAddr(nullptr, CharUnits(), Qualifiers(), IsNotDestructed,
+ return forAddr(Address::invalid(), Qualifiers(), IsNotDestructed,
DoesNotNeedGCBarriers, IsNotAliased);
}
@@ -421,15 +499,20 @@ public:
/// for calling destructors on this object
/// \param needsGC - true if the slot is potentially located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ static AggValueSlot forAddr(Address addr,
Qualifiers quals,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
- AV.Addr = addr;
- AV.Alignment = align.getQuantity();
+ if (addr.isValid()) {
+ AV.Addr = addr.getPointer();
+ AV.Alignment = addr.getAlignment().getQuantity();
+ } else {
+ AV.Addr = nullptr;
+ AV.Alignment = 0;
+ }
AV.Quals = quals;
AV.DestructedFlag = isDestructed;
AV.ObjCGCFlag = needsGC;
@@ -443,7 +526,7 @@ public:
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
- return forAddr(LV.getAddress(), LV.getAlignment(),
+ return forAddr(LV.getAddress(),
LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
}
@@ -471,11 +554,15 @@ public:
NeedsGCBarriers_t requiresGCollection() const {
return NeedsGCBarriers_t(ObjCGCFlag);
}
-
- llvm::Value *getAddr() const {
+
+ llvm::Value *getPointer() const {
return Addr;
}
+ Address getAddress() const {
+ return Address(Addr, getAlignment());
+ }
+
bool isIgnored() const {
return Addr == nullptr;
}
@@ -488,9 +575,12 @@ public:
return IsAliased_t(AliasedFlag);
}
- // FIXME: Alignment?
RValue asRValue() const {
- return RValue::getAggregate(getAddr(), isVolatile());
+ if (isIgnored()) {
+ return RValue::getIgnored();
+ } else {
+ return RValue::getAggregate(getAddress(), isVolatile());
+ }
}
void setZeroed(bool V = true) { ZeroedFlag = V; }
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index 755e8aa628ce..643c996e2ec9 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -26,23 +26,16 @@
using namespace clang;
using namespace CodeGen;
-CodeGenABITypes::CodeGenABITypes(ASTContext &C,
- llvm::Module &M,
- const llvm::DataLayout &TD,
+CodeGenABITypes::CodeGenABITypes(ASTContext &C, llvm::Module &M,
CoverageSourceInfo *CoverageInfo)
- : CGO(new CodeGenOptions),
- HSO(new HeaderSearchOptions),
- PPO(new PreprocessorOptions),
- CGM(new CodeGen::CodeGenModule(C, *HSO, *PPO, *CGO,
- M, TD, C.getDiagnostics(),
- CoverageInfo)) {
-}
+ : CGO(new CodeGenOptions), HSO(new HeaderSearchOptions),
+ PPO(new PreprocessorOptions),
+ CGM(new CodeGen::CodeGenModule(C, *HSO, *PPO, *CGO, M, C.getDiagnostics(),
+ CoverageInfo)) {}
-CodeGenABITypes::~CodeGenABITypes()
-{
- delete CGO;
- delete CGM;
-}
+// Explicitly out-of-line because ~CodeGenModule() is private but
+// CodeGenABITypes.h is part of clang's API.
+CodeGenABITypes::~CodeGenABITypes() = default;
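
The out-of-line `= default` is the usual pattern when a std::unique_ptr member's pointee is private or incomplete where the owning class is declared: the destructor must be defined at a point where the pointee type is complete. A minimal standalone sketch of the pattern (Widget and WidgetImpl are hypothetical names):

#include <memory>

// --- header-like part: WidgetImpl is only forward-declared here ---
class WidgetImpl;
class Widget {
  std::unique_ptr<WidgetImpl> Impl;
public:
  Widget();
  ~Widget(); // declared only; defining it here would need ~WidgetImpl
};

// --- implementation-like part: WidgetImpl is now complete ---
class WidgetImpl {};
Widget::Widget() : Impl(new WidgetImpl) {}
Widget::~Widget() = default; // unique_ptr's deleter instantiates here

int main() { Widget w; return 0; }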
const CGFunctionInfo &
CodeGenABITypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
@@ -51,8 +44,9 @@ CodeGenABITypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
}
const CGFunctionInfo &
-CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty) {
- return CGM->getTypes().arrangeFreeFunctionType(Ty);
+CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty,
+ const FunctionDecl *FD) {
+ return CGM->getTypes().arrangeFreeFunctionType(Ty, FD);
}
const CGFunctionInfo &
@@ -62,15 +56,14 @@ CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty) {
const CGFunctionInfo &
CodeGenABITypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP) {
- return CGM->getTypes().arrangeCXXMethodType(RD, FTP);
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD) {
+ return CGM->getTypes().arrangeCXXMethodType(RD, FTP, MD);
}
-const CGFunctionInfo &
-CodeGenABITypes::arrangeFreeFunctionCall(CanQualType returnType,
- ArrayRef<CanQualType> argTypes,
- FunctionType::ExtInfo info,
- RequiredArgs args) {
+const CGFunctionInfo &CodeGenABITypes::arrangeFreeFunctionCall(
+ CanQualType returnType, ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info, RequiredArgs args) {
return CGM->getTypes().arrangeLLVMFunctionInfo(
returnType, /*IsInstanceMethod=*/false, /*IsChainCall=*/false, argTypes,
info, args);
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 0e7b6d8a71d4..abef5432518e 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -26,10 +26,12 @@
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/FunctionInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
+#include "llvm/Object/FunctionIndexObjectFile.h"
#include "llvm/Pass.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
@@ -53,40 +55,47 @@ namespace clang {
std::unique_ptr<CodeGenerator> Gen;
- std::unique_ptr<llvm::Module> TheModule, LinkModule;
+ std::unique_ptr<llvm::Module> TheModule;
+ SmallVector<std::pair<unsigned, std::unique_ptr<llvm::Module>>, 4>
+ LinkModules;
+
+ // This is here so that the diagnostic printer knows the module a diagnostic
+ // refers to.
+ llvm::Module *CurLinkModule = nullptr;
public:
- BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, bool TimePasses,
- const std::string &InFile, llvm::Module *LinkModule,
- raw_pwrite_stream *OS, LLVMContext &C,
- CoverageSourceInfo *CoverageInfo = nullptr)
+ BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ bool TimePasses, const std::string &InFile,
+ const SmallVectorImpl<std::pair<unsigned, llvm::Module *>> &LinkModules,
+ raw_pwrite_stream *OS, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr)
: Diags(Diags), Action(Action), CodeGenOpts(CodeGenOpts),
TargetOpts(TargetOpts), LangOpts(LangOpts), AsmOutStream(OS),
Context(nullptr), LLVMIRGeneration("LLVM IR Generation Time"),
Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
- CodeGenOpts, C, CoverageInfo)),
- LinkModule(LinkModule) {
+ CodeGenOpts, C, CoverageInfo)) {
llvm::TimePassesIsEnabled = TimePasses;
+ for (auto &I : LinkModules)
+ this->LinkModules.push_back(
+ std::make_pair(I.first, std::unique_ptr<llvm::Module>(I.second)));
}
-
std::unique_ptr<llvm::Module> takeModule() { return std::move(TheModule); }
- llvm::Module *takeLinkModule() { return LinkModule.release(); }
+ void releaseLinkModules() {
+ for (auto &I : LinkModules)
+ I.second.release();
+ }
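
The consumer briefly adopts the caller's raw module pointers into unique_ptr so a successful link can move them into the Linker, and releaseLinkModules() drops the deleters again when the action reclaims ownership. A simplified standalone sketch of that adopt-then-release protocol, with a placeholder Module type:

#include <memory>
#include <utility>
#include <vector>

struct Module {}; // placeholder for llvm::Module

struct Consumer {
  std::vector<std::pair<unsigned, std::unique_ptr<Module>>> LinkModules;

  // Adopt raw pointers the caller still considers its own.
  void adopt(const std::vector<std::pair<unsigned, Module *>> &Mods) {
    for (auto &M : Mods)
      LinkModules.emplace_back(M.first, std::unique_ptr<Module>(M.second));
  }

  // Give ownership back: release() drops the deleter without freeing.
  void releaseLinkModules() {
    for (auto &M : LinkModules)
      M.second.release();
  }
};

int main() {
  Module *Raw = new Module;
  Consumer C;
  C.adopt({{0u, Raw}});
  C.releaseLinkModules(); // the caller owns Raw again
  delete Raw;
  return 0;
}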
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
Gen->HandleCXXStaticMemberVarInstantiation(VD);
}
void Initialize(ASTContext &Ctx) override {
- if (Context) {
- assert(Context == &Ctx);
- return;
- }
-
+ assert(!Context && "initialized multiple times");
+
Context = &Ctx;
if (llvm::TimePassesIsEnabled)
@@ -158,14 +167,6 @@ namespace clang {
assert(TheModule.get() == M &&
"Unexpected module change during IR generation");
- // Link LinkModule into this module if present, preserving its validity.
- if (LinkModule) {
- if (Linker::LinkModules(
- M, LinkModule.get(),
- [=](const DiagnosticInfo &DI) { linkerDiagnosticHandler(DI); }))
- return;
- }
-
// Install an inline asm handler so that diagnostics get printed through
// our diagnostics hooks.
LLVMContext &Ctx = TheModule->getContext();
@@ -179,8 +180,16 @@ namespace clang {
void *OldDiagnosticContext = Ctx.getDiagnosticContext();
Ctx.setDiagnosticHandler(DiagnosticHandler, this);
+ // Link LinkModule into this module if present, preserving its validity.
+ for (auto &I : LinkModules) {
+ unsigned LinkFlags = I.first;
+ CurLinkModule = I.second.get();
+ if (Linker::linkModules(*M, std::move(I.second), LinkFlags))
+ return;
+ }
+
EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
- C.getTargetInfo().getTargetDescription(),
+ C.getTargetInfo().getDataLayoutString(),
TheModule.get(), Action, AsmOutStream);
Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
@@ -226,8 +235,6 @@ namespace clang {
((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
}
- void linkerDiagnosticHandler(const llvm::DiagnosticInfo &DI);
-
static void DiagnosticHandler(const llvm::DiagnosticInfo &DI,
void *Context) {
((BackendConsumer *)Context)->DiagnosticHandlerImpl(DI);
@@ -256,6 +263,10 @@ namespace clang {
const llvm::DiagnosticInfoOptimizationRemarkMissed &D);
void OptimizationRemarkHandler(
const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D);
+ void OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D);
void OptimizationFailureHandler(
const llvm::DiagnosticInfoOptimizationFailure &D);
};
@@ -333,8 +344,7 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
// Convert the SMDiagnostic ranges into SourceRange and attach them
// to the diagnostic.
- for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) {
- std::pair<unsigned, unsigned> Range = D.getRanges()[i];
+ for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) {
unsigned Column = D.getColumnNo();
B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
Loc.getLocWithOffset(Range.second - Column));
@@ -492,33 +502,46 @@ void BackendConsumer::OptimizationRemarkHandler(
void BackendConsumer::OptimizationRemarkHandler(
const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D) {
- // Optimization analysis remarks are active only if the -Rpass-analysis
- // flag has a regular expression that matches the name of the pass
- // name in \p D.
- if (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
- CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName()))
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the pass name in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
EmitOptimizationMessage(
D, diag::remark_fe_backend_optimization_remark_analysis);
}
-void BackendConsumer::OptimizationFailureHandler(
- const llvm::DiagnosticInfoOptimizationFailure &D) {
- EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D) {
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the pass name in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ EmitOptimizationMessage(
+ D, diag::remark_fe_backend_optimization_remark_analysis_fpcommute);
}
-void BackendConsumer::linkerDiagnosticHandler(const DiagnosticInfo &DI) {
- if (DI.getSeverity() != DS_Error)
- return;
-
- std::string MsgStorage;
- {
- raw_string_ostream Stream(MsgStorage);
- DiagnosticPrinterRawOStream DP(Stream);
- DI.print(DP);
- }
+void BackendConsumer::OptimizationRemarkHandler(
+ const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D) {
+ // Optimization analysis remarks are active if the pass name is set to
+ // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
+ // regular expression that matches the pass name in \p D.
+
+ if (D.getPassName() == llvm::DiagnosticInfo::AlwaysPrint ||
+ (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
+ CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ EmitOptimizationMessage(
+ D, diag::remark_fe_backend_optimization_remark_analysis_aliasing);
+}
- Diags.Report(diag::err_fe_cannot_link_module)
- << LinkModule->getModuleIdentifier() << MsgStorage;
+void BackendConsumer::OptimizationFailureHandler(
+ const llvm::DiagnosticInfoOptimizationFailure &D) {
+ EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
}
/// \brief This function is invoked when the backend needs
@@ -538,6 +561,13 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
return;
ComputeDiagID(Severity, backend_frame_larger_than, DiagID);
break;
+ case DK_Linker:
+ assert(CurLinkModule);
+ // FIXME: stop eating the warnings and notes.
+ if (Severity != DS_Error)
+ return;
+ DiagID = diag::err_fe_cannot_link_module;
+ break;
case llvm::DK_OptimizationRemark:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
@@ -554,6 +584,18 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
OptimizationRemarkHandler(
cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI));
return;
+ case llvm::DK_OptimizationRemarkAnalysisFPCommute:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(
+ cast<DiagnosticInfoOptimizationRemarkAnalysisFPCommute>(DI));
+ return;
+ case llvm::DK_OptimizationRemarkAnalysisAliasing:
+ // Optimization remarks are always handled completely by this
+ // handler. There is no generic way of emitting them.
+ OptimizationRemarkHandler(
+ cast<DiagnosticInfoOptimizationRemarkAnalysisAliasing>(DI));
+ return;
case llvm::DK_OptimizationFailure:
// Optimization failures are always handled completely by this
// handler.
@@ -571,6 +613,12 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
DI.print(DP);
}
+ if (DiagID == diag::err_fe_cannot_link_module) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << CurLinkModule->getModuleIdentifier() << MsgStorage;
+ return;
+ }
+
// Report the backend message using the usual diagnostic mechanism.
FullSourceLoc Loc;
Diags.Report(Loc, DiagID).AddString(MsgStorage);
@@ -578,9 +626,8 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
#undef ComputeDiagID
CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
- : Act(_Act), LinkModule(nullptr),
- VMContext(_VMContext ? _VMContext : new LLVMContext),
- OwnsVMContext(!_VMContext) {}
+ : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
CodeGenAction::~CodeGenAction() {
TheModule.reset();
@@ -595,9 +642,9 @@ void CodeGenAction::EndSourceFileAction() {
if (!getCompilerInstance().hasASTConsumer())
return;
- // If we were given a link module, release consumer's ownership of it.
- if (LinkModule)
- BEConsumer->takeLinkModule();
+ // Take back ownership of link modules we passed to consumer.
+ if (!LinkModules.empty())
+ BEConsumer->releaseLinkModules();
// Steal the module from the consumer.
TheModule = BEConsumer->takeModule();
@@ -639,28 +686,29 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (BA != Backend_EmitNothing && !OS)
return nullptr;
- llvm::Module *LinkModuleToUse = LinkModule;
-
- // If we were not given a link module, and the user requested that one be
- // loaded from bitcode, do so now.
- const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
- if (!LinkModuleToUse && !LinkBCFile.empty()) {
- auto BCBuf = CI.getFileManager().getBufferForFile(LinkBCFile);
- if (!BCBuf) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << LinkBCFile << BCBuf.getError().message();
- return nullptr;
- }
+ // Load bitcode modules to link with, if we need to.
+ if (LinkModules.empty())
+ for (auto &I : CI.getCodeGenOpts().LinkBitcodeFiles) {
+ const std::string &LinkBCFile = I.second;
+
+ auto BCBuf = CI.getFileManager().getBufferForFile(LinkBCFile);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << BCBuf.getError().message();
+ LinkModules.clear();
+ return nullptr;
+ }
- ErrorOr<std::unique_ptr<llvm::Module>> ModuleOrErr =
- getLazyBitcodeModule(std::move(*BCBuf), *VMContext);
- if (std::error_code EC = ModuleOrErr.getError()) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << LinkBCFile << EC.message();
- return nullptr;
+ ErrorOr<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getLazyBitcodeModule(std::move(*BCBuf), *VMContext);
+ if (std::error_code EC = ModuleOrErr.getError()) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file) << LinkBCFile
+ << EC.message();
+ LinkModules.clear();
+ return nullptr;
+ }
+ addLinkModule(ModuleOrErr.get().release(), I.first);
}
- LinkModuleToUse = ModuleOrErr.get().release();
- }
CoverageSourceInfo *CoverageInfo = nullptr;
// Add the preprocessor callback only when the coverage mapping is generated.
@@ -669,11 +717,12 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor().addPPCallbacks(
std::unique_ptr<PPCallbacks>(CoverageInfo));
}
+
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile,
- LinkModuleToUse, OS, *VMContext, CoverageInfo));
+ CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile, LinkModules,
+ OS, *VMContext, CoverageInfo));
BEConsumer = Result.get();
return std::move(Result);
}
@@ -732,11 +781,43 @@ void CodeGenAction::ExecuteAction() {
TheModule->setTargetTriple(TargetOpts.Triple);
}
+ auto DiagHandler = [&](const DiagnosticInfo &DI) {
+ TheModule->getContext().diagnose(DI);
+ };
+
+ // If we are performing ThinLTO importing compilation (indicated by
+ // a non-empty index file option), then we need to promote to global scope
+ // and rename any local values that are potentially exported to other
+ // modules. Do this early so that the rest of the compilation sees the
+ // promoted symbols.
+ std::unique_ptr<FunctionInfoIndex> Index;
+ if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) {
+ ErrorOr<std::unique_ptr<FunctionInfoIndex>> IndexOrErr =
+ llvm::getFunctionIndexForFile(CI.getCodeGenOpts().ThinLTOIndexFile,
+ DiagHandler);
+ if (std::error_code EC = IndexOrErr.getError()) {
+ std::string Error = EC.message();
+ errs() << "Error loading index file '"
+ << CI.getCodeGenOpts().ThinLTOIndexFile << "': " << Error
+ << "\n";
+ return;
+ }
+ Index = std::move(IndexOrErr.get());
+ assert(Index);
+ // Currently this requires creating a new Module object.
+ std::unique_ptr<llvm::Module> RenamedModule =
+ renameModuleForThinLTO(std::move(TheModule), Index.get());
+ if (!RenamedModule)
+ return;
+
+ TheModule = std::move(RenamedModule);
+ }
+
LLVMContext &Ctx = TheModule->getContext();
Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler);
EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), TargetOpts,
- CI.getLangOpts(), CI.getTarget().getTargetDescription(),
- TheModule.get(), BA, OS);
+ CI.getLangOpts(), CI.getTarget().getDataLayoutString(),
+ TheModule.get(), BA, OS, std::move(Index));
return;
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index ec3c75ccd257..048a04328fc2 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
@@ -24,9 +25,11 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
@@ -36,12 +39,14 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
- Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
+ Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- CurFn(nullptr), CapturedStmtInfo(nullptr),
+ CurFn(nullptr), ReturnValue(Address::invalid()),
+ CapturedStmtInfo(nullptr),
SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
- IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr),
+ IsOutlinedSEHHelper(false),
+ BlockInfo(nullptr), BlockPointer(nullptr),
LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
@@ -51,7 +56,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
CXXABIThisValue(nullptr), CXXThisValue(nullptr),
- CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
+ CXXStructorImplicitParamDecl(nullptr),
CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
TerminateHandler(nullptr), TrapBB(nullptr) {
@@ -91,18 +96,69 @@ CodeGenFunction::~CodeGenFunction() {
}
}
-LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source) {
+ return getNaturalTypeAlignment(T->getPointeeType(), Source,
+ /*forPointee*/ true);
+}
+
+CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source,
+ bool forPointeeType) {
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (Source) *Source = AlignmentSource::AttributedType;
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ if (Source) *Source = AlignmentSource::Type;
+
CharUnits Alignment;
- if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
- Alignment = getContext().getTypeAlignInChars(T);
- unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
- if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
- !getContext().isAlignmentRequired(T))
- Alignment = CharUnits::fromQuantity(MaxAlign);
+ if (T->isIncompleteType()) {
+ Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
+ } else {
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = CGM.getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
}
- return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
+ return Alignment;
}
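
The two halves are deliberately asymmetric: an aligned typedef wins outright, even for incomplete types, while a plain type's ABI alignment may be clipped by -fmax-type-align unless the alignment is semantically required. A standalone model of that decision, with invented numeric inputs:

#include <cassert>

// Model: pick the alignment assumed for a memory access of some type.
unsigned naturalAlignment(unsigned typedefAlign, // 0 if no aligned typedef
                          unsigned abiAlign,     // alignment from the type
                          bool alignmentRequired,
                          unsigned maxTypeAlign) { // 0 if no -fmax-type-align
  if (typedefAlign)          // explicit attribute: trust it completely
    return typedefAlign;
  if (maxTypeAlign && abiAlign > maxTypeAlign && !alignmentRequired)
    return maxTypeAlign;     // cap a merely-assumed alignment
  return abiAlign;
}

int main() {
  assert(naturalAlignment(0, 32, false, 16) == 16);  // capped
  assert(naturalAlignment(0, 32, true, 16) == 32);   // required: keep
  assert(naturalAlignment(64, 32, false, 16) == 64); // typedef wins
  return 0;
}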
+LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Alignment = getNaturalTypeAlignment(T, &AlignSource);
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(), AlignSource,
+ CGM.getTBAAInfo(T));
+}
+
+/// Given a value of type T* that may not be to a complete object,
+/// construct an l-value with the natural pointee alignment of T.
+LValue
+CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Align = getNaturalTypeAlignment(T, &AlignSource, /*pointee*/ true);
+ return MakeAddrLValue(Address(V, Align), T, AlignSource);
+}
+
+
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
@@ -295,7 +351,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
EscapeArgs[Pair.second] = Pair.first;
llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localescape);
- CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
+ CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
}
// Remove the AllocaInsertPt instruction, which is just a convenience for us.
@@ -660,6 +716,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
}
}
+ // If we're in C++ mode and the function name is "main", it is guaranteed
+ // to be norecurse by the standard (3.6.1.3 "The function main shall not be
+ // used within a program").
+ if (getLangOpts().CPlusPlus)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->isMain())
+ Fn->addFnAttr(llvm::Attribute::NoRecurse);
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -696,7 +760,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (RetTy->isVoidType()) {
// Void type; nothing to return.
- ReturnValue = nullptr;
+ ReturnValue = Address::invalid();
// Count the implicit return.
if (!endsWithReturn(D))
@@ -708,15 +772,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = AI;
+ ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
// Load the sret pointer from the argument struct and return into that.
unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
- llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx);
- ReturnValue = Builder.CreateLoad(Addr, "agg.result");
+ llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
+ Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
+ ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -826,15 +891,11 @@ static void TryMarkNoThrow(llvm::Function *F) {
// can't do this on functions that can be overwritten.
if (F->mayBeOverridden()) return;
- for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
- for (llvm::BasicBlock::iterator
- BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
- if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
- if (!Call->doesNotThrow())
- return;
- } else if (isa<llvm::ResumeInst>(&*BI)) {
+ for (llvm::BasicBlock &BB : *F)
+ for (llvm::Instruction &I : BB)
+ if (I.mayThrow())
return;
- }
+
F->setDoesNotThrow();
}
@@ -859,7 +920,18 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
CGM.getCXXABI().buildThisParam(*this, Args);
}
- Args.append(FD->param_begin(), FD->param_end());
+ for (auto *Param : FD->params()) {
+ Args.push_back(Param);
+ if (!Param->hasAttr<PassObjectSizeAttr>())
+ continue;
+
+ IdentifierInfo *NoID = nullptr;
+ auto *Implicit = ImplicitParamDecl::Create(
+ getContext(), Param->getDeclContext(), Param->getLocation(), NoID,
+ getContext().getSizeType());
+ SizeArguments[Param] = Implicit;
+ Args.push_back(Implicit);
+ }
if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
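
So every pass_object_size parameter widens the IR-level signature by one hidden size_t, inserted right after the annotated pointer. An illustrative Clang-only sketch of the attribute from the source side (the comment on the lowered call shape reflects the insertion order above):

#include <cstdio>

// Callee: 'buf' arrives with a hidden companion argument carrying
// __builtin_object_size(buf, 0) as evaluated at each call site.
void fill(char *buf __attribute__((pass_object_size(0))), char c) {
  std::printf("known size: %zu\n", __builtin_object_size(buf, 0));
  (void)c;
}

int main() {
  char arr[16];
  fill(arr, 'x'); // in IR roughly fill(arr, 16, 'x'): pointer, size, then c
  return 0;
}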
@@ -885,8 +957,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
// Generate the body of the function.
- PGO.checkGlobalDecl(GD);
- PGO.assignRegionCounters(GD.getDecl(), CurFn);
+ PGO.assignRegionCounters(GD, CurFn);
if (isa<CXXDestructorDecl>(FD))
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
@@ -1207,6 +1278,22 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
return;
}
+ // If the branch has a condition wrapped by __builtin_unpredictable,
+ // create metadata that specifies that the branch is unpredictable.
+ // Don't bother if not optimizing because that metadata would not be used.
+ llvm::MDNode *Unpredictable = nullptr;
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ if (const CallExpr *Call = dyn_cast<CallExpr>(Cond)) {
+ const Decl *TargetDecl = Call->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
+ llvm::MDBuilder MDHelper(getLLVMContext());
+ Unpredictable = MDHelper.createUnpredictable();
+ }
+ }
+ }
+ }
+
// Create branch weights based on the number of times we get here and the
// number of times the condition should be true.
uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
@@ -1219,7 +1306,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
ApplyDebugLocation DL(*this, Cond);
CondV = EvaluateExprAsBool(Cond);
}
- Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}
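
For context, __builtin_unpredictable marks branches whose direction defeats hardware predictors; when optimizing, the hint survives as !unpredictable metadata on the conditional branch. A minimal Clang-only usage sketch:

extern bool coinFlip();
extern void rare();
extern void common();

void dispatch() {
  // Tell the optimizer (and ultimately the backend) not to bet on
  // either direction of this branch.
  if (__builtin_unpredictable(coinFlip()))
    rare();
  else
    common();
}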
/// ErrorUnsupported - Print out an error that codegen doesn't support the
@@ -1236,20 +1323,18 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *sizeInChars) {
- std::pair<CharUnits,CharUnits> baseSizeAndAlign
- = CGF.getContext().getTypeInfoInChars(baseType);
-
CGBuilderTy &Builder = CGF.Builder;
+ CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
llvm::Value *baseSizeInChars
- = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
- llvm::Type *i8p = Builder.getInt8PtrTy();
-
- llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
- llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+ Address begin =
+ Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
+ llvm::Value *end =
+ Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
@@ -1259,17 +1344,19 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
// count must be nonzero.
CGF.EmitBlock(loopBB);
- llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
- cur->addIncoming(begin, originBB);
+ llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
+ cur->addIncoming(begin.getPointer(), originBB);
+
+ CharUnits curAlign =
+ dest.getAlignment().alignmentOfArrayElement(baseSize);
// memcpy the individual element bit-pattern.
- Builder.CreateMemCpy(cur, src, baseSizeInChars,
- baseSizeAndAlign.second.getQuantity(),
+ Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
/*volatile*/ false);
// Go to the next element.
- llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(),
- cur, 1, "vla.next");
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
// Leave if that's the end of the VLA.
llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
@@ -1280,7 +1367,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
}
void
-CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -1290,23 +1377,17 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
}
// Cast the dest ptr to the appropriate i8 pointer type.
- unsigned DestAS =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
- if (DestPtr->getType() != BP)
- DestPtr = Builder.CreateBitCast(DestPtr, BP);
+ if (DestPtr.getElementType() != Int8Ty)
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
// Get size and alignment info for this aggregate.
- std::pair<CharUnits, CharUnits> TypeInfo =
- getContext().getTypeInfoInChars(Ty);
- CharUnits Size = TypeInfo.first;
- CharUnits Align = TypeInfo.second;
+ CharUnits size = getContext().getTypeSizeInChars(Ty);
llvm::Value *SizeVal;
const VariableArrayType *vla;
// Don't bother emitting a zero-byte memset.
- if (Size.isZero()) {
+ if (size.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
@@ -1324,7 +1405,7 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
}
} else {
- SizeVal = CGM.getSize(Size);
+ SizeVal = CGM.getSize(size);
vla = nullptr;
}
@@ -1343,21 +1424,22 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
NullConstant, Twine());
- llvm::Value *SrcPtr =
- Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+ CharUnits NullAlign = DestPtr.getAlignment();
+ NullVariable->setAlignment(NullAlign.getQuantity());
+ Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
+ NullAlign);
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
- Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
- Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
- Align.getQuantity(), false);
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}
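
The memcpy-from-a-global path exists because a "null" object is not always all-zero bytes; under the Itanium C++ ABI, for instance, a null pointer-to-data-member is represented as all-ones. A small demonstration (the printed result assumes an Itanium-ABI target):

#include <cstdio>
#include <cstring>

struct S { int S::*mp; }; // a null pointer-to-data-member is all-ones here

int main() {
  S s{};                               // value-initialized: mp is null
  unsigned char zeros[sizeof s] = {};
  // Prints 0 on Itanium-ABI targets: the null representation differs
  // from the all-zero bit pattern, so a plain memset(0) would be wrong.
  std::printf("%d\n", std::memcmp(&s, zeros, sizeof s) == 0);
  return 0;
}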
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
@@ -1376,7 +1458,7 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
// If we already made the indirect branch for indirect goto, return its block.
if (IndirectBranch) return IndirectBranch->getParent();
- CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+ CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
// Create the PHI node that indirect gotos will add entries to.
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
@@ -1391,7 +1473,7 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
QualType &baseType,
- llvm::Value *&addr) {
+ Address &addr) {
const ArrayType *arrayType = origArrayType;
// If it's a VLA, we have to load the stored size. Note that
@@ -1430,8 +1512,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
QualType eltType;
llvm::ArrayType *llvmArrayType =
- dyn_cast<llvm::ArrayType>(
- cast<llvm::PointerType>(addr->getType())->getElementType());
+ dyn_cast<llvm::ArrayType>(addr.getElementType());
while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
@@ -1459,12 +1540,13 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
arrayType = getContext().getAsArrayType(eltType);
}
- unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
- llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
- addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
+ llvm::Type *baseType = ConvertType(eltType);
+ addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
} else {
// Create the actual GEP.
- addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
+ gepIndices, "array.begin"),
+ addr.getAlignment());
}
baseType = eltType;
@@ -1649,9 +1731,13 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
} while (type->isVariablyModifiedType());
}
-llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+Address CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
- return EmitScalarExpr(E);
+ return EmitPointerWithAlignment(E);
+ return EmitLValue(E).getAddress();
+}
+
+Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
return EmitLValue(E).getAddress();
}
@@ -1713,9 +1799,10 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
I->getAnnotation(), D->getLocation());
}
-llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
- llvm::Value *V) {
+Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ Address Addr) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Value *V = Addr.getPointer();
llvm::Type *VTy = V->getType();
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
CGM.Int8PtrTy);
@@ -1730,7 +1817,7 @@ llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
V = Builder.CreateBitCast(V, VTy);
}
- return V;
+ return Address(V, Addr.getAlignment());
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
@@ -1773,3 +1860,80 @@ template void CGBuilderInserter<PreserveNames>::InsertHelper(
llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames
+
+static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
+ CodeGenModule &CGM, const FunctionDecl *FD,
+ std::string &FirstMissing) {
+ // If there aren't any required features listed then go ahead and return.
+ if (ReqFeatures.empty())
+ return false;
+
+ // Now build up the set of caller features and verify that all the required
+ // features are there.
+ llvm::StringMap<bool> CallerFeatureMap;
+ CGM.getFunctionFeatureMap(CallerFeatureMap, FD);
+
+ // A required feature may be written as "a|b" to mean "a or b"; every
+ // required group must have at least one available alternative.
+ return std::all_of(
+ ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
+ SmallVector<StringRef, 1> OrFeatures;
+ Feature.split(OrFeatures, "|");
+ return std::any_of(OrFeatures.begin(), OrFeatures.end(),
+ [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ FirstMissing = Feature.str();
+ return false;
+ }
+ return true;
+ });
+ });
+}
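
Each entry in ReqFeatures may itself be a '|'-separated group of acceptable alternatives, so the check is an all_of over groups with an any_of inside each one. A self-contained model of the same shape, with std::map standing in for llvm::StringMap:

#include <algorithm>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Every required group must have at least one available alternative.
bool hasRequired(const std::vector<std::string> &Groups,
                 const std::map<std::string, bool> &Available,
                 std::string &FirstMissing) {
  return std::all_of(Groups.begin(), Groups.end(),
                     [&](const std::string &Group) {
                       std::istringstream SS(Group);
                       std::string Alt;
                       while (std::getline(SS, Alt, '|')) {
                         auto It = Available.find(Alt);
                         if (It != Available.end() && It->second)
                           return true;
                       }
                       FirstMissing = Group;
                       return false;
                     });
}

int main() {
  std::map<std::string, bool> Avail = {{"sse4.2", true}};
  std::string Missing;
  bool OK = hasRequired({"sse4.2|popcnt", "avx"}, Avail, Missing);
  // OK is false and Missing is "avx": the second group had no alternative.
  (void)OK;
  return 0;
}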
+
+// Emits an error if we don't have a valid set of target features for the
+// called function.
+void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
+ const FunctionDecl *TargetDecl) {
+ // Early exit if this is an indirect call.
+ if (!TargetDecl)
+ return;
+
+ // Get the current enclosing function if it exists. If it doesn't,
+ // we can't check the target features anyhow.
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
+ if (!FD)
+ return;
+
+ // Grab the required features for the call. For a builtin this is listed
+ // in the .td file with the default CPU; for an always_inline function it
+ // is any listed CPU and any listed features.
+ unsigned BuiltinID = TargetDecl->getBuiltinID();
+ std::string MissingFeature;
+ if (BuiltinID) {
+ SmallVector<StringRef, 1> ReqFeatures;
+ const char *FeatureList =
+ CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
+ // Return if the builtin doesn't have any required features.
+ if (!FeatureList || StringRef(FeatureList) == "")
+ return;
+ StringRef(FeatureList).split(ReqFeatures, ",");
+ if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
+ << TargetDecl->getDeclName()
+ << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
+
+ } else if (TargetDecl->hasAttr<TargetAttr>()) {
+ // Get the required features for the callee.
+ SmallVector<StringRef, 1> ReqFeatures;
+ llvm::StringMap<bool> CalleeFeatureMap;
+ CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
+ for (const auto &F : CalleeFeatureMap) {
+ // Only positive features are "required".
+ if (F.getValue())
+ ReqFeatures.push_back(F.getKey());
+ }
+ if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
+ CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ }
+}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f2bc402f8b25..b4a9186462ec 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -24,6 +24,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
@@ -80,6 +81,8 @@ class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
+class BlockByrefHelpers;
+class BlockByrefInfo;
class BlockFlags;
class BlockFieldFlags;
@@ -159,9 +162,9 @@ public:
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
- /// ReturnValue - The temporary alloca to hold the return value. This is null
- /// iff the function has no return value.
- llvm::Value *ReturnValue;
+ /// ReturnValue - The temporary alloca to hold the return
+ /// value. This is invalid iff the function has no return value.
+ Address ReturnValue;
/// AllocaInsertPoint - This is an instruction in the entry block before which
/// we prefer to insert allocas.
@@ -247,10 +250,6 @@ public:
~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
};
- /// BoundsChecking - Emit run-time bounds checks. Higher values mean
- /// potentially higher performance penalties.
- unsigned char BoundsChecking;
-
/// \brief Sanitizers enabled for this function.
SanitizerSet SanOpts;
@@ -294,6 +293,8 @@ public:
llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
+ llvm::Instruction *CurrentFuncletPad = nullptr;
+
/// Header for data within LifetimeExtendedCleanupStack.
struct LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
@@ -327,7 +328,7 @@ public:
/// A stack of exception code slots. Entering an __except block pushes a slot
/// on the stack and leaving pops one. The __exception_code() intrinsic loads
/// a value from the top of the stack.
- SmallVector<llvm::Value *, 1> SEHCodeSlotStack;
+ SmallVector<Address, 1> SEHCodeSlotStack;
/// Value returned by __exception_info intrinsic.
llvm::Value *SEHInfo = nullptr;
@@ -373,6 +374,11 @@ public:
/// Returns true inside SEH __try blocks.
bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
+ /// Returns true while emitting a cleanuppad.
+ bool isCleanupPadScope() const {
+ return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
+ }
+
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
/// we're currently inside a conditionally-evaluated expression.
@@ -419,13 +425,12 @@ public:
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
/// non-trivial destructor.
- void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+ void PushDestructorCleanup(QualType T, Address Addr);
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object variant of the given destructor on the object at
/// the given address.
- void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
- llvm::Value *Addr);
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
@@ -555,13 +560,14 @@ public:
void rescopeLabels();
};
+ typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
+
/// \brief The scope used to remap some variables as private in the OpenMP
/// loop body (or other captured region emitted without outlining), and to
/// restore old vars back on exit.
class OMPPrivateScope : public RunCleanupsScope {
- typedef llvm::DenseMap<const VarDecl *, llvm::Value *> VarDeclMapTy;
- VarDeclMapTy SavedLocals;
- VarDeclMapTy SavedPrivates;
+ DeclMapTy SavedLocals;
+ DeclMapTy SavedPrivates;
private:
OMPPrivateScope(const OMPPrivateScope &) = delete;
@@ -578,13 +584,30 @@ public:
/// been privatized already.
bool
addPrivate(const VarDecl *LocalVD,
- const std::function<llvm::Value *()> &PrivateGen) {
+ llvm::function_ref<Address()> PrivateGen) {
assert(PerformCleanup && "adding private to dead scope");
- if (SavedLocals.count(LocalVD) > 0) return false;
- SavedLocals[LocalVD] = CGF.LocalDeclMap.lookup(LocalVD);
- CGF.LocalDeclMap.erase(LocalVD);
- SavedPrivates[LocalVD] = PrivateGen();
- CGF.LocalDeclMap[LocalVD] = SavedLocals[LocalVD];
+
+ // Only save it once.
+ if (SavedLocals.count(LocalVD)) return false;
+
+ // Copy the existing local entry to SavedLocals.
+ auto it = CGF.LocalDeclMap.find(LocalVD);
+ if (it != CGF.LocalDeclMap.end()) {
+ SavedLocals.insert({LocalVD, it->second});
+ } else {
+ SavedLocals.insert({LocalVD, Address::invalid()});
+ }
+
+ // Generate the private entry.
+ Address Addr = PrivateGen();
+ QualType VarTy = LocalVD->getType();
+ if (VarTy->isReferenceType()) {
+ Address Temp = CGF.CreateMemTemp(VarTy);
+ CGF.Builder.CreateStore(Addr.getPointer(), Temp);
+ Addr = Temp;
+ }
+ SavedPrivates.insert({LocalVD, Addr});
+
return true;
}
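
addPrivate is half of a save/override/restore protocol over LocalDeclMap; the other half is Privatize() installing the private bindings and ForceCleanup() copying the saved ones back. A generic standalone sketch of the idea (simplified: this version installs the override immediately rather than deferring it):

#include <cassert>
#include <map>
#include <string>

// Scoped override of entries in a name->value map, restored on destruction.
class PrivateScope {
  std::map<std::string, int> &Map;
  std::map<std::string, int> Saved;
  std::map<std::string, bool> HadEntry; // distinguishes "absent" from 0
public:
  explicit PrivateScope(std::map<std::string, int> &M) : Map(M) {}
  void addPrivate(const std::string &K, int V) {
    if (!HadEntry.count(K)) {            // save each binding only once
      auto It = Map.find(K);
      HadEntry[K] = It != Map.end();
      if (It != Map.end()) Saved[K] = It->second;
    }
    Map[K] = V;
  }
  ~PrivateScope() {
    for (auto &P : HadEntry)
      if (P.second) Map[P.first] = Saved[P.first];
      else Map.erase(P.first);
  }
};

int main() {
  std::map<std::string, int> Locals = {{"x", 1}};
  {
    PrivateScope S(Locals);
    S.addPrivate("x", 42);
    assert(Locals["x"] == 42);
  }
  assert(Locals["x"] == 1); // original binding restored
  return 0;
}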
@@ -597,19 +620,14 @@ public:
/// private copies.
/// \return true if at least one variable was privatized, false otherwise.
bool Privatize() {
- for (auto VDPair : SavedPrivates) {
- CGF.LocalDeclMap[VDPair.first] = VDPair.second;
- }
+ copyInto(SavedPrivates, CGF.LocalDeclMap);
SavedPrivates.clear();
return !SavedLocals.empty();
}
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- // Remap vars back to the original values.
- for (auto I : SavedLocals) {
- CGF.LocalDeclMap[I.first] = I.second;
- }
+ copyInto(SavedLocals, CGF.LocalDeclMap);
SavedLocals.clear();
}
@@ -618,6 +636,25 @@ public:
if (PerformCleanup)
ForceCleanup();
}
+
+ private:
+ /// Copy all the entries in the source map over the corresponding
+ /// entries in the destination, inserting or erasing them as needed.
+ static void copyInto(const DeclMapTy &src, DeclMapTy &dest) {
+ for (auto &pair : src) {
+ if (!pair.second.isValid()) {
+ dest.erase(pair.first);
+ continue;
+ }
+
+ auto it = dest.find(pair.first);
+ if (it != dest.end()) {
+ it->second = pair.second;
+ } else {
+ dest.insert(pair);
+ }
+ }
+ }
};
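As an illustration of how this scope is meant to be driven (a sketch only; the CodeGenFunction CGF and the VarDecl VD are assumed to be in scope, as is a constructor taking a CodeGenFunction&):

    CodeGenFunction::OMPPrivateScope PrivScope(CGF);
    PrivScope.addPrivate(VD, [&]() -> Address {
      // Materialize the private copy whose address will shadow VD.
      return CGF.CreateMemTemp(VD->getType(), "omp.private");
    });
    (void)PrivScope.Privatize(); // remap LocalDeclMap to the private copies
    // ... emit the captured region body ...
    // Destruction runs ForceCleanup(), copying the saved addresses back.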
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
@@ -665,6 +702,7 @@ public:
llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
+ llvm::BasicBlock *getMSVCDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
class ConditionalEvaluation {
@@ -697,10 +735,11 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
- void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- new llvm::StoreInst(value, addr, &block->back());
+ auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ store->setAlignment(addr.getAlignment().getQuantity());
}
/// An RAII object to record that we're evaluating a statement
@@ -859,15 +898,6 @@ public:
}
};
- /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
- /// number that holds the value.
- std::pair<llvm::Type *, unsigned>
- getByRefValueLLVMField(const ValueDecl *VD) const;
-
- /// BuildBlockByrefAddress - Computes address location of the
- /// variable which is declared as __block.
- llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
- const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
bool DisableDebugInfo;
@@ -884,9 +914,14 @@ private:
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
- typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
DeclMapTy LocalDeclMap;
+ /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
+ /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
+ /// parameter.
+ llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
+ SizeArguments;
+
/// Track escaped local variables with auto storage. Used during SEH
/// outlining to produce a call to llvm.localescape.
llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
@@ -984,7 +1019,7 @@ public:
/// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
class FieldConstructionScope {
public:
- FieldConstructionScope(CodeGenFunction &CGF, llvm::Value *This)
+ FieldConstructionScope(CodeGenFunction &CGF, Address This)
: CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
CGF.CXXDefaultInitExprThis = This;
}
@@ -994,7 +1029,7 @@ public:
private:
CodeGenFunction &CGF;
- llvm::Value *OldCXXDefaultInitExprThis;
+ Address OldCXXDefaultInitExprThis;
};
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
@@ -1002,16 +1037,20 @@ public:
class CXXDefaultInitExprScope {
public:
CXXDefaultInitExprScope(CodeGenFunction &CGF)
- : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue) {
- CGF.CXXThisValue = CGF.CXXDefaultInitExprThis;
+ : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
+ OldCXXThisAlignment(CGF.CXXThisAlignment) {
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
~CXXDefaultInitExprScope() {
CGF.CXXThisValue = OldCXXThisValue;
+ CGF.CXXThisAlignment = OldCXXThisAlignment;
}
public:
CodeGenFunction &CGF;
llvm::Value *OldCXXThisValue;
+ CharUnits OldCXXThisAlignment;
};
private:
@@ -1020,10 +1059,12 @@ private:
ImplicitParamDecl *CXXABIThisDecl;
llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
+ CharUnits CXXABIThisAlignment;
+ CharUnits CXXThisAlignment;
/// The value of 'this' to use when evaluating CXXDefaultInitExprs within
/// this expression.
- llvm::Value *CXXDefaultInitExprThis;
+ Address CXXDefaultInitExprThis = Address::invalid();
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
@@ -1042,10 +1083,9 @@ private:
/// handling code.
SourceLocation CurEHLocation;
- /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
- /// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
- unsigned> > ByRefValueInfo;
+ /// BlockByrefInfos - For each __block variable, contains
+ /// information about the layout of the variable.
+ llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
@@ -1086,15 +1126,15 @@ public:
/// Returns a pointer to the function's exception object and selector slot,
/// which is assigned in every landing pad.
- llvm::Value *getExceptionSlot();
- llvm::Value *getEHSelectorSlot();
+ Address getExceptionSlot();
+ Address getEHSelectorSlot();
/// Returns the contents of the function's exception object and selector
/// slots.
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
- llvm::Value *getNormalCleanupDestSlot();
+ Address getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -1121,38 +1161,41 @@ public:
// Cleanups
//===--------------------------------------------------------------------===//
- typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+ typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlignment,
Destroyer *destroyer);
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
+ CharUnits elementAlignment,
Destroyer *destroyer);
void pushDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type);
+ Address addr, QualType type);
void pushEHDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type);
- void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Address addr, QualType type);
+ void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray);
- void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
+ void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
llvm::Value *CompletePtr,
QualType ElementType);
- void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
- void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
+ void pushStackRestore(CleanupKind kind, Address SPMem);
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
- llvm::Function *generateDestroyHelper(llvm::Constant *addr, QualType type,
+ llvm::Function *generateDestroyHelper(Address addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray,
const VarDecl *VD);
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
- QualType type, Destroyer *destroyer,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer,
bool checkZeroLength, bool useEHCleanup);
Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
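For example, a caller would typically push a destruction cleanup for a local along these lines (a sketch; addr and T are assumed to be an Address and a QualType in scope):

    if (QualType::DestructionKind DK = T.isDestructedType())
      CGF.pushDestroy(DK, addr, T); // selects the destroyer for DK internally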
@@ -1203,8 +1246,6 @@ public:
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
llvm::Constant *AtomicHelperFn);
- bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
- bool IvarTypeWithAggrGCObjects(QualType Ty);
//===--------------------------------------------------------------------===//
// Block Bits
@@ -1213,10 +1254,6 @@ public:
llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
static void destroyBlockInfos(CGBlockInfo *info);
- llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
- const CGBlockInfo &Info,
- llvm::StructType *,
- llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
@@ -1238,15 +1275,22 @@ public:
void emitByrefStructureInit(const AutoVarEmission &emission);
void enterByrefCleanup(const AutoVarEmission &emission);
- llvm::Value *LoadBlockStruct() {
- assert(BlockPointer && "no block pointer set!");
- return BlockPointer;
- }
+ void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
+ llvm::Value *ptr);
+
+ Address LoadBlockStruct();
+ Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
- void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
- void AllocateBlockDecl(const DeclRefExpr *E);
- llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
- llvm::Type *BuildByRefType(const VarDecl *var);
+ /// emitBlockByrefAddress - Computes the location of the
+ /// data in a variable which is declared as __block.
+ Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
+ bool followForward = true);
+ Address emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name);
+
+ const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
@@ -1290,6 +1334,8 @@ public:
void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
+ void FinishThunk();
+
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
llvm::Value *Callee);
@@ -1308,27 +1354,34 @@ public:
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
ArrayRef<VarDecl *> ArrayIndexes);
- /// InitializeVTablePointer - Initialize the vtable pointer of the given
- /// subobject.
- ///
- void InitializeVTablePointer(BaseSubobject Base,
- const CXXRecordDecl *NearestVBase,
- CharUnits OffsetFromNearestVBase,
- const CXXRecordDecl *VTableClass);
+ /// Struct with all the information about a dynamic [sub]class needed to set vptr.
+ struct VPtr {
+ BaseSubobject Base;
+ const CXXRecordDecl *NearestVBase;
+ CharUnits OffsetFromNearestVBase;
+ const CXXRecordDecl *VTableClass;
+ };
+
+ /// Initialize the vtable pointer of the given subobject.
+ void InitializeVTablePointer(const VPtr &vptr);
+
+ typedef llvm::SmallVector<VPtr, 4> VPtrsVector;
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- void InitializeVTablePointers(BaseSubobject Base,
- const CXXRecordDecl *NearestVBase,
- CharUnits OffsetFromNearestVBase,
- bool BaseIsNonVirtualPrimaryBase,
- const CXXRecordDecl *VTableClass,
- VisitedVirtualBasesSetTy& VBases);
+ VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);
+
+ void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
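Taken together, these declarations suggest a driver loop of roughly this shape (a sketch, not the actual definition of InitializeVTablePointers):

    for (const CodeGenFunction::VPtr &V : CGF.getVTablePointers(ClassDecl))
      CGF.InitializeVTablePointer(V); // store each vtable address into 'this'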
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
+ llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
+ const CXXRecordDecl *VTableClass);
enum CFITypeCheckKind {
CFITCK_VCall,
@@ -1495,49 +1548,85 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
- LValue MakeAddrLValue(llvm::Value *V, QualType T,
- CharUnits Alignment = CharUnits()) {
- return LValue::MakeAddr(V, T, Alignment, getContext(),
+ LValue MakeAddrLValue(Address Addr, QualType T,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), AlignSource,
CGM.getTBAAInfo(T));
}
+ LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
+ AlignSource, CGM.getTBAAInfo(T));
+ }
+
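The difference between the two overloads, sketched (addr, ptr, align, and T assumed in scope):

    LValue LV = CGF.MakeAddrLValue(addr, T);        // alignment travels in 'addr'
    LValue Old = CGF.MakeAddrLValue(ptr, T, align); // raw pointer, explicit align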
+ LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+ CharUnits getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr,
+ bool forPointeeType = false);
+ CharUnits getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr);
+
+ Address EmitLoadOfReference(Address Ref, const ReferenceType *RefTy,
+ AlignmentSource *Source = nullptr);
+ LValue EmitLoadOfReferenceLValue(Address Ref, const ReferenceType *RefTy);
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
/// the alloca.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
const Twine &Name = "tmp");
+ Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp");
- /// InitTempAlloca - Provide an initial value for the given alloca.
- void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+ /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
+ /// default ABI alignment of the given LLVM type.
+ ///
+ /// IMPORTANT NOTE: This is *not* generally the right alignment for
+ /// any given AST type that happens to have been lowered to the
+ /// given IR type. This should only ever be used for function-local,
+ /// IR-driven manipulations like saving and restoring a value. Do
+ /// not hand this address off to arbitrary IRGen routines, and especially
+ /// do not pass it as an argument to a function that might expect a
+ /// properly ABI-aligned value.
+ Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca which
+ /// will be observable at all locations in the function.
+ ///
+ /// The address should be something that was returned from one of
+ /// the CreateTempAlloca or CreateMemTemp routines, and the
+ /// initializer must be valid in the entry block (i.e. it must
+ /// either be a constant or an argument value).
+ void InitTempAlloca(Address Alloca, llvm::Value *Value);
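A minimal sketch of the documented contract; a null constant is trivially valid in the entry block (T assumed to be a QualType in scope):

    Address Slot = CGF.CreateMemTemp(T, "init.slot");
    CGF.InitTempAlloca(Slot,
                       llvm::Constant::getNullValue(CGF.ConvertTypeForMem(T)));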
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when a temporary
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
- llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
+ ///
+ /// That is, this is exactly equivalent to CreateMemTemp, but calling
+ /// ConvertType instead of ConvertTypeForMem.
+ Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
- llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTemp(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- CharUnits Alignment = getContext().getTypeAlignInChars(T);
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name),
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
- /// CreateInAllocaTmp - Create a temporary memory object for the given
- /// aggregate type.
- AggValueSlot CreateInAllocaTmp(QualType T, const Twine &Name = "inalloca");
-
/// Emit a cast to void* in the appropriate address space.
llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
@@ -1560,7 +1649,12 @@ public:
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
- llvm::Value *EmitVAListRef(const Expr *E);
+ Address EmitVAListRef(const Expr *E);
+
+ /// Emit a "reference" to a __builtin_ms_va_list; this is
+ /// always the value of the expression, because a __builtin_ms_va_list is a
+ /// pointer to a char.
+ Address EmitMSVAListRef(const Expr *E);
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
/// always be accessible even if no aggregate location is provided.
@@ -1568,10 +1662,10 @@ public:
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
- void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ void EmitAnyExprToMem(const Expr *E, Address Location,
Qualifiers Quals, bool IsInitializer);
- void EmitAnyExprToExn(const Expr *E, llvm::Value *Addr);
+ void EmitAnyExprToExn(const Expr *E, Address Addr);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
@@ -1591,19 +1685,15 @@ public:
///
/// The difference to EmitAggregateCopy is that tail padding is not copied.
/// This is required for correctness when assigning non-POD structures in C++.
- void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ void EmitAggregateAssign(Address DestPtr, Address SrcPtr,
QualType EltTy) {
bool IsVolatile = hasVolatileMember(EltTy);
- EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
- true);
+ EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, true);
}
- void EmitAggregateCopyCtor(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- QualType DestTy, QualType SrcTy) {
- CharUnits DestTypeAlign = getContext().getTypeAlignInChars(DestTy);
- CharUnits SrcTypeAlign = getContext().getTypeAlignInChars(SrcTy);
+ void EmitAggregateCopyCtor(Address DestPtr, Address SrcPtr,
+ QualType DestTy, QualType SrcTy) {
EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
- std::min(DestTypeAlign, SrcTypeAlign),
/*IsAssignment=*/false);
}
@@ -1613,20 +1703,16 @@ public:
/// volatile.
/// \param isAssignment - If false, allow padding to be copied. This often
/// yields more efficient code.
- void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ void EmitAggregateCopy(Address DestPtr, Address SrcPtr,
QualType EltTy, bool isVolatile=false,
- CharUnits Alignment = CharUnits::Zero(),
bool isAssignment = false);
- /// StartBlock - Start new block named N. If insert block is a dummy block
- /// then reuse it.
- void StartBlock(const char *N);
-
/// GetAddrOfLocalVar - Return the address of a local variable.
- llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
- llvm::Value *Res = LocalDeclMap[VD];
- assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
- return Res;
+ Address GetAddrOfLocalVar(const VarDecl *VD) {
+ auto it = LocalDeclMap.find(VD);
+ assert(it != LocalDeclMap.end() &&
+ "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return it->second;
}
/// getOpaqueLValueMapping - Given an opaque value expression (which
@@ -1661,19 +1747,31 @@ public:
/// EmitNullInitialization - Generate code to set a value of the given type to
/// null. If the type contains data member pointers, they will be initialized
/// to -1 in accordance with the Itanium C++ ABI.
- void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
-
- // EmitVAArg - Generate code to get an argument from the passed in pointer
- // and update it accordingly. The return value is a pointer to the argument.
+ void EmitNullInitialization(Address DestPtr, QualType Ty);
+
+ /// Emits a call to an LLVM variable-argument intrinsic, either
+ /// \c llvm.va_start or \c llvm.va_end.
+ /// \param ArgValue A reference to the \c va_list as emitted by either
+ /// \c EmitVAListRef or \c EmitMSVAListRef.
+ /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
+ /// calls \c llvm.va_end.
+ llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
+
+ /// Generate code to get an argument from the passed in pointer
+ /// and update it accordingly.
+ /// \param VE The \c VAArgExpr for which to generate code.
+ /// \param VAListAddr Receives a reference to the \c va_list as emitted by
+ /// either \c EmitVAListRef or \c EmitMSVAListRef.
+ /// \returns A pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+ Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
llvm::Value *emitArrayLength(const ArrayType *arrayType,
QualType &baseType,
- llvm::Value *&addr);
+ Address &addr);
/// EmitVLASize - Capture all the sizes for the VLA expressions in
/// the given variably-modified type and store them in the VLASizeMap.
@@ -1694,6 +1792,7 @@ public:
assert(CXXThisValue && "no 'this' value for this function");
return CXXThisValue;
}
+ Address LoadCXXThisAddress();
/// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
/// virtual bases.
@@ -1704,35 +1803,29 @@ public:
return CXXStructorImplicitParamValue;
}
- /// LoadCXXStructorImplicitParam - Load the implicit parameter
- /// for a constructor/destructor.
- llvm::Value *LoadCXXStructorImplicitParam() {
- assert(CXXStructorImplicitParamValue &&
- "no implicit argument value for this function");
- return CXXStructorImplicitParamValue;
- }
-
/// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
/// complete class to the given direct base.
- llvm::Value *
- GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ Address
+ GetAddressOfDirectBaseInCompleteClass(Address Value,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
bool BaseIsVirtual);
+ static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
+
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
- llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue, SourceLocation Loc);
-
- llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue);
+ Address GetAddressOfBaseClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue, SourceLocation Loc);
+
+ Address GetAddressOfDerivedClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
@@ -1751,23 +1844,31 @@ public:
// they are substantially the same.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
const FunctionArgList &Args);
+
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This, const CXXConstructExpr *E);
+ Address This, const CXXConstructExpr *E);
+
+ /// Emit assumption loads for all bases. Must be called only on the
+ /// most-derived class, and not while the object is under construction.
+ void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
+
+ /// Emit assumption that vptr load == global vtable.
+ void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
- llvm::Value *This, llvm::Value *Src,
- const CXXConstructExpr *E);
+ Address This, Address Src,
+ const CXXConstructExpr *E);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
+ Address ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
- llvm::Value *ArrayPtr,
+ Address ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
@@ -1775,15 +1876,15 @@ public:
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This);
+ Address This);
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
- llvm::Type *ElementTy, llvm::Value *NewPtr,
+ llvm::Type *ElementTy, Address NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie);
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
- llvm::Value *Ptr);
+ Address Ptr);
llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
@@ -1797,9 +1898,9 @@ public:
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const Expr *Arg, bool IsDelete);
- llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
- llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
- llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
+ llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
+ Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
/// \brief Situations in which we might emit a check for the suitability of a
/// pointer or glvalue.
@@ -1896,12 +1997,9 @@ public:
const VarDecl *Variable;
- /// The alignment of the variable.
- CharUnits Alignment;
-
- /// The address of the alloca. Null if the variable was emitted
+ /// The address of the alloca. Invalid if the variable was emitted
/// as a global constant.
- llvm::Value *Address;
+ Address Addr;
llvm::Value *NRVOFlag;
@@ -1916,14 +2014,14 @@ public:
llvm::Value *SizeForLifetimeMarkers;
struct Invalid {};
- AutoVarEmission(Invalid) : Variable(nullptr) {}
+ AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
AutoVarEmission(const VarDecl &variable)
- : Variable(&variable), Address(nullptr), NRVOFlag(nullptr),
+ : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsByRef(false), IsConstantAggregate(false),
SizeForLifetimeMarkers(nullptr) {}
- bool wasEmittedAsGlobal() const { return Address == nullptr; }
+ bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
@@ -1938,19 +2036,17 @@ public:
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself.
- llvm::Value *getAllocatedAddress() const {
- return Address;
+ Address getAllocatedAddress() const {
+ return Addr;
}
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
- llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
- if (!IsByRef) return Address;
+ Address getObjectAddress(CodeGenFunction &CGF) const {
+ if (!IsByRef) return Addr;
- auto F = CGF.getByRefValueLLVMField(Variable);
- return CGF.Builder.CreateStructGEP(F.first, Address, F.second,
- Variable->getNameAsString());
+ return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
}
};
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
@@ -1962,9 +2058,35 @@ public:
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
+ class ParamValue {
+ llvm::Value *Value;
+ unsigned Alignment;
+ ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ public:
+ static ParamValue forDirect(llvm::Value *value) {
+ return ParamValue(value, 0);
+ }
+ static ParamValue forIndirect(Address addr) {
+ assert(!addr.getAlignment().isZero());
+ return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ }
+
+ bool isIndirect() const { return Alignment != 0; }
+ llvm::Value *getAnyValue() const { return Value; }
+
+ llvm::Value *getDirectValue() const {
+ assert(!isIndirect());
+ return Value;
+ }
+
+ Address getIndirectAddress() const {
+ assert(isIndirect());
+ return Address(Value, CharUnits::fromQuantity(Alignment));
+ }
+ };
+
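The encoding relies on the invariant (asserted in forIndirect) that a real Address never carries zero alignment; both paths, sketched with assumed values ArgVal and ArgPtr:

    auto Direct = CodeGenFunction::ParamValue::forDirect(ArgVal);
    assert(!Direct.isIndirect());
    auto Indirect = CodeGenFunction::ParamValue::forIndirect(
        Address(ArgPtr, CharUnits::fromQuantity(8)));
    Address ArgAddr = Indirect.getIndirectAddress();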
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
- void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, bool ArgIsPointer,
- unsigned ArgNo);
+ void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
/// protectFromPeepholes - Protect a value that we're intending to
/// store to the side, but which will probably be used later, from
@@ -2001,11 +2123,11 @@ public:
/// \return True if the statement was handled.
bool EmitSimpleStmt(const Stmt *S);
- llvm::Value *EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
- AggValueSlot AVS = AggValueSlot::ignored());
- llvm::Value *EmitCompoundStmtWithoutScope(const CompoundStmt &S,
- bool GetLast = false,
- AggValueSlot AVS =
+ Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ AggValueSlot AVS = AggValueSlot::ignored());
+ Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
+ bool GetLast = false,
+ AggValueSlot AVS =
AggValueSlot::ignored());
/// EmitLabel - Emit the block for the given label. It is legal to call this
@@ -2018,8 +2140,6 @@ public:
void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
void EmitIfStmt(const IfStmt &S);
- void EmitCondBrHints(llvm::LLVMContext &Context, llvm::BranchInst *CondBr,
- ArrayRef<const Attr *> Attrs);
void EmitWhileStmt(const WhileStmt &S,
ArrayRef<const Attr *> Attrs = None);
void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
@@ -2077,19 +2197,20 @@ public:
/// either be an alloca or a call to llvm.localrecover if there are nested
/// outlined functions. ParentFP is the frame pointer of the outermost parent
/// frame.
- llvm::Value *recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
- llvm::Value *ParentVar,
- llvm::Value *ParentFP);
+ Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
+ Address ParentVar,
+ llvm::Value *ParentFP);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
ArrayRef<const Attr *> Attrs = None);
LValue InitCapturedStruct(const CapturedStmt &S);
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
- void GenerateCapturedStmtFunctionProlog(const CapturedStmt &S);
- llvm::Function *GenerateCapturedStmtFunctionEpilog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
- llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
+ Address GenerateCapturedStmtArgument(const CapturedStmt &S);
+ llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
+ void GenerateOpenMPCapturedVars(const CapturedStmt &S,
+ SmallVectorImpl<llvm::Value *> &CapturedVars);
/// \brief Perform element by element copying of arrays with type \a
/// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
/// generated by \a CopyGen.
@@ -2100,8 +2221,8 @@ public:
/// \param CopyGen Copying procedure that copies value of single array element
/// to another single array element.
void EmitOMPAggregateAssign(
- llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen);
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen);
/// \brief Emit proper copying of data from one variable to another.
///
/// \param OriginalType Original type of the copied variables.
@@ -2113,8 +2234,8 @@ public:
/// the base array element).
/// \param Copy The actual copying expression for data from \a SrcVD to \a
/// DestVD.
- void EmitOMPCopy(CodeGenFunction &CGF, QualType OriginalType,
- llvm::Value *DestAddr, llvm::Value *SrcAddr,
+ void EmitOMPCopy(QualType OriginalType,
+ Address DestAddr, Address SrcAddr,
const VarDecl *DestVD, const VarDecl *SrcVD,
const Expr *Copy);
/// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
@@ -2213,10 +2334,14 @@ public:
void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
void EmitOMPTargetDirective(const OMPTargetDirective &S);
+ void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
void
EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
void EmitOMPCancelDirective(const OMPCancelDirective &S);
+ void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
+ void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
+ void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
/// \brief Emit inner loop of the worksharing/simd construct.
///
@@ -2249,8 +2374,8 @@ private:
void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope, bool Ordered,
- llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
- llvm::Value *IL, llvm::Value *Chunk);
+ Address LB, Address UB, Address ST,
+ Address IL, llvm::Value *Chunk);
/// \brief Emit code for sections directive.
OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
@@ -2297,7 +2422,7 @@ public:
/// that the address will be used to access the object.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
- RValue convertTempToRValue(llvm::Value *addr, QualType type,
+ RValue convertTempToRValue(Address addr, QualType type,
SourceLocation Loc);
void EmitAtomicInit(Expr *E, LValue lvalue);
@@ -2338,12 +2463,14 @@ public:
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
- llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty,
+ llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
SourceLocation Loc,
+ AlignmentSource AlignSource =
+ AlignmentSource::Type,
llvm::MDNode *TBAAInfo = nullptr,
QualType TBAABaseTy = QualType(),
- uint64_t TBAAOffset = 0);
+ uint64_t TBAAOffset = 0,
+ bool isNontemporal = false);
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
@@ -2354,11 +2481,12 @@ public:
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
- void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment, QualType Ty,
+ void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource = AlignmentSource::Type,
llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
QualType TBAABaseTy = QualType(),
- uint64_t TBAAOffset = 0);
+ uint64_t TBAAOffset = 0, bool isNontemporal = false);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
@@ -2405,13 +2533,14 @@ public:
// Note: only available for agg return types
LValue EmitVAArgExprLValue(const VAArgExpr *E);
LValue EmitDeclRefLValue(const DeclRefExpr *E);
- LValue EmitReadRegister(const VarDecl *VD);
LValue EmitStringLiteralLValue(const StringLiteral *E);
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
LValue EmitPredefinedLValue(const PredefinedExpr *E);
LValue EmitUnaryOpLValue(const UnaryOperator *E);
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
+ LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
+ bool IsLowerBound = true);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
@@ -2422,10 +2551,13 @@ public:
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
- llvm::Value *EmitExtVectorElementLValue(LValue V);
+ Address EmitExtVectorElementLValue(LValue V);
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
+ Address EmitArrayToPointerDecay(const Expr *Array,
+ AlignmentSource *AlignSource = nullptr);
+
class ConstantEmission {
llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
ConstantEmission(llvm::Constant *C, bool isReference)
@@ -2497,23 +2629,20 @@ public:
/// EmitCall - Generate a call of the given function, expecting the given
/// result type, and using the given argument list which specifies both the
/// LLVM arguments and the types they were derived from.
- ///
- /// \param TargetDecl - If given, the decl of the function in a direct call;
- /// used to set attributes on the call (noreturn, etc.).
- RValue EmitCall(const CGFunctionInfo &FnInfo,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- const CallArgList &Args,
- const Decl *TargetDecl = nullptr,
+ RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, const CallArgList &Args,
+ CGCalleeInfo CalleeInfo = CGCalleeInfo(),
llvm::Instruction **callOrInvoke = nullptr);
RValue EmitCall(QualType FnType, llvm::Value *Callee, const CallExpr *E,
ReturnValueSlot ReturnValue,
- const Decl *TargetDecl = nullptr,
+ CGCalleeInfo CalleeInfo = CGCalleeInfo(),
llvm::Value *Chain = nullptr);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
+ void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
+
llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
const Twine &name = "");
llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
@@ -2528,8 +2657,6 @@ public:
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name = "");
- llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- const Twine &Name = "");
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const Twine &name = "");
@@ -2565,6 +2692,10 @@ public:
NestedNameSpecifier *Qualifier,
bool IsArrow, const Expr *Base);
// Compute the object pointer.
+ Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *AlignSource = nullptr);
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
@@ -2599,7 +2730,7 @@ public:
unsigned Modifier,
const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops,
- llvm::Value *Align = nullptr);
+ Address PtrOp0, Address PtrOp1);
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -2612,8 +2743,6 @@ public:
bool negateForRightShift);
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
- // Helper functions for EmitAArch64BuiltinExpr.
- llvm::Value *vectorWrapScalar8(llvm::Value *Op);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -2623,6 +2752,8 @@ public:
llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
@@ -2643,24 +2774,23 @@ public:
}
// ARC primitives.
- void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
- void EmitARCDestroyWeak(llvm::Value *addr);
- llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
- llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
- llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
- bool ignored);
- void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
- void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCInitWeak(Address addr, llvm::Value *value);
+ void EmitARCDestroyWeak(Address addr);
+ llvm::Value *EmitARCLoadWeak(Address addr);
+ llvm::Value *EmitARCLoadWeakRetained(Address addr);
+ llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
+ void EmitARCCopyWeak(Address dst, Address src);
+ void EmitARCMoveWeak(Address dst, Address src);
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
bool resultIgnored);
- llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
bool resultIgnored);
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
- void EmitARCDestroyStrong(llvm::Value *addr, ARCPreciseLifetime_t precise);
+ void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
@@ -2673,8 +2803,6 @@ public:
EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
llvm::Value *EmitObjCThrowOperand(const Expr *expr);
-
- llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
@@ -2707,17 +2835,16 @@ public:
/// scalar type, returning the result.
llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
- /// EmitScalarConversion - Emit a conversion from the specified type to the
- /// specified destination type, both of which are LLVM scalar types.
+ /// Emit a conversion from the specified type to the specified destination
+ /// type, both of which are LLVM scalar types.
llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
- QualType DstTy);
+ QualType DstTy, SourceLocation Loc);
- /// EmitComplexToScalarConversion - Emit a conversion from the specified
- /// complex type to the specified destination type, where the destination type
- /// is an LLVM scalar type.
+ /// Emit a conversion from the specified complex type to the specified
+ /// destination type, where the destination type is an LLVM scalar type.
llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
- QualType DstTy);
-
+ QualType DstTy,
+ SourceLocation Loc);
/// EmitAggExpr - Emit the computation of the specified expression
/// of aggregate type. The result is computed into the given slot,
@@ -2728,11 +2855,6 @@ public:
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
- /// EmitGCMemmoveCollectable - Emit special API for structs with object
- /// pointers.
- void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- QualType Ty);
-
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void EmitExtendGCLifetime(llvm::Value *object);
@@ -2753,6 +2875,9 @@ public:
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
+ Address emitAddrOfRealComponent(Address complex, QualType complexType);
+ Address emitAddrOfImagComponent(Address complex, QualType complexType);
+
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
@@ -2787,7 +2912,7 @@ public:
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> CXXThreadLocals,
- llvm::GlobalVariable *Guard = nullptr);
+ Address Guard = Address::invalid());
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
@@ -2802,8 +2927,7 @@ public:
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
- void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
- const Expr *Exp);
+ void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
void enterFullExpression(const ExprWithCleanups *E) {
if (E->getNumObjects() == 0) return;
@@ -2815,7 +2939,7 @@ public:
void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
- RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = nullptr);
+ RValue EmitAtomicExpr(AtomicExpr *E);
//===--------------------------------------------------------------------===//
// Annotations Emission
@@ -2832,7 +2956,7 @@ public:
/// Emit field annotations for the given field & value. Returns the
/// annotation result.
- llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
+ Address EmitFieldAnnotations(const FieldDecl *D, Address V);
//===--------------------------------------------------------------------===//
// Internal Helpers
@@ -2885,6 +3009,11 @@ public:
StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs);
+ /// \brief Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
+ /// if Cond is false.
+ void EmitCfiSlowPathCheck(llvm::Value *Cond, llvm::ConstantInt *TypeId,
+ llvm::Value *Ptr);
+
/// \brief Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked);
@@ -2920,6 +3049,12 @@ private:
llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
DeferredReplacements;
+ /// Set the address of a local variable.
+ void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
+ assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
+ LocalDeclMap.insert({VD, Addr});
+ }
+
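setAddrOfLocalVar is the registration half of GetAddrOfLocalVar above; the intended protocol, sketched as code inside a CodeGenFunction member (VD assumed):

    Address A = CreateMemTemp(VD->getType(), VD->getName());
    setAddrOfLocalVar(VD, A);             // asserts VD wasn't mapped already
    Address Same = GetAddrOfLocalVar(VD); // later lookups return A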
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
/// from function arguments into \arg Dst. See ABIArgInfo::Expand.
///
@@ -2942,6 +3077,18 @@ private:
std::string &ConstraintStr,
SourceLocation Loc);
+ /// \brief Attempts to statically evaluate the object size of E. If that
+ /// fails, emits code to figure the size of E out for us. This is
+ /// pass_object_size aware.
+ llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
+ /// \brief Emits the size of E, as required by __builtin_object_size. This
+ /// function is aware of pass_object_size parameters, and will act accordingly
+ /// if E is a parameter with the pass_object_size attribute.
+ llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
public:
#ifndef NDEBUG
// Determine whether the given argument is an Objective-C method
@@ -2966,12 +3113,11 @@ public:
/// EmitCallArgs - Emit call arguments for a function.
template <typename T>
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
const FunctionDecl *CalleeDecl = nullptr,
unsigned ParamsToSkip = 0) {
SmallVector<QualType, 16> ArgTypes;
- CallExpr::const_arg_iterator Arg = ArgBeg;
+ CallExpr::const_arg_iterator Arg = ArgRange.begin();
assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
"Can't skip parameters if type info is not provided");
@@ -2984,7 +3130,7 @@ public:
for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
E = CallArgTypeInfo->param_type_end();
I != E; ++I, ++Arg) {
- assert(Arg != ArgEnd && "Running over edge of argument list!");
+ assert(Arg != ArgRange.end() && "Running over edge of argument list!");
assert((isGenericMethod ||
((*I)->isVariablyModifiedType() ||
(*I).getNonReferenceType()->isObjCRetainableType() ||
@@ -2992,7 +3138,7 @@ public:
.getCanonicalType((*I).getNonReferenceType())
.getTypePtr() ==
getContext()
- .getCanonicalType(Arg->getType())
+ .getCanonicalType((*Arg)->getType())
.getTypePtr())) &&
"type mismatch in call argument!");
ArgTypes.push_back(*I);
@@ -3001,23 +3147,46 @@ public:
// Either we've emitted all the call args, or we have a call to a variadic
// function.
- assert(
- (Arg == ArgEnd || !CallArgTypeInfo || CallArgTypeInfo->isVariadic()) &&
- "Extra arguments in non-variadic function!");
+ assert((Arg == ArgRange.end() || !CallArgTypeInfo ||
+ CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
// If we still have any arguments, emit them using the type of the argument.
- for (; Arg != ArgEnd; ++Arg)
- ArgTypes.push_back(getVarArgType(*Arg));
+ for (auto *A : llvm::make_range(Arg, ArgRange.end()))
+ ArgTypes.push_back(getVarArgType(A));
- EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, CalleeDecl, ParamsToSkip);
+ EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip);
}
void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
+ llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
const FunctionDecl *CalleeDecl = nullptr,
unsigned ParamsToSkip = 0);
+ /// EmitPointerWithAlignment - Given an expression with a pointer
+ /// type, emit the value and compute our best estimate of the
+ /// alignment of the pointee.
+ ///
+ /// \param Source - If non-null, this will be initialized with
+ /// information about the source of the alignment. Note that this
+ /// function will conservatively fall back on the type when it
+ /// doesn't recognize the expression, which means that sometimes
+ /// the returned alignment is only a worst-case assumption.
+ ///
+ /// One reasonable way to use this information is when there's a
+ /// language guarantee that the pointer must be aligned to some
+ /// stricter value, and we're simply trying to ensure that
+ /// sufficiently obvious uses of under-aligned objects don't get
+ /// miscompiled; for example, a placement new into the address of
+ /// a local variable. In such a case, it's quite reasonable to
+ /// just ignore the returned alignment when it isn't from an
+ /// explicit source.
+ Address EmitPointerWithAlignment(const Expr *Addr,
+ AlignmentSource *Source = nullptr);
+
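A sketch of consuming the provenance; AlignmentSource::Decl is assumed here to be the enum's explicit-source case:

    AlignmentSource Source;
    Address P = CGF.EmitPointerWithAlignment(E, &Source);
    // Trust the alignment outright only when it came from an explicit source.
    bool Explicit = (Source == AlignmentSource::Decl);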
private:
QualType getVarArgType(const Expr *Arg);
@@ -3027,16 +3196,11 @@ private:
void EmitDeclMetadata();
- CodeGenModule::ByrefHelpers *
- buildByrefHelpers(llvm::StructType &byrefType,
- const AutoVarEmission &emission);
+ BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
- /// GetPointeeAlignment - Given an expression with a pointer type, emit the
- /// value and compute our best estimate of the alignment of the pointee.
- std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
-
llvm::Value *GetValueForARMHint(unsigned BuiltinID);
};
@@ -3059,17 +3223,23 @@ struct DominatingLLVMValue {
static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
- // Otherwise we need an alloca.
- llvm::Value *alloca =
- CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ // Otherwise, we need an alloca.
+ auto align = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ Address alloca =
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
- return saved_type(alloca, true);
+ return saved_type(alloca.getPointer(), true);
}
static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ // If the value says it wasn't saved, trust that it's still dominating.
if (!value.getInt()) return value.getPointer();
- return CGF.Builder.CreateLoad(value.getPointer());
+
+ // Otherwise, it should be an alloca instruction, as set up in save().
+ auto alloca = cast<llvm::AllocaInst>(value.getPointer());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
}
};
@@ -3082,6 +3252,28 @@ template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
}
};
+/// A specialization of DominatingValue for Address.
+template <> struct DominatingValue<Address> {
+ typedef Address type;
+
+ struct saved_type {
+ DominatingLLVMValue::saved_type SavedValue;
+ CharUnits Alignment;
+ };
+
+ static bool needsSaving(type value) {
+ return DominatingLLVMValue::needsSaving(value.getPointer());
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return { DominatingLLVMValue::save(CGF, value.getPointer()),
+ value.getAlignment() };
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
+ value.Alignment);
+ }
+};
+
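In use (a sketch): a conditionally-emitted cleanup saves the Address when pushed and rebuilds it when the cleanup fires.

    typedef DominatingValue<Address> DomAddr;
    DomAddr::saved_type Saved = DomAddr::save(CGF, addr);
    // ... later, at a point dominated by the save:
    Address Restored = DomAddr::restore(CGF, Saved);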
/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
typedef RValue type;
@@ -3090,15 +3282,17 @@ template <> struct DominatingValue<RValue> {
AggregateAddress, ComplexAddress };
llvm::Value *Value;
- Kind K;
- saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+ unsigned K : 3;
+ unsigned Align : 29;
+ saved_type(llvm::Value *v, Kind k, unsigned a = 0)
+ : Value(v), K(k), Align(a) {}
public:
static bool needsSaving(RValue value);
static saved_type save(CodeGenFunction &CGF, RValue value);
RValue restore(CodeGenFunction &CGF);
- // implementations in CGExprCXX.cpp
+ // implementations in CGCleanup.cpp
};
static bool needsSaving(type value) {
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index c9c48c7628de..173b0dcba1c2 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
@@ -52,6 +53,7 @@
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MD5.h"
using namespace clang;
using namespace CodeGen;
@@ -64,8 +66,10 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
+ case TargetCXXABI::WatchOS:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::WebAssembly:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
@@ -77,17 +81,16 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO,
const CodeGenOptions &CGO, llvm::Module &M,
- const llvm::DataLayout &TD,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
: Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
- TheDataLayout(TD), Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
+ Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
VMContext(M.getContext()), TBAA(nullptr), TheTargetCodeGenInfo(nullptr),
Types(*this), VTables(*this), ObjCRuntime(nullptr),
OpenCLRuntime(nullptr), OpenMPRuntime(nullptr), CUDARuntime(nullptr),
- DebugInfo(nullptr), ARCData(nullptr),
- NoObjCARCExceptionsMetadata(nullptr), RRData(nullptr), PGOReader(nullptr),
+ DebugInfo(nullptr), ObjCData(nullptr),
+ NoObjCARCExceptionsMetadata(nullptr), PGOReader(nullptr),
CFConstantStringClassRef(nullptr), ConstantStringClassRef(nullptr),
NSConstantStringType(nullptr), NSConcreteGlobalBlock(nullptr),
NSConcreteStackBlock(nullptr), BlockObjectAssign(nullptr),
@@ -106,7 +109,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
- C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
Int8PtrTy = Int8Ty->getPointerTo(0);
@@ -139,9 +144,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Block.GlobalUniqueCount = 0;
- if (C.getLangOpts().ObjCAutoRefCount)
- ARCData = new ARCEntrypoints();
- RRData = new RREntrypoints();
+ if (C.getLangOpts().ObjC1)
+ ObjCData = new ObjCEntrypoints();
if (!CodeGenOpts.InstrProfileInput.empty()) {
auto ReaderOrErr =
@@ -169,8 +173,7 @@ CodeGenModule::~CodeGenModule() {
delete TheTargetCodeGenInfo;
delete TBAA;
delete DebugInfo;
- delete ARCData;
- delete RRData;
+ delete ObjCData;
}
void CodeGenModule::createObjCRuntime() {
@@ -186,6 +189,7 @@ void CodeGenModule::createObjCRuntime() {
case ObjCRuntime::FragileMacOSX:
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
+ case ObjCRuntime::WatchOS:
ObjCRuntime = CreateMacObjCRuntime(*this);
return;
}
@@ -232,12 +236,27 @@ void CodeGenModule::applyReplacements() {
OldF->replaceAllUsesWith(Replacement);
if (NewF) {
NewF->removeFromParent();
- OldF->getParent()->getFunctionList().insertAfter(OldF, NewF);
+ OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
+ NewF);
}
OldF->eraseFromParent();
}
}
+void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV,
+                                            llvm::Constant *C) {
+ GlobalValReplacements.push_back(std::make_pair(GV, C));
+}
+
+void CodeGenModule::applyGlobalValReplacements() {
+ for (auto &I : GlobalValReplacements) {
+ llvm::GlobalValue *GV = I.first;
+ llvm::Constant *C = I.second;
+
+ GV->replaceAllUsesWith(C);
+ GV->eraseFromParent();
+ }
+}
+
// This is only used on aliases that we created, and we know they have a
// linear structure.
static const llvm::GlobalObject *getAliasedGlobal(const llvm::GlobalAlias &GA) {
@@ -340,6 +359,7 @@ void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
void CodeGenModule::Release() {
EmitDeferred();
+ applyGlobalValReplacements();
applyReplacements();
checkAliases();
EmitCXXGlobalInitFunc();
@@ -355,8 +375,11 @@ void CodeGenModule::Release() {
if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
AddGlobalDtor(CudaDtorFunction);
}
- if (PGOReader && PGOStats.hasDiagnostics())
- PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
+ if (PGOReader) {
+ getModule().setMaximumFunctionCount(PGOReader->getMaximumFunctionCount());
+ if (PGOStats.hasDiagnostics())
+ PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
+ }
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
@@ -370,11 +393,32 @@ void CodeGenModule::Release() {
(Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
EmitModuleLinkOptions();
}
- if (CodeGenOpts.DwarfVersion)
+ if (CodeGenOpts.DwarfVersion) {
// We actually want the latest version when there are conflicts.
// We can change from Warning to Latest if such a mode is supported.
getModule().addModuleFlag(llvm::Module::Warning, "Dwarf Version",
CodeGenOpts.DwarfVersion);
+ }
+ if (CodeGenOpts.EmitCodeView) {
+ // Indicate that we want CodeView in the metadata.
+ getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
+ }
+ if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
+    // We don't support LTO of two modules built with different values of
+    // StrictVTablePointers.
+ // FIXME: we could support it by stripping all the information introduced
+ // by StrictVTablePointers.
+
+    getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers", 1);
+
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(VMContext, "StrictVTablePointers"),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 1))};
+
+ getModule().addModuleFlag(llvm::Module::Require,
+ "StrictVTablePointersRequirement",
+ llvm::MDNode::get(VMContext, Ops));
+ }
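// Editorial note, not part of this patch: the "Error" behavior makes the IR
// linker reject mixing modules that disagree on StrictVTablePointers, while
// the "Require" flag additionally rejects linking against any module that
// lacks the flag entirely, so all-or-nothing usage is enforced.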
if (DebugInfo)
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
@@ -399,6 +443,11 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ // Indicate that we want cross-DSO control flow integrity checks.
+ getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
+ }
+
if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
llvm::PICLevel::Level PL = llvm::PICLevel::Default;
switch (PLevel) {
@@ -450,12 +499,6 @@ llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
return TBAA->getTBAAStructInfo(QTy);
}
-llvm::MDNode *CodeGenModule::getTBAAStructTypeInfo(QualType QTy) {
- if (!TBAA)
- return nullptr;
- return TBAA->getTBAAStructTypeInfo(QTy);
-}
-
llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy,
llvm::MDNode *AccessN,
uint64_t O) {
@@ -468,9 +511,9 @@ llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy,
/// and struct-path aware TBAA, the tag has the same format:
/// base type, access type and offset.
/// When ConvertTypeToTag is true, we create a tag based on the scalar type.
-void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
- llvm::MDNode *TBAAInfo,
- bool ConvertTypeToTag) {
+void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag) {
if (ConvertTypeToTag && TBAA)
Inst->setMetadata(llvm::LLVMContext::MD_tbaa,
TBAA->getTBAAScalarTagInfo(TBAAInfo));
@@ -478,6 +521,16 @@ void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
}
+void CodeGenModule::DecorateInstructionWithInvariantGroup(
+ llvm::Instruction *I, const CXXRecordDecl *RD) {
+  llvm::Metadata *MD =
+      CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ auto *MetaDataNode = dyn_cast<llvm::MDNode>(MD);
+ // Check if we have to wrap MDString in MDNode.
+ if (!MetaDataNode)
+ MetaDataNode = llvm::MDNode::get(getLLVMContext(), MD);
+ I->setMetadata(llvm::LLVMContext::MD_invariant_group, MetaDataNode);
+}
+
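// Editorial sketch, not part of this patch: how a caller might use the new
// hook. Under -fstrict-vtable-pointers, tagging a vtable-pointer load lets
// the optimizer assume the value is invariant within its !invariant.group.
// The helper name is illustrative; the API names match this patch.
static llvm::LoadInst *EmitVTablePtrLoad(CodeGenFunction &CGF,
                                         Address VTablePtrAddr,
                                         const CXXRecordDecl *RD) {
  llvm::LoadInst *VTable = CGF.Builder.CreateLoad(VTablePtrAddr, "vtable");
  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
      CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    CGF.CGM.DecorateInstructionWithInvariantGroup(VTable, RD);
  return VTable;
}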
void CodeGenModule::Error(SourceLocation loc, StringRef message) {
unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
getDiags().Report(Context.getFullLoc(loc), diagID) << message;
@@ -692,6 +745,21 @@ void CodeGenModule::setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F)
F->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
}
+llvm::ConstantInt *
+CodeGenModule::CreateCfiIdForTypeMetadata(llvm::Metadata *MD) {
+ llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
+ if (!MDS) return nullptr;
+
+ llvm::MD5 md5;
+ llvm::MD5::MD5Result result;
+ md5.update(MDS->getString());
+ md5.final(result);
+ uint64_t id = 0;
+ for (int i = 0; i < 8; ++i)
+ id |= static_cast<uint64_t>(result[i]) << (i * 8);
+ return llvm::ConstantInt::get(Int64Ty, id);
+}
+
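// Editorial sketch, not part of this patch: the ID above is just the first
// eight MD5 digest bytes of the mangled type name, assembled little-endian.
// A standalone equivalent, useful when cross-checking against a runtime:
static uint64_t CfiTypeIdForName(llvm::StringRef MangledName) {
  llvm::MD5 Hash;
  llvm::MD5::MD5Result Result;
  Hash.update(MangledName);
  Hash.final(Result);
  uint64_t Id = 0;
  for (int I = 0; I < 8; ++I)
    Id |= static_cast<uint64_t>(Result[I]) << (I * 8);
  return Id;
}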
void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D,
llvm::Function *F) {
setNonAliasAttributes(D, F);
@@ -737,6 +805,21 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ B.addAttribute(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ B.addAttribute(llvm::Attribute::StackProtectReq);
+
+ if (!D) {
+ F->addAttributes(llvm::AttributeSet::FunctionIndex,
+ llvm::AttributeSet::get(
+ F->getContext(),
+ llvm::AttributeSet::FunctionIndex, B));
+ return;
+ }
+
if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
B.addAttribute(llvm::Attribute::Naked);
@@ -761,13 +844,6 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (D->hasAttr<MinSizeAttr>())
B.addAttribute(llvm::Attribute::MinSize);
- if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- B.addAttribute(llvm::Attribute::StackProtect);
- else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
- B.addAttribute(llvm::Attribute::StackProtectStrong);
- else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- B.addAttribute(llvm::Attribute::StackProtectReq);
-
F->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(
F->getContext(), llvm::AttributeSet::FunctionIndex, B));
@@ -778,10 +854,8 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
F->addFnAttr(llvm::Attribute::NoInline);
// OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline.
- assert(!F->hasFnAttribute(llvm::Attribute::OptimizeForSize) &&
- "OptimizeNone and OptimizeForSize on same function!");
- assert(!F->hasFnAttribute(llvm::Attribute::MinSize) &&
- "OptimizeNone and MinSize on same function!");
+ F->removeFnAttr(llvm::Attribute::OptimizeForSize);
+ F->removeFnAttr(llvm::Attribute::MinSize);
assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
"OptimizeNone and AlwaysInline on same function!");
@@ -800,19 +874,24 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (alignment)
F->setAlignment(alignment);
- // C++ ABI requires 2-byte alignment for member functions.
- if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
- F->setAlignment(2);
+ // Some C++ ABIs require 2-byte alignment for member functions, in order to
+ // reserve a bit for differentiating between virtual and non-virtual member
+ // functions. If the current target's C++ ABI requires this and this is a
+ // member function, set its alignment accordingly.
+ if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+ }
}
void CodeGenModule::SetCommonAttributes(const Decl *D,
llvm::GlobalValue *GV) {
- if (const auto *ND = dyn_cast<NamedDecl>(D))
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(D))
setGlobalVisibility(GV, ND);
else
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- if (D->hasAttr<UsedAttr>())
+ if (D && D->hasAttr<UsedAttr>())
addUsedGlobal(GV);
}
@@ -830,8 +909,9 @@ void CodeGenModule::setNonAliasAttributes(const Decl *D,
llvm::GlobalObject *GO) {
SetCommonAttributes(D, GO);
- if (const SectionAttr *SA = D->getAttr<SectionAttr>())
- GO->setSection(SA->getName());
+ if (D)
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GO->setSection(SA->getName());
getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
@@ -872,6 +952,49 @@ static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV,
}
}
+void CodeGenModule::CreateFunctionBitSetEntry(const FunctionDecl *FD,
+ llvm::Function *F) {
+ // Only if we are checking indirect calls.
+ if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
+ return;
+
+ // Non-static class methods are handled via vtable pointer checks elsewhere.
+ if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
+ return;
+
+  // Additional restrictions apply when building with cross-DSO support.
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ // Don't emit entries for function declarations. In cross-DSO mode these are
+ // handled with better precision at run time.
+ if (!FD->hasBody())
+ return;
+ // Skip available_externally functions. They won't be codegen'ed in the
+ // current module anyway.
+ if (getContext().GetGVALinkageForFunction(FD) == GVA_AvailableExternally)
+ return;
+ }
+
+ llvm::NamedMDNode *BitsetsMD =
+ getModule().getOrInsertNamedMetadata("llvm.bitsets");
+
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
+ llvm::Metadata *BitsetOps[] = {
+ MD, llvm::ConstantAsMetadata::get(F),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int64Ty, 0))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps));
+
+ // Emit a hash-based bit set entry for cross-DSO calls.
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ if (auto TypeId = CreateCfiIdForTypeMetadata(MD)) {
+ llvm::Metadata *BitsetOps2[] = {
+ llvm::ConstantAsMetadata::get(TypeId),
+ llvm::ConstantAsMetadata::get(F),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int64Ty, 0))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps2));
+ }
+ }
+}
+
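// Editorial sketch, not part of this patch: inputs that reach the code above
// under -fsanitize=cfi-icall. The free function gets a bitset entry keyed on
// the metadata identifier of its type; the member function is skipped here
// because vtable-pointer checks cover it elsewhere.
void OnEvent(int Code);            // entry in !llvm.bitsets for "void (int)"
struct Handler {
  void OnEvent(int Code);          // no entry: non-static member function
};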
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
bool IsIncompleteFunction,
bool IsThunk) {
@@ -913,6 +1036,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
if (FD->isReplaceableGlobalAllocationFunction())
F->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoBuiltin);
+
+ CreateFunctionBitSetEntry(FD, F);
}
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
@@ -1104,9 +1229,16 @@ void CodeGenModule::EmitDeferred() {
llvm::GlobalValue *GV = G.GV;
G.GV = nullptr;
- assert(!GV || GV == GetGlobalValue(getMangledName(D)));
- if (!GV)
- GV = GetGlobalValue(getMangledName(D));
+    // We should call GetAddrOfGlobal with IsForDefinition set to true in
+    // order to get a GlobalValue with exactly the type we need, not something
+    // that might have been created for another decl with the same mangled
+    // name but a different type.
+ // FIXME: Support for variables is not implemented yet.
+ if (isa<FunctionDecl>(D.getDecl()))
+ GV = cast<llvm::GlobalValue>(GetAddrOfGlobal(D, /*IsForDefinition=*/true));
+    else if (!GV)
+      GV = GetGlobalValue(getMangledName(D));
// Check to see if we've already emitted this. This is necessary
// for a couple of reasons: first, decls can end up in the
@@ -1208,7 +1340,7 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::Function *Fn,
if (SanitizerBL.isBlacklistedFunction(Fn->getName()))
return true;
// Blacklist by location.
- if (!Loc.isInvalid())
+ if (Loc.isValid())
return SanitizerBL.isBlacklistedLocation(Loc);
// If location is unknown, this may be a compiler-generated function. Assume
// it's located in the main file.
@@ -1271,7 +1403,7 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
return true;
}
-llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
+ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
const CXXUuidofExpr* E) {
// Sema has verified that IIDSource has a __declspec(uuid()), and that it's
// well-formed.
@@ -1279,9 +1411,12 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
std::string Name = "_GUID_" + Uuid.lower();
std::replace(Name.begin(), Name.end(), '-', '_');
+  // The GUID contains 32-bit fields, so 4-byte alignment suffices.
+ CharUnits Alignment = CharUnits::fromQuantity(4);
+
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return GV;
+ return ConstantAddress(GV, Alignment);
llvm::Constant *Init = EmitUuidofInitializer(Uuid);
assert(Init && "failed to initialize as constant");
@@ -1291,20 +1426,22 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
/*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
+ CharUnits Alignment = getContext().getDeclAlign(VD);
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
if (Entry) {
unsigned AS = getContext().getTargetAddressSpace(VD->getType());
- return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ return ConstantAddress(Ptr, Alignment);
}
llvm::Constant *Aliasee;
@@ -1321,7 +1458,7 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
- return Aliasee;
+ return ConstantAddress(Aliasee, Alignment);
}
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
@@ -1435,7 +1572,7 @@ namespace {
unsigned BuiltinID = FD->getBuiltinID();
if (!BuiltinID || !BI.isLibFunction(BuiltinID))
return true;
- StringRef BuiltinName = BI.GetName(BuiltinID);
+ StringRef BuiltinName = BI.getName(BuiltinID);
if (BuiltinName.startswith("__builtin_") &&
Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
Result = true;
@@ -1444,6 +1581,35 @@ namespace {
return true;
}
};
+
+ struct DLLImportFunctionVisitor
+ : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
+ bool SafeToInline = true;
+
+ bool VisitVarDecl(VarDecl *VD) {
+ // A thread-local variable cannot be imported.
+ SafeToInline = !VD->getTLSKind();
+ return SafeToInline;
+ }
+
+ // Make sure we're not referencing non-imported vars or functions.
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ ValueDecl *VD = E->getDecl();
+ if (isa<FunctionDecl>(VD))
+ SafeToInline = VD->hasAttr<DLLImportAttr>();
+ else if (VarDecl *V = dyn_cast<VarDecl>(VD))
+ SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ bool VisitCXXNewExpr(CXXNewExpr *E) {
+ SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
+ };
}
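// Editorial sketch, not part of this patch: user code the visitor rejects.
// The dllimport inline function references a non-imported global, so emitting
// its body in the importing module would bypass the DLL's own copy.
extern int RefCount;                         // not dllimport
__declspec(dllimport) inline int AddRef() {
  return ++RefCount;          // VisitDeclRefExpr sets SafeToInline = false
}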
// isTriviallyRecursive - Check if this function calls another
@@ -1474,6 +1640,15 @@ CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
const auto *F = cast<FunctionDecl>(GD.getDecl());
if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
return false;
+
+ if (F->hasAttr<DLLImportAttr>()) {
+ // Check whether it would be safe to inline this dllimport function.
+ DLLImportFunctionVisitor Visitor;
+ Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
+ if (!Visitor.SafeToInline)
+ return false;
+ }
+
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
// but a function that calls itself is clearly not equivalent to the real
@@ -1537,6 +1712,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn);
+
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
@@ -1549,7 +1727,8 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
llvm::Type *Ty,
GlobalDecl GD, bool ForVTable,
bool DontDefer, bool IsThunk,
- llvm::AttributeSet ExtraAttrs) {
+ llvm::AttributeSet ExtraAttrs,
+ bool IsForDefinition) {
const Decl *D = GD.getDecl();
// Lookup the entry, lazily creating it if necessary.
@@ -1565,11 +1744,33 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
- if (Entry->getType()->getElementType() == Ty)
+ // If there are two attempts to define the same mangled name, issue an
+ // error.
+ if (IsForDefinition && !Entry->isDeclaration()) {
+ GlobalDecl OtherGD;
+    // Checking that GD is not already in DiagnosedConflictingDefinitions is
+    // required to make sure that we issue the error only once.
+ if (lookupRepresentativeDecl(MangledName, OtherGD) &&
+ (GD.getCanonicalDecl().getDecl() !=
+ OtherGD.getCanonicalDecl().getDecl()) &&
+ DiagnosedConflictingDefinitions.insert(GD).second) {
+ getDiags().Report(D->getLocation(),
+ diag::err_duplicate_mangled_name);
+ getDiags().Report(OtherGD.getDecl()->getLocation(),
+ diag::note_previous_definition);
+ }
+ }
+
+ if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
+ (Entry->getType()->getElementType() == Ty)) {
return Entry;
+ }
// Make sure the result is of the correct type.
- return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
+  // (If the function is requested for a definition, we always need to create
+  // a new function, not just return a bitcast.)
+ if (!IsForDefinition)
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
}
// This function doesn't have a complete type (for example, the return
@@ -1584,10 +1785,36 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
FTy = llvm::FunctionType::get(VoidTy, false);
IsIncompleteFunction = true;
}
-
- llvm::Function *F = llvm::Function::Create(FTy,
- llvm::Function::ExternalLinkage,
- MangledName, &getModule());
+
+ llvm::Function *F =
+ llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
+ Entry ? StringRef() : MangledName, &getModule());
+
+ // If we already created a function with the same mangled name (but different
+ // type) before, take its name and add it to the list of functions to be
+ // replaced with F at the end of CodeGen.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (Entry) {
+ F->takeName(Entry);
+
+    // This might be an implementation of a function without a prototype, in
+    // which case, try to do special replacement of calls which match the new
+    // prototype. The key point is that we also potentially drop arguments
+    // from the call site so as to make a direct call, which makes the inliner
+    // happier and suppresses a number of optimizer warnings (!) about
+    // dropping arguments.
+ if (!Entry->use_empty()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
+ Entry->removeDeadConstantUsers();
+ }
+
+ llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
+ F, Entry->getType()->getElementType()->getPointerTo());
+ addGlobalValReplacement(Entry, BC);
+ }
+
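// Editorial note, not part of this patch: the C idiom that reaches this
// branch (shown in a comment, since "int f();" already declares a prototype
// in C++):
//
//   int f();                    /* no-prototype declaration: IR "i32 (...)" */
//   int g(void) { return f(); } /* call through the old declaration        */
//   int f(int x) { return x; }  /* definition: IR "i32 (i32)"              */
//
// The new llvm::Function takes over the name, matching calls are rewritten
// to call it directly, and the leftover declaration is queued through
// addGlobalValReplacement for replacement during Release().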
assert(F->getName() == MangledName && "name was uniqued!");
if (D)
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
@@ -1660,13 +1887,19 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
llvm::Type *Ty,
bool ForVTable,
- bool DontDefer) {
+ bool DontDefer,
+ bool IsForDefinition) {
// If there was no specific requested type, just convert it now.
- if (!Ty)
- Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
-
+ if (!Ty) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ auto CanonTy = Context.getCanonicalType(FD->getType());
+ Ty = getTypes().ConvertFunctionType(CanonTy, FD);
+ }
+
StringRef MangledName = getMangledName(GD);
- return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
+ /*IsThunk=*/false, llvm::AttributeSet(),
+ IsForDefinition);
}
/// CreateRuntimeFunction - Create a new runtime function with the specified
@@ -1781,7 +2014,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
if (D->getTLSKind()) {
if (D->getTLSKind() == VarDecl::TLS_Dynamic)
- CXXThreadLocals.push_back(std::make_pair(D, GV));
+ CXXThreadLocals.push_back(D);
setTLSMode(GV, *D);
}
@@ -1805,6 +2038,33 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
return GV;
}
+llvm::Constant *
+CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
+ bool IsForDefinition) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ getFromCtorType(GD.getCtorType()),
+ /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ /*DontDefer=*/false, IsForDefinition);
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ getFromDtorType(GD.getDtorType()),
+ /*FnInfo=*/nullptr, /*FnType=*/nullptr,
+ /*DontDefer=*/false, IsForDefinition);
+ else if (isa<CXXMethodDecl>(GD.getDecl())) {
+ auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
+ cast<CXXMethodDecl>(GD.getDecl()));
+ auto Ty = getTypes().GetFunctionType(*FInfo);
+ return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+ IsForDefinition);
+ } else if (isa<FunctionDecl>(GD.getDecl())) {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
+ IsForDefinition);
+ } else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+}
llvm::GlobalVariable *
CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
@@ -1893,8 +2153,8 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
- return Context.toCharUnitsFromBits(
- TheDataLayout.getTypeStoreSizeInBits(Ty));
+ return Context.toCharUnitsFromBits(
+ getDataLayout().getTypeStoreSizeInBits(Ty));
}
unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
@@ -1986,7 +2246,18 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
const VarDecl *InitDecl;
const Expr *InitExpr = D->getAnyInitializer(InitDecl);
- if (!InitExpr) {
+ // CUDA E.2.4.1 "__shared__ variables cannot have an initialization as part
+ // of their declaration."
+  if (getLangOpts().CPlusPlus && getLangOpts().CUDAIsDevice &&
+      D->hasAttr<CUDASharedAttr>()) {
+ if (InitExpr) {
+ const auto *C = dyn_cast<CXXConstructExpr>(InitExpr);
+ if (C == nullptr || !C->getConstructor()->hasTrivialBody())
+ Error(D->getLocation(),
+ "__shared__ variable cannot have an initialization.");
+ }
+ Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ } else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
//
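// Editorial sketch, not part of this patch: CUDA device code exercising the
// __shared__ check above.
__shared__ int Ok;          // accepted: lowered to an undef initializer
__shared__ int Bad = 42;    // rejected: "__shared__ variable cannot have an
                            // initialization."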
@@ -2072,6 +2343,17 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, GV);
+ // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
+ // the device. [...]"
+ // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
+ // __device__, declares a variable that: [...]
+ // Is accessible from all the threads within the grid and from the host
+ // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
+ // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
+ if (GV && LangOpts.CUDA && LangOpts.CUDAIsDevice &&
+ (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>())) {
+ GV->setExternallyInitialized(true);
+ }
GV->setInitializer(Init);
// If it is safe to mark the global 'constant', do so now.
@@ -2091,12 +2373,17 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::GlobalValue::LinkageTypes Linkage =
getLLVMLinkageVarDefinition(D, GV->isConstant());
- // On Darwin, the backing variable for a C++11 thread_local variable always
- // has internal linkage; all accesses should just be calls to the
+ // On Darwin, if the normal linkage of a C++ thread_local variable is
+ // LinkOnce or Weak, we keep the normal linkage to prevent multiple
+ // copies within a linkage unit; otherwise, the backing variable has
+ // internal linkage and all accesses should just be calls to the
// Itanium-specified entry point, which has the normal linkage of the
- // variable.
+ // variable. This is to preserve the ability to change the implementation
+ // behind the scenes.
if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
- Context.getTargetInfo().getTriple().isMacOSX())
+ Context.getTargetInfo().getTriple().isOSDarwin() &&
+ !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
+ !llvm::GlobalVariable::isWeakLinkage(Linkage))
Linkage = llvm::GlobalValue::InternalLinkage;
GV->setLinkage(Linkage);
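// Editorial sketch, not part of this patch: the two Darwin cases above. A
// strong thread_local definition keeps the old behaviour (internal backing
// variable, accessed only through the Itanium thread wrapper); a
// weak/linkonce one now keeps its linkage so the linker can fold the
// duplicate copies emitted by multiple translation units.
thread_local int Counter = 0;            // backing variable made internal
template <typename T> struct PerThread {
  static thread_local T Value;           // linkonce_odr: linkage preserved
};
template <typename T> thread_local T PerThread<T>::Value{};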
@@ -2115,7 +2402,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (D->getTLSKind() && !GV->isThreadLocal()) {
if (D->getTLSKind() == VarDecl::TLS_Dynamic)
- CXXThreadLocals.push_back(std::make_pair(D, GV));
+ CXXThreadLocals.push_back(D);
setTLSMode(GV, *D);
}
@@ -2166,7 +2453,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
// Declarations with a required alignment do not have common linkage in MSVC
// mode.
- if (Context.getLangOpts().MSVCCompat) {
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
if (D->hasAttr<AlignedAttr>())
return true;
QualType VarType = D->getType();
@@ -2263,6 +2550,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Type *newRetTy = newFn->getReturnType();
SmallVector<llvm::Value*, 4> newArgs;
+ SmallVector<llvm::OperandBundleDef, 1> newBundles;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
ui != ue; ) {
@@ -2330,16 +2618,19 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// over the required information.
newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo);
+ // Copy over any operand bundles.
+ callSite.getOperandBundlesAsDefs(newBundles);
+
llvm::CallSite newCall;
if (callSite.isCall()) {
- newCall = llvm::CallInst::Create(newFn, newArgs, "",
+ newCall = llvm::CallInst::Create(newFn, newArgs, newBundles, "",
callSite.getInstruction());
} else {
auto *oldInvoke = cast<llvm::InvokeInst>(callSite.getInstruction());
newCall = llvm::InvokeInst::Create(newFn,
oldInvoke->getNormalDest(),
oldInvoke->getUnwindDest(),
- newArgs, "",
+ newArgs, newBundles, "",
callSite.getInstruction());
}
newArgs.clear(); // for the next iteration
@@ -2357,6 +2648,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Copy debug location attached to CI.
if (callSite->getDebugLoc())
newCall->setDebugLoc(callSite->getDebugLoc());
+
callSite->eraseFromParent();
}
}
@@ -2397,66 +2689,14 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
- if (!GV) {
- llvm::Constant *C =
- GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer*/ true);
-
- // Strip off a bitcast if we got one back.
- if (auto *CE = dyn_cast<llvm::ConstantExpr>(C)) {
- assert(CE->getOpcode() == llvm::Instruction::BitCast);
- GV = cast<llvm::GlobalValue>(CE->getOperand(0));
- } else {
- GV = cast<llvm::GlobalValue>(C);
- }
- }
+ if (!GV || (GV->getType()->getElementType() != Ty))
+ GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/true,
+ /*IsForDefinition=*/true));
- if (!GV->isDeclaration()) {
- getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name);
- GlobalDecl OldGD = Manglings.lookup(GV->getName());
- if (auto *Prev = OldGD.getDecl())
- getDiags().Report(Prev->getLocation(), diag::note_previous_definition);
+ // Already emitted.
+ if (!GV->isDeclaration())
return;
- }
-
- if (GV->getType()->getElementType() != Ty) {
- // If the types mismatch then we have to rewrite the definition.
- assert(GV->isDeclaration() && "Shouldn't replace non-declaration");
-
- // F is the Function* for the one with the wrong type, we must make a new
- // Function* and update everything that used F (a declaration) with the new
- // Function* (which will be a definition).
- //
- // This happens if there is a prototype for a function
- // (e.g. "int f()") and then a definition of a different type
- // (e.g. "int f(int x)"). Move the old function aside so that it
- // doesn't interfere with GetAddrOfFunction.
- GV->setName(StringRef());
- auto *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
-
- // This might be an implementation of a function without a
- // prototype, in which case, try to do special replacement of
- // calls which match the new prototype. The really key thing here
- // is that we also potentially drop arguments from the call site
- // so as to make a direct call, which makes the inliner happier
- // and suppresses a number of optimizer warnings (!) about
- // dropping arguments.
- if (!GV->use_empty()) {
- ReplaceUsesOfNonProtoTypeWithRealFunction(GV, NewFn);
- GV->removeDeadConstantUsers();
- }
-
- // Replace uses of F with the Function we will endow with a body.
- if (!GV->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(NewFn, GV->getType());
- GV->replaceAllUsesWith(NewPtrForOldDecl);
- }
-
- // Ok, delete the old function now, which is dead.
- GV->eraseFromParent();
-
- GV = NewFn;
- }
// We need to set linkage and visibility on the function before
// generating code for it because various parts of IR generation
@@ -2521,8 +2761,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// Create the new alias itself, but don't set a name yet.
auto *GA = llvm::GlobalAlias::create(
- cast<llvm::PointerType>(Aliasee->getType()),
- llvm::Function::ExternalLinkage, "", Aliasee, &getModule());
+ DeclTy, 0, llvm::Function::ExternalLinkage, "", Aliasee, &getModule());
if (Entry) {
if (GA->getAliasee() == Entry) {
@@ -2612,7 +2851,7 @@ GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
return *Map.insert(std::make_pair(String, nullptr)).first;
}
-llvm::Constant *
+ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
unsigned StringLength = 0;
bool isUTF16 = false;
@@ -2622,7 +2861,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
StringLength);
if (auto *C = Entry.second)
- return C;
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -2658,7 +2897,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// String pointer.
llvm::Constant *C = nullptr;
if (isUTF16) {
- ArrayRef<uint16_t> Arr = llvm::makeArrayRef<uint16_t>(
+ auto Arr = llvm::makeArrayRef(
reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
Entry.first().size() / 2);
C = llvm::ConstantDataArray::get(VMContext, Arr);
@@ -2699,25 +2938,28 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+ CharUnits Alignment = getPointerAlign();
+
// The struct.
C = llvm::ConstantStruct::get(STy, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
GV->setSection("__DATA,__cfstring");
+ GV->setAlignment(Alignment.getQuantity());
Entry.second = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
- return C;
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -2810,10 +3052,12 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
+ CharUnits Alignment = getPointerAlign();
C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_nsstring_");
+ GV->setAlignment(Alignment.getQuantity());
const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
const char *NSStringNonFragileABISection =
"__DATA,__objc_stringobj,regular,no_dead_strip";
@@ -2823,7 +3067,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
: NSStringSection);
Entry.second = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
QualType CodeGenModule::getObjCFastEnumerationStateType() {
@@ -2902,7 +3146,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
CodeGenModule &CGM, StringRef GlobalName,
- unsigned Alignment) {
+ CharUnits Alignment) {
// OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
unsigned AddrSpace = 0;
if (CGM.getLangOpts().OpenCL)
@@ -2913,7 +3157,7 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
auto *GV = new llvm::GlobalVariable(
M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
- GV->setAlignment(Alignment);
+ GV->setAlignment(Alignment.getQuantity());
GV->setUnnamedAddr(true);
if (GV->isWeakForLinker()) {
assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
@@ -2925,20 +3169,19 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name) {
- auto Alignment =
- getContext().getAlignOfGlobalVarInChars(S->getType()).getQuantity();
+ CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
llvm::GlobalVariable **Entry = nullptr;
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment > GV->getAlignment())
- GV->setAlignment(Alignment);
- return GV;
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
}
}
@@ -2954,7 +3197,6 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
llvm::raw_svector_ostream Out(MangledNameBuffer);
getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
- Out.flush();
LT = llvm::GlobalValue::LinkOnceODRLinkage;
GlobalVariableName = MangledNameBuffer;
@@ -2969,12 +3211,12 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
QualType());
- return GV;
+ return ConstantAddress(GV, Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
/// array for the given ObjCEncodeExpr node.
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
std::string Str;
getContext().getObjCEncodingForType(E->getEncodedType(), Str);
@@ -2985,14 +3227,11 @@ CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
/// GetAddrOfConstantCString - Returns a pointer to a character array containing
/// the literal and a terminating '\0' character.
/// The result has pointer to array type.
-llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
- const std::string &Str, const char *GlobalName, unsigned Alignment) {
+ConstantAddress CodeGenModule::GetAddrOfConstantCString(
+ const std::string &Str, const char *GlobalName) {
StringRef StrWithNull(Str.c_str(), Str.size() + 1);
- if (Alignment == 0) {
- Alignment = getContext()
- .getAlignOfGlobalVarInChars(getContext().CharTy)
- .getQuantity();
- }
+ CharUnits Alignment =
+ getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
llvm::Constant *C =
llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
@@ -3002,9 +3241,9 @@ llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment > GV->getAlignment())
- GV->setAlignment(Alignment);
- return GV;
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
}
}
@@ -3016,10 +3255,10 @@ llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
GlobalName, Alignment);
if (Entry)
*Entry = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
+ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
const MaterializeTemporaryExpr *E, const Expr *Init) {
assert((E->getStorageDuration() == SD_Static ||
E->getStorageDuration() == SD_Thread) && "not a global temporary");
@@ -3031,9 +3270,10 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
if (Init == E->GetTemporaryExpr())
MaterializedType = E->getType();
- llvm::Constant *&Slot = MaterializedGlobalTemporaryMap[E];
- if (Slot)
- return Slot;
+ CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
+
+ if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
+ return ConstantAddress(Slot, Align);
// FIXME: If an externally-visible declaration extends multiple temporaries,
// we need to give each temporary the same name in every translation unit (and
@@ -3042,7 +3282,6 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
llvm::raw_svector_ostream Out(Name);
getCXXABI().getMangleContext().mangleReferenceTemporary(
VD, E->getManglingNumber(), Out);
- Out.flush();
APValue *Value = nullptr;
if (E->getStorageDuration() == SD_Static) {
@@ -3098,14 +3337,13 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
setGlobalVisibility(GV, VD);
- GV->setAlignment(
- getContext().getTypeAlignInChars(MaterializedType).getQuantity());
+ GV->setAlignment(Align.getQuantity());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (VD->getTLSKind())
setTLSMode(GV, *VD);
- Slot = GV;
- return GV;
+ MaterializedGlobalTemporaryMap[E] = GV;
+ return ConstantAddress(GV, Align);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -3367,11 +3605,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
auto *Import = cast<ImportDecl>(D);
// Ignore import declarations that come from imported modules.
- if (clang::Module *Owner = Import->getImportedOwningModule()) {
- if (getLangOpts().CurrentModule.empty() ||
- Owner->getTopLevelModule()->Name == getLangOpts().CurrentModule)
- break;
- }
+ if (Import->getImportedOwningModule())
+ break;
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitImportDecl(*Import);
@@ -3412,7 +3647,7 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
case Decl::ObjCMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor: {
- if (!cast<FunctionDecl>(D)->hasBody())
+ if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
return;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
@@ -3541,10 +3776,12 @@ bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
void CodeGenModule::EmitDeclMetadata() {
llvm::NamedMDNode *GlobalMetadata = nullptr;
- // StaticLocalDeclMap
for (auto &I : MangledDeclNames) {
llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
- EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
+ // Some mangled names don't necessarily have an associated GlobalValue
+ // in this module, e.g. if we mangled it for DebugInfo.
+ if (Addr)
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
}
}
@@ -3562,7 +3799,7 @@ void CodeGenFunction::EmitDeclMetadata() {
for (auto &I : LocalDeclMap) {
const Decl *D = I.first;
- llvm::Value *Addr = I.second;
+ llvm::Value *Addr = I.second.getPointer();
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
Alloca->setMetadata(
@@ -3643,12 +3880,6 @@ llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
return llvm::ConstantStruct::getAnon(Fields);
}
-llvm::Constant *
-CodeGenModule::getAddrOfCXXCatchHandlerType(QualType Ty,
- QualType CatchHandlerType) {
- return getCXXABI().getAddrOfCXXCatchHandlerType(Ty, CatchHandlerType);
-}
-
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
bool ForEH) {
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
@@ -3671,22 +3902,82 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
VD->getAnyInitializer() &&
!VD->getAnyInitializer()->isConstantInitializer(getContext(),
/*ForRef=*/false);
+
+ Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
- VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(), PerformInit))
+ VD, Addr, RefExpr->getLocStart(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
}
}
-llvm::MDTuple *CodeGenModule::CreateVTableBitSetEntry(
- llvm::GlobalVariable *VTable, CharUnits Offset, const CXXRecordDecl *RD) {
- std::string OutName;
- llvm::raw_string_ostream Out(OutName);
- getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);
+llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
+ llvm::Metadata *&InternalId = MetadataIdMap[T.getCanonicalType()];
+ if (InternalId)
+ return InternalId;
+
+ if (isExternallyVisible(T->getLinkage())) {
+ std::string OutName;
+ llvm::raw_string_ostream Out(OutName);
+ getCXXABI().getMangleContext().mangleTypeName(T, Out);
+
+ InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
+ } else {
+ InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
+ llvm::ArrayRef<llvm::Metadata *>());
+ }
+
+ return InternalId;
+}
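// Editorial sketch, not part of this patch: inputs for the two branches.
struct Pub {};            // external linkage: MDString of the mangled name
                          // (e.g. "_ZTS3Pub" under Itanium), stable across
                          // modules so entries can be merged at link time
namespace {
struct Priv {};           // internal linkage: a distinct unnamed MDNode,
}                         // never unified with other modules' types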
+void CodeGenModule::CreateVTableBitSetEntry(llvm::NamedMDNode *BitsetsMD,
+ llvm::GlobalVariable *VTable,
+ CharUnits Offset,
+ const CXXRecordDecl *RD) {
+ llvm::Metadata *MD =
+ CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Metadata *BitsetOps[] = {
- llvm::MDString::get(getLLVMContext(), Out.str()),
- llvm::ConstantAsMetadata::get(VTable),
+ MD, llvm::ConstantAsMetadata::get(VTable),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))};
- return llvm::MDTuple::get(getLLVMContext(), BitsetOps);
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps));
+
+ if (CodeGenOpts.SanitizeCfiCrossDso) {
+ if (auto TypeId = CreateCfiIdForTypeMetadata(MD)) {
+ llvm::Metadata *BitsetOps2[] = {
+ llvm::ConstantAsMetadata::get(TypeId),
+ llvm::ConstantAsMetadata::get(VTable),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))};
+ BitsetsMD->addOperand(llvm::MDTuple::get(getLLVMContext(), BitsetOps2));
+ }
+ }
+}
+
+// Fills in the supplied string map with the set of target features for the
+// passed in function.
+void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ const FunctionDecl *FD) {
+ StringRef TargetCPU = Target.getTargetOpts().CPU;
+ if (const auto *TD = FD->getAttr<TargetAttr>()) {
+ // If we have a TargetAttr build up the feature map based on that.
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+
+ // Make a copy of the features as passed on the command line into the
+ // beginning of the additional features from the function to override.
+ ParsedAttr.first.insert(ParsedAttr.first.begin(),
+ Target.getTargetOpts().FeaturesAsWritten.begin(),
+ Target.getTargetOpts().FeaturesAsWritten.end());
+
+    if (!ParsedAttr.second.empty())
+ TargetCPU = ParsedAttr.second;
+
+ // Now populate the feature map, first with the TargetCPU which is either
+ // the default or a new one from the target attribute string. Then we'll use
+ // the passed in features (FeaturesAsWritten) along with the new ones from
+ // the attribute.
+ Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU, ParsedAttr.first);
+ } else {
+ Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU,
+ Target.getTargetOpts().Features);
+ }
}
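// Editorial sketch, not part of this patch: an attribute this consumes.
// Command-line features are inserted first, so the attribute's "avx2" and
// its "arch=haswell" CPU override win over conflicting -target-feature
// flags for this one function.
__attribute__((target("arch=haswell,avx2"))) void FastPath();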
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index dd167a29f5ac..33113837a4cf 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
#include "CGVTables.h"
+#include "CodeGenTypeCache.h"
#include "CodeGenTypes.h"
#include "SanitizerMetadata.h"
#include "clang/AST/Attr.h"
@@ -30,7 +31,6 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
@@ -108,65 +108,14 @@ struct OrderGlobalInits {
}
};
-struct CodeGenTypeCache {
- /// void
- llvm::Type *VoidTy;
+struct ObjCEntrypoints {
+ ObjCEntrypoints() { memset(this, 0, sizeof(*this)); }
- /// i8, i16, i32, and i64
- llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
- /// float, double
- llvm::Type *FloatTy, *DoubleTy;
-
- /// int
- llvm::IntegerType *IntTy;
-
- /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
- union {
- llvm::IntegerType *IntPtrTy;
- llvm::IntegerType *SizeTy;
- llvm::IntegerType *PtrDiffTy;
- };
-
- /// void* in address space 0
- union {
- llvm::PointerType *VoidPtrTy;
- llvm::PointerType *Int8PtrTy;
- };
-
- /// void** in address space 0
- union {
- llvm::PointerType *VoidPtrPtrTy;
- llvm::PointerType *Int8PtrPtrTy;
- };
-
- /// The width of a pointer into the generic address space.
- unsigned char PointerWidthInBits;
-
- /// The size and alignment of a pointer into the generic address
- /// space.
- union {
- unsigned char PointerAlignInBytes;
- unsigned char PointerSizeInBytes;
- unsigned char SizeSizeInBytes; // sizeof(size_t)
- };
-
- llvm::CallingConv::ID RuntimeCC;
- llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
- llvm::CallingConv::ID BuiltinCC;
- llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
-};
-
-struct RREntrypoints {
- RREntrypoints() { memset(this, 0, sizeof(*this)); }
- /// void objc_autoreleasePoolPop(void*);
+ /// void objc_autoreleasePoolPop(void*);
llvm::Constant *objc_autoreleasePoolPop;
/// void *objc_autoreleasePoolPush(void);
llvm::Constant *objc_autoreleasePoolPush;
-};
-
-struct ARCEntrypoints {
- ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
/// id objc_autorelease(id);
llvm::Constant *objc_autorelease;
@@ -257,6 +206,36 @@ public:
void reportDiagnostics(DiagnosticsEngine &Diags, StringRef MainFile);
};
+/// A pair of helper functions for a __block variable.
+class BlockByrefHelpers : public llvm::FoldingSetNode {
+ // MSVC requires this type to be complete in order to process this
+ // header.
+public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ BlockByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ BlockByrefHelpers(const BlockByrefHelpers &) = default;
+ virtual ~BlockByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF, Address dest, Address src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, Address field) = 0;
+};
+
/// This class organizes the cross-function state that is used while generating
/// LLVM code.
class CodeGenModule : public CodeGenTypeCache {
@@ -285,7 +264,6 @@ private:
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
DiagnosticsEngine &Diags;
- const llvm::DataLayout &TheDataLayout;
const TargetInfo &Target;
std::unique_ptr<CGCXXABI> ABI;
llvm::LLVMContext &VMContext;
@@ -307,9 +285,8 @@ private:
CGOpenMPRuntime* OpenMPRuntime;
CGCUDARuntime* CUDARuntime;
CGDebugInfo* DebugInfo;
- ARCEntrypoints *ARCData;
+ ObjCEntrypoints *ObjCData;
llvm::MDNode *NoObjCARCExceptionsMetadata;
- RREntrypoints *RRData;
std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader;
InstrProfStats PGOStats;
@@ -343,6 +320,17 @@ private:
typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
ReplacementsTy Replacements;
+ /// List of global values to be replaced with something else. Used when we
+ /// want to replace a GlobalValue but can't identify it by its mangled name
+ /// anymore (because the name is already taken).
+ llvm::SmallVector<std::pair<llvm::GlobalValue *, llvm::Constant *>, 8>
+ GlobalValReplacements;
+
+ /// Set of global decls for which we already diagnosed mangled name conflict.
+  /// Set of global decls for which we have already diagnosed a mangled-name
+  /// conflict. Required so that we do not issue the diagnostic (an error and
+  /// a note) multiple times for the same decl.
+
/// A queue of (optional) vtables to consider emitting.
std::vector<const CXXRecordDecl*> DeferredVTables;
@@ -390,13 +378,12 @@ private:
StaticExternCMap StaticExternCValues;
/// \brief thread_local variables defined or used in this TU.
- std::vector<std::pair<const VarDecl *, llvm::GlobalVariable *> >
- CXXThreadLocals;
+ std::vector<const VarDecl *> CXXThreadLocals;
/// \brief thread_local variables with initializers that need to run
/// before any thread_local variable in this TU is odr-used.
std::vector<llvm::Function *> CXXThreadLocalInits;
- std::vector<llvm::GlobalVariable *> CXXThreadLocalInitVars;
+ std::vector<const VarDecl *> CXXThreadLocalInitVars;
/// Global variables with initializers that need to run before main.
std::vector<llvm::Function *> CXXGlobalInits;
@@ -491,12 +478,16 @@ private:
llvm::DenseMap<const Decl *, bool> DeferredEmptyCoverageMappingDecls;
std::unique_ptr<CoverageMappingModuleGen> CoverageMapping;
+
+ /// Mapping from canonical types to their metadata identifiers. We need to
+ /// maintain this mapping because identifiers may be formed from distinct
+ /// MDNodes.
+ llvm::DenseMap<QualType, llvm::Metadata *> MetadataIdMap;
+
public:
- CodeGenModule(ASTContext &C,
- const HeaderSearchOptions &headersearchopts,
+ CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts,
const PreprocessorOptions &ppopts,
- const CodeGenOptions &CodeGenOpts,
- llvm::Module &M, const llvm::DataLayout &TD,
+ const CodeGenOptions &CodeGenOpts, llvm::Module &M,
DiagnosticsEngine &Diags,
CoverageSourceInfo *CoverageInfo = nullptr);
@@ -534,14 +525,9 @@ public:
return *CUDARuntime;
}
- ARCEntrypoints &getARCEntrypoints() const {
- assert(getLangOpts().ObjCAutoRefCount && ARCData != nullptr);
- return *ARCData;
- }
-
- RREntrypoints &getRREntrypoints() const {
- assert(RRData != nullptr);
- return *RRData;
+ ObjCEntrypoints &getObjCEntrypoints() const {
+ assert(ObjCData != nullptr);
+ return *ObjCData;
}
InstrProfStats &getPGOStats() { return PGOStats; }
@@ -614,7 +600,9 @@ public:
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
llvm::Module &getModule() const { return TheModule; }
DiagnosticsEngine &getDiags() const { return Diags; }
- const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
+ const llvm::DataLayout &getDataLayout() const {
+ return TheModule.getDataLayout();
+ }
const TargetInfo &getTarget() const { return Target; }
const llvm::Triple &getTriple() const;
bool supportsCOMDAT() const;
@@ -645,8 +633,6 @@ public:
llvm::MDNode *getTBAAInfo(QualType QTy);
llvm::MDNode *getTBAAInfoForVTablePtr();
llvm::MDNode *getTBAAStructInfo(QualType QTy);
- /// Return the MDNode in the type DAG for the given struct type.
- llvm::MDNode *getTBAAStructTypeInfo(QualType QTy);
/// Return the path-aware tag for given base type, access node and offset.
llvm::MDNode *getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN,
uint64_t O);
@@ -660,9 +646,13 @@ public:
/// is the same as the type. For struct-path aware TBAA, the tag
/// is different from the type: base type, access type and offset.
/// When ConvertTypeToTag is true, we create a tag based on the scalar type.
- void DecorateInstruction(llvm::Instruction *Inst,
- llvm::MDNode *TBAAInfo,
- bool ConvertTypeToTag = true);
+ void DecorateInstructionWithTBAA(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo,
+ bool ConvertTypeToTag = true);
+
+  /// Adds !invariant.group metadata to the instruction.
+ void DecorateInstructionWithInvariantGroup(llvm::Instruction *I,
+ const CXXRecordDecl *RD);
/// Emit the given number of characters as a value of type size_t.
llvm::ConstantInt *getSize(CharUnits numChars);
@@ -683,18 +673,7 @@ public:
llvm_unreachable("unknown visibility!");
}
- llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
- if (isa<CXXConstructorDecl>(GD.getDecl()))
- return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()),
- getFromCtorType(GD.getCtorType()));
- else if (isa<CXXDestructorDecl>(GD.getDecl()))
- return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()),
- getFromDtorType(GD.getDtorType()));
- else if (isa<FunctionDecl>(GD.getDecl()))
- return GetAddrOfFunction(GD);
- else
- return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
- }
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD, bool IsForDefinition = false);
/// Will return a global variable of the given type. If a variable with a
/// different type already exists then a new variable with the right type
@@ -706,6 +685,7 @@ public:
llvm::Function *
CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
+ const CGFunctionInfo &FI,
SourceLocation Loc = SourceLocation(),
bool TLS = false);
@@ -724,24 +704,37 @@ public:
/// Return the address of the given function. If Ty is non-null, then this
/// function will use the specified type if it has to create it.
- llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = 0,
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = nullptr,
bool ForVTable = false,
- bool DontDefer = false);
+ bool DontDefer = false,
+ bool IsForDefinition = false);
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
- llvm::Constant *getAddrOfCXXCatchHandlerType(QualType Ty,
- QualType CatchHandlerType);
-
/// Get the address of a uuid descriptor.
- llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+ ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
/// Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
/// Get a reference to the target of VD.
- llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+ ConstantAddress GetWeakRefReference(const ValueDecl *VD);
+
+ /// Returns the assumed alignment of an opaque pointer to the given class.
+ CharUnits getClassPointerAlignment(const CXXRecordDecl *CD);
+
+ /// Returns the assumed alignment of a virtual base of a class.
+ CharUnits getVBaseAlignment(CharUnits DerivedAlign,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *VBase);
+
+ /// Given a class pointer with an actual known alignment, and the
+ /// expected alignment of an object at a dynamic offset w.r.t that
+ /// pointer, return the alignment to assume at the offset.
+ CharUnits getDynamicOffsetAlignment(CharUnits ActualAlign,
+ const CXXRecordDecl *Class,
+ CharUnits ExpectedTargetAlign);
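The clamping intuition behind getDynamicOffsetAlignment, as a self-contained arithmetic sketch (an illustration of the idea only; the real body also consults the record layout of Class):

#include <algorithm>
#include <cstdint>

uint64_t dynamicOffsetAlign(uint64_t ActualBaseAlign,
                            uint64_t ExpectedTargetAlign) {
  // A 16-byte-aligned base holding an 8-byte-aligned subobject at an
  // unknown offset only justifies assuming 8-byte alignment.
  return std::min(ActualBaseAlign, ExpectedTargetAlign);
}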
CharUnits
computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass,
@@ -755,35 +748,7 @@ public:
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd);
- /// A pair of helper functions for a __block variable.
- class ByrefHelpers : public llvm::FoldingSetNode {
- public:
- llvm::Constant *CopyHelper;
- llvm::Constant *DisposeHelper;
-
- /// The alignment of the field. This is important because
- /// different offsets to the field within the byref struct need to
- /// have different helper functions.
- CharUnits Alignment;
-
- ByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
- virtual ~ByrefHelpers();
-
- void Profile(llvm::FoldingSetNodeID &id) const {
- id.AddInteger(Alignment.getQuantity());
- profileImpl(id);
- }
- virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
-
- virtual bool needsCopy() const { return true; }
- virtual void emitCopy(CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src) = 0;
-
- virtual bool needsDispose() const { return true; }
- virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0;
- };
-
- llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache;
+ llvm::FoldingSet<BlockByrefHelpers> ByrefHelpersCache;
/// Fetches the global unique block count.
int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
@@ -798,23 +763,23 @@ public:
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
/// Return a pointer to a constant CFString object for the given string.
- llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+ ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
/// Return a pointer to a constant NSString object for the given string, or
/// to a user-defined string object as specified via the
/// -fconstant-string-class=class_name option.
- llvm::GlobalVariable *GetAddrOfConstantString(const StringLiteral *Literal);
+ ConstantAddress GetAddrOfConstantString(const StringLiteral *Literal);
/// Return a constant array for the given string.
llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
/// Return a pointer to a constant array for the given string literal.
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name = ".str");
/// Return a pointer to a constant array for the given ObjCEncodeExpr node.
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
/// Returns a pointer to a character array containing the literal and a
@@ -822,18 +787,17 @@ public:
///
/// \param GlobalName If provided, the name to use for the global (if one is
/// created).
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantCString(const std::string &Str,
- const char *GlobalName = nullptr,
- unsigned Alignment = 0);
+ const char *GlobalName = nullptr);
/// Returns a pointer to a constant global variable for the given file-scope
/// compound literal expression.
- llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+ ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
/// \brief Returns a pointer to a global variable representing a temporary
/// with static or thread storage duration.
- llvm::Constant *GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
+ ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
const Expr *Inner);
/// \brief Retrieve the record type that describes the state of an
@@ -847,11 +811,11 @@ public:
StructorType Type);
/// Return the address of the constructor/destructor of the given type.
- llvm::GlobalValue *
+ llvm::Constant *
getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type,
const CGFunctionInfo *FnInfo = nullptr,
llvm::FunctionType *FnType = nullptr,
- bool DontDefer = false);
+ bool DontDefer = false, bool IsForDefinition = false);
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
@@ -948,6 +912,11 @@ public:
QualType DestType,
CodeGenFunction *CGF = nullptr);
+ /// \brief Emit type info if the type of an expression is a variably
+ /// modified type. Also emit proper debug info for cast types.
+ void EmitExplicitCastExprType(const ExplicitCastExpr *E,
+ CodeGenFunction *CGF = nullptr);
+
/// Return the result of value-initializing the given type, i.e. a null
/// expression of the given type. This is usually, but not always, an LLVM
/// null constant.
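An example of the "not always" case: under the Itanium C++ ABI the null pointer-to-data-member is the offset -1, so value-initializing one does not yield an all-zero constant.

struct S { int x; };

// Lowers to i64 -1 under the Itanium ABI, not to zeroinitializer.
int S::*NullMemberPtr = nullptr;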
@@ -998,16 +967,19 @@ public:
/// function type.
///
/// \param Info - The function type information.
- /// \param TargetDecl - The decl these attributes are being constructed
- /// for. If supplied the attributes applied to this decl may contribute to the
- /// function attributes and calling convention.
+ /// \param CalleeInfo - The callee information these attributes are being
+ /// constructed for. If valid, the attributes applied to this decl may
+ /// contribute to the function attributes and calling convention.
/// \param PAL [out] - On return, the attribute list to use.
/// \param CallingConv [out] - On return, the LLVM calling convention to use.
void ConstructAttributeList(const CGFunctionInfo &Info,
- const Decl *TargetDecl,
- AttributeListType &PAL,
- unsigned &CallingConv,
- bool AttrOnCallSite);
+ CGCalleeInfo CalleeInfo, AttributeListType &PAL,
+ unsigned &CallingConv, bool AttrOnCallSite);
+
+ // Fills in the supplied string map with the set of target features for the
+ // passed in function.
+ void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ const FunctionDecl *FD);
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
@@ -1016,9 +988,6 @@ public:
void EmitVTable(CXXRecordDecl *Class);
- /// Emit the RTTI descriptors for the builtin types.
- void EmitFundamentalRTTIDescriptors();
-
/// \brief Appends Opts to the "Linker Options" metadata value.
void AppendLinkerOptions(StringRef Opts);
@@ -1122,6 +1091,8 @@ public:
void addReplacement(StringRef Name, llvm::Constant *C);
+ void addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C);
+
/// \brief Emit a code for threadprivate directive.
/// \param D Threadprivate declaration.
void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
@@ -1135,10 +1106,21 @@ public:
void EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
const VTableLayout &VTLayout);
- /// Create a bitset entry for the given vtable.
- llvm::MDTuple *CreateVTableBitSetEntry(llvm::GlobalVariable *VTable,
- CharUnits Offset,
- const CXXRecordDecl *RD);
+ /// Generate a cross-DSO type identifier for type.
+ llvm::ConstantInt *CreateCfiIdForTypeMetadata(llvm::Metadata *MD);
+
+ /// Create a metadata identifier for the given type. This may either be an
+ /// MDString (for external identifiers) or a distinct unnamed MDNode (for
+ /// internal identifiers).
+ llvm::Metadata *CreateMetadataIdentifierForType(QualType T);
+
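This is where the MetadataIdMap member added near the top of the class comes in: identifiers must be minted once per canonical type and then reused, or distinct MDNodes would defeat identity comparison. A plausible body, assuming the definition follows the cache-then-build pattern the doc comment just above describes (an approximation, not quoted from the commit):

llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  llvm::Metadata *&InternalId = MetadataIdMap[T.getCanonicalType()];
  if (InternalId)
    return InternalId;

  if (isExternallyVisible(T->getLinkage())) {
    // External identifiers compare by name: use an MDString.
    std::string OutName;
    llvm::raw_string_ostream Out(OutName);
    getCXXABI().getMangleContext().mangleTypeName(T, Out);
    InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
  } else {
    // Internal identifiers compare by node identity: use a distinct node.
    InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
                                           llvm::ArrayRef<llvm::Metadata *>());
  }
  return InternalId;
}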
+ /// Create a bitset entry for the given function and add it to BitsetsMD.
+ void CreateFunctionBitSetEntry(const FunctionDecl *FD, llvm::Function *F);
+
+ /// Create a bitset entry for the given vtable and add it to BitsetsMD.
+ void CreateVTableBitSetEntry(llvm::NamedMDNode *BitsetsMD,
+ llvm::GlobalVariable *VTable, CharUnits Offset,
+ const CXXRecordDecl *RD);
/// \brief Get the declaration of std::terminate for the platform.
llvm::Constant *getTerminateFn();
@@ -1148,7 +1130,8 @@ private:
GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
bool ForVTable, bool DontDefer = false,
bool IsThunk = false,
- llvm::AttributeSet ExtraAttrs = llvm::AttributeSet());
+ llvm::AttributeSet ExtraAttrs = llvm::AttributeSet(),
+ bool IsForDefinition = false);
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
@@ -1194,7 +1177,7 @@ private:
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535,
- llvm::Constant *AssociatedData = 0);
+ llvm::Constant *AssociatedData = nullptr);
void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535);
/// Generates a global array of functions and priorities using the given list
@@ -1202,15 +1185,15 @@ private:
/// as a LLVM constructor or destructor array.
void EmitCtorList(const CtorList &Fns, const char *GlobalName);
- /// Emit the RTTI descriptors for the given type.
- void EmitFundamentalRTTIDescriptor(QualType Type);
-
/// Emit any needed decls for which code generation was deferred.
void EmitDeferred();
/// Call replaceAllUsesWith on all pairs in Replacements.
void applyReplacements();
+ /// Call replaceAllUsesWith on all pairs in GlobalValReplacements.
+ void applyGlobalValReplacements();
+
void checkAliases();
/// Emit any vtables which we deferred and still have a use for.
@@ -1258,4 +1241,4 @@ private:
} // end namespace CodeGen
} // end namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index 8dffefc871f2..38774332f31d 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -28,58 +28,20 @@ using namespace CodeGen;
void CodeGenPGO::setFuncName(StringRef Name,
llvm::GlobalValue::LinkageTypes Linkage) {
- StringRef RawFuncName = Name;
-
- // Function names may be prefixed with a binary '1' to indicate
- // that the backend should not modify the symbols due to any platform
- // naming convention. Do not include that '1' in the PGO profile name.
- if (RawFuncName[0] == '\1')
- RawFuncName = RawFuncName.substr(1);
-
- FuncName = RawFuncName;
- if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
- // For local symbols, prepend the main file name to distinguish them.
- // Do not include the full path in the file name since there's no guarantee
- // that it will stay the same, e.g., if the files are checked out from
- // version control in different locations.
- if (CGM.getCodeGenOpts().MainFileName.empty())
- FuncName = FuncName.insert(0, "<unknown>:");
- else
- FuncName = FuncName.insert(0, CGM.getCodeGenOpts().MainFileName + ":");
- }
+ llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
+ FuncName = llvm::getPGOFuncName(
+ Name, Linkage, CGM.getCodeGenOpts().MainFileName,
+ PGOReader ? PGOReader->getVersion() : llvm::IndexedInstrProf::Version);
// If we're generating a profile, create a variable for the name.
if (CGM.getCodeGenOpts().ProfileInstrGenerate)
- createFuncNameVar(Linkage);
+ FuncNameVar = llvm::createPGOFuncNameVar(CGM.getModule(), Linkage, FuncName);
}
void CodeGenPGO::setFuncName(llvm::Function *Fn) {
setFuncName(Fn->getName(), Fn->getLinkage());
}
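The deleted lines above spelled out the naming policy that llvm::getPGOFuncName now owns. Re-stated as a self-contained sketch of that policy (not the LLVM routine itself):

#include <string>

std::string pgoFuncName(std::string Raw, bool IsLocalLinkage,
                        const std::string &MainFile) {
  // Strip the '\1' no-mangling marker; it must not leak into profile names.
  if (!Raw.empty() && Raw[0] == '\1')
    Raw = Raw.substr(1);
  if (!IsLocalLinkage)
    return Raw;
  // Prefix local symbols with the main file name so identically named
  // statics from different TUs stay distinguishable.
  return (MainFile.empty() ? std::string("<unknown>") : MainFile) + ":" + Raw;
}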
-void CodeGenPGO::createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage) {
- // We generally want to match the function's linkage, but available_externally
- // and extern_weak both have the wrong semantics, and anything that doesn't
- // need to link across compilation units doesn't need to be visible at all.
- if (Linkage == llvm::GlobalValue::ExternalWeakLinkage)
- Linkage = llvm::GlobalValue::LinkOnceAnyLinkage;
- else if (Linkage == llvm::GlobalValue::AvailableExternallyLinkage)
- Linkage = llvm::GlobalValue::LinkOnceODRLinkage;
- else if (Linkage == llvm::GlobalValue::InternalLinkage ||
- Linkage == llvm::GlobalValue::ExternalLinkage)
- Linkage = llvm::GlobalValue::PrivateLinkage;
-
- auto *Value =
- llvm::ConstantDataArray::getString(CGM.getLLVMContext(), FuncName, false);
- FuncNameVar =
- new llvm::GlobalVariable(CGM.getModule(), Value->getType(), true, Linkage,
- Value, "__llvm_profile_name_" + FuncName);
-
- // Hide the symbol so that we correctly get a copy for each executable.
- if (!llvm::GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
- FuncNameVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
-}
-
namespace {
/// \brief Stable hasher for PGO region counters.
///
@@ -604,7 +566,7 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
RecordNextStmtCount = true;
}
};
-}
+} // end anonymous namespace
void PGOHash::combine(HashType Type) {
// Check that we never combine 0 and only have six bits.
@@ -643,27 +605,24 @@ uint64_t PGOHash::finalize() {
return endian::read<uint64_t, little, unaligned>(Result);
}
-void CodeGenPGO::checkGlobalDecl(GlobalDecl GD) {
- // Make sure we only emit coverage mapping for one constructor/destructor.
- // Clang emits several functions for the constructor and the destructor of
- // a class. Every function is instrumented, but we only want to provide
- // coverage for one of them. Because of that we only emit the coverage mapping
- // for the base constructor/destructor.
- if ((isa<CXXConstructorDecl>(GD.getDecl()) &&
- GD.getCtorType() != Ctor_Base) ||
- (isa<CXXDestructorDecl>(GD.getDecl()) &&
- GD.getDtorType() != Dtor_Base)) {
- SkipCoverageMapping = true;
- }
-}
-
-void CodeGenPGO::assignRegionCounters(const Decl *D, llvm::Function *Fn) {
+void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
+ const Decl *D = GD.getDecl();
bool InstrumentRegions = CGM.getCodeGenOpts().ProfileInstrGenerate;
llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
if (!InstrumentRegions && !PGOReader)
return;
if (D->isImplicit())
return;
+ // Constructors and destructors may be represented by several functions
+ // in IR. If so, instrument only the base variant; the others delegate to
+ // it, and the counts would otherwise be doubled.
+ if (CGM.getTarget().getCXXABI().hasConstructorVariants() &&
+ ((isa<CXXConstructorDecl>(GD.getDecl()) &&
+ GD.getCtorType() != Ctor_Base) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Base))) {
+ return;
+ }
CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
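A worked example of the variants the new check filters, using Itanium manglings for illustration:

struct S { S(); ~S(); };
// Clang emits two functions for each member above:
//   _ZN1SC1Ev  complete-object ctor (delegates to the base variant)
//   _ZN1SC2Ev  base-object ctor     <- the only ctor instrumented
//   _ZN1SD1Ev  complete-object dtor (delegates to the base variant)
//   _ZN1SD2Ev  base-object dtor     <- the only dtor instrumented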
@@ -763,7 +722,7 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
return;
uint64_t MaxFunctionCount = PGOReader->getMaximumFunctionCount();
- uint64_t FunctionCount = getRegionCount(0);
+ uint64_t FunctionCount = getRegionCount(nullptr);
if (FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount))
// Turn on InlineHint attribute for hot functions.
// FIXME: 30% is from preliminary tuning on SPEC, it may not be optimal.
@@ -779,7 +738,7 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S) {
if (!CGM.getCodeGenOpts().ProfileInstrGenerate || !RegionCounterMap)
return;
- if (!Builder.GetInsertPoint())
+ if (!Builder.GetInsertBlock())
return;
unsigned Counter = (*RegionCounterMap)[S];
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index de6f369fb351..6bf29ecaa7c4 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -78,13 +78,11 @@ public:
setCurrentRegionCount(*Count);
}
- /// Check if we need to emit coverage mapping for a given declaration
- void checkGlobalDecl(GlobalDecl GD);
/// Assign counters to regions and configure them for PGO of a given
/// function. Does nothing if instrumentation is not enabled and either
/// generates global variables or associates PGO data with each of the
/// counters depending on whether we are generating or using instrumentation.
- void assignRegionCounters(const Decl *D, llvm::Function *Fn);
+ void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn);
/// Emit a coverage mapping range with a counter zero
/// for an unused declaration.
void emitEmptyCounterMapping(const Decl *D, StringRef FuncName,
@@ -92,14 +90,12 @@ public:
private:
void setFuncName(llvm::Function *Fn);
void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
- void createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage);
void mapRegionCounters(const Decl *D);
void computeRegionCounts(const Decl *D);
void applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
llvm::Function *Fn);
void loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
bool IsInMainFile);
- void emitCounterVariables();
void emitCounterRegionMapping(const Decl *D);
public:
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index 53ba02a81397..c3c925cde2fd 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -155,7 +155,6 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
MContext.mangleTypeName(QualType(ETy, 0), Out);
- Out.flush();
return MetadataCache[Ty] = createTBAAScalarType(OutName, getChar());
}
@@ -271,7 +270,6 @@ CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
// Don't use the mangler for C code.
llvm::raw_svector_ostream Out(OutName);
MContext.mangleTypeName(QualType(Ty, 0), Out);
- Out.flush();
} else {
OutName = RD->getName();
}
diff --git a/lib/CodeGen/CodeGenTypeCache.h b/lib/CodeGen/CodeGenTypeCache.h
new file mode 100644
index 000000000000..c32b66d129da
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypeCache.h
@@ -0,0 +1,108 @@
+//===--- CodeGenTypeCache.h - Commonly used LLVM types and info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This structure provides a set of common types useful during IR emission.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+
+#include "clang/AST/CharUnits.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+ class Type;
+ class IntegerType;
+ class PointerType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// This structure provides a set of types that are commonly used
+/// during IR emission. It's initialized once in CodeGenModule's
+/// constructor and then copied around into new CodeGenFunctions.
+struct CodeGenTypeCache {
+ /// void
+ llvm::Type *VoidTy;
+
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
+
+ /// int
+ llvm::IntegerType *IntTy;
+
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
+ union {
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
+ };
+
+ /// void* in address space 0
+ union {
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The size and alignment of the builtin C type 'int'. This comes
+ /// up enough in various ABI lowering tasks to be worth pre-computing.
+ union {
+ unsigned char IntSizeInBytes;
+ unsigned char IntAlignInBytes;
+ };
+ CharUnits getIntSize() const {
+ return CharUnits::fromQuantity(IntSizeInBytes);
+ }
+ CharUnits getIntAlign() const {
+ return CharUnits::fromQuantity(IntAlignInBytes);
+ }
+
+ /// The width of a pointer into the generic address space.
+ unsigned char PointerWidthInBits;
+
+ /// The size and alignment of a pointer into the generic address space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
+ CharUnits getSizeSize() const {
+ return CharUnits::fromQuantity(SizeSizeInBytes);
+ }
+ CharUnits getSizeAlign() const {
+ return CharUnits::fromQuantity(SizeAlignInBytes);
+ }
+ CharUnits getPointerSize() const {
+ return CharUnits::fromQuantity(PointerSizeInBytes);
+ }
+ CharUnits getPointerAlign() const {
+ return CharUnits::fromQuantity(PointerAlignInBytes);
+ }
+
+ llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+ llvm::CallingConv::ID BuiltinCC;
+ llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
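The anonymous unions in this new header deliberately give one storage slot several names; that is sound only because CodeGenModule's constructor (not shown in this hunk) fills each slot once with a value the aliases are expected to share on the target. A self-contained sketch of the idiom:

#include <cassert>

struct SizesSketch {
  union {
    unsigned char PointerSizeInBytes;
    unsigned char SizeTSizeInBytes; // alias: assumes sizeof(void*) == sizeof(size_t)
  };
};

int main() {
  SizesSketch S;
  S.PointerSizeInBytes = 8;
  assert(S.SizeTSizeInBytes == 8); // both names refer to the same byte
  return 0;
}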
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index a4a8654eb36b..fcda05320551 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -31,7 +31,6 @@ using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
: CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
- TheDataLayout(cgm.getDataLayout()),
Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
SkippedLayout = false;
@@ -295,6 +294,76 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
llvm_unreachable("Unknown float format!");
}
+llvm::Type *CodeGenTypes::ConvertFunctionType(QualType QFT,
+ const FunctionDecl *FD) {
+ assert(QFT.isCanonical());
+ const Type *Ty = QFT.getTypePtr();
+ const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+
+ // Force conversion of all the relevant record types, to make sure
+ // we re-convert the FunctionType when appropriate.
+ if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
+ ConvertRecordDeclType(RT->getDecl());
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
+ if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
+ ConvertRecordDeclType(RT->getDecl());
+
+ SkippedLayout = true;
+
+ // Return a placeholder type.
+ return llvm::StructType::get(getLLVMContext());
+ }
+
+ // While we're converting the parameter types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty).second) {
+ SkippedLayout = true;
+ return llvm::StructType::get(getLLVMContext());
+ }
+
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ const CGFunctionInfo *FI;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ FI = &arrangeFreeFunctionType(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), FD);
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
+ FI = &arrangeFreeFunctionType(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
+ }
+
+ llvm::Type *ResultType = nullptr;
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ return ResultType;
+}
+
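Why the placeholder path exists, in one self-contained example: a function type that takes or returns an incomplete record by value cannot be lowered until the record is laid out.

struct Incomplete;                              // forward declaration only
using CannotLowerYet = Incomplete(Incomplete);  // by-value in and out
// Converting CannotLowerYet must yield a placeholder struct type; once
// Incomplete is completed, the FunctionType is re-converted for real.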
/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
T = Context.getCanonicalType(T);
@@ -389,9 +458,19 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
break;
@@ -476,75 +555,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
}
case Type::FunctionNoProto:
- case Type::FunctionProto: {
- const FunctionType *FT = cast<FunctionType>(Ty);
- // First, check whether we can build the full function type. If the
- // function type depends on an incomplete type (e.g. a struct or enum), we
- // cannot lower the function type.
- if (!isFuncTypeConvertible(FT)) {
- // This function's type depends on an incomplete tag type.
-
- // Force conversion of all the relevant record types, to make sure
- // we re-convert the FunctionType when appropriate.
- if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
- for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
- if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
-
- // Return a placeholder type.
- ResultType = llvm::StructType::get(getLLVMContext());
-
- SkippedLayout = true;
- break;
- }
-
- // While we're converting the parameter types for a function, we don't want
- // to recursively convert any pointed-to structs. Converting directly-used
- // structs is ok though.
- if (!RecordsBeingLaidOut.insert(Ty).second) {
- ResultType = llvm::StructType::get(getLLVMContext());
-
- SkippedLayout = true;
- break;
- }
-
- // The function type can be built; call the appropriate routines to
- // build it.
- const CGFunctionInfo *FI;
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- FI = &arrangeFreeFunctionType(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
- } else {
- const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
- FI = &arrangeFreeFunctionType(
- CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
- }
-
- // If there is something higher level prodding our CGFunctionInfo, then
- // don't recurse into it again.
- if (FunctionsBeingProcessed.count(FI)) {
-
- ResultType = llvm::StructType::get(getLLVMContext());
- SkippedLayout = true;
- } else {
-
- // Otherwise, we're good to go, go ahead and convert it.
- ResultType = GetFunctionType(*FI);
- }
-
- RecordsBeingLaidOut.erase(Ty);
-
- if (SkippedLayout)
- TypeCache.clear();
-
- if (RecordsBeingLaidOut.empty())
- while (!DeferredRecords.empty())
- ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ case Type::FunctionProto:
+ ResultType = ConvertFunctionType(T);
break;
- }
-
case Type::ObjCObject:
ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
break;
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 1580e21d11dc..a96f23c44894 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -122,7 +122,6 @@ class CodeGenTypes {
// Some of this stuff should probably be left on the CGM.
ASTContext &Context;
llvm::Module &TheModule;
- const llvm::DataLayout &TheDataLayout;
const TargetInfo &Target;
CGCXXABI &TheCXXABI;
@@ -159,7 +158,6 @@ class CodeGenTypes {
SmallVector<const RecordDecl *, 8> DeferredRecords;
-private:
/// This map keeps cache of llvm::Types and maps clang::Type to
/// corresponding llvm::Type.
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
@@ -168,7 +166,9 @@ public:
CodeGenTypes(CodeGenModule &cgm);
~CodeGenTypes();
- const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
+ const llvm::DataLayout &getDataLayout() const {
+ return TheModule.getDataLayout();
+ }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
const TargetInfo &getTarget() const { return Target; }
@@ -178,6 +178,14 @@ public:
/// ConvertType - Convert type T into a llvm::Type.
llvm::Type *ConvertType(QualType T);
+ /// \brief Converts the given function type into an llvm::Type. This should
+ /// be used when we know the target of the function we want to convert,
+ /// because some functions (specifically, those with pass_object_size
+ /// parameters) may not have the same IR signature as their type portrays
+ /// and can only be called directly.
+ llvm::Type *ConvertFunctionType(QualType FT,
+ const FunctionDecl *FD = nullptr);
+
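An example of the pass_object_size case the comment describes (the attribute is clang's; the function itself is illustrative):

// At the IR level this takes (i8*, i64, i8): the object size computed at
// each call site travels as a hidden extra argument, so the decl is needed
// to form the true signature.
void fill(char *Buf __attribute__((pass_object_size(0))), char C);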
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
@@ -264,11 +272,12 @@ public:
const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD);
const CGFunctionInfo &arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT);
-
- const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty,
+ const FunctionDecl *FD);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP);
+ const FunctionProtoType *FTP,
+ const CXXMethodDecl *MD);
/// "Arrange" the LLVM information for a call or type with the given
/// signature. This is largely an internal method; other clients
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index eca91590e602..eb6edeac4427 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -47,17 +47,6 @@ public:
Optional<SourceLocation> LocEnd)
: Count(Count), LocStart(LocStart), LocEnd(LocEnd) {}
- SourceMappingRegion(SourceMappingRegion &&Region)
- : Count(std::move(Region.Count)), LocStart(std::move(Region.LocStart)),
- LocEnd(std::move(Region.LocEnd)) {}
-
- SourceMappingRegion &operator=(SourceMappingRegion &&RHS) {
- Count = std::move(RHS.Count);
- LocStart = std::move(RHS.LocStart);
- LocEnd = std::move(RHS.LocEnd);
- return *this;
- }
-
const Counter &getCounter() const { return Count; }
void setCounter(Counter C) { Count = C; }
@@ -66,7 +55,7 @@ public:
void setStartLoc(SourceLocation Loc) { LocStart = Loc; }
- const SourceLocation &getStartLoc() const {
+ SourceLocation getStartLoc() const {
assert(LocStart && "Region has no start location");
return *LocStart;
}
@@ -75,7 +64,7 @@ public:
void setEndLoc(SourceLocation Loc) { LocEnd = Loc; }
- const SourceLocation &getEndLoc() const {
+ SourceLocation getEndLoc() const {
assert(LocEnd && "Region has no end location");
return *LocEnd;
}
@@ -174,7 +163,7 @@ public:
unsigned Depth = 0;
for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc);
- !Parent.isInvalid(); Parent = getIncludeOrExpansionLoc(Parent))
+ Parent.isValid(); Parent = getIncludeOrExpansionLoc(Parent))
++Depth;
FileLocs.push_back(std::make_pair(Loc, Depth));
}
@@ -255,7 +244,7 @@ public:
assert(Region.hasEndLoc() && "incomplete region");
SourceLocation LocStart = Region.getStartLoc();
- assert(!SM.getFileID(LocStart).isInvalid() && "region in invalid file");
+ assert(SM.getFileID(LocStart).isValid() && "region in invalid file");
auto CovFileID = getCoverageFileID(LocStart);
// Ignore regions that don't have a file, such as builtin macros.
@@ -413,8 +402,8 @@ struct CounterCoverageMappingBuilder
SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
- assert(!EndLoc.isInvalid() &&
- "File exit was not handled before popRegions");
+ if (EndLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
}
Region.setEndLoc(EndLoc);
@@ -426,7 +415,7 @@ struct CounterCoverageMappingBuilder
MostRecentLocation = getIncludeOrExpansionLoc(EndLoc);
assert(SM.isWrittenInSameFile(Region.getStartLoc(), EndLoc));
- SourceRegions.push_back(std::move(Region));
+ SourceRegions.push_back(Region);
}
RegionStack.pop_back();
}
@@ -496,12 +485,12 @@ struct CounterCoverageMappingBuilder
llvm::SmallSet<SourceLocation, 8> StartLocs;
Optional<Counter> ParentCounter;
- for (auto I = RegionStack.rbegin(), E = RegionStack.rend(); I != E; ++I) {
- if (!I->hasStartLoc())
+ for (SourceMappingRegion &I : llvm::reverse(RegionStack)) {
+ if (!I.hasStartLoc())
continue;
- SourceLocation Loc = I->getStartLoc();
+ SourceLocation Loc = I.getStartLoc();
if (!isNestedIn(Loc, ParentFile)) {
- ParentCounter = I->getCounter();
+ ParentCounter = I.getCounter();
break;
}
@@ -510,11 +499,11 @@ struct CounterCoverageMappingBuilder
// correct count. We avoid creating redundant regions by stopping once
// we've seen this region.
if (StartLocs.insert(Loc).second)
- SourceRegions.emplace_back(I->getCounter(), Loc,
+ SourceRegions.emplace_back(I.getCounter(), Loc,
getEndOfFileOrMacro(Loc));
Loc = getIncludeOrExpansionLoc(Loc);
}
- I->setStartLoc(getPreciseTokenLocEnd(Loc));
+ I.setStartLoc(getPreciseTokenLocEnd(Loc));
}
if (ParentCounter) {
@@ -580,7 +569,7 @@ struct CounterCoverageMappingBuilder
}
void VisitStmt(const Stmt *S) {
- if (!S->getLocStart().isInvalid())
+ if (S->getLocStart().isValid())
extendRegion(S);
for (const Stmt *Child : S->children())
if (Child)
@@ -796,7 +785,7 @@ struct CounterCoverageMappingBuilder
else
pushRegion(Count, getStart(S));
- if (const CaseStmt *CS = dyn_cast<CaseStmt>(S)) {
+ if (const auto *CS = dyn_cast<CaseStmt>(S)) {
Visit(CS->getLHS());
if (const Expr *RHS = CS->getRHS())
Visit(RHS);
@@ -842,7 +831,6 @@ struct CounterCoverageMappingBuilder
}
void VisitCXXCatchStmt(const CXXCatchStmt *S) {
- extendRegion(S);
propagateCounts(getRegionCounter(S), S->getHandlerBlock());
}
@@ -891,7 +879,7 @@ static bool isMachO(const CodeGenModule &CGM) {
}
static StringRef getCoverageSection(const CodeGenModule &CGM) {
- return isMachO(CGM) ? "__DATA,__llvm_covmap" : "__llvm_covmap";
+ return llvm::getInstrProfCoverageSectionName(isMachO(CGM));
}
static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
@@ -922,24 +910,23 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
}
void CoverageMappingModuleGen::addFunctionMappingRecord(
- llvm::GlobalVariable *FunctionName, StringRef FunctionNameValue,
- uint64_t FunctionHash, const std::string &CoverageMapping) {
+ llvm::GlobalVariable *NamePtr, StringRef NameValue,
+ uint64_t FuncHash, const std::string &CoverageMapping) {
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
- auto *Int64Ty = llvm::Type::getInt64Ty(Ctx);
- auto *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
if (!FunctionRecordTy) {
- llvm::Type *FunctionRecordTypes[] = {Int8PtrTy, Int32Ty, Int32Ty, Int64Ty};
+ #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) LLVMType,
+ llvm::Type *FunctionRecordTypes[] = {
+ #include "llvm/ProfileData/InstrProfData.inc"
+ };
FunctionRecordTy =
llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
/*isPacked=*/true);
}
+ #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
llvm::Constant *FunctionRecordVals[] = {
- llvm::ConstantExpr::getBitCast(FunctionName, Int8PtrTy),
- llvm::ConstantInt::get(Int32Ty, FunctionNameValue.size()),
- llvm::ConstantInt::get(Int32Ty, CoverageMapping.size()),
- llvm::ConstantInt::get(Int64Ty, FunctionHash)};
+ #include "llvm/ProfileData/InstrProfData.inc"
+ };
FunctionRecords.push_back(llvm::ConstantStruct::get(
FunctionRecordTy, makeArrayRef(FunctionRecordVals)));
CoverageMappings += CoverageMapping;
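The two #include expansions above are the classic X-macro pattern: InstrProfData.inc defines each COVMAP_FUNC_RECORD field once, and every inclusion site decides how to expand it. A self-contained miniature of the same trick (schema and names invented for the demo):

#define DEMO_COVMAP_FIELDS           \
  DEMO_FIELD(unsigned, NameSize)     \
  DEMO_FIELD(unsigned, CoverageSize)

struct DemoRecord {
#define DEMO_FIELD(Type, Name) Type Name;
  DEMO_COVMAP_FIELDS
#undef DEMO_FIELD
};
// A second expansion of the same schema could build the matching
// initializer list, exactly as the Init column does above.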
@@ -961,7 +948,7 @@ void CoverageMappingModuleGen::addFunctionMappingRecord(
Expressions, Regions);
if (Reader.read())
return;
- dump(llvm::outs(), FunctionNameValue, Expressions, Regions);
+ dump(llvm::outs(), NameValue, Expressions, Regions);
}
}
@@ -1023,7 +1010,7 @@ void CoverageMappingModuleGen::emit() {
auto CovData = new llvm::GlobalVariable(CGM.getModule(), CovDataTy, true,
llvm::GlobalValue::InternalLinkage,
CovDataVal,
- "__llvm_coverage_mapping");
+ llvm::getCoverageMappingVarName());
CovData->setSection(getCoverageSection(CGM));
CovData->setAlignment(8);
diff --git a/lib/CodeGen/EHScopeStack.h b/lib/CodeGen/EHScopeStack.h
index a7951888c825..85cd1543e5bf 100644
--- a/lib/CodeGen/EHScopeStack.h
+++ b/lib/CodeGen/EHScopeStack.h
@@ -96,6 +96,9 @@ enum CleanupKind : unsigned {
/// and catch blocks.
class EHScopeStack {
public:
+ /* Should switch to alignof(uint64_t) instead of 8, once EHCleanupScope can honor that alignment. */
+ enum { ScopeStackAlignment = 8 };
+
/// A saved depth on the scope stack. This is necessary because
/// pushing scopes onto the stack invalidates iterators.
class stable_iterator {
@@ -141,7 +144,15 @@ public:
class Cleanup {
// Anchor the construction vtable.
virtual void anchor();
+
+ protected:
+ ~Cleanup() = default;
+
public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
/// Generation flags.
class Flags {
enum {
@@ -168,10 +179,6 @@ public:
void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
};
- // Provide a virtual destructor to suppress a very common warning
- // that unfortunately cannot be suppressed without this. Cleanups
- // should not rely on this destructor ever being called.
- virtual ~Cleanup() {}
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
@@ -184,7 +191,8 @@ public:
/// ConditionalCleanup stores the saved form of its parameters,
/// then restores them and performs the cleanup.
- template <class T, class... As> class ConditionalCleanup : public Cleanup {
+ template <class T, class... As>
+ class ConditionalCleanup final : public Cleanup {
typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
SavedTuple Saved;
@@ -248,6 +256,7 @@ private:
SmallVector<BranchFixup, 8> BranchFixups;
char *allocate(size_t Size);
+ void deallocate(size_t Size);
void *pushCleanup(CleanupKind K, size_t DataSize);
@@ -259,6 +268,8 @@ public:
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T));
Cleanup *Obj = new (Buffer) T(A...);
(void) Obj;
@@ -267,6 +278,8 @@ public:
/// Push a lazily-created cleanup on the stack. Tuple version.
template <class T, class... As>
void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T));
Cleanup *Obj = new (Buffer) T(std::move(A));
(void) Obj;
@@ -287,6 +300,8 @@ public:
/// stack is modified.
template <class T, class... As>
T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) {
+ static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
return new (Buffer) T(N, A...);
}
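What a cleanup looks like under the new rules: trivially movable state, alignment no larger than ScopeStackAlignment (enforced by the static_asserts above), and no reliance on a destructor, since the base's is now protected and non-virtual. CallEndCatch is a real cleanup elsewhere in CodeGen; this reduced form is a sketch:

struct CallEndCatch final : EHScopeStack::Cleanup {
  bool MightThrow;
  explicit CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}

  void Emit(CodeGenFunction &CGF, Flags F) override {
    // Emit the call to __cxa_end_catch (body elided in this sketch).
  }
};

// Placement-constructed into the scope stack, which is why the alignment
// check matters:
//   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, true);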
@@ -346,7 +361,6 @@ public:
return InnermostEHScope;
}
- stable_iterator getInnermostActiveEHScope() const;
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
@@ -377,9 +391,6 @@ public:
/// to the EH stack.
iterator find(stable_iterator save) const;
- /// Removes the cleanup pointed to by the given stable_iterator.
- void removeCleanup(stable_iterator save);
-
/// Add a branch fixup to the current cleanup scope.
BranchFixup &addBranchFixup() {
assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 2be9ceb1d637..0c4008f8ee78 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -69,6 +69,45 @@ public:
return RAA_Default;
}
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Itanium ABI has separate complete-object vs. base-object
+ // variants of both constructors and destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat:
+ llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+ if (isa<CXXConstructorDecl>(GD.getDecl())) {
+ switch (GD.getCtorType()) {
+ case Ctor_Complete:
+ return true;
+
+ case Ctor_Base:
+ return false;
+
+ case Ctor_CopyingClosure:
+ case Ctor_DefaultClosure:
+ llvm_unreachable("closure ctors in Itanium ABI?");
+
+ case Ctor_Comdat:
+ llvm_unreachable("emitting ctor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
+
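A quick reference for what the two switches above classify (Itanium variant names for illustration):

// Ctor_Complete (C1) -> constructs the complete object -> true
// Ctor_Base     (C2) -> constructs a base subobject    -> false
// Dtor_Deleting (D0) -> destroys and deallocates       -> true
// Dtor_Complete (D1) -> destroys the complete object   -> true
// Dtor_Base     (D2) -> destroys a base subobject      -> false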
bool isZeroInitializable(const MemberPointerType *MPT) override;
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
@@ -76,13 +115,14 @@ public:
llvm::Value *
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const Expr *E,
- llvm::Value *&This,
+ Address This,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr,
const MemberPointerType *MPT) override;
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base,
+ Address Base,
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
@@ -111,9 +151,22 @@ public:
const MemberPointerType *MPT) override;
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
+ /// Itanium says that an _Unwind_Exception has to be "double-word"
+ /// aligned (and thus the end of it is also so-aligned), meaning 16
+ /// bytes. Of course, that was written for the actual Itanium,
+ /// which is a 64-bit platform. Classically, the ABI doesn't really
+ /// specify the alignment on other platforms, but in practice
+ /// libUnwind declares the struct with __attribute__((aligned)), so
+ /// we assume that alignment here. (It's generally 16 bytes, but
+ /// some targets overwrite it.)
+ CharUnits getAlignmentOfExnObject() {
+ auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
+ return CGM.getContext().toCharUnitsFromBits(align);
+ }
+
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
@@ -126,34 +179,34 @@ public:
void EmitFundamentalRTTIDescriptor(QualType Type);
void EmitFundamentalRTTIDescriptors();
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
- llvm::Constant *
+ CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty,
QualType CatchHandlerType) override {
- return getAddrOfRTTIDescriptor(Ty);
+ return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
}
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
llvm::Value *
- GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
@@ -185,15 +238,29 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) override;
+ bool Delegating, Address This) override;
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
+ bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) override;
+
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
+ return true;
+ }
+
+ llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
llvm::Value *getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
- BaseSubobject Base, const CXXRecordDecl *NearestVBase,
- bool &NeedsVirtualOffset) override;
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
+
+ llvm::Value *getVTableAddressPointInStructorWithVTT(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase);
llvm::Constant *
getVTableAddressPointForConstExpr(BaseSubobject Base,
@@ -203,18 +270,19 @@ public:
CharUnits VPtrOffset) override;
llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
- llvm::Type *Ty,
+ Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType,
- llvm::Value *This,
+ Address This,
const CXXMemberCallExpr *CE) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
+ bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
+
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
bool ReturnAdjustment) override {
// Allow inlining of thunks by emitting them with available_externally
@@ -223,10 +291,10 @@ public:
Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
}
- llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
- llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
@@ -240,13 +308,13 @@ public:
{ return "__cxa_deleted_virtual"; }
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) override;
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
@@ -259,10 +327,9 @@ public:
llvm::Value *Val);
void EmitThreadLocalInitFuncs(
CodeGenModule &CGM,
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
- CXXThreadLocals,
+ ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
- ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override;
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction() const override { return true; }
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
@@ -302,6 +369,41 @@ public:
friend class ItaniumRTTIBuilder;
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
+
+ private:
+ bool hasAnyUsedVirtualInlineFunction(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ if (!VtableComponent.isUsedFunctionPointerKind())
+ continue;
+
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (Method->getCanonicalDecl()->isInlined())
+ return true;
+ }
+ return false;
+ }
+
+ bool isVTableHidden(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ if (VtableComponent.isRTTIKind()) {
+ const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
+ if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
+ return true;
+ } else if (VtableComponent.isUsedFunctionPointerKind()) {
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (Method->getVisibility() == Visibility::HiddenVisibility &&
+ !Method->isDefined())
+ return true;
+ }
+ }
+ return false;
+ }
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -320,12 +422,12 @@ public:
QualType ResTy) override;
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
- llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
CharUnits cookieSize) override;
};
@@ -336,6 +438,20 @@ public:
// ARM64 libraries are prepared for non-unique RTTI.
bool shouldRTTIBeUnique() const override { return false; }
};
+
+class WebAssemblyCXXABI final : public ItaniumCXXABI {
+public:
+ explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
+ : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
+ /*UseARMGuardVarABI=*/true) {}
+
+private:
+ bool HasThisReturn(GlobalDecl GD) const override {
+ return isa<CXXConstructorDecl>(GD.getDecl()) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Deleting);
+ }
+};
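HasThisReturn opts WebAssembly into the ARM-flavored convention in which constructors (and non-deleting destructors) return this, so call sites can reuse the returned pointer. Illustrative IR shapes (not taken from an actual compile):

// Default convention:  define void       @_ZN1SC2Ev(%struct.S* %this)
// With HasThisReturn:  define %struct.S* @_ZN1SC2Ev(%struct.S* %this)
// ...letting a caller chain: %p = call %struct.S* @_ZN1SC2Ev(%struct.S* %m)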
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
@@ -344,6 +460,7 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
// between the ARM and iOS ABIs.
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
+ case TargetCXXABI::WatchOS:
return new ARMCXXABI(CGM);
case TargetCXXABI::iOS64:
@@ -359,6 +476,9 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericMIPS:
return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true);
+ case TargetCXXABI::WebAssembly:
+ return new WebAssemblyCXXABI(CGM);
+
case TargetCXXABI::GenericItanium:
if (CGM.getContext().getTargetInfo().getTriple().getArch()
== llvm::Triple::le32) {
@@ -404,7 +524,8 @@ ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+ CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
CGBuilderTy &Builder = CGF.Builder;
@@ -413,9 +534,8 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
@@ -433,9 +553,11 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Apply the adjustment and cast back to the original struct type
// for consistency.
+ llvm::Value *This = ThisAddr.getPointer();
llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ ThisPtrForCall = This;
// Load the function pointer.
llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
@@ -457,7 +579,11 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Cast the adjusted this to a pointer to vtable pointer and load.
llvm::Type *VTableTy = Builder.getInt8PtrTy();
- llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy);
+ CharUnits VTablePtrAlign =
+ CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
+ CGF.getPointerAlign());
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
// Apply the offset.
llvm::Value *VTableOffset = FnAsInt;
@@ -467,7 +593,9 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Load the virtual function to call.
VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
- llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ llvm::Value *VirtualFn =
+ Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
+ "memptr.virtualfn");
CGF.EmitBranch(FnEnd);
// In the non-virtual path, the function pointer is actually a
@@ -487,24 +615,23 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MemPtr->getType() == CGM.PtrDiffTy);
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS = Base->getType()->getPointerAddressSpace();
-
// Cast to char*.
- Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+ Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+ llvm::Value *Addr =
+ Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
- llvm::Type *PType
- = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
return Builder.CreateBitCast(Addr, PType);
}
@@ -858,7 +985,8 @@ bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
// FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
// special members.
if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
}
return false;
@@ -874,7 +1002,7 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
bool UseGlobalDelete = DE->isGlobalDelete();
@@ -883,16 +1011,20 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// to pass to the deallocation function.
// Grab the vtable pointer as an intptr_t*.
- llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo());
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
VTable, -2, "complete-offset.ptr");
- llvm::LoadInst *Offset = CGF.Builder.CreateLoad(OffsetPtr);
- Offset->setAlignment(CGF.PointerAlignInBytes);
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
// Apply the offset.
- llvm::Value *CompletePtr = CGF.Builder.CreateBitCast(Ptr, CGF.Int8PtrTy);
+ llvm::Value *CompletePtr =
+ CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
// If we're supposed to call the global delete, make sure we do so
@@ -954,7 +1086,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
- CGF.EmitAnyExprToExn(E->getSubExpr(), ExceptionPtr);
+ CharUnits ExnAlign = getAlignmentOfExnObject();
+ CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
@@ -1023,25 +1156,25 @@ static CharUnits computeOffsetHint(ASTContext &Context,
CharUnits Offset;
// Now walk all possible inheritance paths.
- for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end(); I != E;
- ++I) {
- if (I->Access != AS_public) // Ignore non-public inheritance.
+ for (const CXXBasePath &Path : Paths) {
+ if (Path.Access != AS_public) // Ignore non-public inheritance.
continue;
++NumPublicPaths;
- for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
+ for (const CXXBasePathElement &PathElement : Path) {
// If the path contains a virtual base class, we can't give any hint.
// -1: no hint.
- if (J->Base->isVirtual())
+ if (PathElement.Base->isVirtual())
return CharUnits::fromQuantity(-1ULL);
if (NumPublicPaths > 1) // Won't use offsets, skip computation.
continue;
// Accumulate the base class offsets.
- const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
- Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
+ const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
+ Offset += L.getBaseClassOffset(
+ PathElement.Base->getType()->getAsCXXRecordDecl());
}
}
@@ -1078,14 +1211,16 @@ void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
llvm::Value *Value =
- CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo());
+ CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
// Load the type info.
Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
- return CGF.Builder.CreateLoad(Value);
+ return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
@@ -1094,7 +1229,7 @@ bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -1113,6 +1248,7 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
+ llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
@@ -1136,22 +1272,28 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
- llvm::Value *Value,
+ Address ThisAddr,
QualType SrcRecordTy,
QualType DestTy) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ auto *ClassDecl =
+ cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
// Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
+ llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
+ ClassDecl);
// Get the offset-to-top from the vtable.
llvm::Value *OffsetToTop =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
- OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
+ OffsetToTop =
+ CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
+ "offset.to.top");
// Finally, add the offset to the pointer.
+ llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
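As a rough orientation (illustrative C++ only, not part of this patch), the code emitted here for dynamic_cast<void*> corresponds to:

  #include <cstddef>
  // The Itanium ABI stores the "offset to top" at index -2 of the vtable;
  // adding it to the pointer yields the start of the complete object.
  void *castToVoid(void *p) {
    std::ptrdiff_t *vtable = *(std::ptrdiff_t **)p; // load the vptr
    return (char *)p + vtable[-2];                  // apply offset-to-top
  }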
@@ -1167,10 +1309,10 @@ bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
- llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy);
+ llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
CharUnits VBaseOffsetOffset =
CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
BaseClassDecl);
@@ -1182,7 +1324,8 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
CGM.PtrDiffTy->getPointerTo());
llvm::Value *VBaseOffset =
- CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+ CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
+ "vbase.offset");
return VBaseOffset;
}
@@ -1293,7 +1436,7 @@ unsigned ItaniumCXXABI::addImplicitConstructorArgs(
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) {
+ bool Delegating, Address This) {
GlobalDecl GD(DD, Type);
llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
@@ -1305,8 +1448,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
if (!Callee)
Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
- CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(), This, VTT,
- VTTTy, nullptr);
+ CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
+ This.getPointer(), VTT, VTTTy, nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1356,41 +1499,29 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
CGM.EmitVTableBitSetEntries(VTable, VTLayout);
}
+bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
+ CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
+ if (Vptr.NearestVBase == nullptr)
+ return false;
+ return NeedsVTTParameter(CGF.CurGD);
+}
+
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
- const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) {
- bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CGF.CurGD);
- NeedsVirtualOffset = (NeedsVTTParam && NearestVBase);
-
- llvm::Value *VTableAddressPoint;
- if (NeedsVTTParam && (Base.getBase()->getNumVBases() || NearestVBase)) {
- // Get the secondary vpointer index.
- uint64_t VirtualPointerIndex =
- CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
-
- /// Load the VTT.
- llvm::Value *VTT = CGF.LoadCXXVTT();
- if (VirtualPointerIndex)
- VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
-
- // And load the address point from the VTT.
- VTableAddressPoint = CGF.Builder.CreateLoad(VTT);
- } else {
- llvm::Constant *VTable =
- CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits());
- uint64_t AddressPoint = CGM.getItaniumVTableContext()
- .getVTableLayout(VTableClass)
- .getAddressPoint(Base);
- VTableAddressPoint =
- CGF.Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
- }
+ const CXXRecordDecl *NearestVBase) {
- return VTableAddressPoint;
+ if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
+ NeedsVTTParameter(CGF.CurGD)) {
+ return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
+ NearestVBase);
+ }
+ return getVTableAddressPoint(Base, VTableClass);
}
-llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
- auto *VTable = getAddrOfVTable(VTableClass, CharUnits());
+llvm::Constant *
+ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) {
+ llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
// Find the appropriate vtable within the vtable group.
uint64_t AddressPoint = CGM.getItaniumVTableContext()
@@ -1405,6 +1536,30 @@ llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
VTable, Indices);
}
+llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
+ CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase) {
+ assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
+ NeedsVTTParameter(CGF.CurGD) && "This class doesn't have a VTT");
+
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+ // Load the VTT.
+ llvm::Value *VTT = CGF.LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
+}
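A minimal sketch of when the VTT path above is taken (hypothetical classes; the VTT is an array of vtable address points used during construction):

  struct VB   { virtual void f(); };
  struct Mid  : virtual VB { Mid(); };  // Mid's constructor receives a VTT
  struct Most : Mid { Most(); };        // passes a sub-VTT down to Mid::Mid
  // While Mid::Mid runs as a base of Most, the vptrs it installs are
  // loaded from the VTT (as in getVTableAddressPointInStructorWithVTT)
  // rather than from Mid's own vtable group.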
+
+llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
+ BaseSubobject Base, const CXXRecordDecl *VTableClass) {
+ return getVTableAddressPoint(Base, VTableClass);
+}
+
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
@@ -1416,11 +1571,9 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
// Queue up this v-table for possible deferred emission.
CGM.addDeferredVTable(RD);
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
getMangleContext().mangleCXXVTable(RD, Out);
- Out.flush();
- StringRef Name = OutName.str();
ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
llvm::ArrayType *ArrayType = llvm::ArrayType::get(
@@ -1440,26 +1593,27 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) {
GD = GD.getCanonicalDecl();
Ty = Ty->getPointerTo()->getPointerTo();
- llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
+ auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
- CGF.EmitVTablePtrCheckForCall(cast<CXXMethodDecl>(GD.getDecl()), VTable,
+ CGF.EmitVTablePtrCheckForCall(MethodDecl, VTable,
CodeGenFunction::CFITCK_VCall, Loc);
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
- return CGF.Builder.CreateLoad(VFuncPtr);
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- llvm::Value *This, const CXXMemberCallExpr *CE) {
+ Address This, const CXXMemberCallExpr *CE) {
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
@@ -1470,8 +1624,9 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
CE ? CE->getLocStart() : SourceLocation());
- CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), This,
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(), /*ImplicitParam=*/nullptr,
+ QualType(), CE);
return nullptr;
}
@@ -1481,30 +1636,41 @@ void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}
+bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+ // We don't emit available_externally vtables if we are in -fapple-kext mode
+ // because kext mode does not permit devirtualization.
+ if (CGM.getLangOpts().AppleKext)
+ return false;
+
+ // If the class has no inline virtual functions and its vtable is not
+ // hidden, it is safe to emit an available_externally copy of the vtable.
+ // FIXME: we can still emit a copy of the vtable if we
+ // can emit definitions of the inline functions.
+ return !hasAnyUsedVirtualInlineFunction(RD) && !isVTableHidden(RD);
+}
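For illustration (hypothetical class, not from this patch), the shape the new predicate accepts:

  // No inline virtual functions and default visibility: other TUs may
  // emit an available_externally copy of the vtable and devirtualize
  // calls through it.
  struct Widget {
    virtual void draw();   // defined out-of-line in exactly one TU
    virtual ~Widget();
  };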
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
- llvm::Value *Ptr,
+ Address InitialPtr,
int64_t NonVirtualAdjustment,
int64_t VirtualAdjustment,
bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
- return Ptr;
+ return InitialPtr.getPointer();
- llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
- llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+ Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
+ // In a base-to-derived cast, the non-virtual adjustment is applied first.
if (NonVirtualAdjustment && !IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a base-to-derived cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ V = CGF.Builder.CreateConstInBoundsByteGEP(V,
+ CharUnits::fromQuantity(NonVirtualAdjustment));
}
+ // Perform the virtual adjustment if we have one.
+ llvm::Value *ResultPtr;
if (VirtualAdjustment) {
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- // Perform the virtual adjustment.
- llvm::Value *VTablePtrPtr =
- CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
-
+ Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *OffsetPtr =
@@ -1513,23 +1679,28 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
// Load the adjustment offset from the vtable.
- llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
// Adjust our pointer.
- V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
+ } else {
+ ResultPtr = V.getPointer();
}
+ // In a derived-to-base conversion, the non-virtual adjustment is
+ // applied second.
if (NonVirtualAdjustment && IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a derived-to-base cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
+ NonVirtualAdjustment);
}
// Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, Ptr->getType());
+ return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
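To make the ordering concrete, a sketch with hypothetical classes:

  struct B { virtual B *clone(); };
  struct D : virtual B { D *clone() override; };
  // A this-adjusting thunk (the entry for B::clone dispatching to
  // D::clone) applies the non-virtual delta first and then the virtual
  // delta read from the vtable; a return-adjusting thunk (a D* result
  // converted to B*) reads the virtual delta first and applies the
  // non-virtual delta last, matching the IsReturnAdjustment branches
  // above.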
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) {
return performTypeAdjustment(CGF, This, TA.NonVirtual,
TA.Virtual.Itanium.VCallOffsetOffset,
@@ -1537,7 +1708,7 @@ llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
}
llvm::Value *
-ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
RA.Virtual.Itanium.VBaseOffsetOffset,
@@ -1550,8 +1721,7 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
// Destructor thunks in the ARM ABI have indeterminate results.
- llvm::Type *T =
- cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ llvm::Type *T = CGF.ReturnValue.getElementType();
RValue Undef = RValue::get(llvm::UndefValue::get(T));
return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}
@@ -1565,18 +1735,17 @@ CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
CGM.getContext().getTypeAlignInChars(elementType));
}
-llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) {
+Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
assert(requiresArrayCookie(expr));
- unsigned AS = NewPtr->getType()->getPointerAddressSpace();
+ unsigned AS = NewPtr.getAddressSpace();
ASTContext &Ctx = getContext();
- QualType SizeTy = Ctx.getSizeType();
- CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+ CharUnits SizeSize = CGF.getSizeSize();
// The size of the cookie.
CharUnits CookieSize =
@@ -1584,49 +1753,45 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
- llvm::Value *CookiePtr = NewPtr;
+ Address CookiePtr = NewPtr;
CharUnits CookieOffset = CookieSize - SizeSize;
if (!CookieOffset.isZero())
- CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
- CookieOffset.getQuantity());
+ CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
// Write the number of elements into the appropriate slot.
- llvm::Type *NumElementsTy = CGF.ConvertType(SizeTy)->getPointerTo(AS);
- llvm::Value *NumElementsPtr =
- CGF.Builder.CreateBitCast(CookiePtr, NumElementsTy);
+ Address NumElementsPtr =
+ CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, NumElementsTy, false);
+ llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::Constant *F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
- CGF.Builder.CreateCall(F, NumElementsPtr);
+ CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
}
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
- return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
- CookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
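Sketch of the resulting layout (assuming a 64-bit size_t and an over-aligned element type; illustrative only):

  // new T[n] where alignof(T) == 16:
  //   [ 8 bytes padding ][ n : size_t ]   <- 16-byte cookie
  //   [ T[0] ][ T[1] ] ...                <- pointer returned to the program
  // The count sits at cookieSize - sizeof(size_t), i.e. right-justified
  // in the cookie, which is what readArrayCookieImpl below relies on.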
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
// The element count is right-justified in the cookie.
- llvm::Value *numElementsPtr = allocPtr;
- CharUnits numElementsOffset =
- cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
+ Address numElementsPtr = allocPtr;
+ CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
if (!numElementsOffset.isZero())
numElementsPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
- numElementsOffset.getQuantity());
+ CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- numElementsPtr =
- CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ unsigned AS = allocPtr.getAddressSpace();
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
return CGF.Builder.CreateLoad(numElementsPtr);
// In ASan mode, emit a function call instead of a regular load and let the
@@ -1638,7 +1803,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
llvm::Constant *F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
- return CGF.Builder.CreateCall(F, numElementsPtr);
+ return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
@@ -1654,47 +1819,41 @@ CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
CGM.getContext().getTypeAlignInChars(elementType));
}
-llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *newPtr,
- llvm::Value *numElements,
- const CXXNewExpr *expr,
- QualType elementType) {
+Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
assert(requiresArrayCookie(expr));
- // NewPtr is a char*, but we generalize to arbitrary addrspaces.
- unsigned AS = newPtr->getType()->getPointerAddressSpace();
-
// The cookie is always at the start of the buffer.
- llvm::Value *cookie = newPtr;
+ Address cookie = newPtr;
// The first element is the element size.
- cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
+ cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
getContext().getTypeSizeInChars(elementType).getQuantity());
CGF.Builder.CreateStore(elementSize, cookie);
// The second element is the element count.
- cookie = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.SizeTy, cookie, 1);
+ cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
CGF.Builder.CreateStore(numElements, cookie);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
- return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
- cookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
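The ARM cookie additionally records the element size; roughly (ignoring over-aligned element types):

  // new T[n] under the ARM ABI:
  //   [ sizeof(T) : size_t ][ n : size_t ]  <- two-word cookie
  //   [ T[0] ][ T[1] ] ...                  <- data buffer
  // readArrayCookieImpl below reads the count at offset sizeof(size_t).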
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
// The number of elements is at offset sizeof(size_t) relative to
// the allocated pointer.
- llvm::Value *numElementsPtr
- = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
+ Address numElementsPtr
+ = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- numElementsPtr =
- CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
@@ -1735,7 +1894,7 @@ static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
}
namespace {
- struct CallGuardAbort : EHScopeStack::Cleanup {
+ struct CallGuardAbort final : EHScopeStack::Cleanup {
llvm::GlobalVariable *Guard;
CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
@@ -1764,12 +1923,21 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
llvm::IntegerType *guardTy;
+ CharUnits guardAlignment;
if (useInt8GuardVariable) {
guardTy = CGF.Int8Ty;
+ guardAlignment = CharUnits::One();
} else {
// Guard variables are 64 bits in the generic ABI and size_t-width on ARM
// (i.e. 32-bit on AArch32, 64-bit on AArch64).
- guardTy = (UseARMGuardVarABI ? CGF.SizeTy : CGF.Int64Ty);
+ if (UseARMGuardVarABI) {
+ guardTy = CGF.SizeTy;
+ guardAlignment = CGF.getSizeAlign();
+ } else {
+ guardTy = CGF.Int64Ty;
+ guardAlignment = CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlignment(guardTy));
+ }
}
llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
@@ -1782,7 +1950,6 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
{
llvm::raw_svector_ostream out(guardName);
getMangleContext().mangleStaticGuardVariable(&D, out);
- out.flush();
}
// Create the guard variable with a zero-initializer.
@@ -1794,11 +1961,14 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
guard->setVisibility(var->getVisibility());
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
+ guard->setAlignment(guardAlignment.getQuantity());
- // The ABI says: It is suggested that it be emitted in the same COMDAT group
- // as the associated data object
+ // The ABI says: "It is suggested that it be emitted in the same COMDAT
+ // group as the associated data object." In practice, this doesn't work for
+ // non-ELF object formats, so only do it for ELF.
llvm::Comdat *C = var->getComdat();
- if (!D.isLocalVarDecl() && C) {
+ if (!D.isLocalVarDecl() && C &&
+ CGM.getTarget().getTriple().isOSBinFormatELF()) {
guard->setComdat(C);
CGF.CurFn->setComdat(C);
} else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
@@ -1808,6 +1978,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
+ Address guardAddr = Address(guard, guardAlignment);
+
// Test whether the variable has completed initialization.
//
// Itanium C++ ABI 3.3.2:
@@ -1827,8 +1999,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Load the first byte of the guard variable.
llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
- LI->setAlignment(1);
+ Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
// Itanium ABI:
// An implementation supporting thread-safety on multiprocessor
@@ -1898,9 +2069,10 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.PopCleanupBlock();
// Call __cxa_guard_release. This cannot throw.
- CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
+ CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
+ guardAddr.getPointer());
} else {
- Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
}
CGF.EmitBlock(EndBlock);
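The overall shape being emitted, as a C-like sketch (the real code uses the mangled guard symbol, the alignment chosen above, and appropriate atomic ordering on the first-byte load):

  if (*(char *)&guard == 0) {            // fast path: already initialized?
    if (__cxa_guard_acquire(&guard)) {   // thread-safe slow path
      run_initializer();                 // on unwind: __cxa_guard_abort
      __cxa_guard_release(&guard);
    }
  }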
@@ -1914,7 +2086,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
const char *Name = "__cxa_atexit";
if (TLS) {
const llvm::Triple &T = CGF.getTarget().getTriple();
- Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit";
+ Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
}
// We're assuming that the destructor function is something we can
@@ -1970,10 +2142,10 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
static bool isThreadWrapperReplaceable(const VarDecl *VD,
CodeGen::CodeGenModule &CGM) {
assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
- // OS X prefers to have references to thread local variables to go through
+ // Darwin prefers references to thread local variables to go through
// the thread wrapper instead of directly referencing the backing variable.
return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
- CGM.getTarget().getTriple().isMacOSX();
+ CGM.getTarget().getTriple().isOSDarwin();
}
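For orientation, a hypothetical example of what goes through the wrapper:

  int compute();                         // hypothetical initializer
  thread_local int tls_var = compute();  // dynamically initialized TLS
  // Conceptually, each cross-TU use of tls_var becomes a call to a
  // wrapper along the lines of:
  //   int &tls_var_wrapper() { __tls_init(); return tls_var; }
  // On Darwin the wrapper itself is the replaceable symbol, so it keeps
  // default visibility and receives the CXX_FAST_TLS calling convention
  // set up below.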
/// Get the appropriate linkage for the wrapper function. This is essentially
@@ -1989,12 +2161,10 @@ getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
return VarLinkage;
// If the thread wrapper is replaceable, give it appropriate linkage.
- if (isThreadWrapperReplaceable(VD, CGM)) {
- if (llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) ||
- llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
- return llvm::GlobalVariable::WeakAnyLinkage;
- return VarLinkage;
- }
+ if (isThreadWrapperReplaceable(VD, CGM))
+ if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
+ !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
+ return VarLinkage;
return llvm::GlobalValue::WeakODRLinkage;
}
@@ -2006,7 +2176,6 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
{
llvm::raw_svector_ostream Out(WrapperName);
getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
- Out.flush();
}
if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
@@ -2021,22 +2190,29 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
WrapperName.str(), &CGM.getModule());
// Always resolve references to the wrapper at link time.
- if (!Wrapper->hasLocalLinkage() && !isThreadWrapperReplaceable(VD, CGM))
+ if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
+ !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
+ !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (isThreadWrapperReplaceable(VD, CGM)) {
+ Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+ Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
+ }
return Wrapper;
}
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
- CodeGenModule &CGM,
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
- CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits,
- ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
llvm::Function *InitFunc = nullptr;
if (!CXXThreadLocalInits.empty()) {
// Generate a guarded initialization function.
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init",
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
SourceLocation(),
/*TLS=*/true);
llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
@@ -2044,12 +2220,17 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::GlobalVariable::InternalLinkage,
llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
Guard->setThreadLocal(true);
+
+ CharUnits GuardAlign = CharUnits::One();
+ Guard->setAlignment(GuardAlign.getQuantity());
+
CodeGenFunction(CGM)
- .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits, Guard);
+ .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
+ Address(Guard, GuardAlign));
}
- for (unsigned I = 0, N = CXXThreadLocals.size(); I != N; ++I) {
- const VarDecl *VD = CXXThreadLocals[I].first;
- llvm::GlobalVariable *Var = CXXThreadLocals[I].second;
+ for (const VarDecl *VD : CXXThreadLocals) {
+ llvm::GlobalVariable *Var =
+ cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
// Some targets require that all accesses to thread local variables go through
// the thread wrapper. This means that we cannot attempt to create a thread
@@ -2062,7 +2243,6 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
{
llvm::raw_svector_ostream Out(InitFnName);
getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
- Out.flush();
}
// If we have a definition for the variable, emit the initialization
@@ -2092,7 +2272,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
llvm::LLVMContext &Context = CGM.getModule().getContext();
llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
- CGBuilderTy Builder(Entry);
+ CGBuilderTy Builder(CGM, Entry);
if (InitIsInitFunc) {
if (Init)
Builder.CreateCall(Init);
@@ -2114,9 +2294,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
// the referenced object.
llvm::Value *Val = Var;
if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = Builder.CreateLoad(Val);
- LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
- Val = LI;
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Val = Builder.CreateAlignedLoad(Val, Align);
}
if (Val->getType() != Wrapper->getReturnType())
Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -2128,18 +2307,19 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) {
- QualType T = VD->getType();
- llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
- llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
+ llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
- Val = CGF.Builder.CreateCall(Wrapper);
+ llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
+ if (isThreadWrapperReplaceable(VD, CGF.CGM))
+ CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
LValue LV;
if (VD->getType()->isReferenceType())
- LV = CGF.MakeNaturalAlignAddrLValue(Val, LValType);
+ LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
else
- LV = CGF.MakeAddrLValue(Val, LValType, CGF.getContext().getDeclAlign(VD));
+ LV = CGF.MakeAddrLValue(CallVal, LValType,
+ CGF.getContext().getDeclAlign(VD));
// FIXME: need setObjCGCLValueClass?
return LV;
}
@@ -2255,11 +2435,9 @@ public:
llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
- Out.flush();
- StringRef Name = OutName.str();
// We know that the mangled name of the type starts at index 4 of the
// mangled name of the typename, so we can just index into it in order to
@@ -2278,11 +2456,9 @@ llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
- Out.flush();
- StringRef Name = OutName.str();
// Look for an existing global.
llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
@@ -2346,9 +2522,19 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
return true;
case BuiltinType::Dependent:
@@ -2678,11 +2864,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
- Out.flush();
- StringRef Name = OutName.str();
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
if (OldGV && !OldGV->isDeclaration()) {
@@ -2818,9 +3002,6 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
new llvm::GlobalVariable(M, Init->getType(),
/*Constant=*/true, Linkage, Init, Name);
- if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
- GV->setComdat(M.getOrInsertComdat(GV->getName()));
-
// If there's already an old global variable, replace it with the new one.
if (OldGV) {
GV->takeName(OldGV);
@@ -2830,6 +3011,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
OldGV->eraseFromParent();
}
+ if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(M.getOrInsertComdat(GV->getName()));
+
// The Itanium ABI specifies that type_info objects must be globally
// unique, with one exception: if the type is an incomplete class
// type or a (possibly indirect) pointer to one. That exception
@@ -3232,15 +3416,13 @@ static void emitConstructorDestructorAlias(CodeGenModule &CGM,
return;
auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
- llvm::PointerType *AliasType = Aliasee->getType();
// Create the alias with no name.
- auto *Alias = llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee,
- &CGM.getModule());
+ auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
// Switch any previous uses to the alias.
if (Entry) {
- assert(Entry->getType() == AliasType &&
+ assert(Entry->getType() == Aliasee->getType() &&
"declaration exists with different type");
Alias->takeName(Entry);
Entry->replaceAllUsesWith(Alias);
@@ -3278,7 +3460,7 @@ void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
if (CGType == StructorCodegen::RAUW) {
StringRef MangledName = CGM.getMangledName(CompleteDecl);
- auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(BaseDecl));
+ auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
CGM.addReplacement(MangledName, Aliasee);
return;
}
@@ -3345,7 +3527,7 @@ namespace {
/// of the caught type, so we have to assume the actual thrown
/// exception type might have a throwing destructor, even if the
/// caught type's destructor is trivial or nothrow.
- struct CallEndCatch : EHScopeStack::Cleanup {
+ struct CallEndCatch final : EHScopeStack::Cleanup {
CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
bool MightThrow;
@@ -3379,7 +3561,7 @@ static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
const VarDecl &CatchParam,
- llvm::Value *ParamAddr,
+ Address ParamAddr,
SourceLocation Loc) {
// Load the exception from where the landing pad saved it.
llvm::Value *Exn = CGF.getExceptionFromSlot();
@@ -3433,12 +3615,13 @@ static void InitCatchParam(CodeGenFunction &CGF,
cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
// Create the temporary and write the adjusted pointer into it.
- llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ Address ExnPtrTmp =
+ CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
CGF.Builder.CreateStore(Casted, ExnPtrTmp);
// Bind the reference to the temporary.
- AdjustedExn = ExnPtrTmp;
+ AdjustedExn = ExnPtrTmp.getPointer();
}
}
@@ -3483,8 +3666,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
- LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType,
- CGF.getContext().getDeclAlign(&CatchParam));
+ LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
switch (TEK) {
case TEK_Complex:
CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
@@ -3502,6 +3684,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
}
assert(isa<RecordType>(CatchType) && "unexpected catch type!");
+ auto catchRD = CatchType->getAsCXXRecordDecl();
+ CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
@@ -3510,7 +3694,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
const Expr *copyExpr = CatchParam.getInit();
if (!copyExpr) {
llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
- llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
return;
}
@@ -3521,7 +3706,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
// Cast that to the appropriate type.
- llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
// The copy expression is defined in terms of an OpaqueValueExpr.
// Find it and map it to the adjusted expression.
@@ -3533,9 +3719,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
CGF.EmitAggExpr(copyExpr,
- AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(ParamAddr, Qualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
@@ -3619,7 +3804,7 @@ static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
// Set up the function.
llvm::BasicBlock *entry =
llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
- CGBuilderTy builder(entry);
+ CGBuilderTy builder(CGM, entry);
// Pull the exception pointer out of the parameter list.
llvm::Value *exn = &*fn->arg_begin();
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 3433990e1288..93210d54d4bb 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGVTables.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
@@ -45,7 +46,7 @@ public:
: CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
ClassHierarchyDescriptorType(nullptr),
CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
- ThrowInfoType(nullptr), CatchHandlerTypeType(nullptr) {}
+ ThrowInfoType(nullptr) {}
bool HasThisReturn(GlobalDecl GD) const override;
bool hasMostDerivedReturn(GlobalDecl GD) const override;
@@ -56,6 +57,27 @@ public:
bool isSRetParameterAfterThis() const override { return true; }
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Microsoft ABI doesn't use separate complete-object vs.
+ // base-object variants of constructors, but it does for destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat: llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
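Sketch of the distinction (hypothetical hierarchy):

  struct A { ~A(); };
  struct B : virtual A { ~B(); };
  // B's complete ("vbase") destructor destroys the virtual base A and is
  // only invoked on a most-derived object, so 'this' is complete there;
  // the base destructor skips virtual bases and may run on a subobject.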
+
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
FunctionArgList &Args) const override {
assert(Args.size() >= 2 &&
@@ -68,11 +90,30 @@ public:
return 1;
}
+ std::vector<CharUnits> getVBPtrOffsets(const CXXRecordDecl *RD) override {
+ std::vector<CharUnits> VBPtrOffsets;
+ const ASTContext &Context = getContext();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
+ for (const VPtrInfo *VBT : *VBGlobals.VBTables) {
+ const ASTRecordLayout &SubobjectLayout =
+ Context.getASTRecordLayout(VBT->BaseWithVPtr);
+ CharUnits Offs = VBT->NonVirtualOffset;
+ Offs += SubobjectLayout.getVBPtrOffset();
+ if (VBT->getVBaseWithVPtr())
+ Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
+ VBPtrOffsets.push_back(Offs);
+ }
+ llvm::array_pod_sort(VBPtrOffsets.begin(), VBPtrOffsets.end());
+ return VBPtrOffsets;
+ }
+
StringRef GetPureVirtualCallName() override { return "_purecall"; }
StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
@@ -84,31 +125,39 @@ public:
const VPtrInfo *Info);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
- llvm::Constant *
+ CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override;
+ /// MSVC needs an extra flag to indicate a catchall.
+ CatchTypeInfo getCatchAllTypeInfo() override {
+ return CatchTypeInfo{nullptr, 0x40};
+ }
+
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
+ bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
+ return false;
+ }
llvm::Value *
- GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
@@ -182,9 +231,9 @@ public:
return MD->getParent();
}
- llvm::Value *
+ Address
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
+ Address This,
bool VirtualCall) override;
void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
@@ -203,7 +252,7 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) override;
+ bool Delegating, Address This) override;
void emitVTableBitSetEntries(VPtrInfo *Info, const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable);
@@ -211,10 +260,22 @@ public:
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
+ bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
+ CodeGenFunction::VPtr Vptr) override;
+
+ /// Don't initialize vptrs if the dynamic class
+ /// is marked with the 'novtable' attribute.
+ bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
+ return !VTableClass->hasAttr<MSNoVTableAttr>();
+ }
+
+ llvm::Constant *
+ getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) override;
+
llvm::Value *getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
- BaseSubobject Base, const CXXRecordDecl *NearestVBase,
- bool &NeedsVirtualOffset) override;
+ BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
llvm::Constant *
getVTableAddressPointForConstExpr(BaseSubobject Base,
@@ -224,13 +285,13 @@ public:
CharUnits VPtrOffset) override;
llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This, llvm::Type *Ty,
+ Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType,
- llvm::Value *This,
+ Address This,
const CXXMemberCallExpr *CE) override;
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
@@ -253,7 +314,6 @@ public:
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
getMangleContext().mangleCXXVirtualDisplacementMap(SrcRD, DstRD, Out);
- Out.flush();
StringRef MangledName = OutName.str();
if (auto *VDispMap = CGM.getModule().getNamedGlobal(MangledName))
@@ -310,18 +370,16 @@ public:
Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
}
- llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
- llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
void EmitThreadLocalInitFuncs(
- CodeGenModule &CGM,
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
- CXXThreadLocals,
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
- ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override;
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction() const override { return false; }
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
@@ -360,13 +418,13 @@ public:
QualType elementType) override;
bool requiresArrayCookie(const CXXNewExpr *expr) override;
CharUnits getArrayCookieSizeImpl(QualType type) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) override;
friend struct MSRTTIBuilder;
@@ -493,14 +551,6 @@ private:
return llvm::Constant::getAllOnesValue(CGM.IntTy);
}
- llvm::Constant *getConstantOrZeroInt(llvm::Constant *C) {
- return C ? C : getZeroInt();
- }
-
- llvm::Value *getValueOrZeroInt(llvm::Value *C) {
- return C ? C : getZeroInt();
- }
-
CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD);
void
@@ -511,13 +561,13 @@ private:
/// the vbptr to the virtual base. Optionally returns the address of the
/// vbptr itself.
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *Base,
+ Address Base,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtr = nullptr);
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *Base,
+ Address Base,
int32_t VBPtrOffset,
int32_t VBTableOffset,
llvm::Value **VBPtr = nullptr) {
@@ -527,14 +577,14 @@ private:
return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
}
- std::pair<llvm::Value *, llvm::Value *>
- performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value,
+ std::pair<Address, llvm::Value *>
+ performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy);
/// \brief Performs a full virtual base adjustment. Used to dereference
/// pointers to members of virtual bases.
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
- const CXXRecordDecl *RD, llvm::Value *Base,
+ const CXXRecordDecl *RD, Address Base,
llvm::Value *VirtualBaseAdjustmentOffset,
llvm::Value *VBPtrOffset /* optional */);
@@ -570,17 +620,6 @@ public:
return RD->hasAttr<MSInheritanceAttr>();
}
- bool isTypeInfoCalculable(QualType Ty) const override {
- if (!CGCXXABI::isTypeInfoCalculable(Ty))
- return false;
- if (const auto *MPT = Ty->getAs<MemberPointerType>()) {
- const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
- if (!RD->hasAttr<MSInheritanceAttr>())
- return false;
- }
- return true;
- }
-
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
@@ -600,7 +639,7 @@ public:
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
llvm::Value *EmitNonNullMemberPointerConversion(
@@ -623,23 +662,12 @@ public:
llvm::Value *
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *&This, llvm::Value *MemPtr,
+ Address This, llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
- llvm::StructType *getCatchHandlerTypeType() {
- if (!CatchHandlerTypeType) {
- llvm::Type *FieldTypes[] = {
- CGM.IntTy, // Flags
- CGM.Int8PtrTy, // TypeDescriptor
- };
- CatchHandlerTypeType = llvm::StructType::create(
- CGM.getLLVMContext(), FieldTypes, "eh.CatchHandlerType");
- }
- return CatchHandlerTypeType;
- }
-
llvm::StructType *getCatchableTypeType() {
if (CatchableTypeType)
return CatchableTypeType;
@@ -755,7 +783,6 @@ private:
llvm::StructType *CatchableTypeType;
llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap;
llvm::StructType *ThrowInfoType;
- llvm::StructType *CatchHandlerTypeType;
};
}
@@ -823,7 +850,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
// FIXME: Provide a source location here even though there's no
@@ -848,11 +875,15 @@ void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
}
namespace {
-struct CallEndCatchMSVC : EHScopeStack::Cleanup {
- CallEndCatchMSVC() {}
+struct CatchRetScope final : EHScopeStack::Cleanup {
+ llvm::CatchPadInst *CPI;
+
+ CatchRetScope(llvm::CatchPadInst *CPI) : CPI(CPI) {}
+
void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.EmitNounwindRuntimeCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_endcatch));
+ llvm::BasicBlock *BB = CGF.createBasicBlock("catchret.dest");
+ CGF.Builder.CreateCatchRet(CPI, BB);
+ CGF.EmitBlock(BB);
}
};
}
@@ -862,50 +893,59 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
// In the MS ABI, the runtime handles the copy, and the catch handler is
// responsible for destruction.
VarDecl *CatchParam = S->getExceptionDecl();
- llvm::Value *Exn = CGF.getExceptionFromSlot();
- llvm::Function *BeginCatch =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_begincatch);
+ llvm::BasicBlock *CatchPadBB = CGF.Builder.GetInsertBlock();
+ llvm::CatchPadInst *CPI =
+ cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
+ CGF.CurrentFuncletPad = CPI;
// If this is a catch-all or the catch parameter is unnamed, we don't need to
// emit an alloca for the object.
if (!CatchParam || !CatchParam->getDeclName()) {
- llvm::Value *Args[2] = {Exn, llvm::Constant::getNullValue(CGF.Int8PtrTy)};
- CGF.EmitNounwindRuntimeCall(BeginCatch, Args);
- CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup);
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
return;
}
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
- llvm::Value *ParamAddr =
- CGF.Builder.CreateBitCast(var.getObjectAddress(CGF), CGF.Int8PtrTy);
- llvm::Value *Args[2] = {Exn, ParamAddr};
- CGF.EmitNounwindRuntimeCall(BeginCatch, Args);
- CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup);
+ CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.EmitAutoVarCleanups(var);
}
-std::pair<llvm::Value *, llvm::Value *>
-MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value,
+/// We need to perform a generic polymorphic operation (like a typeid
+/// or a cast), which requires an object with a vfptr. Adjust the
+/// address to point to an object with a vfptr.
+std::pair<Address, llvm::Value *>
+MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
+ // If the class itself has a vfptr, great. This check implicitly
+ // covers non-virtual base subobjects: a class with its own virtual
+ // functions would be a candidate to be a primary base.
if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
return std::make_pair(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0));
- // Perform a base adjustment.
- const CXXBaseSpecifier *PolymorphicBase = std::find_if(
- SrcDecl->vbases_begin(), SrcDecl->vbases_end(),
- [&](const CXXBaseSpecifier &Base) {
- const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
- return Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr();
- });
- llvm::Value *Offset = GetVirtualBaseClassOffset(
- CGF, Value, SrcDecl, PolymorphicBase->getType()->getAsCXXRecordDecl());
- Value = CGF.Builder.CreateInBoundsGEP(Value, Offset);
+ // Okay, one of the vbases must have a vfptr, or else this isn't
+ // actually a polymorphic class.
+ const CXXRecordDecl *PolymorphicBase = nullptr;
+ for (auto &Base : SrcDecl->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr()) {
+ PolymorphicBase = BaseDecl;
+ break;
+ }
+ }
+ assert(PolymorphicBase && "polymorphic class has no apparent vfptr?");
+
+ llvm::Value *Offset =
+ GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
- return std::make_pair(Value, Offset);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
+ return std::make_pair(Address(Ptr, VBaseAlign), Offset);
}
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
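Since performBaseAdjustment now applies a runtime offset, the incoming alignment cannot be propagated as-is; getVBaseAlignment computes a conservative replacement from the record layout. The general rule such helpers rely on (mirroring llvm::MinAlign) is that only the low bits shared by the old alignment and the offset survive. A plain-integer sketch, illustrative only:

    #include <cstdint>

    // Conservative alignment of (ptr + Offset) when align(ptr) == OldAlign:
    // the lowest set bit of (OldAlign | Offset).
    uint64_t alignAfterOffset(uint64_t OldAlign, uint64_t Offset) {
      uint64_t Bits = OldAlign | Offset;
      return Bits & ~(Bits - 1);
    }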
@@ -934,12 +974,12 @@ void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
llvm::Value *Offset;
std::tie(ThisPtr, Offset) = performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
- return CGF.Builder.CreateBitCast(
- emitRTtypeidCall(CGF, ThisPtr).getInstruction(), StdTypeInfoPtrTy);
+ auto Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer()).getInstruction();
+ return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
@@ -950,7 +990,7 @@ bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
}
llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
+ CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
@@ -960,7 +1000,8 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
llvm::Value *Offset;
- std::tie(Value, Offset) = performBaseAdjustment(CGF, Value, SrcRecordTy);
+ std::tie(This, Offset) = performBaseAdjustment(CGF, This, SrcRecordTy);
+ llvm::Value *ThisPtr = This.getPointer();
// PVOID __RTDynamicCast(
// PVOID inptr,
@@ -974,14 +1015,14 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTDynamicCast");
llvm::Value *Args[] = {
- Value, Offset, SrcRTTI, DestRTTI,
+ ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
- Value = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
- return CGF.Builder.CreateBitCast(Value, DestLTy);
+ ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
+ return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
}
llvm::Value *
-MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) {
llvm::Value *Offset;
@@ -993,7 +1034,7 @@ MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
- llvm::Value *Args[] = {Value};
+ llvm::Value *Args[] = {Value.getPointer()};
return CGF.EmitRuntimeCall(Function, Args);
}
@@ -1002,7 +1043,7 @@ bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
}
llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
- CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl,
+ CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
const ASTContext &Context = getContext();
int64_t VBPtrChars =
@@ -1040,15 +1081,16 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
if (!RD)
return false;
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
if (FI.isInstanceMethod()) {
// If it's an instance method, aggregates are always returned indirectly via
// the second parameter.
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
return true;
} else if (!RD->isPOD()) {
// If it's a free function, non-POD types are returned indirectly.
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
}
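For context on setSRetAfterThis above: when an instance method returns an aggregate indirectly, this ABI places the hidden return-slot pointer after 'this' rather than before it. A hedged lowering sketch (names hypothetical; mangling and calling-convention details omitted):

    struct Obj;
    struct Big { int data[8]; };   // returned indirectly in this ABI

    // Source:  Big Obj::get();
    // Lowered shape: the sret pointer follows 'this'.
    void Obj_get(Obj *self, Big *retslot);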
@@ -1100,8 +1142,7 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS =
- cast<llvm::PointerType>(getThisValue(CGF)->getType())->getAddressSpace();
+ unsigned AS = getThisAddress(CGF).getAddressSpace();
llvm::Value *Int8This = nullptr; // Initialize lazily.
for (VBOffsets::const_iterator I = VBaseMap.begin(), E = VBaseMap.end();
@@ -1110,7 +1151,7 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
continue;
llvm::Value *VBaseOffset =
- GetVirtualBaseClassOffset(CGF, getThisValue(CGF), RD, I->first);
+ GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, I->first);
// FIXME: it doesn't look right that we SExt in GetVirtualBaseClassOffset()
// just to Trunc back immediately.
VBaseOffset = Builder.CreateTruncOrBitCast(VBaseOffset, CGF.Int32Ty);
@@ -1131,7 +1172,8 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
VtorDispPtr = Builder.CreateBitCast(
VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
- Builder.CreateStore(VtorDispValue, VtorDispPtr);
+ Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
+ CharUnits::fromQuantity(4));
}
}
@@ -1162,8 +1204,8 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
- llvm::Value *ThisInt8Ptr =
- CGF.Builder.CreateBitCast(getThisValue(CGF), CGM.Int8PtrTy, "this.int8");
+ Address This = getThisAddress(CGF);
+ This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -1177,11 +1219,10 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
- llvm::Value *VBPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity());
+ Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
- VBPtr = CGF.Builder.CreateBitCast(VBPtr, GVPtr->getType()->getPointerTo(0),
+ VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
"vbptr." + VBT->ReusingBase->getName());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
@@ -1255,8 +1296,9 @@ MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
return Adjustment;
}
-llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
- CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, bool VirtualCall) {
+Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
+ CodeGenFunction &CGF, GlobalDecl GD, Address This,
+ bool VirtualCall) {
if (!VirtualCall) {
// If the call of a virtual function is not virtual, we just have to
// compensate for the adjustment the virtual function does in its prologue.
@@ -1264,11 +1306,9 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (Adjustment.isZero())
return This;
- unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
assert(Adjustment.isPositive());
- return CGF.Builder.CreateConstGEP1_32(This, Adjustment.getQuantity());
+ return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
GD = GD.getCanonicalDecl();
@@ -1288,8 +1328,6 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
MicrosoftVTableContext::MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
- unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
CharUnits StaticOffset = ML.VFPtrOffset;
// Base destructors expect 'this' to point to the beginning of the base
@@ -1298,27 +1336,34 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
StaticOffset = CharUnits::Zero();
+ Address Result = This;
if (ML.VBase) {
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+
+ const CXXRecordDecl *Derived = MD->getParent();
+ const CXXRecordDecl *VBase = ML.VBase;
llvm::Value *VBaseOffset =
- GetVirtualBaseClassOffset(CGF, This, MD->getParent(), ML.VBase);
- This = CGF.Builder.CreateInBoundsGEP(This, VBaseOffset);
+ GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
+ llvm::Value *VBasePtr =
+ CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
+ Result = Address(VBasePtr, VBaseAlign);
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
if (ML.VBase) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
// base that declares a method in the most derived class.
// FIXME: Update the code that emits this adjustment in thunks prologues.
- This = CGF.Builder.CreateConstGEP1_32(This, StaticOffset.getQuantity());
+ Result = CGF.Builder.CreateConstByteGEP(Result, StaticOffset);
} else {
- This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
- StaticOffset.getQuantity());
+ Result = CGF.Builder.CreateConstInBoundsByteGEP(Result, StaticOffset);
}
}
- return This;
+ return Result;
}
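The rewritten adjustment performs at most two displacements: a dynamic step to the virtual base that holds the vfptr (when ML.VBase is set), then a static byte offset. The arithmetic, reduced to a hypothetical plain-C helper:

    #include <cstddef>

    char *adjustThis(char *thisPtr, ptrdiff_t vbaseOffset,
                     ptrdiff_t staticOffset) {
      thisPtr += vbaseOffset;   // step into the virtual base, if any
      thisPtr += staticOffset;  // then the non-virtual displacement
      return thisPtr;
    }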
void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
@@ -1439,7 +1484,7 @@ unsigned MicrosoftCXXABI::addImplicitConstructorArgs(
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) {
+ bool Delegating, Address This) {
llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
if (DD->isVirtual()) {
@@ -1449,7 +1494,7 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
This, false);
}
- CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This,
+ CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This.getPointer(),
/*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), nullptr,
getFromDtorType(Type));
@@ -1478,15 +1523,14 @@ void MicrosoftCXXABI::emitVTableBitSetEntries(VPtrInfo *Info,
if (Info->PathToBaseWithVPtr.empty()) {
if (!CGM.IsCFIBlacklistedRecord(RD))
- BitsetsMD->addOperand(
- CGM.CreateVTableBitSetEntry(VTable, AddressPoint, RD));
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, RD);
return;
}
// Add a bitset entry for the least derived base belonging to this vftable.
if (!CGM.IsCFIBlacklistedRecord(Info->PathToBaseWithVPtr.back()))
- BitsetsMD->addOperand(CGM.CreateVTableBitSetEntry(
- VTable, AddressPoint, Info->PathToBaseWithVPtr.back()));
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint,
+ Info->PathToBaseWithVPtr.back());
// Add a bitset entry for each derived class that is laid out at the same
// offset as the least derived base.
@@ -1505,14 +1549,12 @@ void MicrosoftCXXABI::emitVTableBitSetEntries(VPtrInfo *Info,
if (!Offset.isZero())
return;
if (!CGM.IsCFIBlacklistedRecord(DerivedRD))
- BitsetsMD->addOperand(
- CGM.CreateVTableBitSetEntry(VTable, AddressPoint, DerivedRD));
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, DerivedRD);
}
// Finally do the same for the most derived class.
if (Info->FullOffsetInMDC.isZero() && !CGM.IsCFIBlacklistedRecord(RD))
- BitsetsMD->addOperand(
- CGM.CreateVTableBitSetEntry(VTable, AddressPoint, RD));
+ CGM.CreateVTableBitSetEntry(BitsetsMD, VTable, AddressPoint, RD);
}
void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1542,14 +1584,15 @@ void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
}
}
+bool MicrosoftCXXABI::isVirtualOffsetNeededForVTableField(
+ CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
+ return Vptr.NearestVBase != nullptr;
+}
+
llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
- const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) {
- NeedsVirtualOffset = (NearestVBase != nullptr);
-
- (void)getAddrOfVTable(VTableClass, Base.getBaseOffset());
- VFTableIdTy ID(VTableClass, Base.getBaseOffset());
- llvm::GlobalValue *VTableAddressPoint = VFTablesMap[ID];
+ const CXXRecordDecl *NearestVBase) {
+ llvm::Constant *VTableAddressPoint = getVTableAddressPoint(Base, VTableClass);
if (!VTableAddressPoint) {
assert(Base.getBase()->getNumVBases() &&
!getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr());
@@ -1564,11 +1607,17 @@ static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
MangleContext.mangleCXXVFTable(RD, VFPtr->MangledPath, Out);
}
-llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
+llvm::Constant *
+MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) {
(void)getAddrOfVTable(VTableClass, Base.getBaseOffset());
VFTableIdTy ID(VTableClass, Base.getBaseOffset());
- llvm::GlobalValue *VFTable = VFTablesMap[ID];
+ return VFTablesMap[ID];
+}
+
+llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
+ BaseSubobject Base, const CXXRecordDecl *VTableClass) {
+ llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
assert(VFTable && "Couldn't find a vftable for the given base?");
return VFTable;
}
@@ -1578,6 +1627,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
// getAddrOfVTable may return 0 if asked to get an address of a vtable which
// shouldn't be used in the given record type. We want to cache this result in
// VFTablesMap, thus a simple zero check is not sufficient.
+
VFTableIdTy ID(RD, VPtrOffset);
VTablesMapTy::iterator I;
bool Inserted;
@@ -1631,10 +1681,11 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
if (llvm::GlobalValue *VFTable =
CGM.getModule().getNamedGlobal(VFTableName)) {
VFTablesMap[ID] = VFTable;
- return VTableAliasIsRequred
- ? cast<llvm::GlobalVariable>(
- cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
- : cast<llvm::GlobalVariable>(VFTable);
+ VTable = VTableAliasIsRequred
+ ? cast<llvm::GlobalVariable>(
+ cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
+ : cast<llvm::GlobalVariable>(VFTable);
+ return VTable;
}
uint64_t NumVTableSlots =
@@ -1678,9 +1729,10 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
if (C)
C->setSelectionKind(llvm::Comdat::Largest);
}
- VFTable = llvm::GlobalAlias::create(
- cast<llvm::PointerType>(VTableGEP->getType()), VFTableLinkage,
- VFTableName.str(), VTableGEP, &CGM.getModule());
+ VFTable = llvm::GlobalAlias::create(CGM.Int8PtrTy,
+ /*AddressSpace=*/0, VFTableLinkage,
+ VFTableName.str(), VTableGEP,
+ &CGM.getModule());
VFTable->setUnnamedAddr(true);
} else {
// We don't need a GlobalAlias to be a symbol for the VTable if we won't
@@ -1746,16 +1798,18 @@ getClassAtVTableLocation(ASTContext &Ctx, GlobalDecl GD,
llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) {
GD = GD.getCanonicalDecl();
CGBuilderTy &Builder = CGF.Builder;
Ty = Ty->getPointerTo()->getPointerTo();
- llvm::Value *VPtr =
+ Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty);
+
+ auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
+ llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
MicrosoftVTableContext::MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
@@ -1765,12 +1819,12 @@ llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- return Builder.CreateLoad(VFuncPtr);
+ return Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- llvm::Value *This, const CXXMemberCallExpr *CE) {
+ Address This, const CXXMemberCallExpr *CE) {
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
@@ -1789,7 +1843,8 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
DtorType == Dtor_Deleting);
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(), This,
+ RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(),
ImplicitParam, Context.IntTy, CE,
StructorType::Deleting);
return RV.getScalarVal();
@@ -1832,7 +1887,6 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
getMangleContext().mangleVirtualMemPtrThunk(MD, Out);
- Out.flush();
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
@@ -1882,10 +1936,12 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
llvm::Value *VTable = CGF.GetVTablePtr(
- getThisValue(CGF), ThunkTy->getPointerTo()->getPointerTo());
+ getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo(), MD->getParent());
+
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- llvm::Value *Callee = CGF.Builder.CreateLoad(VFuncPtr);
+ llvm::Value *Callee =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee);
@@ -1908,7 +1964,6 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
getMangleContext().mangleCXXVBTable(RD, VBT.MangledPath, Out);
- Out.flush();
StringRef Name = OutName.str();
llvm::ArrayType *VBTableType =
@@ -1978,22 +2033,30 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
}
llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) {
if (TA.isEmpty())
- return This;
+ return This.getPointer();
- llvm::Value *V = CGF.Builder.CreateBitCast(This, CGF.Int8PtrTy);
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
- if (!TA.Virtual.isEmpty()) {
+ llvm::Value *V;
+ if (TA.Virtual.isEmpty()) {
+ V = This.getPointer();
+ } else {
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
// Adjust the this argument based on the vtordisp value.
- llvm::Value *VtorDispPtr =
- CGF.Builder.CreateConstGEP1_32(V, TA.Virtual.Microsoft.VtordispOffset);
- VtorDispPtr =
- CGF.Builder.CreateBitCast(VtorDispPtr, CGF.Int32Ty->getPointerTo());
+ Address VtorDispPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(This,
+ CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
+ VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
- V = CGF.Builder.CreateGEP(V, CGF.Builder.CreateNeg(VtorDisp));
+ V = CGF.Builder.CreateGEP(This.getPointer(),
+ CGF.Builder.CreateNeg(VtorDisp));
+
+ // Unfortunately, having applied the vtordisp means that we no
+ // longer really have a known alignment for the vbptr step.
+ // We'll assume the vbptr is pointer-aligned.
if (TA.Virtual.Microsoft.VBPtrOffset) {
// If the final overrider is defined in a virtual base other than the one
@@ -2003,7 +2066,8 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, V, -TA.Virtual.Microsoft.VBPtrOffset,
+ GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
+ -TA.Virtual.Microsoft.VBPtrOffset,
TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
@@ -2021,20 +2085,21 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
}
llvm::Value *
-MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
if (RA.isEmpty())
- return Ret;
+ return Ret.getPointer();
- llvm::Value *V = CGF.Builder.CreateBitCast(Ret, CGF.Int8PtrTy);
+ auto OrigTy = Ret.getType();
+ Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
+ llvm::Value *V = Ret.getPointer();
if (RA.Virtual.Microsoft.VBIndex) {
assert(RA.Virtual.Microsoft.VBIndex > 0);
- const ASTContext &Context = getContext();
- int32_t IntSize = Context.getTypeSizeInChars(Context.IntTy).getQuantity();
+ int32_t IntSize = CGF.getIntSize().getQuantity();
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, V, RA.Virtual.Microsoft.VBPtrOffset,
+ GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
@@ -2043,7 +2108,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
// Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, Ret->getType());
+ return CGF.Builder.CreateBitCast(V, OrigTy);
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
@@ -2068,37 +2133,34 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
}
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- llvm::Value *numElementsPtr =
- CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
+ Address numElementsPtr =
+ CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
-llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *newPtr,
- llvm::Value *numElements,
- const CXXNewExpr *expr,
- QualType elementType) {
+Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
assert(requiresArrayCookie(expr));
// The size of the cookie.
CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
// Compute an offset to the cookie.
- llvm::Value *cookiePtr = newPtr;
+ Address cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- unsigned AS = newPtr->getType()->getPointerAddressSpace();
- llvm::Value *numElementsPtr
- = CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
+ Address numElementsPtr
+ = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
CGF.Builder.CreateStore(numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
- return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
- cookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
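The cookie protocol in miniature: the allocation starts with the element count, and the usable buffer begins cookieSize bytes in, which is exactly what readArrayCookieImpl later loads back. A hedged C++ sketch:

    #include <cstddef>

    void *writeArrayCookie(void *newPtr, size_t numElements,
                           size_t cookieSize) {
      *static_cast<size_t *>(newPtr) = numElements;  // the count slot
      return static_cast<char *>(newPtr) + cookieSize;
    }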
static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
@@ -2130,11 +2192,9 @@ void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
}
void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
- CodeGenModule &CGM,
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
- CXXThreadLocals,
+ CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
- ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
+ ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
// This will create a GV in the .CRT$XDU section. It will point to our
// initialization function. The CRT will call all of these function
// pointers at start-up time and, eventually, at thread-creation time.
@@ -2152,7 +2212,8 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
std::vector<llvm::Function *> NonComdatInits;
for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) {
- llvm::GlobalVariable *GV = CXXThreadLocalInitVars[I];
+ llvm::GlobalVariable *GV = cast<llvm::GlobalVariable>(
+ CGM.GetGlobalValue(CGM.getMangledName(CXXThreadLocalInitVars[I])));
llvm::Function *F = CXXThreadLocalInits[I];
// If the GV is already in a comdat group, then we have to join it.
@@ -2166,8 +2227,8 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
- FTy, "__tls_init", SourceLocation(),
- /*TLS=*/true);
+ FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
+ SourceLocation(), /*TLS=*/true);
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
AddToXDU(InitFunc);
@@ -2181,17 +2242,18 @@ LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
return LValue();
}
-static llvm::GlobalVariable *getInitThreadEpochPtr(CodeGenModule &CGM) {
+static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
+ CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
- return GV;
+ return ConstantAddress(GV, Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*Constant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
- GV->setAlignment(CGM.getTarget().getIntAlign() / 8);
- return GV;
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
}
static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
@@ -2228,10 +2290,10 @@ static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
}
namespace {
-struct ResetGuardBit : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
+struct ResetGuardBit final : EHScopeStack::Cleanup {
+ Address Guard;
unsigned GuardNum;
- ResetGuardBit(llvm::GlobalVariable *Guard, unsigned GuardNum)
+ ResetGuardBit(Address Guard, unsigned GuardNum)
: Guard(Guard), GuardNum(GuardNum) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -2245,9 +2307,9 @@ struct ResetGuardBit : EHScopeStack::Cleanup {
}
};
-struct CallInitThreadAbort : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
- CallInitThreadAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+struct CallInitThreadAbort final : EHScopeStack::Cleanup {
+ llvm::Value *Guard;
+ CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Calling _Init_thread_abort will reset the guard's state.
@@ -2280,6 +2342,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *GuardTy = CGF.Int32Ty;
llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0);
+ CharUnits GuardAlign = CharUnits::fromQuantity(4);
// Get the guard variable for this function if we have one already.
GuardInfo *GI = nullptr;
@@ -2320,7 +2383,6 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
Out);
else
getMangleContext().mangleStaticGuardVariable(&D, Out);
- Out.flush();
}
// Create the guard variable with a zero-initializer. Just absorb linkage,
@@ -2330,6 +2392,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GV->getLinkage(), Zero, GuardName.str());
GuardVar->setVisibility(GV->getVisibility());
GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
+ GuardVar->setAlignment(GuardAlign.getQuantity());
if (GuardVar->isWeakForLinker())
GuardVar->setComdat(
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
@@ -2339,6 +2402,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GI->Guard = GuardVar;
}
+ ConstantAddress GuardAddr(GuardVar, GuardAlign);
+
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
@@ -2351,7 +2416,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Test our bit from the guard variable.
llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1U << GuardNum);
- llvm::LoadInst *LI = Builder.CreateLoad(GuardVar);
+ llvm::LoadInst *LI = Builder.CreateLoad(GuardAddr);
llvm::Value *IsInitialized =
Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
@@ -2361,8 +2426,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Set our bit in the guard variable and emit the initializer and add a global
// destructor if appropriate.
CGF.EmitBlock(InitBlock);
- Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardVar);
- CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardVar, GuardNum);
+ Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardAddr);
+ CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardAddr, GuardNum);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
Builder.CreateBr(EndBlock);
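The non-thread-safe path above in source-level terms: statics in one function share an i32 guard word, one bit each (up to 32 per word). A sketch of the emitted control flow, with the initializer named hypothetically:

    void guardedInit(unsigned &GuardWord, unsigned GuardNum,
                     void (*Init)()) {
      unsigned Bit = 1u << GuardNum;
      if ((GuardWord & Bit) == 0) {
        GuardWord |= Bit;  // set first; ResetGuardBit clears it on unwind
        Init();
      }
    }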
@@ -2382,11 +2447,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
  // The algorithm is almost identical to the one in the appendix of N2325.
- unsigned IntAlign = CGM.getTarget().getIntAlign() / 8;
-
  // This BasicBlock determines whether or not we have any work to do.
- llvm::LoadInst *FirstGuardLoad =
- Builder.CreateAlignedLoad(GuardVar, IntAlign);
+ llvm::LoadInst *FirstGuardLoad = Builder.CreateLoad(GuardAddr);
FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::LoadInst *InitThreadEpoch =
Builder.CreateLoad(getInitThreadEpochPtr(CGM));
@@ -2399,9 +2461,9 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// This BasicBlock attempts to determine whether or not this thread is
// responsible for doing the initialization.
CGF.EmitBlock(AttemptInitBlock);
- CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM), GuardVar);
- llvm::LoadInst *SecondGuardLoad =
- Builder.CreateAlignedLoad(GuardVar, IntAlign);
+ CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM),
+ GuardAddr.getPointer());
+ llvm::LoadInst *SecondGuardLoad = Builder.CreateLoad(GuardAddr);
SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::Value *ShouldDoInit =
Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt());
@@ -2410,10 +2472,11 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Ok, we ended up getting selected as the initializing thread.
CGF.EmitBlock(InitBlock);
- CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardVar);
+ CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardAddr);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
- CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM), GuardVar);
+ CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM),
+ GuardAddr.getPointer());
Builder.CreateBr(EndBlock);
CGF.EmitBlock(EndBlock);
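The thread-safe path condensed to the CRT protocol it targets (the MSVC 2015 _Init_thread family referenced above; declarations abbreviated, control flow simplified):

    extern "C" {
    extern thread_local int _Init_thread_epoch;
    void _Init_thread_header(int *);
    void _Init_thread_footer(int *);
    }

    void threadSafeInit(int &Guard, void (*Init)()) {
      if (Guard > _Init_thread_epoch) {  // fast path: already initialized
        _Init_thread_header(&Guard);     // elects one initializing thread
        if (Guard == -1) {               // this thread was elected
          Init();                        // CallInitThreadAbort on unwind
          _Init_thread_footer(&Guard);   // publishes completion
        }
      }
    }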
@@ -2768,19 +2831,28 @@ bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT,
llvm::Value *
MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- This = Builder.CreateBitCast(This, CGM.Int8PtrTy);
+ This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
llvm::Value *VBPtr =
- Builder.CreateInBoundsGEP(This, VBPtrOffset, "vbptr");
+ Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
if (VBPtrOut) *VBPtrOut = VBPtr;
VBPtr = Builder.CreateBitCast(VBPtr,
- CGM.Int32Ty->getPointerTo(0)->getPointerTo(0));
- llvm::Value *VBTable = Builder.CreateLoad(VBPtr, "vbtable");
+ CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
+
+ CharUnits VBPtrAlign;
+ if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
+ VBPtrAlign = This.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(CI->getSExtValue()));
+ } else {
+ VBPtrAlign = CGF.getPointerAlign();
+ }
+
+ llvm::Value *VBTable = Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -2790,16 +2862,17 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
// Load an i32 offset from the vb-table.
llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
- return Builder.CreateLoad(VBaseOffs, "vbase_offs");
+ return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
+ "vbase_offs");
}
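GetVBaseOffsetFromVBPtr reduced to plain C: two loads, with the byte offset into the vbtable converted to an index of 32-bit entries (the AShr by 2 above). In the MS ABI the entry is relative to the vbptr's own location; callers such as GetVirtualBaseClassOffset add the vbptr offset back on. Hypothetical helper:

    int loadVBaseOffset(char *thisPtr, int vbptrOffset,
                        int vbtableByteOffset) {
      char *vbptr = thisPtr + vbptrOffset;              // "vbptr"
      int *vbtable = *reinterpret_cast<int **>(vbptr);  // "vbtable"
      return vbtable[vbtableByteOffset >> 2];           // "vbase_offs"
    }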
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
- llvm::Value *Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
+ Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
- Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
+ Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
llvm::BasicBlock *OriginalBB = nullptr;
llvm::BasicBlock *SkipAdjustBB = nullptr;
llvm::BasicBlock *VBaseAdjustBB = nullptr;
@@ -2844,7 +2917,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
Builder.CreateBr(SkipAdjustBB);
CGF.EmitBlock(SkipAdjustBB);
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
- Phi->addIncoming(Base, OriginalBB);
+ Phi->addIncoming(Base.getPointer(), OriginalBB);
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
return Phi;
}
@@ -2852,10 +2925,10 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
}
llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MPT->isMemberDataPointer());
- unsigned AS = Base->getType()->getPointerAddressSpace();
+ unsigned AS = Base.getAddressSpace();
llvm::Type *PType =
CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
CGBuilderTy &Builder = CGF.Builder;
@@ -2877,17 +2950,19 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
}
+ llvm::Value *Addr;
if (VirtualBaseAdjustmentOffset) {
- Base = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
+ Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
VBPtrOffset);
+ } else {
+ Addr = Base.getPointer();
}
// Cast to char*.
- Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+ Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr =
- Builder.CreateInBoundsGEP(Base, FieldOffset, "memptr.offset");
+ Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the address
// space of the base pointer.
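EmitMemberDataPointerAddress in the simple case (no virtual-base adjustment): an MS member-data pointer is a byte displacement applied to the object base. Hedged sketch:

    #include <cstddef>

    int *memberDataAddress(char *base, ptrdiff_t fieldOffset) {
      return reinterpret_cast<int *>(base + fieldOffset); // "memptr.offset"
    }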
@@ -3050,7 +3125,8 @@ llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
} else {
llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
VirtualBaseAdjustmentOffset =
- Builder.CreateLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs));
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
+ CharUnits::fromQuantity(4));
}
DstVBIndexEqZero =
@@ -3131,7 +3207,7 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
if (CK == CK_ReinterpretMemberPointer)
return Src;
- CGBuilderTy Builder(CGM.getLLVMContext());
+ CGBuilderTy Builder(CGM, CGM.getLLVMContext());
auto *Dst = cast<llvm::Constant>(EmitNonNullMemberPointerConversion(
SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder));
@@ -3139,15 +3215,15 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
}
llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
- llvm::Value *MemPtr, const MemberPointerType *MPT) {
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
assert(MPT->isMemberFunctionPointer());
const FunctionProtoType *FPT =
MPT->getPointeeType()->castAs<FunctionProtoType>();
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
- llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
CGBuilderTy &Builder = CGF.Builder;
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
@@ -3171,15 +3247,18 @@ llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
}
if (VirtualBaseAdjustmentOffset) {
- This = AdjustVirtualBase(CGF, E, RD, This, VirtualBaseAdjustmentOffset,
- VBPtrOffset);
+ ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
+ VirtualBaseAdjustmentOffset, VBPtrOffset);
+ } else {
+ ThisPtrForCall = This.getPointer();
}
if (NonVirtualBaseAdjustment) {
// Apply the adjustment and cast back to the original struct type.
- llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
- This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
+ "this.adjusted");
}
return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
@@ -3404,7 +3483,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
auto Type = ABI.getClassHierarchyDescriptorType();
auto CHD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
/*Initializer=*/nullptr,
- StringRef(MangledName));
+ MangledName);
if (CHD->isWeakForLinker())
CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName()));
@@ -3442,7 +3521,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
/*Constant=*/true, Linkage,
- /*Initializer=*/nullptr, StringRef(MangledName));
+ /*Initializer=*/nullptr, MangledName);
if (BCA->isWeakForLinker())
BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName()));
@@ -3484,7 +3563,7 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
auto Type = ABI.getBaseClassDescriptorType();
auto BCD =
new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
- /*Initializer=*/nullptr, StringRef(MangledName));
+ /*Initializer=*/nullptr, MangledName);
if (BCD->isWeakForLinker())
BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName()));
@@ -3530,7 +3609,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) {
// Forward-declare the complete object locator.
llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
auto COL = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage,
- /*Initializer=*/nullptr, StringRef(MangledName));
+ /*Initializer=*/nullptr, MangledName);
// Initialize the CompleteObjectLocator.
llvm::Constant *Fields[] = {
@@ -3582,7 +3661,7 @@ static QualType decomposeTypeForEH(ASTContext &Context, QualType T,
return T;
}
-llvm::Constant *
+CatchTypeInfo
MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
QualType CatchHandlerType) {
// TypeDescriptors for exceptions never have qualified pointer types,
@@ -3601,28 +3680,8 @@ MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
if (IsReference)
Flags |= 8;
- SmallString<256> MangledName;
- {
- llvm::raw_svector_ostream Out(MangledName);
- getMangleContext().mangleCXXCatchHandlerType(Type, Flags, Out);
- }
-
- if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
-
- llvm::Constant *Fields[] = {
- llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
- getAddrOfRTTIDescriptor(Type), // TypeDescriptor
- };
- llvm::StructType *CatchHandlerTypeType = getCatchHandlerTypeType();
- auto *Var = new llvm::GlobalVariable(
- CGM.getModule(), CatchHandlerTypeType, /*Constant=*/true,
- llvm::GlobalValue::PrivateLinkage,
- llvm::ConstantStruct::get(CatchHandlerTypeType, Fields),
- StringRef(MangledName));
- Var->setUnnamedAddr(true);
- Var->setSection("llvm.metadata");
- return Var;
+ return CatchTypeInfo{getAddrOfRTTIDescriptor(Type)->stripPointerCasts(),
+ Flags};
}
/// \brief Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
@@ -3658,7 +3717,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
CGM.getModule(), TypeDescriptorType, /*Constant=*/false,
getLinkageForRTTI(Type),
llvm::ConstantStruct::get(TypeDescriptorType, Fields),
- StringRef(MangledName));
+ MangledName);
if (Var->isWeakForLinker())
Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
@@ -3725,7 +3784,6 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
getMangleContext().mangleCXXCtor(CD, CT, Out);
- Out.flush();
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
@@ -3803,9 +3861,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
- ConstExprIterator ArgBegin(ArgVec.data()),
- ArgEnd(ArgVec.data() + ArgVec.size());
- CGF.EmitCallArgs(Args, FPT, ArgBegin, ArgEnd, CD, IsCopy ? 1 : 0);
+ CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
// Insert any ABI-specific implicit constructor arguments.
unsigned ExtraArgs = addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
@@ -3904,7 +3960,7 @@ llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
llvm::StructType *CTType = getCatchableTypeType();
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CTType, /*Constant=*/true, getLinkageForRTTI(T),
- llvm::ConstantStruct::get(CTType, Fields), StringRef(MangledName));
+ llvm::ConstantStruct::get(CTType, Fields), MangledName);
GV->setUnnamedAddr(true);
GV->setSection(".xdata");
if (GV->isWeakForLinker())
@@ -4022,7 +4078,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
}
CTA = new llvm::GlobalVariable(
CGM.getModule(), CTAType, /*Constant=*/true, getLinkageForRTTI(T),
- llvm::ConstantStruct::get(CTAType, Fields), StringRef(MangledName));
+ llvm::ConstantStruct::get(CTAType, Fields), MangledName);
CTA->setUnnamedAddr(true);
CTA->setSection(".xdata");
if (CTA->isWeakForLinker())
@@ -4102,7 +4158,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
QualType ThrowType = SubExpr->getType();
  // The exception object lives on the stack and its address is passed to the
// runtime function.
- llvm::AllocaInst *AI = CGF.CreateMemTemp(ThrowType);
+ Address AI = CGF.CreateMemTemp(ThrowType);
CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(),
/*IsInit=*/true);
@@ -4111,6 +4167,9 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
// Call into the runtime to throw the exception.
- llvm::Value *Args[] = {CGF.Builder.CreateBitCast(AI, CGM.Int8PtrTy), TI};
+ llvm::Value *Args[] = {
+ CGF.Builder.CreateBitCast(AI.getPointer(), CGM.Int8PtrTy),
+ TI
+ };
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
}
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index def56a963126..0be5c5592b22 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -30,7 +30,6 @@ using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
- std::unique_ptr<const llvm::DataLayout> TD;
ASTContext *Ctx;
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
@@ -99,14 +98,10 @@ namespace {
Ctx = &Context;
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
- M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
- TD.reset(
- new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription()));
- Builder.reset(new CodeGen::CodeGenModule(Context,
- HeaderSearchOpts,
- PreprocessorOpts,
- CodeGenOpts, *M, *TD,
- Diags, CoverageInfo));
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
+ Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
+ PreprocessorOpts, CodeGenOpts,
+ *M, Diags, CoverageInfo));
for (size_t i = 0, e = CodeGenOpts.DependentLibraries.size(); i < e; ++i)
HandleDependentLibrary(CodeGenOpts.DependentLibraries[i]);
@@ -180,7 +175,7 @@ namespace {
// For MSVC compatibility, treat declarations of static data members with
// inline initializers as definitions.
- if (Ctx->getLangOpts().MSVCCompat) {
+ if (Ctx->getTargetInfo().getCXXABI().isMicrosoft()) {
for (Decl *Member : D->decls()) {
if (VarDecl *VD = dyn_cast<VarDecl>(Member)) {
if (Ctx->isMSStaticDataMemberInlineDefinition(VD) &&
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 9c9b1234a66f..b397eb352a60 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -18,6 +18,9 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitstreamReader.h"
@@ -30,6 +33,7 @@
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/TargetRegistry.h"
#include <memory>
+
using namespace clang;
#define DEBUG_TYPE "pchcontainer"
@@ -39,6 +43,7 @@ class PCHContainerGenerator : public ASTConsumer {
DiagnosticsEngine &Diags;
const std::string MainFileName;
ASTContext *Ctx;
+ ModuleMap &MMap;
const HeaderSearchOptions &HeaderSearchOpts;
const PreprocessorOptions &PreprocessorOpts;
CodeGenOptions CodeGenOpts;
@@ -50,34 +55,139 @@ class PCHContainerGenerator : public ASTConsumer {
raw_pwrite_stream *OS;
std::shared_ptr<PCHBuffer> Buffer;
+ /// Visit every type and emit debug info for it.
+ struct DebugTypeVisitor : public RecursiveASTVisitor<DebugTypeVisitor> {
+ clang::CodeGen::CGDebugInfo &DI;
+ ASTContext &Ctx;
+ DebugTypeVisitor(clang::CodeGen::CGDebugInfo &DI, ASTContext &Ctx)
+ : DI(DI), Ctx(Ctx) {}
+
+ /// Determine whether this type can be represented in DWARF.
+ static bool CanRepresent(const Type *Ty) {
+ return !Ty->isDependentType() && !Ty->isUndeducedType();
+ }
+
+ bool VisitImportDecl(ImportDecl *D) {
+ auto *Import = cast<ImportDecl>(D);
+ if (!Import->getImportedOwningModule())
+ DI.EmitImportDecl(*Import);
+ return true;
+ }
+
+ bool VisitTypeDecl(TypeDecl *D) {
+ QualType QualTy = Ctx.getTypeDeclType(D);
+ if (!QualTy.isNull() && CanRepresent(QualTy.getTypePtr()))
+ DI.getOrCreateStandaloneType(QualTy, D->getLocation());
+ return true;
+ }
+
+ bool VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ QualType QualTy(D->getTypeForDecl(), 0);
+ if (!QualTy.isNull() && CanRepresent(QualTy.getTypePtr()))
+ DI.getOrCreateStandaloneType(QualTy, D->getLocation());
+ return true;
+ }
+
+ bool VisitFunctionDecl(FunctionDecl *D) {
+ if (isa<CXXMethodDecl>(D))
+ // This is not yet supported. Constructing the `this' argument
+ // mandates a CodeGenFunction.
+ return true;
+
+ SmallVector<QualType, 16> ArgTypes;
+ for (auto i : D->params())
+ ArgTypes.push_back(i->getType());
+ QualType RetTy = D->getReturnType();
+ QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ if (CanRepresent(FnTy.getTypePtr()))
+ DI.EmitFunctionDecl(D, D->getLocation(), FnTy);
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ if (!D->getClassInterface())
+ return true;
+
+ bool selfIsPseudoStrong, selfIsConsumed;
+ SmallVector<QualType, 16> ArgTypes;
+ ArgTypes.push_back(D->getSelfType(Ctx, D->getClassInterface(),
+ selfIsPseudoStrong, selfIsConsumed));
+ ArgTypes.push_back(Ctx.getObjCSelType());
+ for (auto i : D->params())
+ ArgTypes.push_back(i->getType());
+ QualType RetTy = D->getReturnType();
+ QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ if (CanRepresent(FnTy.getTypePtr()))
+ DI.EmitFunctionDecl(D, D->getLocation(), FnTy);
+ return true;
+ }
+ };
+
public:
- PCHContainerGenerator(DiagnosticsEngine &diags,
- const HeaderSearchOptions &HSO,
- const PreprocessorOptions &PPO, const TargetOptions &TO,
- const LangOptions &LO, const std::string &MainFileName,
+ PCHContainerGenerator(CompilerInstance &CI, const std::string &MainFileName,
const std::string &OutputFileName,
raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer)
- : Diags(diags), HeaderSearchOpts(HSO), PreprocessorOpts(PPO),
- TargetOpts(TO), LangOpts(LO), OS(OS), Buffer(Buffer) {
+ : Diags(CI.getDiagnostics()), Ctx(nullptr),
+ MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
+ HeaderSearchOpts(CI.getHeaderSearchOpts()),
+ PreprocessorOpts(CI.getPreprocessorOpts()),
+ TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()), OS(OS),
+ Buffer(Buffer) {
// The debug info output isn't affected by CodeModel and
// ThreadModel, but the backend expects them to be nonempty.
CodeGenOpts.CodeModel = "default";
CodeGenOpts.ThreadModel = "single";
+ CodeGenOpts.DebugTypeExtRefs = true;
CodeGenOpts.setDebugInfo(CodeGenOptions::FullDebugInfo);
- CodeGenOpts.SplitDwarfFile = OutputFileName;
}
- virtual ~PCHContainerGenerator() {}
+ ~PCHContainerGenerator() override = default;
void Initialize(ASTContext &Context) override {
+ assert(!Ctx && "initialized multiple times");
+
Ctx = &Context;
VMContext.reset(new llvm::LLVMContext());
M.reset(new llvm::Module(MainFileName, *VMContext));
- M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
- Builder.reset(new CodeGen::CodeGenModule(*Ctx, HeaderSearchOpts,
- PreprocessorOpts, CodeGenOpts, *M,
- M->getDataLayout(), Diags));
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
+ Builder.reset(new CodeGen::CodeGenModule(
+ *Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
+ Builder->getModuleDebugInfo()->setModuleMap(MMap);
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ if (Diags.hasErrorOccurred())
+ return true;
+
+ // Collect debug info for all decls in this group.
+ for (auto *I : D)
+ if (!I->isFromASTFile()) {
+ DebugTypeVisitor DTV(*Builder->getModuleDebugInfo(), *Ctx);
+ DTV.TraverseDecl(I);
+ }
+ return true;
+ }
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
+ HandleTopLevelDecl(D);
+ }
+
+ void HandleTagDeclDefinition(TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->UpdateCompletedType(D);
+ }
+
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ Builder->getModuleDebugInfo()->completeRequiredType(RD);
}
/// Emit a container holding the serialized AST.
@@ -92,7 +202,8 @@ public:
return;
M->setTargetTriple(Ctx.getTargetInfo().getTriple().getTriple());
- M->setDataLayout(Ctx.getTargetInfo().getTargetDescription());
+ M->setDataLayout(Ctx.getTargetInfo().getDataLayoutString());
+ Builder->getModuleDebugInfo()->setDwoId(Buffer->Signature);
// Finalize the Builder.
if (Builder)
@@ -133,15 +244,14 @@ public:
llvm::SmallString<0> Buffer;
llvm::raw_svector_ostream OS(Buffer);
clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
- Ctx.getTargetInfo().getTargetDescription(),
+ Ctx.getTargetInfo().getDataLayoutString(),
M.get(), BackendAction::Backend_EmitLL, &OS);
- OS.flush();
llvm::dbgs() << Buffer;
});
// Use the LLVM backend to emit the pch container.
clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
- Ctx.getTargetInfo().getTargetDescription(),
+ Ctx.getTargetInfo().getDataLayoutString(),
M.get(), BackendAction::Backend_EmitObj, OS);
// Make sure the pch container hits disk.
@@ -153,17 +263,15 @@ public:
}
};
-} // namespace
+} // anonymous namespace
std::unique_ptr<ASTConsumer>
ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
- DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
- const PreprocessorOptions &PPO, const TargetOptions &TO,
- const LangOptions &LO, const std::string &MainFileName,
+ CompilerInstance &CI, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const {
- return llvm::make_unique<PCHContainerGenerator>(
- Diags, HSO, PPO, TO, LO, MainFileName, OutputFileName, OS, Buffer);
+ return llvm::make_unique<PCHContainerGenerator>(CI, MainFileName,
+ OutputFileName, OS, Buffer);
}
void ObjectFilePCHContainerReader::ExtractPCH(
@@ -189,5 +297,4 @@ void ObjectFilePCHContainerReader::ExtractPCH(
// As a fallback, treat the buffer as a raw AST.
StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
(const unsigned char *)Buffer.getBufferEnd());
- return;
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 25bd7335fadb..4566fdbebf88 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -39,7 +39,7 @@ static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
llvm::Value *Cell =
Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
- Builder.CreateStore(Value, Cell);
+ Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
}
}
@@ -48,6 +48,24 @@ static bool isAggregateTypeForABI(QualType T) {
T->isMemberFunctionPointerType();
}
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
+ ByRef, Realign, Padding);
+}
+
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
+ return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
+ /*ByRef*/ false, Realign);
+}
+
+Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
@@ -133,7 +151,7 @@ void ABIArgInfo::dump() const {
OS << "InAlloca Offset=" << getInAllocaFieldIndex();
break;
case Indirect:
- OS << "Indirect Align=" << getIndirectAlign()
+ OS << "Indirect Align=" << getIndirectAlign().getQuantity()
<< " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
@@ -144,6 +162,135 @@ void ABIArgInfo::dump() const {
OS << ")\n";
}
+// Dynamically round a pointer up to a multiple of the given alignment.
+static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ CharUnits Align) {
+ llvm::Value *PtrAsInt = Ptr;
+ // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
+ PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
+ PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
+ PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
+ PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
+ Ptr->getType(),
+ Ptr->getName() + ".aligned");
+ return PtrAsInt;
+}
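
The helper above emits IR for the classic power-of-two round-up trick. As a standalone sketch (not part of the patch; plain integer arithmetic and a power-of-two alignment are assumed, as they are for CharUnits alignments here), the same computation looks like this:

  // Illustration only: the round-up-to-alignment arithmetic the helper
  // emits as IR, written as plain C++.
  #include <cassert>
  #include <cstdint>

  static std::uintptr_t roundUpToAlignment(std::uintptr_t Ptr,
                                           std::uintptr_t Align) {
    // (Ptr + Align - 1) moves into the next aligned block; masking with
    // -Align (all ones except the low log2(Align) bits) snaps it down.
    return (Ptr + Align - 1) & -Align;
  }

  int main() {
    assert(roundUpToAlignment(0x1001, 16) == 0x1010);
    assert(roundUpToAlignment(0x1010, 16) == 0x1010); // already aligned
    return 0;
  }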
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// This version implements the core direct-value passing rules.
+///
+/// \param SlotSize - The size and alignment of a stack slot.
+///   Each argument will be allocated a whole number of slots, and
+///   all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding. If this
+/// is false, the returned address might be less-aligned than
+/// DirectAlign.
+static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
+ Address VAListAddr,
+ llvm::Type *DirectTy,
+ CharUnits DirectSize,
+ CharUnits DirectAlign,
+ CharUnits SlotSize,
+ bool AllowHigherAlign) {
+  // Cast the element type to i8* if necessary. Some platforms define
+  // va_list as a struct containing an i8* rather than as a plain i8*.
+ if (VAListAddr.getElementType() != CGF.Int8PtrTy)
+ VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
+
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
+
+  // If the CC aligns values more strictly than the slot size, round the
+  // pointer up to the higher alignment.
+ Address Addr = Address::invalid();
+ if (AllowHigherAlign && DirectAlign > SlotSize) {
+ Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
+ DirectAlign);
+ } else {
+ Addr = Address(Ptr, SlotSize);
+ }
+
+ // Advance the pointer past the argument, then store that back.
+ CharUnits FullDirectSize = DirectSize.RoundUpToAlignment(SlotSize);
+ llvm::Value *NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
+ "argp.next");
+ CGF.Builder.CreateStore(NextPtr, VAListAddr);
+
+ // If the argument is smaller than a slot, and this is a big-endian
+ // target, the argument will be right-adjusted in its slot.
+ if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian()) {
+ Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
+ }
+
+ Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
+ return Addr;
+}
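
To make the slot arithmetic above concrete, here is a small standalone sketch (not part of the patch; the 8-byte slot and 2-byte argument are illustrative assumptions) of how FullDirectSize and the big-endian adjustment come out:

  // Illustration only: with 8-byte slots, a 2-byte argument still advances
  // the va_list by a full slot, and on a big-endian target the value sits
  // in the last 2 bytes of that slot.
  #include <cassert>
  #include <cstdint>

  int main() {
    const std::uint64_t SlotSize = 8, DirectSize = 2;
    // FullDirectSize = DirectSize rounded up to a multiple of SlotSize.
    std::uint64_t FullDirectSize =
        (DirectSize + SlotSize - 1) / SlotSize * SlotSize;
    assert(FullDirectSize == 8); // argp.next advances by a whole slot
    // Big-endian right-adjustment: skip the slot's leading padding bytes.
    std::uint64_t BigEndianOffset = SlotSize - DirectSize;
    assert(BigEndianOffset == 6);
    return 0;
  }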
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// \param IsIndirect - Values of this type are passed indirectly.
+/// \param ValueInfo - The size and alignment of this type, generally
+/// computed with getContext().getTypeInfoInChars(ValueTy).
+/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
+/// Each argument will be allocated a whole number of slots, and all
+/// the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding.
+static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ std::pair<CharUnits, CharUnits> ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign) {
+ // The size and alignment of the value that was passed directly.
+ CharUnits DirectSize, DirectAlign;
+ if (IsIndirect) {
+ DirectSize = CGF.getPointerSize();
+ DirectAlign = CGF.getPointerAlign();
+ } else {
+ DirectSize = ValueInfo.first;
+ DirectAlign = ValueInfo.second;
+ }
+
+ // Cast the address we've calculated to the right type.
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
+ if (IsIndirect)
+ DirectTy = DirectTy->getPointerTo(0);
+
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
+ DirectSize, DirectAlign,
+ SlotSizeAndAlign,
+ AllowHigherAlign);
+
+ if (IsIndirect) {
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
+ }
+
+ return Addr;
+}
+
+static Address emitMergePHI(CodeGenFunction &CGF,
+ Address Addr1, llvm::BasicBlock *Block1,
+ Address Addr2, llvm::BasicBlock *Block2,
+ const llvm::Twine &Name = "") {
+ assert(Addr1.getType() == Addr2.getType());
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
+ PHI->addIncoming(Addr1.getPointer(), Block1);
+ PHI->addIncoming(Addr2.getPointer(), Block2);
+ CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
+ return Address(PHI, Align);
+}
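
A short sketch (not part of the patch) of why the merged address takes the minimum of the two incoming alignments:

  // Illustration only: control flow may arrive at the phi from either
  // block, so the merged pointer can only guarantee what both sides do.
  #include <algorithm>
  #include <cassert>

  int main() {
    unsigned AlignFromRegs = 16;  // e.g. the register-save-area path
    unsigned AlignFromMemory = 8; // e.g. the overflow-area path
    unsigned Merged = std::min(AlignFromRegs, AlignFromMemory);
    assert(Merged == 8); // claiming 16 would be unsound on the memory path
    return 0;
  }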
+
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
// If someone can figure out a general rule for this, that would be great.
@@ -394,8 +541,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -404,9 +551,9 @@ public:
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
-llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return nullptr;
+Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
@@ -416,9 +563,9 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty);
}
// Treat an enum type as its underlying type.
@@ -434,7 +581,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -445,6 +592,80 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
}
//===----------------------------------------------------------------------===//
+// WebAssembly ABI Implementation
+//
+// This is a very simple ABI that relies a lot on DefaultABIInfo.
+//===----------------------------------------------------------------------===//
+
+class WebAssemblyABIInfo final : public DefaultABIInfo {
+public:
+ explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
+ : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+  // non-virtual, but computeInfo is virtual, so we override it.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+ }
+};
+
+class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
+public:
+ explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
+};
+
+/// \brief Classify argument of given type \p Ty.
+ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just pass a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using getExpand(),
+ // though watch out for things like bitfields.
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ }
+
+ // Otherwise just do the default thing.
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
+ if (isAggregateTypeForABI(RetTy)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // returned by value.
+ if (!getRecordArgABI(RetTy, getCXXABI())) {
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just return a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using
+ // ABIArgInfo::getDirect().
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ }
+ }
+
+ // Otherwise just do the default thing.
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
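
For a source-level picture of what the WebAssembly classification above does, here is a hypothetical example (Meters and Empty are illustrative names, not from the patch):

  // Illustration only: a single-field wrapper is lowered to its element
  // type (getDirect), and an empty struct is ignored (getIgnore).
  struct Meters { float value; }; // passed/returned as a plain 'float'
  struct Empty {};                // takes no argument slot

  float scale(Meters m, Empty) { return m.value * 2.0f; }

  int main() {
    Meters m{21.0f};
    return scale(m, Empty{}) == 42.0f ? 0 : 1;
  }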
+
+//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
@@ -459,8 +680,8 @@ class PNaClABIInfo : public ABIInfo {
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const override;
};
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -477,17 +698,17 @@ void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type);
}
-llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return nullptr;
+Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty);
} else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
// Treat an enum type as its underlying type.
Ty = EnumTy->getDecl()->getIntegerType();
@@ -506,7 +727,7 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
// In the PNaCl ABI we always return records/structures on the stack.
if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -585,8 +806,10 @@ class X86_32ABIInfo : public ABIInfo {
static const unsigned MinABIStackAlignInBytes = 4;
bool IsDarwinVectorABI;
- bool IsSmallStructInRegABI;
+ bool IsRetSmallStructInRegABI;
bool IsWin32StructABI;
+ bool IsSoftFloatABI;
+ bool IsMCUABI;
unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
@@ -610,7 +833,7 @@ class X86_32ABIInfo : public ABIInfo {
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
- ABIArgInfo getIndirectReturnResult(CCState &State) const;
+ ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
@@ -618,33 +841,47 @@ class X86_32ABIInfo : public ABIInfo {
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
- bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;
+ /// \brief Updates the number of available free registers, returns
+ /// true if any registers were allocated.
+ bool updateFreeRegs(QualType Ty, CCState &State) const;
+
+ bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
+ bool &NeedsPadding) const;
+ bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
/// \brief Rewrite the function info so that all memory arguments use
/// inalloca.
void rewriteWithInAlloca(CGFunctionInfo &FI) const;
void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- unsigned &StackOffset, ABIArgInfo &Info,
+ CharUnits &StackOffset, ABIArgInfo &Info,
QualType Type) const;
public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
-
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
- unsigned r)
- : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI),
+ IsWin32StructABI(Win32StructABI),
+ IsSoftFloatABI(SoftFloatABI),
+ IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
+ DefaultNumRegisterParameters(NumRegisterParameters) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool w, unsigned r)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : TargetCodeGenInfo(new X86_32ABIInfo(
+ CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters, SoftFloatABI)) {}
static bool isStructReturnInRegABI(
const llvm::Triple &Triple, const CodeGenOptions &Opts);
@@ -767,14 +1004,15 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
}
/// shouldReturnTypeInRegister - Determine if the given type should be
-/// passed in a register (for the Darwin ABI).
+/// returned in a register (for the Darwin and MCU ABIs).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
ASTContext &Context) const {
uint64_t Size = Context.getTypeSize(Ty);
- // Type must be register sized.
- if (!isRegisterSize(Size))
- return false;
+  // For i386, the type must be register sized.
+  // For the MCU ABI, it only needs to be <= 8 bytes in size.
+ if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
+ return false;
if (Ty->isVectorType()) {
// 64- and 128- bit vectors inside structures are not returned in
@@ -816,14 +1054,15 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
return true;
}
-ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
+                                                  CCState &State) const {
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
if (State.FreeRegs) {
--State.FreeRegs;
- return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(RetTy);
}
- return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
@@ -858,7 +1097,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
return ABIArgInfo::getDirect();
@@ -868,12 +1107,12 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
// If specified, structs and unions are always indirect.
- if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
- return getIndirectReturnResult(State);
+ if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return getIndirectReturnResult(RetTy, State);
// Small structures which are register sized are generally returned
// in a register.
@@ -895,7 +1134,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
// Treat an enum type as its underlying type.
@@ -961,21 +1200,23 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
if (!ByVal) {
if (State.FreeRegs) {
--State.FreeRegs; // Non-byval indirects just use one pointer.
- return ABIArgInfo::getIndirectInReg(0, false);
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(Ty);
}
- return ABIArgInfo::getIndirect(0, false);
+ return getNaturalAlignIndirect(Ty, false);
}
// Compute the byval alignment.
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
if (StackAlign == 0)
- return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
// If the stack alignment is less than the type alignment, realign the
// argument.
bool Realign = TypeAlign > StackAlign;
- return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
+ /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
@@ -991,12 +1232,12 @@ X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
return Integer;
}
-bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
- bool &NeedsPadding) const {
- NeedsPadding = false;
- Class C = classify(Ty);
- if (C == Float)
- return false;
+bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
+ if (!IsSoftFloatABI) {
+ Class C = classify(Ty);
+ if (C == Float)
+ return false;
+ }
unsigned Size = getContext().getTypeSize(Ty);
unsigned SizeInRegs = (Size + 31) / 32;
@@ -1004,31 +1245,61 @@ bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
if (SizeInRegs == 0)
return false;
- if (SizeInRegs > State.FreeRegs) {
- State.FreeRegs = 0;
- return false;
+ if (!IsMCUABI) {
+ if (SizeInRegs > State.FreeRegs) {
+ State.FreeRegs = 0;
+ return false;
+ }
+ } else {
+ // The MCU psABI allows passing parameters in-reg even if there are
+ // earlier parameters that are passed on the stack. Also,
+ // it does not allow passing >8-byte structs in-register,
+ // even if there are 3 free registers available.
+ if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
+ return false;
}
State.FreeRegs -= SizeInRegs;
+ return true;
+}
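
A standalone model (not part of the patch; mcuUpdateFreeRegs is a hypothetical name) of the MCU accounting rules above:

  // Illustration only: sizes are in bits, registers are 32 bits wide, and
  // the MCU pool starts at 3 free registers.
  #include <cassert>

  static bool mcuUpdateFreeRegs(unsigned SizeInBits, unsigned &FreeRegs) {
    unsigned SizeInRegs = (SizeInBits + 31) / 32;
    // MCU: never pass >8-byte aggregates in registers, and don't zero the
    // pool on failure -- later, smaller arguments may still use it.
    if (SizeInRegs > FreeRegs || SizeInRegs > 2)
      return false;
    FreeRegs -= SizeInRegs;
    return true;
  }

  int main() {
    unsigned FreeRegs = 3;
    assert(mcuUpdateFreeRegs(96, FreeRegs) == false); // 12 bytes: 3 regs > 2
    assert(FreeRegs == 3);                            // pool left intact
    assert(mcuUpdateFreeRegs(64, FreeRegs) == true);  // 8 bytes: takes 2 regs
    assert(FreeRegs == 1);
    return 0;
  }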
+
+bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
+ bool &InReg,
+ bool &NeedsPadding) const {
+ NeedsPadding = false;
+ InReg = !IsMCUABI;
+
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (IsMCUABI)
+ return true;
if (State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall) {
- if (Size > 32)
- return false;
-
- if (Ty->isIntegralOrEnumerationType())
- return true;
+ if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
+ NeedsPadding = true;
- if (Ty->isPointerType())
- return true;
+ return false;
+ }
- if (Ty->isReferenceType())
- return true;
+ return true;
+}
- if (State.FreeRegs)
- NeedsPadding = true;
+bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
+ if (!updateFreeRegs(Ty, State))
+ return false;
+ if (IsMCUABI)
return false;
+
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall) {
+ if (getContext().getTypeSize(Ty) > 32)
+ return false;
+
+ return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
+ Ty->isReferenceType());
}
return true;
@@ -1084,12 +1355,15 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
llvm::LLVMContext &LLVMContext = getVMContext();
llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- bool NeedsPadding;
- if (shouldUseInReg(Ty, State, NeedsPadding)) {
+ bool NeedsPadding, InReg;
+ if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- return ABIArgInfo::getDirectInReg(Result);
+ if (InReg)
+ return ABIArgInfo::getDirectInReg(Result);
+ else
+ return ABIArgInfo::getDirect(Result);
}
llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
@@ -1097,8 +1371,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
+ // Don't do this for the MCU if there are still free integer registers
+ // (see X86_64 ABI for full explanation).
if (getContext().getTypeSize(Ty) <= 4*32 &&
- canExpandIndirectArgument(Ty, getContext()))
+ canExpandIndirectArgument(Ty, getContext()) &&
+ (!IsMCUABI || State.FreeRegs == 0))
return ABIArgInfo::getExpandWithPadding(
State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall,
@@ -1128,14 +1405,14 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- bool NeedsPadding;
- bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
+ bool InReg = shouldPrimitiveUseInReg(Ty, State);
if (Ty->isPromotableIntegerType()) {
if (InReg)
return ABIArgInfo::getExtendInReg();
return ABIArgInfo::getExtend();
}
+
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
@@ -1143,7 +1420,9 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI.getCallingConvention());
- if (State.CC == llvm::CallingConv::X86_FastCall)
+ if (IsMCUABI)
+ State.FreeRegs = 3;
+ else if (State.CC == llvm::CallingConv::X86_FastCall)
State.FreeRegs = 2;
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
State.FreeRegs = 2;
@@ -1160,7 +1439,8 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
// return value was sret and put it in a register ourselves if appropriate.
if (State.FreeRegs) {
--State.FreeRegs; // The sret parameter consumes a register.
- FI.getReturnInfo().setInReg(true);
+ if (!IsMCUABI)
+ FI.getReturnInfo().setInReg(true);
}
}
@@ -1182,22 +1462,23 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- unsigned &StackOffset,
- ABIArgInfo &Info, QualType Type) const {
- assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const {
+ // Arguments are always 4-byte-aligned.
+ CharUnits FieldAlign = CharUnits::fromQuantity(4);
+
+ assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
Info = ABIArgInfo::getInAlloca(FrameFields.size());
FrameFields.push_back(CGT.ConvertTypeForMem(Type));
- StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
-
- // Insert padding bytes to respect alignment. For x86_32, each argument is 4
- // byte aligned.
- if (StackOffset % 4U) {
- unsigned OldOffset = StackOffset;
- StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
- unsigned NumBytes = StackOffset - OldOffset;
- assert(NumBytes);
+ StackOffset += getContext().getTypeSizeInChars(Type);
+
+ // Insert padding bytes to respect alignment.
+ CharUnits FieldEnd = StackOffset;
+ StackOffset = FieldEnd.RoundUpToAlignment(FieldAlign);
+ if (StackOffset != FieldEnd) {
+ CharUnits NumBytes = StackOffset - FieldEnd;
llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
FrameFields.push_back(Ty);
}
}
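
A quick worked example (not part of the patch) of the padding arithmetic above, using a hypothetical 6-byte field:

  // Illustration only: a 6-byte field ends at offset 6; rounding up to the
  // 4-byte field alignment moves the cursor to 8, so a 2-byte [i8 x 2]
  // padding field is inserted.
  #include <cassert>

  int main() {
    unsigned StackOffset = 0, FieldAlign = 4;
    StackOffset += 6; // field size in bytes
    unsigned FieldEnd = StackOffset;
    StackOffset = (StackOffset + FieldAlign - 1) / FieldAlign * FieldAlign;
    assert(StackOffset == 8 && StackOffset - FieldEnd == 2);
    return 0;
  }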
@@ -1228,7 +1509,10 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
// Build a packed struct type for all of the arguments in memory.
SmallVector<llvm::Type *, 6> FrameFields;
- unsigned StackOffset = 0;
+ // The stack alignment is always 4.
+ CharUnits StackAlign = CharUnits::fromQuantity(4);
+
+ CharUnits StackOffset;
CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
// Put 'this' into the struct before 'sret', if necessary.
@@ -1260,47 +1544,25 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
}
FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
- /*isPacked=*/true));
+ /*isPacked=*/true),
+ StackAlign);
}
-llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const {
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-
- // Compute if the address needs to be aligned
- unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
- Align = getTypeStackAlignInBytes(Ty, Align);
- Align = std::max(Align, 4U);
- if (Align > 4) {
- // addr = (addr + align - 1) & -align;
- llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
- Addr = CGF.Builder.CreateGEP(Addr, Offset);
- llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
- CGF.Int32Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
- Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
- Addr->getType(),
- "ap.cur.aligned");
- }
-
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ // x86-32 changes the alignment of certain arguments on the stack.
+ //
+ // Just messing with TypeInfo like this works because we never pass
+ // anything indirectly.
+ TypeInfo.second = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
}
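
As a usage sketch (ordinary C++, not clang internals), here is a variadic call whose va_arg is lowered through the path above when compiled for x86-32, with 4-byte slots and the stack-adjusted alignment:

  // Illustration only: each va_arg(ap, double) below goes through
  // X86_32ABIInfo::EmitVAArg on an x86-32 target.
  #include <cstdarg>
  #include <cstdio>

  double sum(int count, ...) {
    va_list ap;
    va_start(ap, count);
    double total = 0;
    for (int i = 0; i < count; ++i)
      total += va_arg(ap, double);
    va_end(ap);
    return total;
  }

  int main() {
    std::printf("%f\n", sum(2, 1.5, 2.5)); // prints 4.000000
    return 0;
  }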
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
@@ -1316,7 +1578,7 @@ bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
return true;
}
- if (Triple.isOSDarwin())
+ if (Triple.isOSDarwin() || Triple.isOSIAMCU())
return true;
switch (Triple.getOS()) {
@@ -1334,7 +1596,7 @@ bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
// Get the LLVM function.
llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -1372,8 +1634,9 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
// reason.
- Builder.CreateStore(
- Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));
+ Builder.CreateAlignedStore(
+ Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
+ CharUnits::One());
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
@@ -1542,8 +1805,10 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool has64BitPointers() const {
return Has64BitPointers;
@@ -1559,8 +1824,8 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
// FIXME: Assumes vectorcall is in use.
@@ -1659,7 +1924,11 @@ public:
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "\01";
- Opt += Lib;
+ // If the argument contains a space, enclose it in quotes.
+ if (Lib.find(" ") != StringRef::npos)
+ Opt += "\"" + Lib.str() + "\"";
+ else
+ Opt += Lib;
}
};
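
A minimal source-level example (hypothetical library name; clang may need -fms-extensions on non-Windows targets) of what feeds this quoting path:

  // Illustration only: a dependent-library name containing a space must
  // survive as one linker token, hence the quotes added above.
  #if defined(_MSC_VER) || defined(__clang__)
  #pragma comment(lib, "my support lib") // emitted as \01"my support lib"
  #endif

  int main() { return 0; }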
@@ -1679,8 +1948,10 @@ static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool w, unsigned RegParms)
- : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
+ bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters)
+ : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
+ Win32StructABI, NumRegisterParameters, false) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
@@ -1701,7 +1972,7 @@ public:
static void addStackProbeSizeTargetAttribute(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) {
- if (isa<FunctionDecl>(D)) {
+ if (D && isa<FunctionDecl>(D)) {
if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -1918,16 +2189,18 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
if (const VectorType *VT = Ty->getAs<VectorType>()) {
uint64_t Size = getContext().getTypeSize(VT);
- if (Size == 32) {
- // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
- // float> as integer.
+ if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
+ // gcc passes the following as integer:
+ // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
+ // 2 bytes - <2 x char>, <1 x short>
+ // 1 byte - <1 x char>
Current = Integer;
// If this type crosses an eightbyte boundary, it should be
// split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
- if (EB_Real != EB_Imag)
+ uint64_t EB_Lo = (OffsetBase) / 64;
+ uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
+ if (EB_Lo != EB_Hi)
Hi = Lo;
} else if (Size == 64) {
// gcc passes <1 x double> in memory. :(
@@ -2178,7 +2451,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty);
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
@@ -2212,7 +2485,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
@@ -2249,7 +2522,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
Size));
}
- return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
@@ -2833,11 +3106,10 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
-static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) {
- llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, 2, "overflow_arg_area_p");
+static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) {
+ Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
@@ -2845,19 +3117,10 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// byte boundary if alignment needed by type exceeds 8 byte boundary.
// It isn't stated explicitly in the standard, but in practice we use
// alignment greater than 16 where necessary.
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
- if (Align > 8) {
- // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
- llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
- llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
- CGF.Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
- overflow_arg_area =
- CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
- overflow_arg_area->getType(),
- "overflow_arg_area.align");
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > CharUnits::fromQuantity(8)) {
+ overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
+ Align);
}
// AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
@@ -2879,11 +3142,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Res;
+ return Address(Res, Align);
}
-llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -2893,14 +3156,14 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// };
unsigned neededInt, neededSSE;
- Ty = CGF.getContext().getCanonicalType(Ty);
+ Ty = getContext().getCanonicalType(Ty);
ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
/*isNamedArg*/false);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
if (!neededInt && !neededSSE)
- return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+ return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
// AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
// general purpose registers needed to pass type and num_fp to hold
@@ -2914,11 +3177,12 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// register save space).
llvm::Value *InRegs = nullptr;
- llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
- llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
+ Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
+ llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
if (neededInt) {
gp_offset_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
+ "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
@@ -2926,7 +3190,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
if (neededSSE) {
fp_offset_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
+ "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
@@ -2954,14 +3219,17 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegAddr = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
+ llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
+ "reg_save_area");
+
+ Address RegAddr = Address::invalid();
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
llvm::Type *TyLo = ST->getElementType(0);
llvm::Type *TyHi = ST->getElementType(1);
@@ -2969,57 +3237,77 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
"Unexpected ABI info for mixed regs");
llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
- llvm::Value *V =
- CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
+ // Copy the first element.
+ llvm::Value *V =
+ CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+
+ // Copy the second element.
+ V = CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CharUnits Offset = CharUnits::fromQuantity(
+ getDataLayout().getStructLayout(ST)->getElementOffset(1));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
+ CharUnits::fromQuantity(8));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
std::pair<CharUnits, CharUnits> SizeAlign =
- CGF.getContext().getTypeInfoInChars(Ty);
+ getContext().getTypeInfoInChars(Ty);
uint64_t TySize = SizeAlign.first.getQuantity();
- unsigned TyAlign = SizeAlign.second.getQuantity();
- if (TyAlign > 8) {
- llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
- CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
+ CharUnits TyAlign = SizeAlign.second;
+
+ // Copy into a temporary if the type is more aligned than the
+ // register save area.
+ if (TyAlign.getQuantity() > 8) {
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
RegAddr = Tmp;
}
+
} else if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
// SSE registers are spaced 16 bytes apart in the register save
// area, we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ // The ABI isn't explicit about this, but it seems reasonable
+ // to assume that the slots are 16-byte aligned, since the stack is
+ // naturally 16-byte aligned and the prologue is expected to store
+ // all the SSE registers to the RSA.
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ Address RegAddrHi =
+ CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
+ CharUnits::fromQuantity(16));
llvm::Type *DoubleTy = CGF.DoubleTy;
- llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(DoubleTy);
llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
- llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
+ llvm::Value *V;
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
}
// AMD64-ABI 3.5.7p5: Step 5. Set:
@@ -3040,18 +3328,24 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Emit code to load the value if it was passed in memory.
CGF.EmitBlock(InMemBlock);
- llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+ Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
- "vaarg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "vaarg.addr");
return ResAddr;
}
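
The constants 48 and 176 used in the in-register tests above fall out of the SysV x86-64 register save area layout; a standalone sketch (not part of the patch):

  // Illustration only: deriving the gp_offset/fp_offset limits.
  #include <cassert>

  int main() {
    const unsigned NumGPRs = 6, GPRSize = 8;  // rdi, rsi, rdx, rcx, r8, r9
    const unsigned NumXMMs = 8, XMMSlot = 16; // xmm0..xmm7, 16-byte slots
    assert(NumGPRs * GPRSize == 48);
    assert(NumGPRs * GPRSize + NumXMMs * XMMSlot == 176);
    // An argument needing N GPRs fits iff gp_offset <= 48 - 8*N; one
    // needing M SSE registers fits iff fp_offset <= 176 - 16*M.
    return 0;
  }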
+Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
bool IsReturnType) const {
@@ -3063,17 +3357,18 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;
- unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
+ CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
const RecordType *RT = Ty->getAs<RecordType>();
if (RT) {
if (!IsReturnType) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
}
// vectorcall adds the concept of a homogenous vector aggregate, similar to
@@ -3103,7 +3398,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
if (Width > 64 || !llvm::isPowerOf2_64(Width))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Otherwise, coerce it to a small integer.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
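
A hypothetical source-level example (struct names are illustrative) of how the MS x64 width rule above sorts aggregates:

  // Illustration only: sizes assume the usual layout with no padding
  // surprises.
  struct FourBytes   { char a, b, c, d; }; // 4 bytes, power of 2: coerced to i32
  struct ThreeBytes  { char a, b, c; };    // 3 bytes: passed by reference
  struct TwelveBytes { int a, b, c; };     // 12 bytes (> 8): passed by reference

  int take(FourBytes f, ThreeBytes t, TwelveBytes w) {
    return f.a + t.a + w.a;
  }

  int main() {
    return take(FourBytes{1, 0, 0, 0}, ThreeBytes{2, 0, 0},
                TwelveBytes{3, 0, 0}) == 6 ? 0 : 1;
  }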
@@ -3141,43 +3436,31 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classify(I.type, FreeSSERegs, false);
}
-llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
+Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
}
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
+  bool IsSoftFloatABI;
public:
- PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
+ : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PPC32TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
+ : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -3190,64 +3473,51 @@ public:
}
-llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
+Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty) const {
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
// TODO: Implement this. For now ignore.
(void)CTy;
- return nullptr;
+ return Address::invalid();
}
+ // struct __va_list_tag {
+ // unsigned char gpr;
+ // unsigned char fpr;
+ // unsigned short reserved;
+ // void *overflow_arg_area;
+ // void *reg_save_area;
+ // };
+
bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
bool isInt =
Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
- llvm::Type *CharPtr = CGF.Int8PtrTy;
- llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
+ bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
+
+ // All aggregates are passed indirectly? That doesn't seem consistent
+ // with the argument-lowering code.
+ bool isIndirect = Ty->isAggregateType();
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
- llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
- llvm::Value *FPRPtrAsInt =
- Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
- llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
- llvm::Value *OverflowAreaPtrAsInt =
- Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
- llvm::Value *OverflowAreaPtr =
- Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
- llvm::Value *RegsaveAreaPtrAsInt =
- Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
- llvm::Value *RegsaveAreaPtr =
- Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
- llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
- // Align GPR when TY is i64.
- if (isI64) {
- llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
- llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
- llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
- GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
- }
- llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
- llvm::Value *OverflowArea =
- Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
- llvm::Value *OverflowAreaAsInt =
- Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
- llvm::Value *RegsaveArea =
- Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
- llvm::Value *RegsaveAreaAsInt =
- Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
- llvm::Value *CC =
- Builder.CreateICmpULT(isInt ? GPR : FPR, Builder.getInt8(8), "cond");
+ // The calling convention either uses 1-2 GPRs or 1 FPR.
+ Address NumRegsAddr = Address::invalid();
+ if (isInt || IsSoftFloatABI) {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
+ } else {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
+ }
- llvm::Value *RegConstant =
- Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8));
+ llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
- llvm::Value *OurReg = Builder.CreateAdd(
- RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
+ // "Align" the register count when TY is i64.
+ if (isI64 || (isF64 && IsSoftFloatABI)) {
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
+ NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+ }
- if (Ty->isFloatingType())
- OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
+ llvm::Value *CC =
+ Builder.CreateICmpULT(NumRegs, Builder.getInt8(8), "cond");
llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
@@ -3255,39 +3525,91 @@ llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
- CGF.EmitBlock(UsingRegs);
+ llvm::Type *DirectTy = CGF.ConvertType(Ty);
+ if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
- // Increase the GPR/FPR indexes.
- if (isInt) {
- GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
- Builder.CreateStore(GPR, GPRPtr);
- } else {
- FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
- Builder.CreateStore(FPR, FPRPtr);
- }
- CGF.EmitBranch(Cont);
+ // Case 1: consume registers.
+ Address RegAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingRegs);
+
+ Address RegSaveAreaPtr =
+ Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
+ CharUnits::fromQuantity(8));
+ assert(RegAddr.getElementType() == CGF.Int8Ty);
+
+ // Floating-point registers start after the general-purpose registers.
+ if (!(isInt || IsSoftFloatABI)) {
+ RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
+ CharUnits::fromQuantity(32));
+ }
+
+  // Get the address of the saved value by scaling the number of
+  // registers we've used by the size of each register.
+  CharUnits RegSize =
+      CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
+ llvm::Value *RegOffset =
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
+ RegAddr.getPointer(), RegOffset),
+ RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
+
+ // Increase the used-register count.
+ NumRegs =
+ Builder.CreateAdd(NumRegs,
+ Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
+ Builder.CreateStore(NumRegs, NumRegsAddr);
+
+ CGF.EmitBranch(Cont);
+ }
+
+ // Case 2: consume space in the overflow area.
+ Address MemAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingOverflow);
+
+ // Everything in the overflow area is rounded up to a size of at least 4.
+ CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
+
+ CharUnits Size;
+ if (!isIndirect) {
+ auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ Size = TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign);
+ } else {
+ Size = CGF.getPointerSize();
+ }
- CGF.EmitBlock(UsingOverflow);
+ Address OverflowAreaAddr =
+ Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
+ Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
+ OverflowAreaAlign);
+ // Round up address of argument to alignment
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > OverflowAreaAlign) {
+ llvm::Value *Ptr = OverflowArea.getPointer();
+ OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
+ Align);
+ }
+
+ MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
- // Increase the overflow area.
- llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
- OverflowAreaAsInt =
- Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
- Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr),
- OverflowAreaPtr);
- CGF.EmitBranch(Cont);
+ // Increase the overflow area.
+ OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
+ Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ CGF.EmitBranch(Cont);
+ }
CGF.EmitBlock(Cont);
- llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
- Result->addIncoming(Result1, UsingRegs);
- Result->addIncoming(Result2, UsingOverflow);
+ // Merge the cases with a phi.
+ Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
+ "vaarg.addr");
- if (Ty->isAggregateType()) {
- llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
- return Builder.CreateLoad(AGGPtr, false, "aggr");
+ // Load the pointer if the argument was passed indirectly.
+ if (isIndirect) {
+ Result = Address(Builder.CreateLoad(Result, "aggr"),
+ getContext().getTypeAlignInChars(Ty));
}
return Result;
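
A standalone sketch (not part of the patch) of the even-pair rounding used for 64-bit values above:

  // Illustration only: a 64-bit value on 32-bit PowerPC occupies an aligned
  // GPR pair, so an odd used-register count is rounded up before indexing
  // into the register save area.
  #include <cassert>

  int main() {
    for (unsigned NumRegs = 0; NumRegs < 8; ++NumRegs) {
      unsigned Paired = (NumRegs + 1) & ~1u; // round up to even
      assert(Paired % 2 == 0 && Paired >= NumRegs && Paired <= NumRegs + 1);
    }
    assert(((3 + 1) & ~1u) == 4); // three registers used -> pair starts at 4
    return 0;
  }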
@@ -3383,7 +3705,7 @@ public:
: DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
bool isPromotableTypeForABI(QualType Ty) const;
- bool isAlignedParamType(QualType Ty, bool &Align32) const;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -3420,8 +3742,8 @@ public:
}
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -3481,12 +3803,9 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
return false;
}
-/// isAlignedParamType - Determine whether a type requires 16-byte
-/// alignment in the parameter area.
-bool
-PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
- Align32 = false;
-
+/// getParamTypeAlignment - Determine the alignment a type requires in the
+/// parameter save area: 16 or 32 bytes for aligned types, 8 otherwise.
+CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
// Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
@@ -3495,11 +3814,11 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
// passed via reference, smaller types are not aligned).
if (IsQPXVectorTy(Ty)) {
if (getContext().getTypeSize(Ty) > 128)
- Align32 = true;
+ return CharUnits::fromQuantity(32);
- return true;
+ return CharUnits::fromQuantity(16);
} else if (Ty->isVectorType()) {
- return getContext().getTypeSize(Ty) == 128;
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
}
// For single-element float/vector structs, we consider the whole type
@@ -3524,22 +3843,22 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
// With special case aggregates, only vector base types need alignment.
if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
if (getContext().getTypeSize(AlignAsType) > 128)
- Align32 = true;
+ return CharUnits::fromQuantity(32);
- return true;
+ return CharUnits::fromQuantity(16);
} else if (AlignAsType) {
- return AlignAsType->isVectorType();
+ return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
}
// Otherwise, we only need alignment for any aggregate type that
// has an alignment requirement of >= 16 bytes.
if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
- Align32 = true;
- return true;
+ return CharUnits::fromQuantity(32);
+ return CharUnits::fromQuantity(16);
}
- return false;
+ return CharUnits::fromQuantity(8);
}
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
@@ -3672,7 +3991,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
@@ -3681,12 +4000,10 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- bool Align32;
- uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
- (Align32 ? 32 : 16) : 8;
- uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+ uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
+ uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
// ELFv2 homogeneous aggregates are passed as array types.
const Type *Base = nullptr;
@@ -3724,7 +4041,8 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
// All other aggregates are passed ByVal.
- return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
}
@@ -3745,7 +4063,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
@@ -3780,7 +4098,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
// All other aggregates are returned indirectly.
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
return (isPromotableTypeForABI(RetTy) ?
@@ -3788,47 +4106,12 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
-llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-
- // Handle types that require 16-byte alignment in the parameter save area.
- bool Align32;
- if (isAlignedParamType(Ty, Align32)) {
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt,
- Builder.getInt64(Align32 ? 31 : 15));
- AddrAsInt = Builder.CreateAnd(AddrAsInt,
- Builder.getInt64(Align32 ? -32 : -16));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
- }
-
- // Update the va_list pointer. The pointer should be bumped by the
- // size of the object. We can trust getTypeSize() except for a complex
- // type whose base type is smaller than a doubleword. For these, the
- // size of the object is 16 bytes; see below for further explanation.
- unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
- QualType BaseTy;
- unsigned CplxBaseSize = 0;
+Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.second = getParamTypeAlignment(Ty);
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- BaseTy = CTy->getElementType();
- CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
- if (CplxBaseSize < 8)
- SizeInBytes = 16;
- }
-
- unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
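+ // The PPC64 parameter save area is organized as 8-byte doubleword slots.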
// If we have a complex type and the base type is smaller than 8 bytes,
// the ABI calls for the real and imaginary parts to be right-adjusted
@@ -3836,44 +4119,40 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
// pointer to a structure with the two parts packed tightly. So generate
// loads of the real and imaginary parts relative to the va_list pointer,
// and store them to a temporary structure.
- if (CplxBaseSize && CplxBaseSize < 8) {
- llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- llvm::Value *ImagAddr = RealAddr;
- if (CGF.CGM.getDataLayout().isBigEndian()) {
- RealAddr =
- Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
- ImagAddr =
- Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
- } else {
- ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.first / 2;
+ if (EltSize < SlotSize) {
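+ // Claim two full doubleword slots from the va_list; the real and
+ // imaginary parts are then loaded from the right-adjusted halves below.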
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
+ SlotSize * 2, SlotSize,
+ SlotSize, /*AllowHigher*/ true);
+
+ Address RealAddr = Addr;
+ Address ImagAddr = RealAddr;
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
+ SlotSize - EltSize);
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
+ 2 * SlotSize - EltSize);
+ } else {
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
+ }
+
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
+ RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
+ ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
+ llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
+ llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
+ CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
+ /*init*/ true);
+ return Temp;
}
- llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
- RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
- ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
- llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
- llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
- llvm::AllocaInst *Ptr =
- CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx");
- llvm::Value *RealPtr =
- Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
- llvm::Value *ImagPtr =
- Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
- Builder.CreateStore(Real, RealPtr, false);
- Builder.CreateStore(Imag, ImagPtr, false);
- return Ptr;
- }
-
- // If the argument is smaller than 8 bytes, it is right-adjusted in
- // its doubleword slot. Adjust the pointer to pick it up from the
- // correct offset.
- if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
- }
-
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ }
+
+ // Otherwise, just use the general rule.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, SlotSize, /*AllowHigher*/ true);
}
static bool
@@ -3971,14 +4250,14 @@ private:
it.info = classifyArgumentType(it.type);
}
- llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
- llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override {
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
@@ -4021,7 +4300,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
if (!isAggregateTypeForABI(Ty)) {
@@ -4037,8 +4316,8 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
}
// Empty records are always ignored on Darwin, but actually passed in C++ mode
@@ -4073,7 +4352,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
@@ -4082,7 +4361,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
@@ -4118,7 +4397,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
@@ -4156,7 +4435,7 @@ bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
-llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
+Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
ABIArgInfo AI = classifyArgumentType(Ty);
@@ -4190,24 +4469,32 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- auto &Ctx = CGF.getContext();
- llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlign = TyInfo.second;
+
+ Address reg_offs_p = Address::invalid();
+ llvm::Value *reg_offs = nullptr;
int reg_top_index;
- int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
+ CharUnits reg_top_offset;
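+ // Indirect arguments occupy a single 8-byte pointer slot; direct ones
+ // need their full size in the register save area.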
+ int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
if (!IsFPR) {
// 3 is the field number of __gr_offs
reg_offs_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
+ reg_top_offset = CharUnits::fromQuantity(8);
RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
// 4 is the field number of __vr_offs.
reg_offs_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
+ "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
+ reg_top_offset = CharUnits::fromQuantity(16);
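+ // Each vector register is spilled to a full 16-byte slot, however much
+ // of it the argument actually uses.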
RegSize = 16 * NumRegs;
}
@@ -4232,8 +4519,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
// Integer arguments may need to correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
- if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
- int Align = Ctx.getTypeAlign(Ty) / 8;
+ if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
reg_offs = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
@@ -4244,6 +4531,9 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
}
// Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
+ // This is done unconditionally: allocating an argument to the stack also
+ // uses up all the remaining registers of the appropriate kind.
llvm::Value *NewOffset = nullptr;
NewOffset = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
@@ -4265,13 +4555,14 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
// registers. First start the appropriate block:
CGF.EmitBlock(InRegBlock);
- llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
- reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
- "reg_top_p");
+ llvm::Value *reg_top = nullptr;
+ Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
+ reg_top_offset, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
- llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
- llvm::Value *RegAddr = nullptr;
- llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
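+ // The save areas are slot-aligned (16 bytes for vector registers, 8 for
+ // GPRs), so the address we compute inherits at least that alignment.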
+ Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
+ CharUnits::fromQuantity(IsFPR ? 16 : 8));
+ Address RegAddr = Address::invalid();
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
if (IsIndirect) {
// If it's been passed indirectly (actually a struct), whatever we find from
@@ -4288,43 +4579,45 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
// qN+1, ...). We reload and store into a temporary local variable
// contiguously.
assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
+ auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
+ Address Tmp = CGF.CreateTempAlloca(HFATy,
+ std::max(TyAlign, BaseTyInfo.second));
+
+ // On big-endian platforms, the value will be right-aligned in its slot.
int Offset = 0;
+ if (CGF.CGM.getDataLayout().isBigEndian() &&
+ BaseTyInfo.first.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.first.getQuantity();
- if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
- Offset = 16 - Ctx.getTypeSize(Base) / 8;
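+ // Copy each member out of its 16-byte register slot into the packed
+ // temporary, applying the big-endian adjustment computed above.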
for (unsigned i = 0; i < NumMembers; ++i) {
- llvm::Value *BaseOffset =
- llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
- llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
- LoadAddr = CGF.Builder.CreateBitCast(
- LoadAddr, llvm::PointerType::getUnqual(BaseTy));
- llvm::Value *StoreAddr =
- CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
+ CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
+ Address LoadAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
+ LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
+
+ Address StoreAddr =
+ CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
}
- RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
} else {
- // Otherwise the object is contiguous in memory
- unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
- if (CGF.CGM.getDataLayout().isBigEndian() &&
- (IsHFA || !isAggregateTypeForABI(Ty)) &&
- Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
- int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
- BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
+ // Otherwise the object is contiguous in memory.
- BaseAddr = CGF.Builder.CreateAdd(
- BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
-
- BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
+ // It might be right-aligned in its slot.
+ CharUnits SlotSize = BaseAddr.getAlignment();
+ if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
+ (IsHFA || !isAggregateTypeForABI(Ty)) &&
+ TyInfo.first < SlotSize) {
+ CharUnits Offset = SlotSize - TyInfo.first;
+ BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
}
- RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
+ RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
}
CGF.EmitBranch(ContBlock);
@@ -4334,55 +4627,51 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
//=======================================
CGF.EmitBlock(OnStackBlock);
- llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
- stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
- OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
+ Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
+ CharUnits::Zero(), "stack_p");
+ llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
- // Again, stack arguments may need realigmnent. In this case both integer and
+ // Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
- if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
- int Align = Ctx.getTypeAlign(Ty) / 8;
+ if (!IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
- OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
+ OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
- OnStackAddr = CGF.Builder.CreateAdd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
+ OnStackPtr = CGF.Builder.CreateAdd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
- OnStackAddr = CGF.Builder.CreateAnd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
+ OnStackPtr = CGF.Builder.CreateAnd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
- OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+ OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
}
+ Address OnStackAddr(OnStackPtr,
+ std::max(CharUnits::fromQuantity(8), TyAlign));
- uint64_t StackSize;
+ // All stack slots are multiples of 8 bytes.
+ CharUnits StackSlotSize = CharUnits::fromQuantity(8);
+ CharUnits StackSize;
if (IsIndirect)
- StackSize = 8;
+ StackSize = StackSlotSize;
else
- StackSize = Ctx.getTypeSize(Ty) / 8;
-
- // All stack slots are 8 bytes
- StackSize = llvm::RoundUpToAlignment(StackSize, 8);
+ StackSize = TyInfo.first.RoundUpToAlignment(StackSlotSize);
- llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
+ llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
llvm::Value *NewStack =
- CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
+ CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
- Ctx.getTypeSize(Ty) < 64) {
- int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
- OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
-
- OnStackAddr = CGF.Builder.CreateAdd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
-
- OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+ TyInfo.first < StackSlotSize) {
+ CharUnits Offset = StackSlotSize - TyInfo.first;
+ OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
}
- OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
+ OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
CGF.EmitBranch(ContBlock);
@@ -4391,75 +4680,48 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
//=======================================
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(OnStackAddr, OnStackBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ OnStackAddr, OnStackBlock, "vaargs.addr");
if (IsIndirect)
- return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
+ return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
+ TyInfo.second);
return ResAddr;
}
-llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
- // We do not support va_arg for aggregates or illegal vector types.
- // Lower VAArg here for these cases and use the LLVM va_arg instruction for
- // other cases.
+Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // The backend's lowering doesn't support va_arg for aggregates or
+ // illegal vector types. Lower VAArg here for these cases and use
+ // the LLVM va_arg instruction for everything else.
if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
- return nullptr;
-
- uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
-
- const Type *Base = nullptr;
- uint64_t Members = 0;
- bool isHA = isHomogeneousAggregate(Ty, Base, Members);
-
- bool isIndirect = false;
- // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
- // be passed indirectly.
- if (Size > 16 && !isHA) {
- isIndirect = true;
- Size = 8;
- Align = 8;
- }
+ return Address::invalid();
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+ // Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- // These are ignored for parameter passing purposes.
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
}
- const uint64_t MinABIAlign = 8;
- if (Align > MinABIAlign) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
- Addr = Builder.CreateGEP(Addr, Offset);
- llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
- llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
- Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
- }
-
- uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
- llvm::Value *NextAddr = Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // The size of the actual thing passed, which might end up just
+ // being a pointer for indirect types.
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
- if (isIndirect)
- Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+ // Arguments bigger than 16 bytes which aren't homogeneous
+ // aggregates should be passed indirectly.
+ bool IsIndirect = false;
+ if (TyInfo.first.getQuantity() > 16) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
+ }
- return AddrTyped;
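+ // Delegate the slot allocation, alignment handling, and (if needed)
+ // indirection to the common emitVoidPtrVAArg helper.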
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
@@ -4473,7 +4735,8 @@ public:
enum ABIKind {
APCS = 0,
AAPCS = 1,
- AAPCS_VFP
+ AAPCS_VFP = 2,
+ AAPCS16_VFP = 3,
};
private:
@@ -4507,6 +4770,11 @@ public:
}
}
+ bool isAndroid() const {
+ return (getTarget().getTriple().getEnvironment() ==
+ llvm::Triple::Android);
+ }
+
ABIKind getABIKind() const { return Kind; }
private:
@@ -4520,8 +4788,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
@@ -4561,7 +4829,7 @@ public:
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
return;
@@ -4583,7 +4851,8 @@ public:
Fn->addFnAttr("interrupt", Kind);
- if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
+ ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
+ if (ABI == ARMABIInfo::APCS)
return;
// AAPCS guarantees that sp will be 8-byte aligned on any public interface,
@@ -4649,7 +4918,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
// The default calling convention that LLVM will infer.
- if (isEABIHF())
+ if (isEABIHF() || getTarget().getTriple().isWatchOS())
return llvm::CallingConv::ARM_AAPCS_VFP;
else if (isEABI())
return llvm::CallingConv::ARM_AAPCS;
@@ -4664,6 +4933,7 @@ llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
case APCS: return llvm::CallingConv::ARM_APCS;
case AAPCS: return llvm::CallingConv::ARM_AAPCS;
case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
}
llvm_unreachable("bad ABI kind");
}
@@ -4677,8 +4947,20 @@ void ARMABIInfo::setCCs() {
if (abiCC != getLLVMDefaultCC())
RuntimeCC = abiCC;
- BuiltinCC = (getABIKind() == APCS ?
- llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
+ // AAPCS apparently requires runtime support functions to be soft-float, but
+ // that's almost certainly for historic reasons (Thumb1 not supporting VFP
+ // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
+ switch (getABIKind()) {
+ case APCS:
+ case AAPCS16_VFP:
+ if (abiCC != getLLVMDefaultCC())
+ BuiltinCC = abiCC;
+ break;
+ case AAPCS:
+ case AAPCS_VFP:
+ BuiltinCC = llvm::CallingConv::ARM_AAPCS;
+ break;
+ }
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
@@ -4712,7 +4994,17 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ // __fp16 gets passed as if it were an int or float, but with the top 16 bits
+ // unspecified. This is not done for OpenCL as it handles the half type
+ // natively, and does not need to interwork with AAPCS code.
+ if (Ty->isHalfType() && !getContext().getLangOpts().OpenCL) {
+ llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type::getFloatTy(getVMContext()) :
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
}
if (!isAggregateTypeForABI(Ty)) {
@@ -4726,7 +5018,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
// Ignore empty records.
@@ -4743,6 +5035,27 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
// Base can be a floating-point or a vector.
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
+ } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
+ // WatchOS does have homogeneous aggregates. Note that we intentionally use
+ // this convention even for a variadic function: the backend will use GPRs
+ // if needed.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
+ llvm::Type *Ty =
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+
+ if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
+ getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
+ // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
+ // bigger than 128 bits, they get placed in space allocated by the caller,
+ // and a pointer is passed.
+ return ABIArgInfo::getIndirect(
+ CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
}
// Support byval for ARM.
@@ -4756,8 +5069,10 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
+ assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
}
// Otherwise, pass by coercing to a structure of the appropriate size.
@@ -4863,14 +5178,25 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
bool isVariadic) const {
- bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
+ bool IsEffectivelyAAPCS_VFP =
+ (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // __fp16 gets returned as if it were an int or float, but with the top 16
+ // bits unspecified. This is not done for OpenCL as it handles the half type
+ // natively, and does not need to interwork with AAPCS code.
+ if (RetTy->isHalfType() && !getContext().getLangOpts().OpenCL) {
+ llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
+ llvm::Type::getFloatTy(getVMContext()) :
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
}
if (!isAggregateTypeForABI(RetTy)) {
@@ -4907,7 +5233,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
}
// Otherwise return in memory.
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
// Otherwise this is an AAPCS variant.
@@ -4918,7 +5244,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// Check for homogeneous aggregates with AAPCS-VFP.
if (IsEffectivelyAAPCS_VFP) {
const Type *Base = nullptr;
- uint64_t Members;
+ uint64_t Members = 0;
if (isHomogeneousAggregate(RetTy, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Homogeneous Aggregates are returned directly.
@@ -4940,22 +5266,39 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
if (Size <= 16)
return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
+ llvm::Type *CoerceTy =
+ llvm::ArrayType::get(Int32Ty, llvm::RoundUpToAlignment(Size, 32) / 32);
+ return ABIArgInfo::getDirect(CoerceTy);
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- uint64_t Size = getContext().getTypeSize(VT);
- // NumElements should be power of 2.
- if ((NumElements & (NumElements - 1)) != 0)
- return true;
- // Size should be greater than 32 bits.
- return Size <= 32;
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (isAndroid()) {
+ // Android shipped using Clang 3.1, which supported a slightly different
+ // vector ABI. The primary differences were that 3-element vector types
+ // were legal, and so were sub 32-bit vectors (e.g. <2 x i8>). This path
+ // accepts that legacy behavior for Android only.
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ // NumElements should be a power of 2 or equal to 3.
+ if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
+ return true;
+ } else {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be a power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+ // Vectors of 32 bits or fewer are illegal.
+ return Size <= 32;
+ }
}
return false;
}
@@ -4981,80 +5324,53 @@ bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
-llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
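+ // The ARM va_list is a simple pointer into an argument stack of 4-byte
+ // slots.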
+ // Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- // These are ignored for parameter passing purposes.
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
}
- uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
- uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
- bool IsIndirect = false;
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlignForABI = TyInfo.second;
- // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
- // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
- if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS)
- TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
- else
- TyAlign = 4;
// Use indirect if size of the illegal vector is bigger than 16 bytes.
- if (isIllegalVectorType(Ty) && Size > 16) {
+ bool IsIndirect = false;
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
IsIndirect = true;
- Size = 4;
- TyAlign = 4;
- }
- // Handle address alignment for ABI alignment > 4 bytes.
- if (TyAlign > 4) {
- assert((TyAlign & (TyAlign - 1)) == 0 &&
- "Alignment is not power of 2!");
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
- AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
- }
+ // ARMv7k passes structs bigger than 16 bytes indirectly, in space
+ // allocated by the caller.
+ } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
+ getABIKind() == ARMABIInfo::AAPCS16_VFP &&
+ !isHomogeneousAggregate(Ty, Base, Members)) {
+ IsIndirect = true;
- uint64_t Offset =
- llvm::RoundUpToAlignment(Size, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // Otherwise, bound the type's ABI alignment.
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+ // Our callers should be prepared to handle an under-aligned address.
+ } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS) {
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
+ } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
+ // ARMv7k allows type alignment up to 16 bytes.
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
+ } else {
+ TyAlignForABI = CharUnits::fromQuantity(4);
+ }
+ TyInfo.second = TyAlignForABI;
- if (IsIndirect)
- Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
- else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
- // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
- // may not be correctly aligned for the vector type. We create an aligned
- // temporary space and copy the content over from ap.cur to the temporary
- // space. This is necessary if the natural alignment of the type is greater
- // than the ABI alignment.
- llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
- CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
- llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
- "var.align");
- llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
- llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
- Builder.CreateMemCpy(Dst, Src,
- llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
- TyAlign, false);
- Addr = AlignedTemp; //The content is in aligned location.
- }
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- return AddrTyped;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
+ SlotSize, /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
@@ -5071,8 +5387,8 @@ public:
ABIArgInfo classifyArgumentType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5111,7 +5427,7 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
// Return aggregates type as indirect by value
if (isAggregateTypeForABI(Ty))
- return ABIArgInfo::getIndirect(0, /* byval */ true);
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
@@ -5130,15 +5446,15 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.setEffectiveCallingConvention(getRuntimeCC());
}
-llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const {
+Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::
setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const{
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
llvm::Function *F = cast<llvm::Function>(GV);
@@ -5232,8 +5548,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5334,8 +5650,8 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
return Ty;
}
-llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i64 __gpr;
@@ -5347,59 +5663,69 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Every non-vector argument occupies 8 bytes and is passed by preference
// in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
// always passed on the stack.
- Ty = CGF.getContext().getCanonicalType(Ty);
+ Ty = getContext().getCanonicalType(Ty);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
+ llvm::Type *DirectTy = ArgTy;
ABIArgInfo AI = classifyArgumentType(Ty);
bool IsIndirect = AI.isIndirect();
bool InFPRs = false;
bool IsVector = false;
- unsigned UnpaddedBitSize;
+ CharUnits UnpaddedSize;
+ CharUnits DirectAlign;
if (IsIndirect) {
- APTy = llvm::PointerType::getUnqual(APTy);
- UnpaddedBitSize = 64;
+ DirectTy = llvm::PointerType::getUnqual(DirectTy);
+ UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
} else {
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
IsVector = ArgTy->isVectorTy();
- UnpaddedBitSize = getContext().getTypeSize(Ty);
+ UnpaddedSize = TyInfo.first;
+ DirectAlign = TyInfo.second;
}
- unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
- assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
+ CharUnits PaddedSize = CharUnits::fromQuantity(8);
+ if (IsVector && UnpaddedSize > PaddedSize)
+ PaddedSize = CharUnits::fromQuantity(16);
+ assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
- unsigned PaddedSize = PaddedBitSize / 8;
- unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
+ CharUnits Padding = (PaddedSize - UnpaddedSize);
llvm::Type *IndexTy = CGF.Int64Ty;
- llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
+ llvm::Value *PaddedSizeV =
+ llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
if (IsVector) {
// Work out the address of a vector argument on the stack.
// Vector arguments are always passed in the high bits of a
// single (8 byte) or double (16 byte) stack slot.
- llvm::Value *OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
"overflow_arg_area_ptr");
- llvm::Value *OverflowArgArea =
- CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
- llvm::Value *MemAddr =
- CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ TyInfo.second);
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
return MemAddr;
}
- unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
+ assert(PaddedSize.getQuantity() == 8);
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex;
+ CharUnits RegPadding;
if (InFPRs) {
MaxRegs = 4; // Maximum of 4 FPR arguments
RegCountField = 1; // __fpr
RegSaveIndex = 16; // save offset for f0
- RegPadding = 0; // floats are passed in the high bits of an FPR
+ RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
} else {
MaxRegs = 5; // Maximum of 5 GPR arguments
RegCountField = 0; // __gpr
@@ -5407,8 +5733,9 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
RegPadding = Padding; // values are passed in the low bits of a GPR
}
- llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, RegCountField, "reg_count_ptr");
+ Address RegCountPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
+ "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
@@ -5426,17 +5753,20 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *ScaledRegCount =
CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
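+ // RegBase is the byte offset of the first argument register within the
+ // register save area; RegPadding right-adjusts small values in their
+ // 8-byte slot.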
llvm::Value *RegBase =
- llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
+ + RegPadding.getQuantity());
llvm::Value *RegOffset =
CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
- llvm::Value *RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr");
+ Address RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- llvm::Value *RawRegAddr =
- CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
- llvm::Value *RegAddr =
- CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
+ Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
+ "raw_reg_addr"),
+ PaddedSize);
+ Address RegAddr =
+ CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
// Update the register count
llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
@@ -5449,30 +5779,31 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGF.EmitBlock(InMemBlock);
// Work out the address of a stack argument.
- llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
- llvm::Value *OverflowArgArea =
- CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
- llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
- llvm::Value *RawMemAddr =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
- llvm::Value *MemAddr =
- CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
+ Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ PaddedSize);
+ Address RawMemAddr =
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ MemAddr, InMemBlock, "va_arg.addr");
if (IsIndirect)
- return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
+ TyInfo.second);
return ResAddr;
}
@@ -5483,7 +5814,7 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
if (isVectorArgumentType(RetTy))
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
return (isPromotableIntegerType(RetTy) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
@@ -5491,7 +5822,7 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Handle the generic C++ ABI.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
if (isPromotableIntegerType(Ty))
@@ -5508,7 +5839,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Handle small structures.
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -5516,7 +5847,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// fail the size test above.
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// The structure is passed as an unextended integer, a float, or a double.
llvm::Type *PassTy;
@@ -5533,7 +5864,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Non-structure compounds are passed indirectly.
if (isCompoundType(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
return ABIArgInfo::getDirect(nullptr);
}
@@ -5557,7 +5888,7 @@ public:
void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
// Handle 'interrupt' attribute:
llvm::Function *F = cast<llvm::Function>(GV);
@@ -5598,8 +5929,8 @@ public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool shouldSignExtUnsignedType(QualType Ty) const override;
};
@@ -5616,7 +5947,7 @@ public:
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
llvm::Function *Fn = cast<llvm::Function>(GV);
if (FD->hasAttr<Mips16Attr>()) {
@@ -5625,6 +5956,26 @@ public:
else if (FD->hasAttr<NoMips16Attr>()) {
Fn->addFnAttr("nomips16");
}
+
+ const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
+ if (!Attr)
+ return;
+
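+ // Translate the attribute's interrupt kind into the string form that the
+ // Mips backend expects on the "interrupt" function attribute.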
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case MipsInterruptAttr::eic: Kind = "eic"; break;
+ case MipsInterruptAttr::sw0: Kind = "sw0"; break;
+ case MipsInterruptAttr::sw1: Kind = "sw1"; break;
+ case MipsInterruptAttr::hw0: Kind = "hw0"; break;
+ case MipsInterruptAttr::hw1: Kind = "hw1"; break;
+ case MipsInterruptAttr::hw2: Kind = "hw2"; break;
+ case MipsInterruptAttr::hw3: Kind = "hw3"; break;
+ case MipsInterruptAttr::hw4: Kind = "hw4"; break;
+ case MipsInterruptAttr::hw5: Kind = "hw5"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
@@ -5738,7 +6089,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
Offset = OrigOffset + MinABIStackAlignInBytes;
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
// If we have reached here, aggregates are passed directly by coercing to
@@ -5832,7 +6183,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
}
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
// Treat an enum type as its underlying type.
@@ -5855,52 +6206,55 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type, Offset);
}
-llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
+Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy) const {
+ QualType Ty = OrigTy;
// Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
// Pointers are also promoted in the same way but this only matters for N32.
unsigned SlotSizeInBits = IsO32 ? 32 : 64;
unsigned PtrWidth = getTarget().getPointerWidth(0);
+ bool DidPromote = false;
if ((Ty->isIntegerType() &&
- CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ getContext().getIntWidth(Ty) < SlotSizeInBits) ||
(Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
- Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
- Ty->isSignedIntegerType());
+ DidPromote = true;
+ Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
+ Ty->isSignedIntegerType());
}
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- int64_t TypeAlign =
- std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped;
- llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
-
- if (TypeAlign > MinABIStackAlignInBytes) {
- llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
- llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
- llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
- llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
- llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
- AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
- }
- else
- AddrTyped = Builder.CreateBitCast(Addr, PTy);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // The alignment of things in the argument area is never larger than
+ // StackAlignInBytes.
+ TyInfo.second =
+ std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
+
+ // MinABIStackAlignInBytes is the size of argument slots on the stack.
+ CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
+
+ Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+
- llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
- TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
- unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
- uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
- llvm::Value *NextAddr =
- Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // If there was a promotion, "unpromote" into a temporary.
+ // TODO: can we just use a pointer into a subset of the original slot?
+ if (DidPromote) {
+ Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
+ llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
- return AddrTyped;
+ // Truncate down to the right width.
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
+ : CGF.IntPtrTy);
+ llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
+ if (OrigTy->isPointerType())
+ V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+
+ CGF.Builder.CreateStore(V, Temp);
+ Addr = Temp;
+ }
+
+ return Addr;
}
bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
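
A worked sketch of the promotion path above, assuming the N64 ABI (64-bit
argument slots): a 32-bit int read through va_arg is widened to the slot
width and then truncated back through the "vaarg.promotion-temp" alloca.
Any C variadic reader exercises it, e.g.:

    int sum_varargs(int n, ...) {
      __builtin_va_list ap;
      __builtin_va_start(ap, n);
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += __builtin_va_arg(ap, int); /* loaded as i64, truncated to i32 */
      __builtin_va_end(ap);
      return s;
    }
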
@@ -5960,7 +6314,7 @@ public:
void TCETargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
llvm::Function *F = cast<llvm::Function>(GV);
@@ -6022,8 +6376,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6060,11 +6414,11 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIgnore();
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 64)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
// Pass in the smallest viable integer type.
else if (Size > 32)
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
@@ -6082,7 +6436,7 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
@@ -6110,30 +6464,16 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
-llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Need to handle alignment
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
+Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+  // FIXME: Someone needs to audit that this handles alignment correctly.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
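
Both the MIPS and Hexagon va_arg paths now defer to the shared
emitVoidPtrVAArg() helper. A behavioral sketch, inferred from the call sites
above rather than the helper's verbatim body (details such as big-endian
slot adjustment are omitted):

    // emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
    //                  ValueInfo /* size, align */, SlotSize,
    //                  AllowHigherAlign) emits, roughly:
    //   cur  = load i8*, VAListAddr                      ; "ap.cur"
    //   if (AllowHigherAlign && ValueInfo.align > SlotSize)
    //     cur = round-up(cur, ValueInfo.align)
    //   next = cur + round-up(ValueInfo.size, SlotSize)
    //   store next, VAListAddr                           ; "ap.next"
    //   return cur as a Ty* Address (loaded through once
    //   more when IsIndirect)
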
@@ -6156,7 +6496,7 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
return;
@@ -6210,8 +6550,8 @@ public:
private:
ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
// Coercion type builder for structs passed in registers. The coercion type
// serves two purposes:
@@ -6331,7 +6671,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// Anything too big to fit in registers is passed with an explicit indirect
// pointer / sret pointer.
if (Size > SizeLimit)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -6348,7 +6688,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// If a C++ object has either a non-trivial copy constructor or a non-trivial
// destructor, it is passed with an explicit indirect pointer / sret pointer.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// This is a small aggregate type that should be passed in registers.
// Build a coercion type from the LLVM struct type.
@@ -6369,55 +6709,59 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
return ABIArgInfo::getDirect(CoerceTy);
}
-llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
ABIArgInfo AI = classifyType(Ty, 16 * 8);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
- llvm::Value *ArgAddr;
- unsigned Stride;
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ Address ArgAddr = Address::invalid();
+ CharUnits Stride;
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
- case ABIArgInfo::Extend:
- Stride = 8;
- ArgAddr = Builder
- .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
- "extend");
+ case ABIArgInfo::Extend: {
+ Stride = SlotSize;
+ CharUnits Offset = SlotSize - TypeInfo.first;
+ ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
break;
+ }
- case ABIArgInfo::Direct:
- Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ case ABIArgInfo::Direct: {
+ auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ Stride = CharUnits::fromQuantity(AllocSize).RoundUpToAlignment(SlotSize);
ArgAddr = Addr;
break;
+ }
case ABIArgInfo::Indirect:
- Stride = 8;
- ArgAddr = Builder.CreateBitCast(Addr,
- llvm::PointerType::getUnqual(ArgPtrTy),
- "indirect");
- ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
+ Stride = SlotSize;
+ ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
+ TypeInfo.second);
break;
case ABIArgInfo::Ignore:
- return llvm::UndefValue::get(ArgPtrTy);
+ return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
}
// Update VAList.
- Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
- Builder.CreateStore(Addr, VAListAddrAsBPP);
+ llvm::Value *NextPtr =
+ Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
+ Builder.CreateStore(NextPtr, VAListAddr);
- return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
+ return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
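
The Extend case above is plain arithmetic: SPARC V9 is big-endian, so a
value narrower than its 8-byte slot is right-justified within it, and the
byte offset is the slot size minus the value size. For a 4-byte int:

    //   SlotSize       = 8     (bytes)
    //   TypeInfo.first = 4     (size of int)
    //   Offset         = 8 - 4 = 4
    // so the "extend" GEP points 4 bytes into the slot, where the
    // right-justified 32-bit value actually lives.
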
@@ -6552,7 +6896,7 @@ class TypeStringCache {
unsigned IncompleteCount; // Number of Incomplete entries in the Map.
unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
- TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {};
+ TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
bool removeIncomplete(const IdentifierInfo *ID);
void addIfComplete(const IdentifierInfo *ID, StringRef Str,
@@ -6566,8 +6910,8 @@ class FieldEncoding {
bool HasName;
std::string Enc;
public:
- FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {};
- StringRef str() {return Enc.c_str();};
+ FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
+ StringRef str() {return Enc.c_str();}
bool operator<(const FieldEncoding &rhs) const {
if (HasName != rhs.HasName) return HasName;
return Enc < rhs.Enc;
@@ -6577,8 +6921,8 @@ public:
class XCoreABIInfo : public DefaultABIInfo {
public:
XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6592,52 +6936,53 @@ public:
} // End anonymous namespace.
-llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
CGBuilderTy &Builder = CGF.Builder;
// Get the VAList.
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
- CGF.Int8PtrPtrTy);
- llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
// Handle the argument.
ABIArgInfo AI = classifyArgumentType(Ty);
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
- llvm::Value *Val;
- uint64_t ArgSize = 0;
+
+ Address Val = Address::invalid();
+ CharUnits ArgSize = CharUnits::Zero();
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Ignore:
- Val = llvm::UndefValue::get(ArgPtrTy);
- ArgSize = 0;
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
+ ArgSize = CharUnits::Zero();
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- Val = Builder.CreatePointerCast(AP, ArgPtrTy);
- ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
- if (ArgSize < 4)
- ArgSize = 4;
+ Val = Builder.CreateBitCast(AP, ArgPtrTy);
+ ArgSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ ArgSize = ArgSize.RoundUpToAlignment(SlotSize);
break;
case ABIArgInfo::Indirect:
- llvm::Value *ArgAddr;
- ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
- ArgAddr = Builder.CreateLoad(ArgAddr);
- Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
- ArgSize = 4;
+ Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
+ Val = Address(Builder.CreateLoad(Val), TypeAlign);
+ ArgSize = SlotSize;
break;
}
// Increment the VAList.
- if (ArgSize) {
- llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
- Builder.CreateStore(APN, VAListAddrAsBPP);
+ if (!ArgSize.isZero()) {
+ llvm::Value *APN =
+ Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
+ Builder.CreateStore(APN, VAListAddr);
}
+
return Val;
}
@@ -6781,9 +7126,7 @@ static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
if (Field->isBitField()) {
Enc += "b(";
llvm::raw_svector_ostream OS(Enc);
- OS.resync();
OS << Field->getBitWidthValue(CGM.getContext());
- OS.flush();
Enc += ':';
}
if (!appendType(Enc, Field->getType(), CGM, TSC))
@@ -6897,7 +7240,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
// Qualifiers are emitted in alphabetical order.
- static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
+ static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
int Lookup = 0;
if (QT.isConstQualified())
Lookup += 1<<0;
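
A worked instance of the lookup above. Only the const branch is visible in
this hunk; the restrict and volatile branches are assumed to set 1<<1 and
1<<2 respectively, which is what the alphabetical table ordering implies:

    //   const -> 1<<0, restrict -> 1<<1, volatile -> 1<<2   (assumed)
    // e.g. appendQualifier() on a 'const volatile int':
    //   Lookup = 1 + 4 = 5   ->   Table[5] == "cv:"
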
@@ -7138,6 +7481,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
}
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return *(TheTargetCodeGenInfo = new WebAssemblyTargetCodeGenInfo(Types));
+
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
@@ -7150,8 +7497,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- if (getTarget().getABI() == "apcs-gnu")
+ StringRef ABIStr = getTarget().getABI();
+ if (ABIStr == "apcs-gnu")
Kind = ARMABIInfo::APCS;
+ else if (ABIStr == "aapcs16")
+ Kind = ARMABIInfo::AAPCS16_VFP;
else if (CodeGenOpts.FloatABI == "hard" ||
(CodeGenOpts.FloatABI != "soft" &&
Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
@@ -7161,7 +7511,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::ppc:
- return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo =
+ new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
case llvm::Triple::ppc64:
if (Triple.isOSBinFormatELF()) {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
@@ -7202,18 +7553,19 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::x86: {
bool IsDarwinVectorABI = Triple.isOSDarwin();
- bool IsSmallStructInRegABI =
+ bool RetSmallStructInRegABI =
X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
if (Triple.getOS() == llvm::Triple::Win32) {
return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, IsSmallStructInRegABI,
+ Types, IsDarwinVectorABI, RetSmallStructInRegABI,
IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
} else {
return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, IsSmallStructInRegABI,
- IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
+ Types, IsDarwinVectorABI, RetSmallStructInRegABI,
+ IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
+ CodeGenOpts.FloatABI == "soft"));
}
}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 95275d5d42ee..87b470498623 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -47,7 +47,7 @@ class TargetCodeGenInfo {
public:
// WARNING: Acquires the ownership of ABIInfo.
- TargetCodeGenInfo(ABIInfo *info = 0) : Info(info) {}
+ TargetCodeGenInfo(ABIInfo *info = nullptr) : Info(info) {}
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
@@ -219,6 +219,6 @@ public:
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const {}
};
-}
+} // namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index 3219dc1cc0e9..49dccd224bff 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -65,13 +65,12 @@ CudaDeviceAction::CudaDeviceAction(std::unique_ptr<Action> Input,
void CudaHostAction::anchor() {}
CudaHostAction::CudaHostAction(std::unique_ptr<Action> Input,
- const ActionList &_DeviceActions)
- : Action(CudaHostClass, std::move(Input)), DeviceActions(_DeviceActions) {}
+ const ActionList &DeviceActions)
+ : Action(CudaHostClass, std::move(Input)), DeviceActions(DeviceActions) {}
CudaHostAction::~CudaHostAction() {
- for (iterator it = DeviceActions.begin(), ie = DeviceActions.end(); it != ie;
- ++it)
- delete *it;
+ for (auto &DA : DeviceActions)
+ delete DA;
}
void JobAction::anchor() {}
@@ -153,13 +152,6 @@ VerifyJobAction::VerifyJobAction(ActionClass Kind,
"ActionClass is not a valid VerifyJobAction");
}
-VerifyJobAction::VerifyJobAction(ActionClass Kind, ActionList &Inputs,
- types::ID Type)
- : JobAction(Kind, Inputs, Type) {
- assert((Kind == VerifyDebugInfoJobClass || Kind == VerifyPCHJobClass) &&
- "ActionClass is not a valid VerifyJobAction");
-}
-
void VerifyDebugInfoJobAction::anchor() {}
VerifyDebugInfoJobAction::VerifyDebugInfoJobAction(
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index 101d1fcc832a..e4af2a6ced8a 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -24,8 +24,9 @@ using namespace llvm::opt;
Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain,
InputArgList *_Args, DerivedArgList *_TranslatedArgs)
- : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args),
- TranslatedArgs(_TranslatedArgs), Redirects(nullptr),
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain),
+ CudaHostToolChain(&DefaultToolChain), CudaDeviceToolChain(nullptr),
+ Args(_Args), TranslatedArgs(_TranslatedArgs), Redirects(nullptr),
ForDiagnostics(false) {}
Compilation::~Compilation() {
diff --git a/lib/Driver/CrossWindowsToolChain.cpp b/lib/Driver/CrossWindowsToolChain.cpp
index ffb1469df21d..57bf89635987 100644
--- a/lib/Driver/CrossWindowsToolChain.cpp
+++ b/lib/Driver/CrossWindowsToolChain.cpp
@@ -107,6 +107,12 @@ AddCXXStdlibLibArgs(const llvm::opt::ArgList &DriverArgs,
}
}
+clang::SanitizerMask CrossWindowsToolChain::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ return Res;
+}
+
Tool *CrossWindowsToolChain::buildLinker() const {
return new tools::CrossWindows::Linker(*this);
}
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 180c412bd791..4f8481c0beec 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -11,6 +11,7 @@
#include "InputInfo.h"
#include "ToolChains.h"
#include "clang/Basic/Version.h"
+#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
@@ -46,9 +47,11 @@ using namespace clang;
using namespace llvm::opt;
Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
- DiagnosticsEngine &Diags)
- : Opts(createDriverOptTable()), Diags(Diags), Mode(GCCMode),
- SaveTemps(SaveTempsNone), ClangExecutable(ClangExecutable),
+ DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS)
+ : Opts(createDriverOptTable()), Diags(Diags), VFS(VFS), Mode(GCCMode),
+ SaveTemps(SaveTempsNone), LTOMode(LTOK_None),
+ ClangExecutable(ClangExecutable),
SysRoot(DEFAULT_SYSROOT), UseStdLib(true),
DefaultTargetTriple(DefaultTargetTriple),
DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
@@ -57,8 +60,13 @@ Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
CCCUsePCH(true), SuppressMissingInputWarning(false) {
+ // Provide a sane fallback if no VFS is specified.
+ if (!this->VFS)
+ this->VFS = vfs::getRealFileSystem();
+
Name = llvm::sys::path::filename(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
+ InstalledDir = Dir; // Provide a sensible default installed dir.
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
@@ -174,10 +182,8 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
} else if ((PhaseArg = DAL.getLastArg(options::OPT_S))) {
FinalPhase = phases::Backend;
- // -c and partial CUDA compilations only run up to the assembler.
- } else if ((PhaseArg = DAL.getLastArg(options::OPT_c)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_cuda_device_only)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_cuda_host_only))) {
+ // -c compilation only runs up to the assembler.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
FinalPhase = phases::Assemble;
// Otherwise do everything.
@@ -203,6 +209,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DerivedArgList *DAL = new DerivedArgList(Args);
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
+ bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
for (Arg *A : Args) {
// Unfortunately, we have to parse some forwarding options (-Xassembler,
// -Xlinker, -Xpreprocessor) because we either integrate their functionality
@@ -217,7 +224,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));
// Add the remaining values as Xlinker arguments.
- for (const StringRef Val : A->getValues())
+ for (StringRef Val : A->getValues())
if (Val != "--no-demangle")
DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker), Val);
@@ -246,7 +253,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
StringRef Value = A->getValue();
// Rewrite unless -nostdlib is present.
- if (!HasNostdlib && Value == "stdc++") {
+ if (!HasNostdlib && !HasNodefaultlib && Value == "stdc++") {
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_stdcxx));
continue;
}
@@ -261,7 +268,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
// Pick up inputs via the -- option.
if (A->getOption().matches(options::OPT__DASH_DASH)) {
A->claim();
- for (const StringRef Val : A->getValues())
+ for (StringRef Val : A->getValues())
DAL->append(MakeInputArg(*DAL, Opts, Val));
continue;
}
@@ -327,7 +334,8 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
}
// Skip further flag support on OSes which don't support '-m32' or '-m64'.
- if (Target.getArchName() == "tce" || Target.getOS() == llvm::Triple::Minix)
+ if (Target.getArch() == llvm::Triple::tce ||
+ Target.getOS() == llvm::Triple::Minix)
return Target;
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
@@ -360,6 +368,32 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
return Target;
}
+/// \brief Parse the LTO options and record the type of LTO compilation
+/// based on which -f(no-)?lto(=.*)? option occurs last.
+void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
+ LTOMode = LTOK_None;
+ if (!Args.hasFlag(options::OPT_flto, options::OPT_flto_EQ,
+ options::OPT_fno_lto, false))
+ return;
+
+ StringRef LTOName("full");
+
+ const Arg *A = Args.getLastArg(options::OPT_flto_EQ);
+ if (A)
+ LTOName = A->getValue();
+
+ LTOMode = llvm::StringSwitch<LTOKind>(LTOName)
+ .Case("full", LTOK_Full)
+ .Case("thin", LTOK_Thin)
+ .Default(LTOK_Unknown);
+
+ if (LTOMode == LTOK_Unknown) {
+ assert(A);
+ Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName()
+ << A->getValue();
+ }
+}
+
Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
llvm::PrettyStackTraceString CrashInfo("Compilation construction");
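
How driver invocations map to the mode recorded by setLTOMode() above,
derived only from the logic shown (flags are evaluated last-one-wins):

    clang -flto        x.c   ->  LTOK_Full   (bare -flto defaults to "full")
    clang -flto=full   x.c   ->  LTOK_Full
    clang -flto=thin   x.c   ->  LTOK_Thin
    clang -fno-lto     x.c   ->  LTOK_None   (also the default)
    clang -flto=bogus  x.c   ->  err_drv_unsupported_option_argument
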
@@ -387,6 +421,9 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
InputArgList Args = ParseArgStrings(ArgList.slice(1));
+  // Silence driver warnings if requested.
+ Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));
+
// -no-canonical-prefixes is used very early in main.
Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);
@@ -411,6 +448,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// clang-cl targets MSVC-style Win32.
llvm::Triple T(DefaultTargetTriple);
T.setOS(llvm::Triple::Win32);
+ T.setVendor(llvm::Triple::PC);
T.setEnvironment(llvm::Triple::MSVC);
DefaultTargetTriple = T.str();
}
@@ -439,6 +477,8 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
.Default(SaveTempsCwd);
}
+ setLTOMode(Args);
+
std::unique_ptr<llvm::opt::InputArgList> UArgs =
llvm::make_unique<InputArgList>(std::move(Args));
@@ -452,6 +492,10 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// The compilation takes ownership of Args.
Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs);
+ C->setCudaDeviceToolChain(
+ &getToolChain(C->getArgs(), llvm::Triple(TC.getTriple().isArch64Bit()
+ ? "nvptx64-nvidia-cuda"
+ : "nvptx-nvidia-cuda")));
if (!HandleImmediateArgs(*C))
return C;
@@ -462,10 +506,9 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Construct the list of abstract actions to perform for this compilation. On
// MachO targets this uses the driver-driver and universal actions.
if (TC.getTriple().isOSBinFormatMachO())
- BuildUniversalActions(C->getDefaultToolChain(), C->getArgs(), Inputs,
- C->getActions());
+ BuildUniversalActions(*C, C->getDefaultToolChain(), Inputs);
else
- BuildActions(C->getDefaultToolChain(), C->getArgs(), Inputs,
+ BuildActions(*C, C->getDefaultToolChain(), C->getArgs(), Inputs,
C->getActions());
if (CCCPrintPhases) {
@@ -578,9 +621,9 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// Darwin OSes this uses the driver-driver and builds universal actions.
const ToolChain &TC = C.getDefaultToolChain();
if (TC.getTriple().isOSBinFormatMachO())
- BuildUniversalActions(TC, C.getArgs(), Inputs, C.getActions());
+ BuildUniversalActions(C, TC, Inputs);
else
- BuildActions(TC, C.getArgs(), Inputs, C.getActions());
+ BuildActions(C, TC, C.getArgs(), Inputs, C.getActions());
BuildJobs(C);
@@ -761,6 +804,9 @@ void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
} else
OS << "Thread model: " << TC.getThreadModel();
OS << '\n';
+
+ // Print out the install directory.
+ OS << "InstalledDir: " << InstalledDir << '\n';
}
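
With the addition above, the version banner gains a final line; sample
output of clang --version (version and paths illustrative):

    clang version 3.8.0
    Target: x86_64-unknown-linux-gnu
    Thread model: posix
    InstalledDir: /usr/local/bin
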
/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
@@ -906,7 +952,7 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
os << '"' << CDA->getGpuArchName() << '"' << ", {"
<< PrintActions1(C, *CDA->begin(), Ids) << "}";
} else {
- ActionList *AL;
+ const ActionList *AL;
if (CudaHostAction *CHA = dyn_cast<CudaHostAction>(A)) {
os << "{" << PrintActions1(C, *CHA->begin(), Ids) << "}"
<< ", gpu binaries ";
@@ -914,12 +960,15 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
} else
AL = &A->getInputs();
- const char *Prefix = "{";
- for (Action *PreRequisite : *AL) {
- os << Prefix << PrintActions1(C, PreRequisite, Ids);
- Prefix = ", ";
- }
- os << "}";
+ if (AL->size()) {
+ const char *Prefix = "{";
+ for (Action *PreRequisite : *AL) {
+ os << Prefix << PrintActions1(C, PreRequisite, Ids);
+ Prefix = ", ";
+ }
+ os << "}";
+ } else
+ os << "{}";
}
unsigned Id = Ids.size();
@@ -945,16 +994,17 @@ static bool ContainsCompileOrAssembleAction(const Action *A) {
isa<AssembleJobAction>(A))
return true;
- for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
- if (ContainsCompileOrAssembleAction(*it))
+ for (const Action *Input : *A)
+ if (ContainsCompileOrAssembleAction(Input))
return true;
return false;
}
-void Driver::BuildUniversalActions(const ToolChain &TC, DerivedArgList &Args,
- const InputList &BAInputs,
- ActionList &Actions) const {
+void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
+ const InputList &BAInputs) const {
+ DerivedArgList &Args = C.getArgs();
+ ActionList &Actions = C.getActions();
llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
// Collect the list of architectures. Duplicates are allowed, but should only
// be handled once (in the order seen).
@@ -983,13 +1033,11 @@ void Driver::BuildUniversalActions(const ToolChain &TC, DerivedArgList &Args,
Archs.push_back(Args.MakeArgString(TC.getDefaultUniversalArchName()));
ActionList SingleActions;
- BuildActions(TC, Args, BAInputs, SingleActions);
+ BuildActions(C, TC, Args, BAInputs, SingleActions);
// Add in arch bindings for every top level action, as well as lipo and
// dsymutil steps if needed.
- for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) {
- Action *Act = SingleActions[i];
-
+  for (Action *Act : SingleActions) {
// Make sure we can lipo this kind of output. If not (and it is an actual
// output) then we disallow, since we can't create an output file with the
// right name without overwriting it. We could remove this oddity by just
@@ -1228,18 +1276,23 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
}
-// For each unique --cuda-gpu-arch= argument creates a TY_CUDA_DEVICE input
-// action and then wraps each in CudaDeviceAction paired with appropriate GPU
-// arch name. If we're only building device-side code, each action remains
-// independent. Otherwise we pass device-side actions as inputs to a new
-// CudaHostAction which combines both host and device side actions.
+// For each unique --cuda-gpu-arch= argument, creates a TY_CUDA_DEVICE
+// input action and wraps it in a CudaDeviceAction paired with the
+// appropriate GPU arch name. In case of a partial (i.e. preprocessing
+// only) or device-only compilation, each device action is added to
+// \p Actions and \p HostAction is released. Otherwise the function
+// creates and returns a new CudaHostAction which wraps \p HostAction
+// and the device-side actions.
static std::unique_ptr<Action>
-buildCudaActions(const Driver &D, const ToolChain &TC, DerivedArgList &Args,
- const Arg *InputArg, const types::ID InputType,
- std::unique_ptr<Action> Current, ActionList &Actions) {
-
- assert(InputType == types::TY_CUDA &&
- "CUDA Actions only apply to CUDA inputs.");
+buildCudaActions(Compilation &C, DerivedArgList &Args, const Arg *InputArg,
+ std::unique_ptr<Action> HostAction, ActionList &Actions) {
+ Arg *PartialCompilationArg = Args.getLastArg(options::OPT_cuda_host_only,
+ options::OPT_cuda_device_only);
+ // Host-only compilation case.
+ if (PartialCompilationArg &&
+ PartialCompilationArg->getOption().matches(options::OPT_cuda_host_only))
+ return std::unique_ptr<Action>(
+ new CudaHostAction(std::move(HostAction), {}));
// Collect all cuda_gpu_arch parameters, removing duplicates.
SmallVector<const char *, 4> GpuArchList;
@@ -1259,20 +1312,22 @@ buildCudaActions(const Driver &D, const ToolChain &TC, DerivedArgList &Args,
// Replicate inputs for each GPU architecture.
Driver::InputList CudaDeviceInputs;
- for (unsigned i = 0, e = GpuArchList.size(); i != e; ++i)
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
CudaDeviceInputs.push_back(std::make_pair(types::TY_CUDA_DEVICE, InputArg));
// Build actions for all device inputs.
+ assert(C.getCudaDeviceToolChain() &&
+ "Missing toolchain for device-side compilation.");
ActionList CudaDeviceActions;
- D.BuildActions(TC, Args, CudaDeviceInputs, CudaDeviceActions);
+ C.getDriver().BuildActions(C, *C.getCudaDeviceToolChain(), Args,
+ CudaDeviceInputs, CudaDeviceActions);
assert(GpuArchList.size() == CudaDeviceActions.size() &&
"Failed to create actions for all devices");
  // Check whether any of the device actions stopped before they could generate PTX.
bool PartialCompilation = false;
- bool DeviceOnlyCompilation = Args.hasArg(options::OPT_cuda_device_only);
- for (unsigned i = 0, e = GpuArchList.size(); i != e; ++i) {
- if (CudaDeviceActions[i]->getKind() != Action::BackendJobClass) {
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ if (CudaDeviceActions[I]->getKind() != Action::BackendJobClass) {
PartialCompilation = true;
break;
}
@@ -1280,6 +1335,7 @@ buildCudaActions(const Driver &D, const ToolChain &TC, DerivedArgList &Args,
// Figure out what to do with device actions -- pass them as inputs to the
// host action or run each of them independently.
+ bool DeviceOnlyCompilation = PartialCompilationArg != nullptr;
if (PartialCompilation || DeviceOnlyCompilation) {
    // In case of partial or device-only compilation, results of device actions
    // are not consumed by the host action, so device actions have to be added to
@@ -1288,35 +1344,37 @@ buildCudaActions(const Driver &D, const ToolChain &TC, DerivedArgList &Args,
// -o is ambiguous if we have more than one top-level action.
if (Args.hasArg(options::OPT_o) &&
(!DeviceOnlyCompilation || GpuArchList.size() > 1)) {
- D.Diag(clang::diag::err_drv_output_argument_with_multiple_files);
+ C.getDriver().Diag(
+ clang::diag::err_drv_output_argument_with_multiple_files);
return nullptr;
}
- for (unsigned i = 0, e = GpuArchList.size(); i != e; ++i)
- Actions.push_back(
- new CudaDeviceAction(std::unique_ptr<Action>(CudaDeviceActions[i]),
- GpuArchList[i], /* AtTopLevel */ true));
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ Actions.push_back(new CudaDeviceAction(
+ std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
+ /* AtTopLevel */ true));
// Kill host action in case of device-only compilation.
if (DeviceOnlyCompilation)
- Current.reset(nullptr);
- return Current;
- } else {
- // Outputs of device actions during complete CUDA compilation get created
- // with AtTopLevel=false and become inputs for the host action.
- ActionList DeviceActions;
- for (unsigned i = 0, e = GpuArchList.size(); i != e; ++i)
- DeviceActions.push_back(
- new CudaDeviceAction(std::unique_ptr<Action>(CudaDeviceActions[i]),
- GpuArchList[i], /* AtTopLevel */ false));
- // Return a new host action that incorporates original host action and all
- // device actions.
- return std::unique_ptr<Action>(
- new CudaHostAction(std::move(Current), DeviceActions));
- }
+ HostAction.reset(nullptr);
+ return HostAction;
+ }
+
+ // Outputs of device actions during complete CUDA compilation get created
+ // with AtTopLevel=false and become inputs for the host action.
+ ActionList DeviceActions;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ DeviceActions.push_back(new CudaDeviceAction(
+ std::unique_ptr<Action>(CudaDeviceActions[I]), GpuArchList[I],
+ /* AtTopLevel */ false));
+ // Return a new host action that incorporates original host action and all
+ // device actions.
+ return std::unique_ptr<Action>(
+ new CudaHostAction(std::move(HostAction), DeviceActions));
}
-void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
- const InputList &Inputs, ActionList &Actions) const {
+void Driver::BuildActions(Compilation &C, const ToolChain &TC,
+ DerivedArgList &Args, const InputList &Inputs,
+ ActionList &Actions) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
if (!SuppressMissingInputWarning && Inputs.empty()) {
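
For a full (non-partial) CUDA compilation, the buildCudaActions() rewrite
above yields one host pipeline whose compile step is wrapped in a
CudaHostAction fed by per-arch device actions. Schematically, for two
--cuda-gpu-arch values (a sketch, not literal -ccc-print-phases output):

    x.cu
     |-> device compile (sm_35) -> CudaDeviceAction (AtTopLevel=false) --+
     |-> device compile (sm_37) -> CudaDeviceAction (AtTopLevel=false) --+
     `-> host preprocess/compile ------> CudaHostAction{host, device blobs}
                                            -> backend -> assemble -> link

With --cuda-device-only or a partial compilation, each device action is
instead pushed onto Actions with AtTopLevel=true and the host action is
released.
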
@@ -1373,9 +1431,9 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
ActionList LinkerInputs;
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
- for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
- types::ID InputType = Inputs[i].first;
- const Arg *InputArg = Inputs[i].second;
+ for (auto &I : Inputs) {
+ types::ID InputType = I.first;
+ const Arg *InputArg = I.second;
PL.clear();
types::getCompilationPhases(InputType, PL);
@@ -1412,24 +1470,12 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
continue;
}
- phases::ID CudaInjectionPhase;
- if (isSaveTempsEnabled()) {
- // All phases are done independently, inject GPU blobs during compilation
- // phase as that's where we generate glue code to init them.
- CudaInjectionPhase = phases::Compile;
- } else {
- // Assumes that clang does everything up until linking phase, so we inject
- // cuda device actions at the last step before linking. Otherwise CUDA
- // host action forces preprocessor into a separate invocation.
- if (FinalPhase == phases::Link) {
- for (auto i = PL.begin(), e = PL.end(); i != e; ++i) {
- auto next = i + 1;
- if (next != e && *next == phases::Link)
- CudaInjectionPhase = *i;
- }
- } else
- CudaInjectionPhase = FinalPhase;
- }
+ phases::ID CudaInjectionPhase = FinalPhase;
+ for (const auto &Phase : PL)
+ if (Phase <= FinalPhase && Phase == phases::Compile) {
+ CudaInjectionPhase = Phase;
+ break;
+ }
// Build the pipeline for this file.
std::unique_ptr<Action> Current(new InputAction(*InputArg, InputType));
@@ -1457,10 +1503,9 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
// Otherwise construct the appropriate action.
Current = ConstructPhaseAction(TC, Args, Phase, std::move(Current));
- if (InputType == types::TY_CUDA && Phase == CudaInjectionPhase &&
- !Args.hasArg(options::OPT_cuda_host_only)) {
- Current = buildCudaActions(*this, TC, Args, InputArg, InputType,
- std::move(Current), Actions);
+ if (InputType == types::TY_CUDA && Phase == CudaInjectionPhase) {
+ Current =
+ buildCudaActions(C, Args, InputArg, std::move(Current), Actions);
if (!Current)
break;
}
@@ -1487,6 +1532,10 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args,
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
+
+  // Claim the --cuda-host-only arg, which may be passed to non-CUDA
+  // compilations and should not trigger warnings there.
+ Args.ClaimAllArgs(options::OPT_cuda_host_only);
}
std::unique_ptr<Action>
@@ -1551,7 +1600,7 @@ Driver::ConstructPhaseAction(const ToolChain &TC, const ArgList &Args,
types::TY_LLVM_BC);
}
case phases::Backend: {
- if (IsUsingLTO(Args)) {
+ if (isUsingLTO()) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return llvm::make_unique<BackendJobAction>(std::move(Input), Output);
@@ -1572,10 +1621,6 @@ Driver::ConstructPhaseAction(const ToolChain &TC, const ArgList &Args,
llvm_unreachable("invalid phase in ConstructPhaseAction");
}
-bool Driver::IsUsingLTO(const ArgList &Args) const {
- return Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false);
-}
-
void Driver::BuildJobs(Compilation &C) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
@@ -1668,10 +1713,17 @@ void Driver::BuildJobs(Compilation &C) const {
}
}
-static const Tool *SelectToolForJob(Compilation &C, bool SaveTemps,
+// Returns a Tool for a given JobAction. In case the action and its
+// predecessors can be combined, updates Inputs with the inputs of the
+// first combined action. If one of the collapsed actions is a
+// CudaHostAction, updates CollapsedCHA with a pointer to it so the
+// caller can deal with the extra handling such an action requires.
+static const Tool *selectToolForJob(Compilation &C, bool SaveTemps,
const ToolChain *TC, const JobAction *JA,
- const ActionList *&Inputs) {
+ const ActionList *&Inputs,
+ const CudaHostAction *&CollapsedCHA) {
const Tool *ToolForJob = nullptr;
+ CollapsedCHA = nullptr;
// See if we should look for a compiler with an integrated assembler. We match
// bottom up, so what we are actually looking for is an assembler job with a
@@ -1688,13 +1740,19 @@ static const Tool *SelectToolForJob(Compilation &C, bool SaveTemps,
// checking the backend tool, check if the tool for the CompileJob
// has an integrated assembler.
const ActionList *BackendInputs = &(*Inputs)[0]->getInputs();
- JobAction *CompileJA = cast<CompileJobAction>(*BackendInputs->begin());
+      // The compile job may be wrapped in a CudaHostAction; extract it if
+      // that's the case, and update CollapsedCHA if we combine phases.
+      CudaHostAction *CHA = dyn_cast<CudaHostAction>(*BackendInputs->begin());
+      JobAction *CompileJA =
+          cast<CompileJobAction>(CHA ? *CHA->begin() : *BackendInputs->begin());
+      assert(CompileJA && "Backend job is not preceded by compile job.");
const Tool *Compiler = TC->SelectTool(*CompileJA);
if (!Compiler)
return nullptr;
if (Compiler->hasIntegratedAssembler()) {
- Inputs = &(*BackendInputs)[0]->getInputs();
+ Inputs = &CompileJA->getInputs();
ToolForJob = Compiler;
+ CollapsedCHA = CHA;
}
}
@@ -1704,19 +1762,19 @@ static const Tool *SelectToolForJob(Compilation &C, bool SaveTemps,
if (isa<BackendJobAction>(JA)) {
// Check if the compiler supports emitting LLVM IR.
assert(Inputs->size() == 1);
- JobAction *CompileJA;
- // Extract real host action, if it's a CudaHostAction.
- if (CudaHostAction *CudaHA = dyn_cast<CudaHostAction>(*Inputs->begin()))
- CompileJA = cast<CompileJobAction>(*CudaHA->begin());
- else
- CompileJA = cast<CompileJobAction>(*Inputs->begin());
-
+    // The compile job may be wrapped in a CudaHostAction; extract it if
+    // that's the case, and update CollapsedCHA if we combine phases.
+    CudaHostAction *CHA = dyn_cast<CudaHostAction>(*Inputs->begin());
+    JobAction *CompileJA =
+        cast<CompileJobAction>(CHA ? *CHA->begin() : *Inputs->begin());
+    assert(CompileJA && "Backend job is not preceded by compile job.");
const Tool *Compiler = TC->SelectTool(*CompileJA);
if (!Compiler)
return nullptr;
if (!Compiler->canEmitIR() || !SaveTemps) {
- Inputs = &(*Inputs)[0]->getInputs();
+ Inputs = &CompileJA->getInputs();
ToolForJob = Compiler;
+ CollapsedCHA = CHA;
}
}
@@ -1749,7 +1807,7 @@ void Driver::BuildJobsForAction(Compilation &C, const Action *A,
InputInfo II;
// Append outputs of device jobs to the input list.
for (const Action *DA : CHA->getDeviceActions()) {
- BuildJobsForAction(C, DA, TC, "", AtTopLevel,
+ BuildJobsForAction(C, DA, TC, nullptr, AtTopLevel,
/*MultipleArchs*/ false, LinkingOutput, II);
CudaDeviceInputInfos.push_back(II);
}
@@ -1789,13 +1847,10 @@ void Driver::BuildJobsForAction(Compilation &C, const Action *A,
}
if (const CudaDeviceAction *CDA = dyn_cast<CudaDeviceAction>(A)) {
- // Figure out which NVPTX triple to use for device-side compilation based on
- // whether host is 64-bit.
- llvm::Triple DeviceTriple(C.getDefaultToolChain().getTriple().isArch64Bit()
- ? "nvptx64-nvidia-cuda"
- : "nvptx-nvidia-cuda");
- BuildJobsForAction(C, *CDA->begin(),
- &getToolChain(C.getArgs(), DeviceTriple),
+  // The initial processing of a CudaDeviceAction carries host parameters.
+  // Call BuildJobsForAction() again, now with the correct device parameters.
+ assert(CDA->getGpuArchName() && "No GPU name in device action.");
+ BuildJobsForAction(C, *CDA->begin(), C.getCudaDeviceToolChain(),
CDA->getGpuArchName(), CDA->isAtTopLevel(),
/*MultipleArchs*/ true, LinkingOutput, Result);
return;
@@ -1804,10 +1859,23 @@ void Driver::BuildJobsForAction(Compilation &C, const Action *A,
const ActionList *Inputs = &A->getInputs();
const JobAction *JA = cast<JobAction>(A);
- const Tool *T = SelectToolForJob(C, isSaveTempsEnabled(), TC, JA, Inputs);
+ const CudaHostAction *CollapsedCHA = nullptr;
+ const Tool *T =
+ selectToolForJob(C, isSaveTempsEnabled(), TC, JA, Inputs, CollapsedCHA);
if (!T)
return;
+  // If we've collapsed an action list that contained a CudaHostAction, we
+  // need to build jobs for the device-side inputs it may have held.
+ if (CollapsedCHA) {
+ InputInfo II;
+ for (const Action *DA : CollapsedCHA->getDeviceActions()) {
+ BuildJobsForAction(C, DA, TC, "", AtTopLevel,
+ /*MultipleArchs*/ false, LinkingOutput, II);
+ CudaDeviceInputInfos.push_back(II);
+ }
+ }
+
// Only use pipes when there is exactly one input.
InputInfoList InputInfos;
for (const Action *Input : *Inputs) {
@@ -2091,6 +2159,11 @@ void Driver::generatePrefixedToolNames(
// FIXME: Needs a better variable than DefaultTargetTriple
Names.emplace_back(DefaultTargetTriple + "-" + Tool);
Names.emplace_back(Tool);
+
+ // Allow the discovery of tools prefixed with LLVM's default target triple.
+ std::string LLVMDefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
+ if (LLVMDefaultTargetTriple != DefaultTargetTriple)
+ Names.emplace_back(LLVMDefaultTargetTriple + "-" + Tool);
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
@@ -2163,6 +2236,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ case llvm::Triple::WatchOS:
TC = new toolchains::DarwinClang(*this, Target, Args);
break;
case llvm::Triple::DragonFly:
@@ -2185,16 +2260,22 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
break;
case llvm::Triple::Linux:
if (Target.getArch() == llvm::Triple::hexagon)
- TC = new toolchains::Hexagon_TC(*this, Target, Args);
+ TC = new toolchains::HexagonToolChain(*this, Target, Args);
+ else if ((Target.getVendor() == llvm::Triple::MipsTechnologies) &&
+ !Target.hasEnvironment())
+ TC = new toolchains::MipsLLVMToolChain(*this, Target, Args);
else
TC = new toolchains::Linux(*this, Target, Args);
break;
case llvm::Triple::NaCl:
- TC = new toolchains::NaCl_TC(*this, Target, Args);
+ TC = new toolchains::NaClToolChain(*this, Target, Args);
break;
case llvm::Triple::Solaris:
TC = new toolchains::Solaris(*this, Target, Args);
break;
+ case llvm::Triple::AMDHSA:
+ TC = new toolchains::AMDGPUToolChain(*this, Target, Args);
+ break;
case llvm::Triple::Win32:
switch (Target.getEnvironment()) {
default:
@@ -2220,24 +2301,36 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::CUDA:
TC = new toolchains::CudaToolChain(*this, Target, Args);
break;
+ case llvm::Triple::PS4:
+ TC = new toolchains::PS4CPU(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
    // an OS of Linux, in which case it was already handled above.
- if (Target.getArchName() == "tce")
+ switch (Target.getArch()) {
+ case llvm::Triple::tce:
TC = new toolchains::TCEToolChain(*this, Target, Args);
- else if (Target.getArch() == llvm::Triple::hexagon)
- TC = new toolchains::Hexagon_TC(*this, Target, Args);
- else if (Target.getArch() == llvm::Triple::xcore)
- TC = new toolchains::XCore(*this, Target, Args);
- else if (Target.getArch() == llvm::Triple::shave)
- TC = new toolchains::SHAVEToolChain(*this, Target, Args);
- else if (Target.isOSBinFormatELF())
- TC = new toolchains::Generic_ELF(*this, Target, Args);
- else if (Target.isOSBinFormatMachO())
- TC = new toolchains::MachO(*this, Target, Args);
- else
- TC = new toolchains::Generic_GCC(*this, Target, Args);
- break;
+ break;
+ case llvm::Triple::hexagon:
+ TC = new toolchains::HexagonToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::xcore:
+ TC = new toolchains::XCoreToolChain(*this, Target, Args);
+ break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ TC = new toolchains::WebAssembly(*this, Target, Args);
+ break;
+ default:
+ if (Target.getVendor() == llvm::Triple::Myriad)
+ TC = new toolchains::MyriadToolChain(*this, Target, Args);
+ else if (Target.isOSBinFormatELF())
+ TC = new toolchains::Generic_ELF(*this, Target, Args);
+ else if (Target.isOSBinFormatMachO())
+ TC = new toolchains::MachO(*this, Target, Args);
+ else
+ TC = new toolchains::Generic_GCC(*this, Target, Args);
+ }
}
}
return *TC;
diff --git a/lib/Driver/DriverOptions.cpp b/lib/Driver/DriverOptions.cpp
index 6ff1cbafb3bc..8d5332b5cc24 100644
--- a/lib/Driver/DriverOptions.cpp
+++ b/lib/Driver/DriverOptions.cpp
@@ -34,7 +34,7 @@ namespace {
class DriverOptTable : public OptTable {
public:
DriverOptTable()
- : OptTable(InfoTable, llvm::array_lengthof(InfoTable)) {}
+ : OptTable(InfoTable) {}
};
}
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index 42bba56f5d41..22904e5398a0 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "InputInfo.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Job.h"
@@ -26,9 +27,14 @@ using llvm::StringRef;
using llvm::ArrayRef;
Command::Command(const Action &Source, const Tool &Creator,
- const char *Executable, const ArgStringList &Arguments)
+ const char *Executable, const ArgStringList &Arguments,
+ ArrayRef<InputInfo> Inputs)
: Source(Source), Creator(Creator), Executable(Executable),
- Arguments(Arguments), ResponseFile(nullptr) {}
+ Arguments(Arguments), ResponseFile(nullptr) {
+ for (const auto &II : Inputs)
+ if (II.isFilename())
+ InputFilenames.push_back(II.getFilename());
+}
static int skipArgs(const char *Flag, bool HaveCrashVFS) {
// These flags are all of the form -Flag <Arg> and are treated as two
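
Commands now remember which of their arguments name input files. A
hypothetical construction site in a Tool::ConstructJob() override threads
the InputInfo list through (call shape assumed from the constructor above):

    C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));

The Print() change further down uses the recorded names to substitute the
crash-reproducer file for the real input, replacing the old guesswork based
on -main-file-name.
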
@@ -42,6 +48,7 @@ static int skipArgs(const char *Flag, bool HaveCrashVFS) {
.Cases("-iwithprefixbefore", "-isystem", "-iquote", true)
.Cases("-resource-dir", "-serialize-diagnostic-file", true)
.Cases("-dwarf-debug-flags", "-ivfsoverlay", true)
+ .Cases("-header-include-file", "-diagnostic-log-file", true)
// Some include flags shouldn't be skipped if we have a crash VFS
.Case("-isysroot", !HaveCrashVFS)
.Default(false);
@@ -98,7 +105,9 @@ void Command::writeResponseFile(raw_ostream &OS) const {
return;
}
- // In regular response files, we send all arguments to the response file
+ // In regular response files, we send all arguments to the response file.
+ // Wrapping all arguments in double quotes ensures that both Unix tools and
+ // Windows tools understand the response file.
for (const char *Arg : Arguments) {
OS << '"';
@@ -155,13 +164,6 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
}
- StringRef MainFilename;
- // We'll need the argument to -main-file-name to find the input file name.
- if (CrashInfo)
- for (size_t I = 0, E = Args.size(); I + 1 < E; ++I)
- if (StringRef(Args[I]).equals("-main-file-name"))
- MainFilename = Args[I + 1];
-
bool HaveCrashVFS = CrashInfo && !CrashInfo->VFSPath.empty();
for (size_t i = 0, e = Args.size(); i < e; ++i) {
const char *const Arg = Args[i];
@@ -170,8 +172,11 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
if (int Skip = skipArgs(Arg, HaveCrashVFS)) {
i += Skip - 1;
continue;
- } else if (llvm::sys::path::filename(Arg) == MainFilename &&
- (i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
+ }
+ auto Found = std::find_if(InputFilenames.begin(), InputFilenames.end(),
+ [&Arg](StringRef IF) { return IF == Arg; });
+ if (Found != InputFilenames.end() &&
+ (i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
// Replace the input file name with the crashinfo's file name.
OS << ' ';
StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
@@ -254,8 +259,9 @@ int Command::Execute(const StringRef **Redirects, std::string *ErrMsg,
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
const char *Executable_,
const ArgStringList &Arguments_,
+ ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_)
- : Command(Source_, Creator_, Executable_, Arguments_),
+ : Command(Source_, Creator_, Executable_, Arguments_, Inputs),
Fallback(std::move(Fallback_)) {}
void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
diff --git a/lib/Driver/MSVCToolChain.cpp b/lib/Driver/MSVCToolChain.cpp
index c816b29dca23..b7e576e53e8f 100644
--- a/lib/Driver/MSVCToolChain.cpp
+++ b/lib/Driver/MSVCToolChain.cpp
@@ -205,27 +205,103 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
#endif // USE_WIN32
}
+// Convert LLVM's ArchType to the name of the corresponding Windows SDK
+// libraries subfolder.
+static StringRef getWindowsSDKArch(llvm::Triple::ArchType Arch) {
+ switch (Arch) {
+ case llvm::Triple::x86:
+ return "x86";
+ case llvm::Triple::x86_64:
+ return "x64";
+ case llvm::Triple::arm:
+ return "arm";
+ default:
+ return "";
+ }
+}
+
+// Find the most recent version of Universal CRT or Windows 10 SDK.
+// vcvarsqueryregistry.bat from Visual Studio 2015 sorts entries in the include
+// directory by name and uses the last one in the list.
+// So we compare entry names lexicographically to find the greatest one.
+static bool getWindows10SDKVersion(const std::string &SDKPath,
+ std::string &SDKVersion) {
+ SDKVersion.clear();
+
+ std::error_code EC;
+ llvm::SmallString<128> IncludePath(SDKPath);
+ llvm::sys::path::append(IncludePath, "Include");
+ for (llvm::sys::fs::directory_iterator DirIt(IncludePath, EC), DirEnd;
+ DirIt != DirEnd && !EC; DirIt.increment(EC)) {
+ if (!llvm::sys::fs::is_directory(DirIt->path()))
+ continue;
+ StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
+ // If WDK is installed, there could be subfolders like "wdf" in the
+ // "Include" directory.
+    // Allow only directories whose names start with "10.".
+ if (!CandidateName.startswith("10."))
+ continue;
+ if (CandidateName > SDKVersion)
+ SDKVersion = CandidateName;
+ }
+
+ return !SDKVersion.empty();
+}
+
/// \brief Get Windows SDK installation directory.
-bool MSVCToolChain::getWindowsSDKDir(std::string &path, int &major,
- int &minor) const {
- std::string sdkVersion;
+bool MSVCToolChain::getWindowsSDKDir(std::string &Path, int &Major,
+ std::string &WindowsSDKIncludeVersion,
+ std::string &WindowsSDKLibVersion) const {
+ std::string RegistrySDKVersion;
// Try the Windows registry.
- bool hasSDKDir = getSystemRegistryString(
- "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder", path, &sdkVersion);
- if (!sdkVersion.empty())
- std::sscanf(sdkVersion.c_str(), "v%d.%d", &major, &minor);
- return hasSDKDir && !path.empty();
+ if (!getSystemRegistryString(
+ "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
+ "InstallationFolder", Path, &RegistrySDKVersion))
+ return false;
+ if (Path.empty() || RegistrySDKVersion.empty())
+ return false;
+
+ WindowsSDKIncludeVersion.clear();
+ WindowsSDKLibVersion.clear();
+ Major = 0;
+ std::sscanf(RegistrySDKVersion.c_str(), "v%d.", &Major);
+ if (Major <= 7)
+ return true;
+ if (Major == 8) {
+    // Windows SDK 8.x installs libraries in folders whose names depend on the
+ // version of the OS you're targeting. By default choose the newest, which
+ // usually corresponds to the version of the OS you've installed the SDK on.
+ const char *Tests[] = {"winv6.3", "win8", "win7"};
+ for (const char *Test : Tests) {
+ llvm::SmallString<128> TestPath(Path);
+ llvm::sys::path::append(TestPath, "Lib", Test);
+ if (llvm::sys::fs::exists(TestPath.c_str())) {
+ WindowsSDKLibVersion = Test;
+ break;
+ }
+ }
+ return !WindowsSDKLibVersion.empty();
+ }
+ if (Major == 10) {
+ if (!getWindows10SDKVersion(Path, WindowsSDKIncludeVersion))
+ return false;
+ WindowsSDKLibVersion = WindowsSDKIncludeVersion;
+ return true;
+ }
+ // Unsupported SDK version
+ return false;
}
// Gets the library path required to link against the Windows SDK.
bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
std::string sdkPath;
int sdkMajor = 0;
- int sdkMinor = 0;
+ std::string windowsSDKIncludeVersion;
+ std::string windowsSDKLibVersion;
path.clear();
- if (!getWindowsSDKDir(sdkPath, sdkMajor, sdkMinor))
+ if (!getWindowsSDKDir(sdkPath, sdkMajor, windowsSDKIncludeVersion,
+ windowsSDKLibVersion))
return false;
llvm::SmallString<128> libPath(sdkPath);
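
getWindows10SDKVersion() above compares directory names as strings, to
match vcvarsqueryregistry.bat. A minimal illustration of what lexicographic
ordering means here:

    // std::string / StringRef comparison is lexicographic, not numeric:
    assert(std::string("10.0.9926.0") > std::string("10.0.10240.0"));
    // '9' > '1' at the first differing character, so a 9926 directory wins
    // over 10240 if both are installed -- the same answer the batch file
    // produces, which is the behavior this code intentionally mirrors.
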
@@ -245,44 +321,57 @@ bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
return false;
}
} else {
- // Windows SDK 8.x installs libraries in a folder whose names depend on the
- // version of the OS you're targeting. By default choose the newest, which
- // usually corresponds to the version of the OS you've installed the SDK on.
- const char *tests[] = {"winv6.3", "win8", "win7"};
- bool found = false;
- for (const char *test : tests) {
- llvm::SmallString<128> testPath(libPath);
- llvm::sys::path::append(testPath, test);
- if (llvm::sys::fs::exists(testPath.c_str())) {
- libPath = testPath;
- found = true;
- break;
- }
- }
-
- if (!found)
- return false;
-
- llvm::sys::path::append(libPath, "um");
- switch (getArch()) {
- case llvm::Triple::x86:
- llvm::sys::path::append(libPath, "x86");
- break;
- case llvm::Triple::x86_64:
- llvm::sys::path::append(libPath, "x64");
- break;
- case llvm::Triple::arm:
- llvm::sys::path::append(libPath, "arm");
- break;
- default:
+ const StringRef archName = getWindowsSDKArch(getArch());
+ if (archName.empty())
return false;
- }
+ llvm::sys::path::append(libPath, windowsSDKLibVersion, "um", archName);
}
path = libPath.str();
return true;
}
+// Check whether the Include path of the given Visual Studio installation
+// contains specific header files (such as stdlib.h). If not, they are
+// probably shipped with the Universal CRT.
+bool clang::driver::toolchains::MSVCToolChain::useUniversalCRT(
+ std::string &VisualStudioDir) const {
+ llvm::SmallString<128> TestPath(VisualStudioDir);
+ llvm::sys::path::append(TestPath, "VC\\include\\stdlib.h");
+
+ return !llvm::sys::fs::exists(TestPath);
+}
+
+bool MSVCToolChain::getUniversalCRTSdkDir(std::string &Path,
+ std::string &UCRTVersion) const {
+ // vcvarsqueryregistry.bat for Visual Studio 2015 queries the registry
+ // for the specific key "KitsRoot10". So do we.
+ if (!getSystemRegistryString(
+ "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", "KitsRoot10",
+ Path, nullptr))
+ return false;
+
+ return getWindows10SDKVersion(Path, UCRTVersion);
+}
+
+bool MSVCToolChain::getUniversalCRTLibraryPath(std::string &Path) const {
+ std::string UniversalCRTSdkPath;
+ std::string UCRTVersion;
+
+ Path.clear();
+ if (!getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion))
+ return false;
+
+ StringRef ArchName = getWindowsSDKArch(getArch());
+ if (ArchName.empty())
+ return false;
+
+ llvm::SmallString<128> LibPath(UniversalCRTSdkPath);
+ llvm::sys::path::append(LibPath, "Lib", UCRTVersion, "ucrt", ArchName);
+
+ Path = LibPath.str();
+ return true;
+}
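
For a hypothetical install, the pieces assembled above yield a library path such as:

   KitsRoot10        = C:\Program Files (x86)\Windows Kits\10\
   UCRTVersion       = 10.0.10240.0
   getWindowsSDKArch = x64
   =>  ...\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64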
+
// Get the location to use for Visual Studio binaries. The location priority
// is: %VCINSTALLDIR% > %PATH% > newest copy of Visual Studio installed on
// system (as reported by the registry).
@@ -419,12 +508,12 @@ bool MSVCToolChain::getVisualStudioInstallDir(std::string &path) const {
return false;
}
-void MSVCToolChain::AddSystemIncludeWithSubfolder(const ArgList &DriverArgs,
- ArgStringList &CC1Args,
- const std::string &folder,
- const char *subfolder) const {
+void MSVCToolChain::AddSystemIncludeWithSubfolder(
+ const ArgList &DriverArgs, ArgStringList &CC1Args,
+ const std::string &folder, const Twine &subfolder1, const Twine &subfolder2,
+ const Twine &subfolder3) const {
llvm::SmallString<128> path(folder);
- llvm::sys::path::append(path, subfolder);
+ llvm::sys::path::append(path, subfolder1, subfolder2, subfolder3);
addSystemInclude(DriverArgs, CC1Args, path);
}
@@ -434,9 +523,8 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "include");
- addSystemInclude(DriverArgs, CC1Args, P);
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, getDriver().ResourceDir,
+ "include");
}
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
@@ -460,16 +548,33 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (getVisualStudioInstallDir(VSDir)) {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, VSDir, "VC\\include");
+ if (useUniversalCRT(VSDir)) {
+ std::string UniversalCRTSdkPath;
+ std::string UCRTVersion;
+ if (getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion)) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
+ "Include", UCRTVersion, "ucrt");
+ }
+ }
+
std::string WindowsSDKDir;
- int major, minor;
- if (getWindowsSDKDir(WindowsSDKDir, major, minor)) {
+ int major;
+ std::string windowsSDKIncludeVersion;
+ std::string windowsSDKLibVersion;
+ if (getWindowsSDKDir(WindowsSDKDir, major, windowsSDKIncludeVersion,
+ windowsSDKLibVersion)) {
if (major >= 8) {
+ // Note: windowsSDKIncludeVersion is empty for SDKs prior to v10.
+ // llvm::sys::path::append handles an empty component gracefully.
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include\\shared");
+ "include", windowsSDKIncludeVersion,
+ "shared");
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include\\um");
+ "include", windowsSDKIncludeVersion,
+ "um");
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include\\winrt");
+ "include", windowsSDKIncludeVersion,
+ "winrt");
} else {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"include");
@@ -528,3 +633,112 @@ SanitizerMask MSVCToolChain::getSupportedSanitizers() const {
Res |= SanitizerKind::Address;
return Res;
}
+
+llvm::opt::DerivedArgList *
+MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
+ // /Oy and /Oy- have an effect only on x86-32.
+ bool SupportsForcingFramePointer = getArch() == llvm::Triple::x86;
+
+ // The -O[12xd] flag actually expands to several flags. We must desugar the
+ // flags so that the embedded options can be negated. For example, the '-O2'
+ // flag enables '-Oy'. Expanding '-O2' into its constituent flags allows us to
+ // correctly handle '-O2 -Oy-', where the trailing '-Oy-' disables a single
+ // aspect of '-O2'.
+ //
+ // Note that this expansion logic applies only to the *last* of '[12xd]'.
+
+ // First step is to search for the character we'd like to expand.
+ const char *ExpandChar = nullptr;
+ for (Arg *A : Args) {
+ if (!A->getOption().matches(options::OPT__SLASH_O))
+ continue;
+ StringRef OptStr = A->getValue();
+ for (size_t I = 0, E = OptStr.size(); I != E; ++I) {
+ const char &OptChar = *(OptStr.data() + I);
+ if (OptChar == '1' || OptChar == '2' || OptChar == 'x' || OptChar == 'd')
+ ExpandChar = OptStr.data() + I;
+ }
+ }
+
+ // The -O flag actually takes an amalgam of other options. For example,
+ // '/Ogyb2' is equivalent to '/Og' '/Oy' '/Ob2'.
+ for (Arg *A : Args) {
+ if (!A->getOption().matches(options::OPT__SLASH_O)) {
+ DAL->append(A);
+ continue;
+ }
+
+ StringRef OptStr = A->getValue();
+ for (size_t I = 0, E = OptStr.size(); I != E; ++I) {
+ const char &OptChar = *(OptStr.data() + I);
+ switch (OptChar) {
+ default:
+ break;
+ case '1':
+ case '2':
+ case 'x':
+ case 'd':
+ if (&OptChar == ExpandChar) {
+ if (OptChar == 'd') {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_O0));
+ } else {
+ if (OptChar == '1') {
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
+ } else if (OptChar == '2' || OptChar == 'x') {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fbuiltin));
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ }
+ if (SupportsForcingFramePointer)
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fomit_frame_pointer));
+ if (OptChar == '1' || OptChar == '2')
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_ffunction_sections));
+ }
+ }
+ break;
+ case 'b':
+ if (I + 1 != E && isdigit(OptStr[I + 1]))
+ ++I;
+ break;
+ case 'g':
+ break;
+ case 'i':
+ if (I + 1 != E && OptStr[I + 1] == '-') {
+ ++I;
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fno_builtin));
+ } else {
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fbuiltin));
+ }
+ break;
+ case 's':
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
+ break;
+ case 't':
+ DAL->AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ break;
+ case 'y': {
+ bool OmitFramePointer = true;
+ if (I + 1 != E && OptStr[I + 1] == '-') {
+ OmitFramePointer = false;
+ ++I;
+ }
+ if (SupportsForcingFramePointer) {
+ if (OmitFramePointer)
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fomit_frame_pointer));
+ else
+ DAL->AddFlagArg(
+ A, Opts.getOption(options::OPT_fno_omit_frame_pointer));
+ }
+ break;
+ }
+ }
+ }
+ }
+ return DAL;
+}
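
As a worked example of the expansion above (assuming x86, where forcing the frame pointer is supported): for 'clang-cl /O2 /Oy-' the '2' becomes ExpandChar, and the loop emits, in order:

   /O2  -> -fbuiltin -O2 -fomit-frame-pointer -ffunction-sections
   /Oy- -> -fno-omit-frame-pointer

The trailing -fno-omit-frame-pointer wins, which is precisely why '-O2' is desugared rather than passed through whole.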
diff --git a/lib/Driver/MinGWToolChain.cpp b/lib/Driver/MinGWToolChain.cpp
index 938440b08f60..c5287bb41575 100644
--- a/lib/Driver/MinGWToolChain.cpp
+++ b/lib/Driver/MinGWToolChain.cpp
@@ -66,23 +66,17 @@ MinGW::MinGW(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
-// In Windows there aren't any standard install locations, we search
-// for gcc on the PATH. In Linux the base is always /usr.
-#ifdef LLVM_ON_WIN32
+ // On Windows, if there is no sysroot, we search for gcc on the PATH.
if (getDriver().SysRoot.size())
- Base = getDriver().SysRoot;
+ Base = getDriver().SysRoot;
+#ifdef LLVM_ON_WIN32
else if (llvm::ErrorOr<std::string> GPPName =
llvm::sys::findProgramByName("gcc"))
Base = llvm::sys::path::parent_path(
llvm::sys::path::parent_path(GPPName.get()));
- else
- Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
-#else
- if (getDriver().SysRoot.size())
- Base = getDriver().SysRoot;
- else
- Base = "/usr";
#endif
+ if (!Base.size())
+ Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
Base += llvm::sys::path::get_separator();
findGccLibDir();
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
index 8acda6794d72..34ad6a7efb24 100644
--- a/lib/Driver/Multilib.cpp
+++ b/lib/Driver/Multilib.cpp
@@ -260,16 +260,15 @@ bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
return false;
}, Multilibs);
- if (Filtered.size() == 0) {
+ if (Filtered.size() == 0)
return false;
- } else if (Filtered.size() == 1) {
+ if (Filtered.size() == 1) {
M = Filtered[0];
return true;
}
// TODO: pick the "best" multilib when more than one is suitable
assert(false);
-
return false;
}
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index c3ad8ef9c1ef..2fded1c80da9 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -29,11 +29,11 @@ enum : SanitizerMask {
NeedsUbsanRt = Undefined | Integer | CFI,
NeedsUbsanCxxRt = Vptr | CFI,
NotAllowedWithTrap = Vptr,
- RequiresPIE = Memory | DataFlow,
+ RequiresPIE = DataFlow,
NeedsUnwindTables = Address | Thread | Memory | DataFlow,
SupportsCoverage = Address | Memory | Leak | Undefined | Integer | DataFlow,
RecoverableByDefault = Undefined | Integer,
- Unrecoverable = Address | Unreachable | Return,
+ Unrecoverable = Unreachable | Return,
LegacyFsanitizeRecoverMask = Undefined | Integer,
NeedsLTO = CFI,
TrappingSupported =
@@ -90,6 +90,8 @@ static bool getDefaultBlacklist(const Driver &D, SanitizerMask Kinds,
BlacklistFile = "tsan_blacklist.txt";
else if (Kinds & DataFlow)
BlacklistFile = "dfsan_abilist.txt";
+ else if (Kinds & CFI)
+ BlacklistFile = "cfi_blacklist.txt";
if (BlacklistFile) {
clang::SmallString<64> Path(D.ResourceDir);
@@ -158,11 +160,20 @@ bool SanitizerArgs::needsUbsanRt() const {
return (Sanitizers.Mask & NeedsUbsanRt & ~TrapSanitizers.Mask) &&
!Sanitizers.has(Address) &&
!Sanitizers.has(Memory) &&
- !Sanitizers.has(Thread);
+ !Sanitizers.has(Thread) &&
+ !CfiCrossDso;
+}
+
+bool SanitizerArgs::needsCfiRt() const {
+ return !(Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso;
+}
+
+bool SanitizerArgs::needsCfiDiagRt() const {
+ return (Sanitizers.Mask & CFI & ~TrapSanitizers.Mask) && CfiCrossDso;
}
bool SanitizerArgs::requiresPIE() const {
- return AsanZeroBaseShadow || (Sanitizers.Mask & RequiresPIE);
+ return NeedPIE || (Sanitizers.Mask & RequiresPIE);
}
bool SanitizerArgs::needsUnwindTables() const {
@@ -174,13 +185,15 @@ void SanitizerArgs::clear() {
RecoverableSanitizers.clear();
TrapSanitizers.clear();
BlacklistFiles.clear();
+ ExtraDeps.clear();
CoverageFeatures = 0;
MsanTrackOrigins = 0;
MsanUseAfterDtor = false;
+ NeedPIE = false;
AsanFieldPadding = 0;
- AsanZeroBaseShadow = false;
AsanSharedRuntime = false;
LinkCXXRuntimes = false;
+ CfiCrossDso = false;
}
SanitizerArgs::SanitizerArgs(const ToolChain &TC,
@@ -280,7 +293,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
// Check that LTO is enabled if we need it.
- if ((Kinds & NeedsLTO) && !D.IsUsingLTO(Args)) {
+ if ((Kinds & NeedsLTO) && !D.isUsingLTO()) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
@@ -381,13 +394,16 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) {
Arg->claim();
std::string BLPath = Arg->getValue();
- if (llvm::sys::fs::exists(BLPath))
+ if (llvm::sys::fs::exists(BLPath)) {
BlacklistFiles.push_back(BLPath);
- else
+ ExtraDeps.push_back(BLPath);
+ } else
D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
+
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) {
Arg->claim();
BlacklistFiles.clear();
+ ExtraDeps.clear();
}
}
// Validate blacklists format.
@@ -418,8 +434,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
}
- MsanUseAfterDtor =
- Args.hasArg(options::OPT_fsanitize_memory_use_after_dtor);
+ MsanUseAfterDtor =
+ Args.hasArg(options::OPT_fsanitize_memory_use_after_dtor);
+ NeedPIE |= !(TC.getTriple().isOSLinux() &&
+ TC.getTriple().getArch() == llvm::Triple::x86_64);
+ }
+
+ if (AllAddedKinds & CFI) {
+ CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
+ options::OPT_fno_sanitize_cfi_cross_dso, false);
+ // Without PIE, an external function address may resolve to a PLT record,
+ // which cannot be verified by the target module.
+ NeedPIE |= CfiCrossDso;
}
// Parse -f(no-)?sanitize-coverage flags if coverage is supported by the
@@ -489,10 +515,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (AllAddedKinds & Address) {
AsanSharedRuntime =
- Args.hasArg(options::OPT_shared_libasan) ||
- (TC.getTriple().getEnvironment() == llvm::Triple::Android);
- AsanZeroBaseShadow =
- (TC.getTriple().getEnvironment() == llvm::Triple::Android);
+ Args.hasArg(options::OPT_shared_libasan) || TC.getTriple().isAndroid();
+ NeedPIE |= TC.getTriple().isAndroid();
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
StringRef S = A->getValue();
@@ -561,6 +585,11 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
BlacklistOpt += BLPath;
CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
}
+ for (const auto &Dep : ExtraDeps) {
+ SmallString<64> ExtraDepOpt("-fdepfile-entry=");
+ ExtraDepOpt += Dep;
+ CmdArgs.push_back(Args.MakeArgString(ExtraDepOpt));
+ }
if (MsanTrackOrigins)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
@@ -569,6 +598,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (MsanUseAfterDtor)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-use-after-dtor"));
+ if (CfiCrossDso)
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-cfi-cross-dso"));
+
if (AsanFieldPadding)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-address-field-padding=" +
llvm::utostr(AsanFieldPadding)));
@@ -599,11 +631,10 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// Instruct the code generator to embed linker directives in the object file
// that cause the required runtime libraries to be linked.
CmdArgs.push_back(Args.MakeArgString(
- "--dependent-lib=" + tools::getCompilerRT(TC, "ubsan_standalone")));
+ "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone")));
if (types::isCXX(InputType))
- CmdArgs.push_back(
- Args.MakeArgString("--dependent-lib=" +
- tools::getCompilerRT(TC, "ubsan_standalone_cxx")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone_cxx")));
}
}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index d40bb951e50a..cbbd485a9b77 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -22,8 +22,13 @@
#include "llvm/Option/Option.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetParser.h"
+
using namespace clang::driver;
+using namespace clang::driver::tools;
using namespace clang;
+using namespace llvm;
using namespace llvm::opt;
static llvm::opt::Arg *GetRTTIArgument(const ArgList &Args) {
@@ -72,9 +77,7 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
ToolChain::~ToolChain() {
}
-const Driver &ToolChain::getDriver() const {
- return D;
-}
+vfs::FileSystem &ToolChain::getVFS() const { return getDriver().getVFS(); }
bool ToolChain::useIntegratedAs() const {
return Args.hasFlag(options::OPT_fintegrated_as,
@@ -88,6 +91,99 @@ const SanitizerArgs& ToolChain::getSanitizerArgs() const {
return *SanitizerArguments.get();
}
+namespace {
+struct DriverSuffix {
+ const char *Suffix;
+ const char *ModeFlag;
+};
+
+const DriverSuffix *FindDriverSuffix(StringRef ProgName) {
+ // A list of known driver suffixes. Suffixes are compared against the
+ // program name in order. If there is a match, the frontend type is updated as
+ // necessary by applying the ModeFlag.
+ static const DriverSuffix DriverSuffixes[] = {
+ {"clang", nullptr},
+ {"clang++", "--driver-mode=g++"},
+ {"clang-c++", "--driver-mode=g++"},
+ {"clang-cc", nullptr},
+ {"clang-cpp", "--driver-mode=cpp"},
+ {"clang-g++", "--driver-mode=g++"},
+ {"clang-gcc", nullptr},
+ {"clang-cl", "--driver-mode=cl"},
+ {"cc", nullptr},
+ {"cpp", "--driver-mode=cpp"},
+ {"cl", "--driver-mode=cl"},
+ {"++", "--driver-mode=g++"},
+ };
+
+ for (size_t i = 0; i < llvm::array_lengthof(DriverSuffixes); ++i)
+ if (ProgName.endswith(DriverSuffixes[i].Suffix))
+ return &DriverSuffixes[i];
+ return nullptr;
+}
+
+/// Normalize the program name from argv[0] by stripping the file extension if
+/// present and lower-casing the string on Windows.
+std::string normalizeProgramName(llvm::StringRef Argv0) {
+ std::string ProgName = llvm::sys::path::stem(Argv0);
+#ifdef LLVM_ON_WIN32
+ // Transform to lowercase for case insensitive file systems.
+ std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
+#endif
+ return ProgName;
+}
+
+const DriverSuffix *parseDriverSuffix(StringRef ProgName) {
+ // Try to infer frontend type and default target from the program name by
+ // comparing it against DriverSuffixes in order.
+
+ // If there is a match, the function tries to identify a target as a prefix.
+ // E.g. "x86_64-linux-clang" is interpreted as suffix "clang" with target
+ // prefix "x86_64-linux". If such a target prefix is found, it may be
+ // added via -target as an implicit first argument.
+ const DriverSuffix *DS = FindDriverSuffix(ProgName);
+
+ if (!DS) {
+ // Try again after stripping any trailing version number:
+ // clang++3.5 -> clang++
+ ProgName = ProgName.rtrim("0123456789.");
+ DS = FindDriverSuffix(ProgName);
+ }
+
+ if (!DS) {
+ // Try again after stripping trailing -component.
+ // clang++-tot -> clang++
+ ProgName = ProgName.slice(0, ProgName.rfind('-'));
+ DS = FindDriverSuffix(ProgName);
+ }
+ return DS;
+}
+} // anonymous namespace
+
+std::pair<std::string, std::string>
+ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
+ std::string ProgName = normalizeProgramName(PN);
+ const DriverSuffix *DS = parseDriverSuffix(ProgName);
+ if (!DS)
+ return std::make_pair("", "");
+ std::string ModeFlag = DS->ModeFlag == nullptr ? "" : DS->ModeFlag;
+
+ std::string::size_type LastComponent =
+ ProgName.rfind('-', ProgName.size() - strlen(DS->Suffix));
+ if (LastComponent == std::string::npos)
+ return std::make_pair("", ModeFlag);
+
+ // Infer target from the prefix.
+ StringRef Prefix(ProgName);
+ Prefix = Prefix.slice(0, LastComponent);
+ std::string IgnoredError;
+ std::string Target;
+ if (llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError)) {
+ Target = Prefix;
+ }
+ return std::make_pair(Target, ModeFlag);
+}
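
A worked trace of the parsing above for a hypothetical argv[0] of "x86_64-linux-gnu-clang++":

   normalizeProgramName -> "x86_64-linux-gnu-clang++" (stem; lowercased on Windows)
   FindDriverSuffix     -> {"clang++", "--driver-mode=g++"} (endswith match)
   rfind('-', size - strlen("clang++")) -> prefix "x86_64-linux-gnu"
   lookupTarget("x86_64-linux-gnu") succeeds, so the result is
   ("x86_64-linux-gnu", "--driver-mode=g++")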
+
StringRef ToolChain::getDefaultUniversalArchName() const {
// In universal driver terms, the arch name accepted by -arch isn't exactly
// the same as the ones that appear in the triple. Roughly speaking, this is
@@ -171,9 +267,64 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
llvm_unreachable("Invalid tool kind.");
}
+static StringRef getArchNameForCompilerRTLib(const ToolChain &TC,
+ const ArgList &Args) {
+ const llvm::Triple &Triple = TC.getTriple();
+ bool IsWindows = Triple.isOSWindows();
+
+ if (Triple.isWindowsMSVCEnvironment() && TC.getArch() == llvm::Triple::x86)
+ return "i386";
+
+ if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
+ return (arm::getARMFloatABI(TC, Args) == arm::FloatABI::Hard && !IsWindows)
+ ? "armhf"
+ : "arm";
+
+ return TC.getArchName();
+}
+
+std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
+ bool Shared) const {
+ const llvm::Triple &TT = getTriple();
+ const char *Env = TT.isAndroid() ? "-android" : "";
+ bool IsITANMSVCWindows =
+ TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
+
+ StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
+ const char *Prefix = IsITANMSVCWindows ? "" : "lib";
+ const char *Suffix = Shared ? (Triple.isOSWindows() ? ".dll" : ".so")
+ : (IsITANMSVCWindows ? ".lib" : ".a");
+
+ SmallString<128> Path(getDriver().ResourceDir);
+ StringRef OSLibName = Triple.isOSFreeBSD() ? "freebsd" : getOS();
+ llvm::sys::path::append(Path, "lib", OSLibName);
+ llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
+ Arch + Env + Suffix);
+ return Path.str();
+}
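
For illustration, the pieces above combine into names like the following (hypothetical installs; the directory depends on the local ResourceDir):

   Linux x86_64, Component "asan", static:
     <ResourceDir>/lib/linux/libclang_rt.asan-x86_64.a
   Windows MSVC x86, Component "profile", static:
     <ResourceDir>/lib/windows/clang_rt.profile-i386.lib
   Android ARM, Component "asan", shared:
     <ResourceDir>/lib/linux/libclang_rt.asan-arm-android.so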
+
+const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ bool Shared) const {
+ return Args.MakeArgString(getCompilerRT(Args, Component, Shared));
+}
+
+bool ToolChain::needsProfileRT(const ArgList &Args) {
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fprofile_generate_EQ) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage))
+ return true;
+
+ return false;
+}
+
Tool *ToolChain::SelectTool(const JobAction &JA) const {
- if (getDriver().ShouldUseClangCompiler(JA))
- return getClang();
+ if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
Action::ActionClass AC = JA.getKind();
if (AC == Action::AssembleJobClass && useIntegratedAs())
return getClangAs();
@@ -182,7 +333,6 @@ Tool *ToolChain::SelectTool(const JobAction &JA) const {
std::string ToolChain::GetFilePath(const char *Name) const {
return D.GetFilePath(Name, *this);
-
}
std::string ToolChain::GetProgramPath(const char *Name) const {
@@ -209,10 +359,9 @@ std::string ToolChain::GetLinkerPath() const {
return "";
}
- return GetProgramPath("ld");
+ return GetProgramPath(DefaultLinker);
}
-
types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
return types::lookupTypeForExtension(Ext);
}
@@ -244,11 +393,13 @@ ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
bool ToolChain::isThreadModelSupported(const StringRef Model) const {
if (Model == "single") {
- // FIXME: 'single' is only supported on ARM so far.
+ // FIXME: 'single' is only supported on ARM and WebAssembly so far.
return Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::armeb ||
Triple.getArch() == llvm::Triple::thumb ||
- Triple.getArch() == llvm::Triple::thumbeb;
+ Triple.getArch() == llvm::Triple::thumbeb ||
+ Triple.getArch() == llvm::Triple::wasm32 ||
+ Triple.getArch() == llvm::Triple::wasm64;
} else if (Model == "posix")
return true;
@@ -310,15 +461,15 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
MCPU = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
MArch = A->getValue();
- std::string CPU = Triple.isOSBinFormatMachO()
- ? tools::arm::getARMCPUForMArch(MArch, Triple)
- : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
- StringRef Suffix =
- tools::arm::getLLVMArchSuffixForARM(CPU,
- tools::arm::getARMArch(MArch, Triple));
- bool ThumbDefault = Suffix.startswith("v6m") || Suffix.startswith("v7m") ||
- Suffix.startswith("v7em") ||
- (Suffix.startswith("v7") && getTriple().isOSBinFormatMachO());
+ std::string CPU =
+ Triple.isOSBinFormatMachO()
+ ? tools::arm::getARMCPUForMArch(MArch, Triple).str()
+ : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
+ StringRef Suffix =
+ tools::arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
+ bool IsMProfile = ARM::parseArchProfile(Suffix) == ARM::PK_M;
+ bool ThumbDefault = IsMProfile || (ARM::parseArchVersion(Suffix) == 7 &&
+ getTriple().isOSBinFormatMachO());
// FIXME: this is invalid for WindowsCE
if (getTriple().isOSWindows())
ThumbDefault = true;
@@ -328,10 +479,9 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
else
ArchName = "arm";
- // Assembly files should start in ARM mode.
- if (InputType != types::TY_PP_Asm &&
- Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, ThumbDefault))
- {
+ // Assembly files should start in ARM mode, unless the arch is M-profile.
+ if ((InputType != types::TY_PP_Asm && Args.hasFlag(options::OPT_mthumb,
+ options::OPT_mno_thumb, ThumbDefault)) || IsMProfile) {
if (IsBigEndian)
ArchName = "thumbeb";
else
@@ -344,7 +494,7 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
}
-std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const {
return ComputeLLVMTriple(Args, InputType);
}
@@ -360,9 +510,16 @@ void ToolChain::addClangTargetOptions(const ArgList &DriverArgs,
void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
+void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (!needsProfileRT(Args)) return;
+
+ CmdArgs.push_back(getCompilerRTArgString(Args, "profile"));
+ return;
+}
+
ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
- const ArgList &Args) const
-{
+ const ArgList &Args) const {
if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
StringRef Value = A->getValue();
if (Value == "compiler-rt")
@@ -424,10 +581,9 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
/*static*/ void ToolChain::addSystemIncludes(const ArgList &DriverArgs,
ArgStringList &CC1Args,
ArrayRef<StringRef> Paths) {
- for (ArrayRef<StringRef>::iterator I = Paths.begin(), E = Paths.end();
- I != E; ++I) {
+ for (StringRef Path : Paths) {
CC1Args.push_back("-internal-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(*I));
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
}
}
@@ -460,6 +616,13 @@ void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
}
}
+void ToolChain::AddFilePathLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ for (const auto &LibPath : getFilePaths())
+ if(LibPath.length() > 0)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+}
+
void ToolChain::AddCCKextLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CmdArgs.push_back("-lcc_kext");
@@ -491,9 +654,15 @@ bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
SanitizerMask ToolChain::getSupportedSanitizers() const {
// Return sanitizers which don't require runtime support and are not
- // platform or architecture-dependent.
+ // platform dependent.
using namespace SanitizerKind;
- return (Undefined & ~Vptr & ~Function) | CFI | CFICastStrict |
- UnsignedIntegerOverflow | LocalBounds;
+ SanitizerMask Res = (Undefined & ~Vptr & ~Function) | (CFI & ~CFIICall) |
+ CFICastStrict | UnsignedIntegerOverflow | LocalBounds;
+ if (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64)
+ Res |= CFIICall;
+ return Res;
}
+void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {}
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index 15e36a1e6ce0..b02430e01763 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -1,4 +1,4 @@
-//===--- ToolChains.cpp - ToolChain Implementations -----------------------===//
+//===--- ToolChains.cpp - ToolChain Implementations -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,6 +10,7 @@
#include "ToolChains.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
+#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -24,6 +25,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -65,6 +67,8 @@ bool MachO::HasNativeLLVMSupport() const { return true; }
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
+ if (isTargetWatchOSBased())
+ return ObjCRuntime(ObjCRuntime::WatchOS, TargetVersion);
if (isTargetIOSBased())
return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
if (isNonFragile)
@@ -74,7 +78,9 @@ ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
bool Darwin::hasBlocksRuntime() const {
- if (isTargetIOSBased())
+ if (isTargetWatchOSBased())
+ return true;
+ else if (isTargetIOSBased())
return !isIPhoneOSVersionLT(3, 2);
else {
assert(isTargetMacOS() && "unexpected darwin target");
@@ -104,10 +110,10 @@ static const char *ArmMachOArchName(StringRef Arch) {
}
static const char *ArmMachOArchNameCPU(StringRef CPU) {
- unsigned ArchKind = llvm::ARMTargetParser::parseCPUArch(CPU);
+ unsigned ArchKind = llvm::ARM::parseCPUArch(CPU);
if (ArchKind == llvm::ARM::AK_INVALID)
return nullptr;
- StringRef Arch = llvm::ARMTargetParser::getArchName(ArchKind);
+ StringRef Arch = llvm::ARM::getArchName(ArchKind);
// FIXME: Make sure this MachO triple mangling is really necessary.
// ARMv5* normalises to ARMv5.
@@ -142,7 +148,7 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const {
return "arm64";
case llvm::Triple::thumb:
- case llvm::Triple::arm: {
+ case llvm::Triple::arm:
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
if (const char *Arch = ArmMachOArchName(A->getValue()))
return Arch;
@@ -153,7 +159,6 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const {
return "arm";
}
- }
}
Darwin::~Darwin() {}
@@ -177,7 +182,14 @@ std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
return Triple.getTriple();
SmallString<16> Str;
- Str += isTargetIOSBased() ? "ios" : "macosx";
+ if (isTargetWatchOSBased())
+ Str += "watchos";
+ else if (isTargetTvOSBased())
+ Str += "tvos";
+ else if (isTargetIOSBased())
+ Str += "ios";
+ else
+ Str += "macosx";
Str += getTargetVersion().getAsString();
Triple.setOSName(Str);
@@ -216,16 +228,17 @@ DarwinClang::DarwinClang(const Driver &D, const llvm::Triple &Triple,
: Darwin(D, Triple, Args) {}
void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
- // For iOS, 64-bit, promote certain warnings to errors.
- if (!isTargetMacOS() && getTriple().isArch64Bit()) {
+ // For modern targets, promote certain warnings to errors.
+ if (isTargetWatchOSBased() || getTriple().isArch64Bit()) {
// Always enable -Wdeprecated-objc-isa-usage and promote it
// to an error.
CC1Args.push_back("-Wdeprecated-objc-isa-usage");
CC1Args.push_back("-Werror=deprecated-objc-isa-usage");
- // Also error about implicit function declarations, as that
- // can impact calling conventions.
- CC1Args.push_back("-Werror=implicit-function-declaration");
+ // For iOS and watchOS, also error about implicit function declarations,
+ // as that can impact calling conventions.
+ if (!isTargetMacOS())
+ CC1Args.push_back("-Werror=implicit-function-declaration");
}
}
@@ -253,7 +266,15 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
llvm::sys::path::remove_filename(P); // 'bin'
llvm::sys::path::append(P, "lib", "arc", "libarclite_");
// Mash in the platform.
- if (isTargetIOSSimulator())
+ if (isTargetWatchOSSimulator())
+ P += "watchsimulator";
+ else if (isTargetWatchOS())
+ P += "watchos";
+ else if (isTargetTvOSSimulator())
+ P += "appletvsimulator";
+ else if (isTargetTvOS())
+ P += "appletvos";
+ else if (isTargetIOSSimulator())
P += "iphonesimulator";
else if (isTargetIPhoneOS())
P += "iphoneos";
@@ -276,7 +297,7 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build (unless
// we explicitly force linking with this library).
- if (AlwaysLink || llvm::sys::fs::exists(P))
+ if (AlwaysLink || getVFS().exists(P))
CmdArgs.push_back(Args.MakeArgString(P));
// Adding the rpaths might negatively interact when other rpaths are involved,
@@ -300,23 +321,38 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
void Darwin::addProfileRTLibs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- if (!(Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
- Args.hasArg(options::OPT_fprofile_generate) ||
- Args.hasArg(options::OPT_fprofile_generate_EQ) ||
- Args.hasArg(options::OPT_fprofile_instr_generate) ||
- Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage)))
- return;
+ if (!needsProfileRT(Args)) return;
+
+ // TODO: Clean this up once autoconf is gone
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "lib", "darwin");
+ const char *Library = "libclang_rt.profile_osx.a";
// Select the appropriate runtime library for the target.
- if (isTargetIOSBased())
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a",
- /*AlwaysLink*/ true);
- else
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a",
- /*AlwaysLink*/ true);
+ if (isTargetWatchOS()) {
+ Library = "libclang_rt.profile_watchos.a";
+ } else if (isTargetWatchOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_watchossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_watchossim.a"
+ : "libclang_rt.profile_watchos.a";
+ } else if (isTargetTvOS()) {
+ Library = "libclang_rt.profile_tvos.a";
+ } else if (isTargetTvOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_tvossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_tvossim.a"
+ : "libclang_rt.profile_tvos.a";
+ } else if (isTargetIPhoneOS()) {
+ Library = "libclang_rt.profile_ios.a";
+ } else if (isTargetIOSSimulator()) {
+ llvm::sys::path::append(P, "libclang_rt.profile_iossim.a");
+ Library = getVFS().exists(P) ? "libclang_rt.profile_iossim.a"
+ : "libclang_rt.profile_ios.a";
+ } else {
+ assert(isTargetMacOS() && "unexpected non MacOS platform");
+ }
+ AddLinkRuntimeLib(Args, CmdArgs, Library,
+ /*AlwaysLink*/ true);
+ return;
}
void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
@@ -327,6 +363,7 @@ void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
// Sanitizer runtime libraries requires C++.
AddCXXStdlibLibArgs(Args, CmdArgs);
}
+ // ASan is not supported on watchOS.
assert(isTargetMacOS() || isTargetIOSSimulator());
StringRef OS = isTargetMacOS() ? "osx" : "iossim";
AddLinkRuntimeLib(
@@ -374,13 +411,21 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
if (Sanitize.needsUbsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "ubsan");
+ if (Sanitize.needsTsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
// Otherwise link libSystem, then the dynamic runtime library, and finally any
// target specific static runtime library.
CmdArgs.push_back("-lSystem");
// Select the dynamic runtime library and the target specific static library.
- if (isTargetIOSBased()) {
+ if (isTargetWatchOSBased()) {
+ // We currently always need a static runtime library for watchOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.watchos.a");
+ } else if (isTargetTvOSBased()) {
+ // We currently always need a static runtime library for tvOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.tvos.a");
+ } else if (isTargetIOSBased()) {
// If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
// it never went into the SDK.
// Linking against libgcc_s.1 isn't needed for iOS 5.0+
@@ -425,13 +470,13 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// isysroot.
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
// Warn if the path does not exist.
- if (!llvm::sys::fs::exists(A->getValue()))
+ if (!getVFS().exists(A->getValue()))
getDriver().Diag(clang::diag::warn_missing_sysroot) << A->getValue();
} else {
if (char *env = ::getenv("SDKROOT")) {
// We only use this value as the default if it is an absolute path,
// exists, and it is not the root path.
- if (llvm::sys::path::is_absolute(env) && llvm::sys::fs::exists(env) &&
+ if (llvm::sys::path::is_absolute(env) && getVFS().exists(env) &&
StringRef(env) != "/") {
Args.append(Args.MakeSeparateArg(
nullptr, Opts.getOption(options::OPT_isysroot), env));
@@ -441,25 +486,46 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
+ Arg *TvOSVersion = Args.getLastArg(options::OPT_mtvos_version_min_EQ);
+ Arg *WatchOSVersion = Args.getLastArg(options::OPT_mwatchos_version_min_EQ);
- if (OSXVersion && iOSVersion) {
+ if (OSXVersion && (iOSVersion || TvOSVersion || WatchOSVersion)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << OSXVersion->getAsString(Args)
+ << (iOSVersion ? iOSVersion :
+ TvOSVersion ? TvOSVersion : WatchOSVersion)->getAsString(Args);
+ iOSVersion = TvOSVersion = WatchOSVersion = nullptr;
+ } else if (iOSVersion && (TvOSVersion || WatchOSVersion)) {
getDriver().Diag(diag::err_drv_argument_not_allowed_with)
- << OSXVersion->getAsString(Args) << iOSVersion->getAsString(Args);
- iOSVersion = nullptr;
- } else if (!OSXVersion && !iOSVersion) {
+ << iOSVersion->getAsString(Args)
+ << (TvOSVersion ? TvOSVersion : WatchOSVersion)->getAsString(Args);
+ TvOSVersion = WatchOSVersion = nullptr;
+ } else if (TvOSVersion && WatchOSVersion) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << TvOSVersion->getAsString(Args)
+ << WatchOSVersion->getAsString(Args);
+ WatchOSVersion = nullptr;
+ } else if (!OSXVersion && !iOSVersion && !TvOSVersion && !WatchOSVersion) {
// If no deployment target was specified on the command line, check for
// environment defines.
std::string OSXTarget;
std::string iOSTarget;
+ std::string TvOSTarget;
+ std::string WatchOSTarget;
+
if (char *env = ::getenv("MACOSX_DEPLOYMENT_TARGET"))
OSXTarget = env;
if (char *env = ::getenv("IPHONEOS_DEPLOYMENT_TARGET"))
iOSTarget = env;
+ if (char *env = ::getenv("TVOS_DEPLOYMENT_TARGET"))
+ TvOSTarget = env;
+ if (char *env = ::getenv("WATCHOS_DEPLOYMENT_TARGET"))
+ WatchOSTarget = env;
// If there is no command-line argument to specify the Target version and
// no environment variable defined, see if we can set the default based
// on -isysroot.
- if (iOSTarget.empty() && OSXTarget.empty() &&
+ if (OSXTarget.empty() && iOSTarget.empty() && WatchOSTarget.empty() &&
Args.hasArg(options::OPT_isysroot)) {
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
StringRef isysroot = A->getValue();
@@ -479,6 +545,12 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
iOSTarget = Version;
else if (SDK.startswith("MacOSX"))
OSXTarget = Version;
+ else if (SDK.startswith("WatchOS") ||
+ SDK.startswith("WatchSimulator"))
+ WatchOSTarget = Version;
+ else if (SDK.startswith("AppleTVOS") ||
+ SDK.startswith("AppleTVSimulator"))
+ TvOSTarget = Version;
}
}
}
@@ -486,7 +558,8 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// If no OSX or iOS target has been specified, try to guess platform
// from arch name and compute the version from the triple.
- if (OSXTarget.empty() && iOSTarget.empty()) {
+ if (OSXTarget.empty() && iOSTarget.empty() && TvOSTarget.empty() &&
+ WatchOSTarget.empty()) {
StringRef MachOArchName = getMachOArchName(Args);
unsigned Major, Minor, Micro;
if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
@@ -494,6 +567,10 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
getTriple().getiOSVersion(Major, Minor, Micro);
llvm::raw_string_ostream(iOSTarget) << Major << '.' << Minor << '.'
<< Micro;
+ } else if (MachOArchName == "armv7k") {
+ getTriple().getWatchOSVersion(Major, Minor, Micro);
+ llvm::raw_string_ostream(WatchOSTarget) << Major << '.' << Minor << '.'
+ << Micro;
} else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
MachOArchName != "armv7em") {
if (!getTriple().getMacOSXVersion(Major, Minor, Micro)) {
@@ -505,15 +582,32 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
+ // Do not allow conflicts with the watchOS target.
+ if (!WatchOSTarget.empty() && (!iOSTarget.empty() || !TvOSTarget.empty())) {
+ getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
+ << "WATCHOS_DEPLOYMENT_TARGET"
+ << (!iOSTarget.empty() ? "IPHONEOS_DEPLOYMENT_TARGET" :
+ "TVOS_DEPLOYMENT_TARGET");
+ }
+
+ // Do not allow conflicts with the tvOS target.
+ if (!TvOSTarget.empty() && !iOSTarget.empty()) {
+ getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
+ << "TVOS_DEPLOYMENT_TARGET"
+ << "IPHONEOS_DEPLOYMENT_TARGET";
+ }
+
// Allow conflicts among OSX and iOS for historical reasons, but choose the
// default platform.
- if (!OSXTarget.empty() && !iOSTarget.empty()) {
+ if (!OSXTarget.empty() && (!iOSTarget.empty() ||
+ !WatchOSTarget.empty() ||
+ !TvOSTarget.empty())) {
if (getTriple().getArch() == llvm::Triple::arm ||
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::thumb)
OSXTarget = "";
else
- iOSTarget = "";
+ iOSTarget = WatchOSTarget = TvOSTarget = "";
}
if (!OSXTarget.empty()) {
@@ -524,6 +618,14 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
const Option O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
iOSVersion = Args.MakeJoinedArg(nullptr, O, iOSTarget);
Args.append(iOSVersion);
+ } else if (!TvOSTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_mtvos_version_min_EQ);
+ TvOSVersion = Args.MakeJoinedArg(nullptr, O, TvOSTarget);
+ Args.append(TvOSVersion);
+ } else if (!WatchOSTarget.empty()) {
+ const Option O = Opts.getOption(options::OPT_mwatchos_version_min_EQ);
+ WatchOSVersion = Args.MakeJoinedArg(nullptr, O, WatchOSTarget);
+ Args.append(WatchOSVersion);
}
}
@@ -532,6 +634,10 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
Platform = MacOS;
else if (iOSVersion)
Platform = IPhoneOS;
+ else if (TvOSVersion)
+ Platform = TvOS;
+ else if (WatchOSVersion)
+ Platform = WatchOS;
else
llvm_unreachable("Unable to infer Darwin variant");
@@ -539,7 +645,8 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
unsigned Major, Minor, Micro;
bool HadExtra;
if (Platform == MacOS) {
- assert(!iOSVersion && "Unknown target platform!");
+ assert((!iOSVersion && !TvOSVersion && !WatchOSVersion) &&
+ "Unknown target platform!");
if (!Driver::GetReleaseVersion(OSXVersion->getValue(), Major, Minor, Micro,
HadExtra) ||
HadExtra || Major != 10 || Minor >= 100 || Micro >= 100)
@@ -552,6 +659,18 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
HadExtra || Major >= 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< iOSVersion->getAsString(Args);
+ } else if (Platform == TvOS) {
+ if (!Driver::GetReleaseVersion(TvOSVersion->getValue(), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << TvOSVersion->getAsString(Args);
+ } else if (Platform == WatchOS) {
+ if (!Driver::GetReleaseVersion(WatchOSVersion->getValue(), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << WatchOSVersion->getAsString(Args);
} else
llvm_unreachable("unknown kind of Darwin platform");
@@ -559,6 +678,12 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
if (iOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64))
Platform = IPhoneOSSimulator;
+ if (TvOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ Platform = TvOSSimulator;
+ if (WatchOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ Platform = WatchOSSimulator;
setTarget(Platform, Major, Minor, Micro);
}
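
A worked trace of the inference above (hypothetical invocation with no -m*-version-min flags on the command line): with WATCHOS_DEPLOYMENT_TARGET=2.0 set in the environment,

   WatchOSTarget = "2.0"
   -> -mwatchos-version-min=2.0 is synthesized and appended
   -> Platform = WatchOS, (Major, Minor, Micro) = (2, 0, 0)
   -> on x86/x86_64, Platform is then flipped to WatchOSSimulator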
@@ -572,7 +697,7 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back("-lc++");
break;
- case ToolChain::CST_Libstdcxx: {
+ case ToolChain::CST_Libstdcxx:
// Unfortunately, -lstdc++ doesn't always exist in the standard search path;
// it was previously found in the gcc lib dir. However, for all the Darwin
// platforms we care about it was -lstdc++.6, so we search for that
@@ -583,10 +708,10 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
SmallString<128> P(A->getValue());
llvm::sys::path::append(P, "usr", "lib", "libstdc++.dylib");
- if (!llvm::sys::fs::exists(P)) {
+ if (!getVFS().exists(P)) {
llvm::sys::path::remove_filename(P);
llvm::sys::path::append(P, "libstdc++.6.dylib");
- if (llvm::sys::fs::exists(P)) {
+ if (getVFS().exists(P)) {
CmdArgs.push_back(Args.MakeArgString(P));
return;
}
@@ -596,8 +721,8 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
// Otherwise, look in the root.
// FIXME: This should be removed someday when we don't have to care about
// 10.6 and earlier, where /usr/lib/libstdc++.dylib does not exist.
- if (!llvm::sys::fs::exists("/usr/lib/libstdc++.dylib") &&
- llvm::sys::fs::exists("/usr/lib/libstdc++.6.dylib")) {
+ if (!getVFS().exists("/usr/lib/libstdc++.dylib") &&
+ getVFS().exists("/usr/lib/libstdc++.6.dylib")) {
CmdArgs.push_back("/usr/lib/libstdc++.6.dylib");
return;
}
@@ -606,7 +731,6 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back("-lstdc++");
break;
}
- }
}
void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
@@ -620,17 +744,19 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
llvm::sys::path::append(P, "lib", "darwin");
// Use the newer cc_kext for iOS ARM after 6.0.
- if (!isTargetIPhoneOS() || isTargetIOSSimulator() ||
- getTriple().getArch() == llvm::Triple::aarch64 ||
- !isIPhoneOSVersionLT(6, 0)) {
- llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
+ if (isTargetWatchOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_watchos.a");
+ } else if (isTargetTvOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_tvos.a");
+ } else if (isTargetIPhoneOS()) {
+ llvm::sys::path::append(P, "libclang_rt.cc_kext_ios.a");
} else {
- llvm::sys::path::append(P, "libclang_rt.cc_kext_ios5.a");
+ llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
}
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build.
- if (llvm::sys::fs::exists(P))
+ if (getVFS().exists(P))
CmdArgs.push_back(Args.MakeArgString(P));
}
@@ -856,7 +982,7 @@ void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
// { hard-float, soft-float }
llvm::SmallString<32> CompilerRT = StringRef("libclang_rt.");
CompilerRT +=
- tools::arm::getARMFloatABI(getDriver(), Args, getTriple()) == "hard"
+ (tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard)
? "hard"
: "soft";
CompilerRT += Args.hasArg(options::OPT_fPIC) ? "_pic.a" : "_static.a";
@@ -883,8 +1009,9 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// FIXME: It would be far better to avoid inserting those -static arguments,
// but we can't check the deployment target in the translation code until
// it is set here.
- if (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0)) {
- for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie;) {
+ if (isTargetWatchOSBased() ||
+ (isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0))) {
+ for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
Arg *A = *it;
++it;
if (A->getOption().getID() != options::OPT_mkernel &&
@@ -900,7 +1027,8 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
// Default to use libc++ on OS X 10.9+ and iOS 7+.
if (((isTargetMacOS() && !isMacosxVersionLT(10, 9)) ||
- (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0))) &&
+ (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
+ isTargetWatchOSBased()) &&
!Args.getLastArg(options::OPT_stdlib_EQ))
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_stdlib_EQ),
"libc++");
@@ -933,10 +1061,14 @@ bool MachO::UseDwarfDebugFlags() const {
return false;
}
-bool Darwin::UseSjLjExceptions() const {
+bool Darwin::UseSjLjExceptions(const ArgList &Args) const {
// Darwin uses SjLj exceptions on ARM.
- return (getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::thumb);
+ if (getTriple().getArch() != llvm::Triple::arm &&
+ getTriple().getArch() != llvm::Triple::thumb)
+ return false;
+
+ // Only watchOS uses the new DWARF/Compact unwinding method.
+ return !isTargetWatchOS();
}
bool MachO::isPICDefault() const { return true; }
@@ -957,7 +1089,15 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
VersionTuple TargetVersion = getTargetVersion();
- if (isTargetIOSSimulator())
+ if (isTargetWatchOS())
+ CmdArgs.push_back("-watchos_version_min");
+ else if (isTargetWatchOSSimulator())
+ CmdArgs.push_back("-watchos_simulator_version_min");
+ else if (isTargetTvOS())
+ CmdArgs.push_back("-tvos_version_min");
+ else if (isTargetTvOSSimulator())
+ CmdArgs.push_back("-tvos_simulator_version_min");
+ else if (isTargetIOSSimulator())
CmdArgs.push_back("-ios_simulator_version_min");
else if (isTargetIOSBased())
CmdArgs.push_back("-iphoneos_version_min");
@@ -974,7 +1114,9 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
// Derived from startfile spec.
if (Args.hasArg(options::OPT_dynamiclib)) {
// Derived from darwin_dylib1 spec.
- if (isTargetIOSSimulator()) {
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need dylib1.o.
+ } else if (isTargetIOSSimulator()) {
; // iOS simulator does not need dylib1.o.
} else if (isTargetIPhoneOS()) {
if (isIPhoneOSVersionLT(3, 1))
@@ -989,7 +1131,9 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
if (Args.hasArg(options::OPT_bundle)) {
if (!Args.hasArg(options::OPT_static)) {
// Derived from darwin_bundle1 spec.
- if (isTargetIOSSimulator()) {
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need bundle1.o.
+ } else if (isTargetIOSSimulator()) {
; // iOS simulator does not need bundle1.o.
} else if (isTargetIPhoneOS()) {
if (isIPhoneOSVersionLT(3, 1))
@@ -1024,7 +1168,9 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
CmdArgs.push_back("-lcrt0.o");
} else {
// Derived from darwin_crt1 spec.
- if (isTargetIOSSimulator()) {
+ if (isTargetWatchOSBased()) {
+ ; // watchOS does not need crt1.o.
+ } else if (isTargetIOSSimulator()) {
; // iOS simulator does not need crt1.o.
} else if (isTargetIPhoneOS()) {
if (getArch() == llvm::Triple::aarch64)
@@ -1049,6 +1195,7 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
}
if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
+ !isTargetWatchOS() &&
isMacosxVersionLT(10, 5)) {
const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
CmdArgs.push_back(Str);
@@ -1058,7 +1205,8 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
bool Darwin::SupportsObjCGC() const { return isTargetMacOS(); }
void Darwin::CheckObjCARC() const {
- if (isTargetIOSBased() || (isTargetMacOS() && !isMacosxVersionLT(10, 6)))
+ if (isTargetIOSBased() || isTargetWatchOSBased() ||
+ (isTargetMacOS() && !isMacosxVersionLT(10, 6)))
return;
getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
}
@@ -1071,6 +1219,7 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
if (!isMacosxVersionLT(10, 9))
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Thread;
}
return Res;
}
@@ -1170,7 +1319,8 @@ static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
/// necessary because the driver doesn't store the final version of the target
/// triple.
void Generic_GCC::GCCInstallationDetector::init(
- const Driver &D, const llvm::Triple &TargetTriple, const ArgList &Args) {
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ ArrayRef<std::string> ExtraTripleAliases) {
llvm::Triple BiarchVariantTriple = TargetTriple.isArch32Bit()
? TargetTriple.get64BitArchVariant()
: TargetTriple.get32BitArchVariant();
@@ -1212,20 +1362,22 @@ void Generic_GCC::GCCInstallationDetector::init(
// installation available. GCC installs are ranked by version number.
Version = GCCVersion::Parse("0.0.0");
for (const std::string &Prefix : Prefixes) {
- if (!llvm::sys::fs::exists(Prefix))
+ if (!D.getVFS().exists(Prefix))
continue;
- for (const StringRef Suffix : CandidateLibDirs) {
+ for (StringRef Suffix : CandidateLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
- if (!llvm::sys::fs::exists(LibDir))
+ if (!D.getVFS().exists(LibDir))
continue;
- for (const StringRef Candidate : CandidateTripleAliases)
+ for (StringRef Candidate : ExtraTripleAliases) // Try these first.
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
+ for (StringRef Candidate : CandidateTripleAliases)
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
}
- for (const StringRef Suffix : CandidateBiarchLibDirs) {
+ for (StringRef Suffix : CandidateBiarchLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
- if (!llvm::sys::fs::exists(LibDir))
+ if (!D.getVFS().exists(LibDir))
continue;
- for (const StringRef Candidate : CandidateBiarchTripleAliases)
+ for (StringRef Candidate : CandidateBiarchTripleAliases)
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate,
/*NeedsBiarchSuffix=*/ true);
}
@@ -1300,8 +1452,9 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"i586-linux-gnu"};
static const char *const MIPSLibDirs[] = {"/lib"};
- static const char *const MIPSTriples[] = {
- "mips-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu"};
+ static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
+ "mips-mti-linux-gnu",
+ "mips-img-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/lib"};
static const char *const MIPSELTriples[] = {
"mipsel-linux-gnu", "mipsel-linux-android", "mips-img-linux-gnu"};
@@ -1340,9 +1493,20 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
"s390x-suse-linux", "s390x-redhat-linux"};
+ // Solaris.
+ static const char *const SolarisSPARCLibDirs[] = {"/gcc"};
+ static const char *const SolarisSPARCTriples[] = {"sparc-sun-solaris2.11",
+ "i386-pc-solaris2.11"};
+
using std::begin;
using std::end;
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ LibDirs.append(begin(SolarisSPARCLibDirs), end(SolarisSPARCLibDirs));
+ TripleAliases.append(begin(SolarisSPARCTriples), end(SolarisSPARCTriples));
+ return;
+ }
+
switch (TargetTriple.getArch()) {
case llvm::Triple::aarch64:
LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
@@ -1436,6 +1600,7 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
break;
case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
LibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
TripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
BiarchLibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
@@ -1451,7 +1616,6 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs));
TripleAliases.append(begin(SystemZTriples), end(SystemZTriples));
break;
-
default:
// By default, just rely on the standard lib directories and the original
// triple.
@@ -1467,15 +1631,83 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.push_back(BiarchTriple.str());
}
+/// \brief Try common CUDA installation paths, looking for the files we need
+/// for CUDA compilation.
+void Generic_GCC::CudaInstallationDetector::init(
+ const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args) {
+ SmallVector<std::string, 4> CudaPathCandidates;
+
+ if (Args.hasArg(options::OPT_cuda_path_EQ))
+ CudaPathCandidates.push_back(
+ Args.getLastArgValue(options::OPT_cuda_path_EQ));
+ else {
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda");
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.5");
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.0");
+ }
+
+ for (const auto &CudaPath : CudaPathCandidates) {
+ if (CudaPath.empty() || !D.getVFS().exists(CudaPath))
+ continue;
+
+ CudaInstallPath = CudaPath;
+ CudaIncludePath = CudaInstallPath + "/include";
+ CudaLibDevicePath = CudaInstallPath + "/nvvm/libdevice";
+ CudaLibPath =
+ CudaInstallPath + (TargetTriple.isArch64Bit() ? "/lib64" : "/lib");
+
+ if (!(D.getVFS().exists(CudaIncludePath) &&
+ D.getVFS().exists(CudaLibPath) &&
+ D.getVFS().exists(CudaLibDevicePath)))
+ continue;
+
+ std::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(CudaLibDevicePath, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef FilePath = LI->path();
+ StringRef FileName = llvm::sys::path::filename(FilePath);
+ // Process all bitcode filenames that look like libdevice.compute_XX.YY.bc
+ const StringRef LibDeviceName = "libdevice.";
+ if (!(FileName.startswith(LibDeviceName) && FileName.endswith(".bc")))
+ continue;
+ StringRef GpuArch = FileName.slice(
+ LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
+ CudaLibDeviceMap[GpuArch] = FilePath.str();
+      // Insert map entries for specific devices with this compute capability.
+ if (GpuArch == "compute_20") {
+ CudaLibDeviceMap["sm_20"] = FilePath;
+ CudaLibDeviceMap["sm_21"] = FilePath;
+ } else if (GpuArch == "compute_30") {
+ CudaLibDeviceMap["sm_30"] = FilePath;
+ CudaLibDeviceMap["sm_32"] = FilePath;
+ } else if (GpuArch == "compute_35") {
+ CudaLibDeviceMap["sm_35"] = FilePath;
+ CudaLibDeviceMap["sm_37"] = FilePath;
+ }
+ }
+
+ IsValid = true;
+ break;
+ }
+}
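
For illustration, a minimal standalone sketch (plain C++, standard library
only, not part of the patch) of the filename slicing above: everything between
the "libdevice." prefix and the next '.' becomes the GPU arch key.

#include <cassert>
#include <string>

// Sketch: extract "compute_35" from "libdevice.compute_35.10.bc".
static std::string gpuArchOf(const std::string &FileName) {
  const std::string Prefix = "libdevice.";
  std::string::size_type End = FileName.find('.', Prefix.size());
  return FileName.substr(Prefix.size(), End - Prefix.size());
}

int main() {
  assert(gpuArchOf("libdevice.compute_35.10.bc") == "compute_35");
}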
+
+void Generic_GCC::CudaInstallationDetector::print(raw_ostream &OS) const {
+ if (isValid())
+ OS << "Found CUDA installation: " << CudaInstallPath << "\n";
+}
+
namespace {
// Filter to remove Multilibs that don't exist as a suffix to Path
class FilterNonExistent {
StringRef Base;
+ vfs::FileSystem &VFS;
public:
- FilterNonExistent(StringRef Base) : Base(Base) {}
+ FilterNonExistent(StringRef Base, vfs::FileSystem &VFS)
+ : Base(Base), VFS(VFS) {}
bool operator()(const Multilib &M) {
- return !llvm::sys::fs::exists(Base + M.gccSuffix() + "/crtbegin.o");
+ return !VFS.exists(Base + M.gccSuffix() + "/crtbegin.o");
}
};
} // end anonymous namespace
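
A minimal sketch of how such a predicate prunes multilib candidates, with a
stand-in exists() stub in place of the VFS query (the base path and suffixes
are illustrative):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

static bool exists(const std::string &) { return false; } // stub VFS query

int main() {
  std::string Base = "/usr/lib/gcc/x86_64-linux-gnu/5";
  std::vector<std::string> Suffixes = {"", "/32", "/x32"};
  // Drop every multilib suffix whose crtbegin.o is absent.
  Suffixes.erase(std::remove_if(Suffixes.begin(), Suffixes.end(),
                                [&](const std::string &S) {
                                  return !exists(Base + S + "/crtbegin.o");
                                }),
                 Suffixes.end());
  assert(Suffixes.empty()); // the stub reports nothing as existing
}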
@@ -1515,6 +1747,7 @@ static bool isMicroMips(const ArgList &Args) {
return A && A->getOption().matches(options::OPT_mmicromips);
}
+namespace {
struct DetectedMultilibs {
/// The set of multilibs that the detected installation supports.
MultilibSet Multilibs;
@@ -1526,13 +1759,15 @@ struct DetectedMultilibs {
/// targeting the non-default multilib. Otherwise, it is empty.
llvm::Optional<Multilib> BiarchSibling;
};
+} // end anonymous namespace
static Multilib makeMultilib(StringRef commonSuffix) {
return Multilib(commonSuffix, commonSuffix, commonSuffix);
}
-static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
- const ArgList &Args, DetectedMultilibs &Result) {
+static bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
// Some MIPS toolchains put libraries and object files compiled
// using different options into subdirectories whose names
// reflect the flags used for compilation. For example, sysroot
@@ -1558,7 +1793,7 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
// /usr
// /lib <= crt*.o files compiled with '-mips32'
- FilterNonExistent NonExistent(Path);
+ FilterNonExistent NonExistent(Path, D.getVFS());
// Check for FSF toolchain multilibs
MultilibSet FSFMipsMultilibs;
@@ -1636,6 +1871,32 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
});
}
+ // Check for Musl toolchain multilibs
+ MultilibSet MuslMipsMultilibs;
+ {
+ auto MArchMipsR2 = makeMultilib("")
+ .osSuffix("/mips-r2-hard-musl")
+ .flag("+EB")
+ .flag("-EL")
+ .flag("+march=mips32r2");
+
+ auto MArchMipselR2 = makeMultilib("/mipsel-r2-hard-musl")
+ .flag("-EB")
+ .flag("+EL")
+ .flag("+march=mips32r2");
+
+ MuslMipsMultilibs = MultilibSet().Either(MArchMipsR2, MArchMipselR2);
+
+ // Specify the callback that computes the include directories.
+ MuslMipsMultilibs.setIncludeDirsCallback([](
+ StringRef InstallDir, StringRef TripleStr, const Multilib &M) {
+ std::vector<std::string> Dirs;
+ Dirs.push_back(
+ (InstallDir + "/../sysroot" + M.osSuffix() + "/usr/include").str());
+ return Dirs;
+ });
+ }
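
As a concrete picture of what the callback above computes, a short sketch with
an illustrative install dir and the big-endian musl multilib suffix:

#include <iostream>
#include <string>

int main() {
  std::string InstallDir = "/opt/mips/bin";    // illustrative
  std::string OsSuffix = "/mips-r2-hard-musl"; // M.osSuffix()
  // Prints /opt/mips/bin/../sysroot/mips-r2-hard-musl/usr/include
  std::cout << InstallDir + "/../sysroot" + OsSuffix + "/usr/include" << '\n';
}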
+
// Check for Code Sourcery toolchain multilibs
MultilibSet CSMipsMultilibs;
{
@@ -1754,7 +2015,7 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
addMultilibFlag(isMips16(Args), "mips16", Flags);
addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
- CPUName == "mips32r5",
+ CPUName == "mips32r5" || CPUName == "p5600",
"march=mips32r2", Flags);
addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags);
addMultilibFlag(CPUName == "mips64", "march=mips64", Flags);
@@ -1772,7 +2033,7 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
- if (TargetTriple.getEnvironment() == llvm::Triple::Android) {
+ if (TargetTriple.isAndroid()) {
// Select Android toolchain. It's the only choice in that case.
if (AndroidMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
Result.Multilibs = AndroidMipsMultilibs;
@@ -1781,6 +2042,16 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
return false;
}
+ if (TargetTriple.getVendor() == llvm::Triple::MipsTechnologies &&
+ TargetTriple.getOS() == llvm::Triple::Linux &&
+ TargetTriple.getEnvironment() == llvm::Triple::UnknownEnvironment) {
+ if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
+ Result.Multilibs = MuslMipsMultilibs;
+ return true;
+ }
+ return false;
+ }
+
if (TargetTriple.getVendor() == llvm::Triple::ImaginationTechnologies &&
TargetTriple.getOS() == llvm::Triple::Linux &&
TargetTriple.getEnvironment() == llvm::Triple::GNU) {
@@ -1823,11 +2094,11 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path,
return false;
}
-static bool findBiarchMultilibs(const llvm::Triple &TargetTriple,
+static bool findBiarchMultilibs(const Driver &D,
+ const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
bool NeedsBiarchSuffix,
DetectedMultilibs &Result) {
-
// Some versions of SUSE and Fedora on ppc64 put 32-bit libs
// in what would normally be GCCInstallPath and put the 64-bit
// libs in a subdirectory named 64. The simple logic we follow is that
@@ -1855,7 +2126,7 @@ static bool findBiarchMultilibs(const llvm::Triple &TargetTriple,
.flag("-m64")
.flag("+mx32");
- FilterNonExistent NonExistent(Path);
+ FilterNonExistent NonExistent(Path, D.getVFS());
// Determine default multilib from: 32, 64, x32
// Also handle cases such as 64 on 32, 32 on 64, etc.
@@ -1907,6 +2178,56 @@ static bool findBiarchMultilibs(const llvm::Triple &TargetTriple,
return true;
}
+void Generic_GCC::GCCInstallationDetector::scanLibDirForGCCTripleSolaris(
+ const llvm::Triple &TargetArch, const llvm::opt::ArgList &Args,
+ const std::string &LibDir, StringRef CandidateTriple,
+ bool NeedsBiarchSuffix) {
+ // Solaris is a special case. The GCC installation is under
+ // /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/, so we
+ // need to iterate twice.
+ std::error_code EC;
+ for (vfs::directory_iterator LI = D.getVFS().dir_begin(LibDir, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+
+ if (CandidateVersion.Major != -1) // Filter obviously bad entries.
+ if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
+ continue; // Saw this path before; no need to look at it again.
+ if (CandidateVersion.isOlderThan(4, 1, 1))
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+
+ GCCInstallPath =
+ LibDir + "/" + VersionText.str() + "/lib/gcc/" + CandidateTriple.str();
+ if (!D.getVFS().exists(GCCInstallPath))
+ continue;
+
+    // If we make it here, there has to be at least one GCC version; just
+    // use the latest one.
+ std::error_code EEC;
+ for (vfs::directory_iterator
+ LLI = D.getVFS().dir_begin(GCCInstallPath, EEC),
+ LLE;
+ !EEC && LLI != LLE; LLI = LLI.increment(EEC)) {
+
+ StringRef SubVersionText = llvm::sys::path::filename(LLI->getName());
+ GCCVersion CandidateSubVersion = GCCVersion::Parse(SubVersionText);
+
+ if (CandidateSubVersion > Version)
+ Version = CandidateSubVersion;
+ }
+
+ GCCTriple.setTriple(CandidateTriple);
+
+ GCCInstallPath += "/" + Version.Text;
+ GCCParentLibPath = GCCInstallPath + "/../../../../";
+
+ IsValid = true;
+ }
+}
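
A short sketch of the two-level layout this walks; all version numbers are
illustrative, not taken from the patch:

#include <iostream>
#include <string>

int main() {
  std::string LibDir = "/usr/gcc";              // prefix + "/gcc"
  std::string Major = "4.8";                    // outer loop entry
  std::string Triple = "sparc-sun-solaris2.11";
  // Checked for existence, then the inner loop picks the best subversion.
  std::string Install = LibDir + "/" + Major + "/lib/gcc/" + Triple;
  std::cout << Install + "/4.8.2" << '\n';
}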
+
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
@@ -1914,41 +2235,48 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
// There are various different suffixes involving the triple we
// check for. We also record what is necessary to walk from each back
- // up to the lib directory.
- const std::string LibSuffixes[] = {
- "/gcc/" + CandidateTriple.str(),
+ // up to the lib directory. Specifically, the number of "up" steps
+ // in the second half of each row is 1 + the number of path separators
+ // in the first half.
+ const std::string LibAndInstallSuffixes[][2] = {
+ {"/gcc/" + CandidateTriple.str(), "/../../.."},
+
// Debian puts cross-compilers in gcc-cross
- "/gcc-cross/" + CandidateTriple.str(),
- "/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+ {"/gcc-cross/" + CandidateTriple.str(), "/../../.."},
+
+ {"/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+ "/../../../.."},
// The Freescale PPC SDK has the gcc libraries in
// <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well.
- "/" + CandidateTriple.str(),
+ {"/" + CandidateTriple.str(), "/../.."},
// Ubuntu has a strange mis-matched pair of triples that this happens to
// match.
// FIXME: It may be worthwhile to generalize this and look for a second
// triple.
- "/i386-linux-gnu/gcc/" + CandidateTriple.str()};
- const std::string InstallSuffixes[] = {
- "/../../..", // gcc/
- "/../../..", // gcc-cross/
- "/../../../..", // <triple>/gcc/
- "/../..", // <triple>/
- "/../../../.." // i386-linux-gnu/gcc/<triple>/
- };
+ {"/i386-linux-gnu/gcc/" + CandidateTriple.str(), "/../../../.."}};
+
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ scanLibDirForGCCTripleSolaris(TargetTriple, Args, LibDir, CandidateTriple,
+ NeedsBiarchSuffix);
+ return;
+ }
+
// Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
- const unsigned NumLibSuffixes =
- (llvm::array_lengthof(LibSuffixes) - (TargetArch != llvm::Triple::x86));
+ const unsigned NumLibSuffixes = (llvm::array_lengthof(LibAndInstallSuffixes) -
+ (TargetArch != llvm::Triple::x86));
for (unsigned i = 0; i < NumLibSuffixes; ++i) {
- StringRef LibSuffix = LibSuffixes[i];
+ StringRef LibSuffix = LibAndInstallSuffixes[i][0];
std::error_code EC;
- for (llvm::sys::fs::directory_iterator LI(LibDir + LibSuffix, EC), LE;
+ for (vfs::directory_iterator
+ LI = D.getVFS().dir_begin(LibDir + LibSuffix, EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->path());
+ StringRef VersionText = llvm::sys::path::filename(LI->getName());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
if (CandidateVersion.Major != -1) // Filter obviously bad entries.
- if (!CandidateGCCInstallPaths.insert(LI->path()).second)
+ if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
@@ -1960,9 +2288,9 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// Debian mips multilibs behave more like the rest of the biarch ones,
// so handle them there
if (isMipsArch(TargetArch)) {
- if (!findMIPSMultilibs(TargetTriple, LI->path(), Args, Detected))
+ if (!findMIPSMultilibs(D, TargetTriple, LI->getName(), Args, Detected))
continue;
- } else if (!findBiarchMultilibs(TargetTriple, LI->path(), Args,
+ } else if (!findBiarchMultilibs(D, TargetTriple, LI->getName(), Args,
NeedsBiarchSuffix, Detected)) {
continue;
}
@@ -1975,8 +2303,9 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// FIXME: We hack together the directory name here instead of
// using LI to ensure stable path separators across Windows and
// Linux.
- GCCInstallPath = LibDir + LibSuffixes[i] + "/" + VersionText.str();
- GCCParentLibPath = GCCInstallPath + InstallSuffixes[i];
+ GCCInstallPath =
+ LibDir + LibAndInstallSuffixes[i][0] + "/" + VersionText.str();
+ GCCParentLibPath = GCCInstallPath + LibAndInstallSuffixes[i][1];
IsValid = true;
}
}
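
The invariant stated in the comment above (the number of "up" steps is 1 +
the number of path separators) can be checked mechanically; a minimal sketch
using the first table row:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>

int main() {
  // "/gcc/x86_64-linux-gnu" has 2 separators, so walking back up to the lib
  // directory takes 3 steps; each step is the 3 characters "/..".
  std::string Lib = "/gcc/x86_64-linux-gnu", Up = "/../../..";
  auto Seps = std::count(Lib.begin(), Lib.end(), '/');
  assert(Up.size() == static_cast<std::size_t>(3 * (Seps + 1)));
}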
@@ -1984,7 +2313,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), GCCInstallation() {
+ : ToolChain(D, Triple, Args), GCCInstallation(D), CudaInstallation(D) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -2016,6 +2345,7 @@ Tool *Generic_GCC::buildLinker() const { return new tools::gcc::Linker(*this); }
void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
+ CudaInstallation.print(OS);
}
bool Generic_GCC::IsUnwindTablesDefault() const {
@@ -2047,9 +2377,6 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
- case llvm::Triple::sparcv9:
case llvm::Triple::systemz:
return true;
default:
@@ -2057,6 +2384,40 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
}
}
+/// \brief Helper to add the variant paths of a libstdc++ installation.
+bool Generic_GCC::addLibStdCXXIncludePaths(
+ Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
+ StringRef TargetMultiarchTriple, Twine IncludeSuffix,
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (!getVFS().exists(Base + Suffix))
+ return false;
+
+ addSystemInclude(DriverArgs, CC1Args, Base + Suffix);
+
+ // The vanilla GCC layout of libstdc++ headers uses a triple subdirectory. If
+ // that path exists or we have neither a GCC nor target multiarch triple, use
+ // this vanilla search path.
+ if ((GCCMultiarchTriple.empty() && TargetMultiarchTriple.empty()) ||
+ getVFS().exists(Base + Suffix + "/" + GCCTriple + IncludeSuffix)) {
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + Suffix + "/" + GCCTriple + IncludeSuffix);
+ } else {
+ // Otherwise try to use multiarch naming schemes which have normalized the
+ // triples and put the triple before the suffix.
+ //
+ // GCC surprisingly uses *both* the GCC triple with a multilib suffix and
+ // the target triple, so we support that here.
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "/" + GCCMultiarchTriple + Suffix + IncludeSuffix);
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + "/" + TargetMultiarchTriple + Suffix);
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, Base + Suffix + "/backward");
+ return true;
+}
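
A sketch of the three includes added in the vanilla-layout case, with
illustrative Base, Suffix, and triple values:

#include <iostream>
#include <string>

int main() {
  std::string Base = "/usr/include/c++", Suffix = "/5.3.0"; // illustrative
  std::string Triple = "x86_64-linux-gnu", IncSuffix = "";
  std::cout << Base + Suffix << '\n'
            << Base + Suffix + "/" + Triple + IncSuffix << '\n'
            << Base + Suffix + "/backward" << '\n';
}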
+
void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
@@ -2064,238 +2425,324 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
(getTriple().getOS() == llvm::Triple::Linux &&
- (!V.isOlderThan(4, 7, 0) ||
- getTriple().getEnvironment() == llvm::Triple::Android)) ||
- getTriple().getOS() == llvm::Triple::NaCl;
+ (!V.isOlderThan(4, 7, 0) || getTriple().isAndroid())) ||
+ getTriple().getOS() == llvm::Triple::NaCl ||
+ (getTriple().getVendor() == llvm::Triple::MipsTechnologies &&
+ !getTriple().hasEnvironment());
if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, UseInitArrayDefault))
CC1Args.push_back("-fuse-init-array");
}
+/// Mips Toolchain
+MipsLLVMToolChain::MipsLLVMToolChain(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Linux(D, Triple, Args) {
+ // Select the correct multilib according to the given arguments.
+ DetectedMultilibs Result;
+ findMIPSMultilibs(D, Triple, "", Args, Result);
+ Multilibs = Result.Multilibs;
+ SelectedMultilib = Result.SelectedMultilib;
+
+ // Find out the library suffix based on the ABI.
+ LibSuffix = tools::mips::getMipsABILibSuffix(Args, Triple);
+ getFilePaths().clear();
+ getFilePaths().push_back(computeSysRoot() + "/usr/lib" + LibSuffix);
+
+ // Use LLD by default.
+ DefaultLinker = "lld";
+}
+
+void MipsLLVMToolChain::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ const Driver &D = getDriver();
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths =
+ Callback(D.getInstalledDir(), getTripleString(), SelectedMultilib);
+ for (const auto &Path : IncludePaths)
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args, Path);
+ }
+}
+
+Tool *MipsLLVMToolChain::buildLinker() const {
+ return new tools::gnutools::Linker(*this);
+}
+
+std::string MipsLLVMToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot + SelectedMultilib.osSuffix();
+
+ const std::string InstalledDir(getDriver().getInstalledDir());
+ std::string SysRootPath =
+ InstalledDir + "/../sysroot" + SelectedMultilib.osSuffix();
+ if (llvm::sys::fs::exists(SysRootPath))
+ return SysRootPath;
+
+ return std::string();
+}
+
+ToolChain::CXXStdlibType
+MipsLLVMToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
+ if (A) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+void MipsLLVMToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ assert((GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) &&
+ "Only -lc++ (aka libcxx) is suported in this toolchain.");
+
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ const auto IncludePaths = Callback(getDriver().getInstalledDir(),
+ getTripleString(), SelectedMultilib);
+ for (const auto &Path : IncludePaths) {
+ if (llvm::sys::fs::exists(Path + "/c++/v1")) {
+ addSystemInclude(DriverArgs, CC1Args, Path + "/c++/v1");
+ break;
+ }
+ }
+ }
+}
+
+void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ assert((GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) &&
+ "Only -lc++ (aka libxx) is suported in this toolchain.");
+
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+}
+
+std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
+ StringRef Component,
+ bool Shared) const {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, SelectedMultilib.osSuffix(), "lib" + LibSuffix,
+ getOS());
+ llvm::sys::path::append(Path, Twine("libclang_rt." + Component + "-" +
+ "mips" + (Shared ? ".so" : ".a")));
+ return Path.str();
+}
+
/// Hexagon Toolchain
-std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir,
- const ArgList &Args) {
+std::string HexagonToolChain::getHexagonTargetDir(
+ const std::string &InstalledDir,
+ const SmallVectorImpl<std::string> &PrefixDirs) const {
+ std::string InstallRelDir;
+ const Driver &D = getDriver();
// Locate the rest of the toolchain ...
- std::string GccToolchain = getGCCToolchainDir(Args);
-
- if (!GccToolchain.empty())
- return GccToolchain;
+ for (auto &I : PrefixDirs)
+ if (D.getVFS().exists(I))
+ return I;
- std::string InstallRelDir = InstalledDir + "/../../gnu";
- if (llvm::sys::fs::exists(InstallRelDir))
+ if (getVFS().exists(InstallRelDir = InstalledDir + "/../target"))
return InstallRelDir;
- std::string PrefixRelDir = std::string(LLVM_PREFIX) + "/../gnu";
- if (llvm::sys::fs::exists(PrefixRelDir))
+ std::string PrefixRelDir = std::string(LLVM_PREFIX) + "/target";
+ if (getVFS().exists(PrefixRelDir))
return PrefixRelDir;
return InstallRelDir;
}
-const char *Hexagon_TC::GetSmallDataThreshold(const ArgList &Args) {
- Arg *A;
- A = Args.getLastArg(options::OPT_G, options::OPT_G_EQ,
- options::OPT_msmall_data_threshold_EQ);
- if (A)
- return A->getValue();
+Optional<unsigned> HexagonToolChain::getSmallDataThreshold(
+ const ArgList &Args) {
+ StringRef Gn = "";
+ if (Arg *A = Args.getLastArg(options::OPT_G, options::OPT_G_EQ,
+ options::OPT_msmall_data_threshold_EQ)) {
+ Gn = A->getValue();
+ } else if (Args.getLastArg(options::OPT_shared, options::OPT_fpic,
+ options::OPT_fPIC)) {
+ Gn = "0";
+ }
- A = Args.getLastArg(options::OPT_shared, options::OPT_fpic,
- options::OPT_fPIC);
- if (A)
- return "0";
+ unsigned G;
+ if (!Gn.getAsInteger(10, G))
+ return G;
- return 0;
+ return None;
}
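
A sketch of the resulting tri-state behavior, reimplemented here with
std::optional in place of llvm::Optional: an explicit -G value wins, PIC or
-shared implies 0, and otherwise there is no opinion.

#include <cassert>
#include <optional>

static std::optional<unsigned> threshold(bool HasG, unsigned G, bool HasPIC) {
  if (HasG)
    return G; // explicit -G<n> / -msmall-data-threshold=<n>
  if (HasPIC)
    return 0u; // -shared/-fpic/-fPIC default to 0
  return std::nullopt;
}

int main() {
  assert(threshold(true, 8, false) == 8u);
  assert(threshold(false, 0, true) == 0u);
  assert(!threshold(false, 0, false));
}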
-bool Hexagon_TC::UsesG0(const char *smallDataThreshold) {
- return smallDataThreshold && smallDataThreshold[0] == '0';
-}
-static void GetHexagonLibraryPaths(const ArgList &Args, const std::string &Ver,
- const std::string &MarchString,
- const std::string &InstalledDir,
- ToolChain::path_list *LibPaths) {
- bool buildingLib = Args.hasArg(options::OPT_shared);
+void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
+ ToolChain::path_list &LibPaths) const {
+ const Driver &D = getDriver();
//----------------------------------------------------------------------------
// -L Args
//----------------------------------------------------------------------------
for (Arg *A : Args.filtered(options::OPT_L))
for (const char *Value : A->getValues())
- LibPaths->push_back(Value);
+ LibPaths.push_back(Value);
//----------------------------------------------------------------------------
// Other standard paths
//----------------------------------------------------------------------------
- const std::string MarchSuffix = "/" + MarchString;
- const std::string G0Suffix = "/G0";
- const std::string MarchG0Suffix = MarchSuffix + G0Suffix;
- const std::string RootDir = Hexagon_TC::GetGnuDir(InstalledDir, Args) + "/";
-
- // lib/gcc/hexagon/...
- std::string LibGCCHexagonDir = RootDir + "lib/gcc/hexagon/";
- if (buildingLib) {
- LibPaths->push_back(LibGCCHexagonDir + Ver + MarchG0Suffix);
- LibPaths->push_back(LibGCCHexagonDir + Ver + G0Suffix);
- }
- LibPaths->push_back(LibGCCHexagonDir + Ver + MarchSuffix);
- LibPaths->push_back(LibGCCHexagonDir + Ver);
-
- // lib/gcc/...
- LibPaths->push_back(RootDir + "lib/gcc");
-
- // hexagon/lib/...
- std::string HexagonLibDir = RootDir + "hexagon/lib";
- if (buildingLib) {
- LibPaths->push_back(HexagonLibDir + MarchG0Suffix);
- LibPaths->push_back(HexagonLibDir + G0Suffix);
+ std::vector<std::string> RootDirs;
+  std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(),
+            std::back_inserter(RootDirs));
+
+ std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
+ if (std::find(RootDirs.begin(), RootDirs.end(), TargetDir) == RootDirs.end())
+ RootDirs.push_back(TargetDir);
+
+ bool HasPIC = Args.hasArg(options::OPT_fpic, options::OPT_fPIC);
+ // Assume G0 with -shared.
+ bool HasG0 = Args.hasArg(options::OPT_shared);
+ if (auto G = getSmallDataThreshold(Args))
+ HasG0 = G.getValue() == 0;
+
+ const std::string CpuVer = GetTargetCPUVersion(Args).str();
+ for (auto &Dir : RootDirs) {
+ std::string LibDir = Dir + "/hexagon/lib";
+ std::string LibDirCpu = LibDir + '/' + CpuVer;
+ if (HasG0) {
+ if (HasPIC)
+ LibPaths.push_back(LibDirCpu + "/G0/pic");
+ LibPaths.push_back(LibDirCpu + "/G0");
+ }
+ LibPaths.push_back(LibDirCpu);
+ LibPaths.push_back(LibDir);
}
- LibPaths->push_back(HexagonLibDir + MarchSuffix);
- LibPaths->push_back(HexagonLibDir);
}
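
A sketch of the per-root search order produced above for a -G0 -fpic build,
assuming CPU "v60" and a single illustrative root:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string Dir = "/opt/hexagon", Cpu = "v60"; // illustrative values
  std::vector<std::string> Paths = {
      Dir + "/hexagon/lib/" + Cpu + "/G0/pic", // HasG0 && HasPIC
      Dir + "/hexagon/lib/" + Cpu + "/G0",     // HasG0
      Dir + "/hexagon/lib/" + Cpu,
      Dir + "/hexagon/lib"};
  for (const std::string &P : Paths)
    std::cout << P << '\n';
}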
-Hexagon_TC::Hexagon_TC(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+HexagonToolChain::HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
: Linux(D, Triple, Args) {
- const std::string InstalledDir(getDriver().getInstalledDir());
- const std::string GnuDir = Hexagon_TC::GetGnuDir(InstalledDir, Args);
+ const std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
// Note: Generic_GCC::Generic_GCC adds InstalledDir and getDriver().Dir to
// program paths
- const std::string BinDir(GnuDir + "/bin");
- if (llvm::sys::fs::exists(BinDir))
+ const std::string BinDir(TargetDir + "/bin");
+ if (D.getVFS().exists(BinDir))
getProgramPaths().push_back(BinDir);
- // Determine version of GCC libraries and headers to use.
- const std::string HexagonDir(GnuDir + "/lib/gcc/hexagon");
- std::error_code ec;
- GCCVersion MaxVersion = GCCVersion::Parse("0.0.0");
- for (llvm::sys::fs::directory_iterator di(HexagonDir, ec), de;
- !ec && di != de; di = di.increment(ec)) {
- GCCVersion cv = GCCVersion::Parse(llvm::sys::path::filename(di->path()));
- if (MaxVersion < cv)
- MaxVersion = cv;
- }
- GCCLibAndIncVersion = MaxVersion;
-
- ToolChain::path_list *LibPaths = &getFilePaths();
+ ToolChain::path_list &LibPaths = getFilePaths();
// Remove paths added by Linux toolchain. Currently HexagonToolChain really
// targets 'elf' OS type, so the Linux paths are not appropriate. When we
// actually support 'linux' we'll need to fix this up.
- LibPaths->clear();
-
- GetHexagonLibraryPaths(Args, GetGCCLibAndIncVersion(), GetTargetCPU(Args),
- InstalledDir, LibPaths);
+ LibPaths.clear();
+ getHexagonLibraryPaths(Args, LibPaths);
}
-Hexagon_TC::~Hexagon_TC() {}
+HexagonToolChain::~HexagonToolChain() {}
-Tool *Hexagon_TC::buildAssembler() const {
+Tool *HexagonToolChain::buildAssembler() const {
return new tools::hexagon::Assembler(*this);
}
-Tool *Hexagon_TC::buildLinker() const {
+Tool *HexagonToolChain::buildLinker() const {
return new tools::hexagon::Linker(*this);
}
-void Hexagon_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- const Driver &D = getDriver();
-
+void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc) ||
DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
- std::string Ver(GetGCCLibAndIncVersion());
- std::string GnuDir = Hexagon_TC::GetGnuDir(D.InstalledDir, DriverArgs);
- std::string HexagonDir(GnuDir + "/lib/gcc/hexagon/" + Ver);
- addExternCSystemInclude(DriverArgs, CC1Args, HexagonDir + "/include");
- addExternCSystemInclude(DriverArgs, CC1Args, HexagonDir + "/include-fixed");
- addExternCSystemInclude(DriverArgs, CC1Args, GnuDir + "/hexagon/include");
+ const Driver &D = getDriver();
+ std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
+ D.PrefixDirs);
+ addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
}
-void Hexagon_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
-
+void HexagonToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
const Driver &D = getDriver();
- std::string Ver(GetGCCLibAndIncVersion());
- SmallString<128> IncludeDir(
- Hexagon_TC::GetGnuDir(D.InstalledDir, DriverArgs));
-
- llvm::sys::path::append(IncludeDir, "hexagon/include/c++/");
- llvm::sys::path::append(IncludeDir, Ver);
- addSystemInclude(DriverArgs, CC1Args, IncludeDir);
+ std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ addSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include/c++");
}
ToolChain::CXXStdlibType
-Hexagon_TC::GetCXXStdlibType(const ArgList &Args) const {
+HexagonToolChain::GetCXXStdlibType(const ArgList &Args) const {
Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
if (!A)
return ToolChain::CST_Libstdcxx;
StringRef Value = A->getValue();
- if (Value != "libstdc++") {
+ if (Value != "libstdc++")
getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
- }
return ToolChain::CST_Libstdcxx;
}
-static int getHexagonVersion(const ArgList &Args) {
- Arg *A = Args.getLastArg(options::OPT_march_EQ, options::OPT_mcpu_EQ);
- // Select the default CPU (v4) if none was given.
- if (!A)
- return 4;
+//
+// Returns the default CPU for Hexagon. This is the default compilation target
+// if no Hexagon processor is selected on the command line.
+//
+const StringRef HexagonToolChain::GetDefaultCPU() {
+ return "hexagonv60";
+}
- // FIXME: produce errors if we cannot parse the version.
- StringRef WhichHexagon = A->getValue();
- if (WhichHexagon.startswith("hexagonv")) {
- int Val;
- if (!WhichHexagon.substr(sizeof("hexagonv") - 1).getAsInteger(10, Val))
- return Val;
- }
- if (WhichHexagon.startswith("v")) {
- int Val;
- if (!WhichHexagon.substr(1).getAsInteger(10, Val))
- return Val;
+const StringRef HexagonToolChain::GetTargetCPUVersion(const ArgList &Args) {
+ Arg *CpuArg = nullptr;
+
+ for (auto &A : Args) {
+ if (A->getOption().matches(options::OPT_mcpu_EQ)) {
+ CpuArg = A;
+ A->claim();
+ }
}
- // FIXME: should probably be an error.
- return 4;
+ StringRef CPU = CpuArg ? CpuArg->getValue() : GetDefaultCPU();
+ if (CPU.startswith("hexagon"))
+ return CPU.substr(sizeof("hexagon") - 1);
+ return CPU;
}
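
A sketch of the CPU-name normalization above: a leading "hexagon" is stripped,
anything else passes through unchanged.

#include <cassert>
#include <string>

static std::string cpuVersion(const std::string &CPU) {
  const std::string Prefix = "hexagon";
  return CPU.compare(0, Prefix.size(), Prefix) == 0 ? CPU.substr(Prefix.size())
                                                    : CPU;
}

int main() {
  assert(cpuVersion("hexagonv60") == "v60");
  assert(cpuVersion("v55") == "v55");
}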
+// End Hexagon
-StringRef Hexagon_TC::GetTargetCPU(const ArgList &Args) {
- int V = getHexagonVersion(Args);
- // FIXME: We don't support versions < 4. We should error on them.
- switch (V) {
- default:
- llvm_unreachable("Unexpected version");
- case 5:
- return "v5";
- case 4:
- return "v4";
- case 3:
- return "v3";
- case 2:
- return "v2";
- case 1:
- return "v1";
- }
+/// AMDGPU Toolchain
+AMDGPUToolChain::AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) { }
+
+Tool *AMDGPUToolChain::buildLinker() const {
+ return new tools::amdgpu::Linker(*this);
}
-// End Hexagon
+// End AMDGPU
/// NaCl Toolchain
-NaCl_TC::NaCl_TC(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+NaClToolChain::NaClToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
// Remove paths added by Generic_GCC. NaCl Toolchain cannot use the
@@ -2317,45 +2764,39 @@ NaCl_TC::NaCl_TC(const Driver &D, const llvm::Triple &Triple,
std::string ToolPath(getDriver().ResourceDir + "/lib/");
switch (Triple.getArch()) {
- case llvm::Triple::x86: {
+ case llvm::Triple::x86:
file_paths.push_back(FilePath + "x86_64-nacl/lib32");
- file_paths.push_back(FilePath + "x86_64-nacl/usr/lib32");
+ file_paths.push_back(FilePath + "i686-nacl/usr/lib");
prog_paths.push_back(ProgPath + "x86_64-nacl/bin");
file_paths.push_back(ToolPath + "i686-nacl");
break;
- }
- case llvm::Triple::x86_64: {
+ case llvm::Triple::x86_64:
file_paths.push_back(FilePath + "x86_64-nacl/lib");
file_paths.push_back(FilePath + "x86_64-nacl/usr/lib");
prog_paths.push_back(ProgPath + "x86_64-nacl/bin");
file_paths.push_back(ToolPath + "x86_64-nacl");
break;
- }
- case llvm::Triple::arm: {
+ case llvm::Triple::arm:
file_paths.push_back(FilePath + "arm-nacl/lib");
file_paths.push_back(FilePath + "arm-nacl/usr/lib");
prog_paths.push_back(ProgPath + "arm-nacl/bin");
file_paths.push_back(ToolPath + "arm-nacl");
break;
- }
- case llvm::Triple::mipsel: {
+ case llvm::Triple::mipsel:
file_paths.push_back(FilePath + "mipsel-nacl/lib");
file_paths.push_back(FilePath + "mipsel-nacl/usr/lib");
prog_paths.push_back(ProgPath + "bin");
file_paths.push_back(ToolPath + "mipsel-nacl");
break;
- }
default:
break;
}
- // Use provided linker, not system linker
- Linker = GetProgramPath("ld");
NaClArmMacrosPath = GetFilePath("nacl-arm-macros.s");
}
-void NaCl_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void NaClToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
const Driver &D = getDriver();
if (DriverArgs.hasArg(options::OPT_nostdinc))
return;
@@ -2371,12 +2812,21 @@ void NaCl_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
SmallString<128> P(D.Dir + "/../");
switch (getTriple().getArch()) {
+ case llvm::Triple::x86:
+ // x86 is special because multilib style uses x86_64-nacl/include for libc
+ // headers but the SDK wants i686-nacl/usr/include. The other architectures
+ // have the same substring.
+ llvm::sys::path::append(P, "i686-nacl/usr/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::remove_filename(P);
+ llvm::sys::path::append(P, "x86_64-nacl/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ return;
case llvm::Triple::arm:
llvm::sys::path::append(P, "arm-nacl/usr/include");
break;
- case llvm::Triple::x86:
- llvm::sys::path::append(P, "x86_64-nacl/usr/include");
- break;
case llvm::Triple::x86_64:
llvm::sys::path::append(P, "x86_64-nacl/usr/include");
break;
@@ -2394,16 +2844,16 @@ void NaCl_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, P.str());
}
-void NaCl_TC::AddCXXStdlibLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+void NaClToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
// Check for -stdlib= flags. We only support libc++ but this consumes the arg
// if the value is libc++, and emits an error for other values.
GetCXXStdlibType(Args);
CmdArgs.push_back("-lc++");
}
-void NaCl_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void NaClToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
const Driver &D = getDriver();
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx))
@@ -2436,7 +2886,8 @@ void NaCl_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
-ToolChain::CXXStdlibType NaCl_TC::GetCXXStdlibType(const ArgList &Args) const {
+ToolChain::CXXStdlibType
+NaClToolChain::GetCXXStdlibType(const ArgList &Args) const {
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
StringRef Value = A->getValue();
if (Value == "libc++")
@@ -2447,8 +2898,9 @@ ToolChain::CXXStdlibType NaCl_TC::GetCXXStdlibType(const ArgList &Args) const {
return ToolChain::CST_Libcxx;
}
-std::string NaCl_TC::ComputeEffectiveClangTriple(const ArgList &Args,
- types::ID InputType) const {
+std::string
+NaClToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
llvm::Triple TheTriple(ComputeLLVMTriple(Args, InputType));
if (TheTriple.getArch() == llvm::Triple::arm &&
TheTriple.getEnvironment() == llvm::Triple::UnknownEnvironment)
@@ -2456,11 +2908,11 @@ std::string NaCl_TC::ComputeEffectiveClangTriple(const ArgList &Args,
return TheTriple.getTriple();
}
-Tool *NaCl_TC::buildLinker() const {
+Tool *NaClToolChain::buildLinker() const {
return new tools::nacltools::Linker(*this);
}
-Tool *NaCl_TC::buildAssembler() const {
+Tool *NaClToolChain::buildAssembler() const {
if (getTriple().getArch() == llvm::Triple::arm)
return new tools::nacltools::AssemblerARM(*this);
return new tools::gnutools::Assembler(*this);
@@ -2619,7 +3071,7 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
// back to '/usr/lib' if it doesn't exist.
if ((Triple.getArch() == llvm::Triple::x86 ||
Triple.getArch() == llvm::Triple::ppc) &&
- llvm::sys::fs::exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
+ D.getVFS().exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib32");
else
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
@@ -2666,7 +3118,7 @@ Tool *FreeBSD::buildAssembler() const {
Tool *FreeBSD::buildLinker() const { return new tools::freebsd::Linker(*this); }
-bool FreeBSD::UseSjLjExceptions() const {
+bool FreeBSD::UseSjLjExceptions(const ArgList &Args) const {
// FreeBSD uses SjLj exceptions on ARM oabi.
switch (getTriple().getEnvironment()) {
case llvm::Triple::GNUEABIHF:
@@ -2829,18 +3281,46 @@ Tool *Minix::buildAssembler() const {
Tool *Minix::buildLinker() const { return new tools::minix::Linker(*this); }
+static void addPathIfExists(const Driver &D, const Twine &Path,
+ ToolChain::path_list &Paths) {
+ if (D.getVFS().exists(Path))
+ Paths.push_back(Path.str());
+}
+
/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_GCC(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
+ GCCInstallation.init(Triple, Args);
+
+ path_list &Paths = getFilePaths();
+ if (GCCInstallation.isValid())
+ addPathIfExists(D, GCCInstallation.getInstallPath(), Paths);
+
+ addPathIfExists(D, getDriver().getInstalledDir(), Paths);
if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ addPathIfExists(D, getDriver().Dir, Paths);
- getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
+ addPathIfExists(D, getDriver().SysRoot + getDriver().Dir + "/../lib", Paths);
+
+ std::string LibPath = "/usr/lib/";
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::sparc:
+ break;
+ case llvm::Triple::x86_64:
+ LibPath += "amd64/";
+ break;
+ case llvm::Triple::sparcv9:
+ LibPath += "sparcv9/";
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+
+ addPathIfExists(D, getDriver().SysRoot + LibPath, Paths);
}
Tool *Solaris::buildAssembler() const {
@@ -2849,6 +3329,31 @@ Tool *Solaris::buildAssembler() const {
Tool *Solaris::buildLinker() const { return new tools::solaris::Linker(*this); }
+void Solaris::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Include the support directory for things like xlocale and fudged system
+ // headers.
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/v1/support/solaris");
+
+ if (GCCInstallation.isValid()) {
+ GCCVersion Version = GCCInstallation.getVersion();
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/gcc/" +
+ Version.MajorStr + "." +
+ Version.MinorStr +
+ "/include/c++/" + Version.Text);
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/gcc/" + Version.MajorStr +
+ "." + Version.MinorStr + "/include/c++/" +
+ Version.Text + "/" +
+ GCCInstallation.getTriple().str());
+ }
+}
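
A sketch of how the two GCC libstdc++ include paths compose; the version and
triple values are illustrative:

#include <iostream>
#include <string>

int main() {
  std::string SysRoot = ""; // illustrative
  std::string Major = "4", Minor = "8", Text = "4.8.2";
  std::string Triple = "sparc-sun-solaris2.11";
  std::string Base = SysRoot + "/usr/gcc/" + Major + "." + Minor +
                     "/include/c++/" + Text;
  std::cout << Base << '\n' << Base + "/" + Triple << '\n';
}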
+
/// Distribution (very bare-bones at the moment).
enum Distro {
@@ -2884,6 +3389,7 @@ enum Distro {
UbuntuUtopic,
UbuntuVivid,
UbuntuWily,
+ UbuntuXenial,
UnknownDistro
};
@@ -2898,10 +3404,10 @@ static bool IsDebian(enum Distro Distro) {
}
static bool IsUbuntu(enum Distro Distro) {
- return Distro >= UbuntuHardy && Distro <= UbuntuWily;
+ return Distro >= UbuntuHardy && Distro <= UbuntuXenial;
}
-static Distro DetectDistro(llvm::Triple::ArchType Arch) {
+static Distro DetectDistro(const Driver &D, llvm::Triple::ArchType Arch) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
llvm::MemoryBuffer::getFile("/etc/lsb-release");
if (File) {
@@ -2909,7 +3415,7 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) {
SmallVector<StringRef, 16> Lines;
Data.split(Lines, "\n");
Distro Version = UnknownDistro;
- for (const StringRef Line : Lines)
+ for (StringRef Line : Lines)
if (Version == UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro>(Line.substr(17))
.Case("hardy", UbuntuHardy)
@@ -2928,6 +3434,7 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) {
.Case("utopic", UbuntuUtopic)
.Case("vivid", UbuntuVivid)
.Case("wily", UbuntuWily)
+ .Case("xenial", UbuntuXenial)
.Default(UnknownDistro);
return Version;
}
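
A sketch of the codename extraction: the value after the 17-character
"DISTRIB_CODENAME=" key of an /etc/lsb-release line.

#include <cassert>
#include <string>

static std::string codename(const std::string &Line) {
  const std::string Key = "DISTRIB_CODENAME="; // 17 characters
  return Line.compare(0, Key.size(), Key) == 0 ? Line.substr(Key.size()) : "";
}

int main() {
  assert(codename("DISTRIB_CODENAME=xenial") == "xenial");
}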
@@ -2967,13 +3474,13 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) {
return UnknownDistro;
}
- if (llvm::sys::fs::exists("/etc/SuSE-release"))
+ if (D.getVFS().exists("/etc/SuSE-release"))
return OpenSUSE;
- if (llvm::sys::fs::exists("/etc/exherbo-release"))
+ if (D.getVFS().exists("/etc/exherbo-release"))
return Exherbo;
- if (llvm::sys::fs::exists("/etc/arch-release"))
+ if (D.getVFS().exists("/etc/arch-release"))
return ArchLinux;
return UnknownDistro;
@@ -2985,9 +3492,11 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) {
/// a target-triple directory in the library and header search paths.
/// Unfortunately, this triple does not align with the vanilla target triple,
/// so we provide a rough mapping here.
-static std::string getMultiarchTriple(const llvm::Triple &TargetTriple,
+static std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
StringRef SysRoot) {
- llvm::Triple::EnvironmentType TargetEnvironment = TargetTriple.getEnvironment();
+ llvm::Triple::EnvironmentType TargetEnvironment =
+ TargetTriple.getEnvironment();
// For most architectures, just use whatever we have rather than trying to be
// clever.
@@ -3002,92 +3511,91 @@ static std::string getMultiarchTriple(const llvm::Triple &TargetTriple,
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
- if (llvm::sys::fs::exists(SysRoot + "/lib/arm-linux-gnueabihf"))
+ if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabihf"))
return "arm-linux-gnueabihf";
} else {
- if (llvm::sys::fs::exists(SysRoot + "/lib/arm-linux-gnueabi"))
+ if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabi"))
return "arm-linux-gnueabi";
}
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
- if (llvm::sys::fs::exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
+ if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
return "armeb-linux-gnueabihf";
} else {
- if (llvm::sys::fs::exists(SysRoot + "/lib/armeb-linux-gnueabi"))
+ if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabi"))
return "armeb-linux-gnueabi";
}
break;
case llvm::Triple::x86:
- if (llvm::sys::fs::exists(SysRoot + "/lib/i386-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/i386-linux-gnu"))
return "i386-linux-gnu";
break;
case llvm::Triple::x86_64:
// We don't want this for x32, otherwise it will match x86_64 libs
if (TargetEnvironment != llvm::Triple::GNUX32 &&
- llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
+ D.getVFS().exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
break;
case llvm::Triple::aarch64:
- if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/aarch64-linux-gnu"))
return "aarch64-linux-gnu";
break;
case llvm::Triple::aarch64_be:
- if (llvm::sys::fs::exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
return "aarch64_be-linux-gnu";
break;
case llvm::Triple::mips:
- if (llvm::sys::fs::exists(SysRoot + "/lib/mips-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/mips-linux-gnu"))
return "mips-linux-gnu";
break;
case llvm::Triple::mipsel:
- if (llvm::sys::fs::exists(SysRoot + "/lib/mipsel-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/mipsel-linux-gnu"))
return "mipsel-linux-gnu";
break;
case llvm::Triple::mips64:
- if (llvm::sys::fs::exists(SysRoot + "/lib/mips64-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
return "mips64-linux-gnu";
- if (llvm::sys::fs::exists(SysRoot + "/lib/mips64-linux-gnuabi64"))
+ if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnuabi64"))
return "mips64-linux-gnuabi64";
break;
case llvm::Triple::mips64el:
- if (llvm::sys::fs::exists(SysRoot + "/lib/mips64el-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
return "mips64el-linux-gnu";
- if (llvm::sys::fs::exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
+ if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
return "mips64el-linux-gnuabi64";
break;
case llvm::Triple::ppc:
- if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
return "powerpc-linux-gnuspe";
- if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnu"))
return "powerpc-linux-gnu";
break;
case llvm::Triple::ppc64:
- if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc64-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc64-linux-gnu"))
return "powerpc64-linux-gnu";
break;
case llvm::Triple::ppc64le:
- if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc64le-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/powerpc64le-linux-gnu"))
return "powerpc64le-linux-gnu";
break;
case llvm::Triple::sparc:
- if (llvm::sys::fs::exists(SysRoot + "/lib/sparc-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/sparc-linux-gnu"))
return "sparc-linux-gnu";
break;
case llvm::Triple::sparcv9:
- if (llvm::sys::fs::exists(SysRoot + "/lib/sparc64-linux-gnu"))
+ if (D.getVFS().exists(SysRoot + "/lib/sparc64-linux-gnu"))
return "sparc64-linux-gnu";
break;
+ case llvm::Triple::systemz:
+ if (D.getVFS().exists(SysRoot + "/lib/s390x-linux-gnu"))
+ return "s390x-linux-gnu";
+ break;
}
return TargetTriple.str();
}
-static void addPathIfExists(Twine Path, ToolChain::path_list &Paths) {
- if (llvm::sys::fs::exists(Path))
- Paths.push_back(Path.str());
-}
-
static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
if (isMipsArch(Triple.getArch())) {
// lib32 directory has a special meaning on MIPS targets.
@@ -3120,7 +3628,8 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
- GCCInstallation.init(D, Triple, Args);
+ GCCInstallation.init(Triple, Args);
+ CudaInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
@@ -3138,9 +3647,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
GCCInstallation.getTriple().str() + "/bin")
.str());
- Linker = GetLinkerPath();
-
- Distro Distro = DetectDistro(Arch);
+ Distro Distro = DetectDistro(D, Arch);
if (IsOpenSUSE(Distro) || IsUbuntu(Distro)) {
ExtraOpts.push_back("-z");
@@ -3150,7 +3657,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
ExtraOpts.push_back("-X");
- const bool IsAndroid = Triple.getEnvironment() == llvm::Triple::Android;
+ const bool IsAndroid = Triple.isAndroid();
const bool IsMips = isMipsArch(Arch);
if (IsMips && !SysRoot.empty())
@@ -3190,7 +3697,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
path_list &Paths = getFilePaths();
const std::string OSLibDir = getOSLibDir(Triple, Args);
- const std::string MultiarchTriple = getMultiarchTriple(Triple, SysRoot);
+ const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
@@ -3200,7 +3707,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
- addPathIfExists((GCCInstallation.getInstallPath() + Multilib.gccSuffix()),
+ addPathIfExists(D, GCCInstallation.getInstallPath() + Multilib.gccSuffix(),
Paths);
// GCC cross compiling toolchains will install target libraries which ship
@@ -3221,8 +3728,8 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
//
// Note that this matches the GCC behavior. See the below comment for where
// Clang diverges from GCC's behavior.
- addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
- Multilib.osSuffix(),
+ addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
+ OSLibDir + Multilib.osSuffix(),
Paths);
// If the GCC installation we found is inside of the sysroot, we want to
@@ -3235,8 +3742,8 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// configurations but this seems somewhere between questionable and simply
// a bug.
if (StringRef(LibPath).startswith(SysRoot)) {
- addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
- addPathIfExists(LibPath + "/../" + OSLibDir, Paths);
+ addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
}
}
@@ -3246,27 +3753,29 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot)) {
- addPathIfExists(D.Dir + "/../lib/" + MultiarchTriple, Paths);
- addPathIfExists(D.Dir + "/../" + OSLibDir, Paths);
+ addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
}
- addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
- addPathIfExists(SysRoot + "/lib/../" + OSLibDir, Paths);
- addPathIfExists(SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
- addPathIfExists(SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
// Try walking via the GCC triple path in case of biarch or multiarch GCC
// installations with strange symlinks.
if (GCCInstallation.isValid()) {
- addPathIfExists(SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
+ addPathIfExists(D,
+ SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
"/../../" + OSLibDir,
Paths);
// Add the 'other' biarch variant path
Multilib BiarchSibling;
if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
- addPathIfExists(
- GCCInstallation.getInstallPath() + BiarchSibling.gccSuffix(), Paths);
+ addPathIfExists(D, GCCInstallation.getInstallPath() +
+ BiarchSibling.gccSuffix(),
+ Paths);
}
// See comments above on the multilib variant for details of why this is
@@ -3274,14 +3783,14 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const std::string &LibPath = GCCInstallation.getParentLibPath();
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const Multilib &Multilib = GCCInstallation.getMultilib();
- addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib" +
- Multilib.osSuffix(),
+ addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib" +
+ Multilib.osSuffix(),
Paths);
// See comments above on the multilib variant for details of why this is
// only included from within the sysroot.
if (StringRef(LibPath).startswith(SysRoot))
- addPathIfExists(LibPath, Paths);
+ addPathIfExists(D, LibPath, Paths);
}
// Similar to the logic for GCC above, if we are currently running Clang
@@ -3290,10 +3799,10 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot))
- addPathIfExists(D.Dir + "/../lib", Paths);
+ addPathIfExists(D, D.Dir + "/../lib", Paths);
- addPathIfExists(SysRoot + "/lib", Paths);
- addPathIfExists(SysRoot + "/usr/lib", Paths);
+ addPathIfExists(D, SysRoot + "/lib", Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib", Paths);
}
bool Linux::HasNativeLLVMSupport() const { return true; }
@@ -3323,12 +3832,12 @@ std::string Linux::computeSysRoot() const {
(InstallDir + "/../../../../" + TripleStr + "/libc" + Multilib.osSuffix())
.str();
- if (llvm::sys::fs::exists(Path))
+ if (getVFS().exists(Path))
return Path;
Path = (InstallDir + "/../../../../sysroot" + Multilib.osSuffix()).str();
- if (llvm::sys::fs::exists(Path))
+ if (getVFS().exists(Path))
return Path;
return std::string();
@@ -3404,6 +3913,10 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
"/usr/include/arm-linux-gnueabi"};
const StringRef ARMHFMultiarchIncludeDirs[] = {
"/usr/include/arm-linux-gnueabihf"};
+ const StringRef ARMEBMultiarchIncludeDirs[] = {
+ "/usr/include/armeb-linux-gnueabi"};
+ const StringRef ARMEBHFMultiarchIncludeDirs[] = {
+ "/usr/include/armeb-linux-gnueabihf"};
const StringRef MIPSMultiarchIncludeDirs[] = {"/usr/include/mips-linux-gnu"};
const StringRef MIPSELMultiarchIncludeDirs[] = {
"/usr/include/mipsel-linux-gnu"};
@@ -3422,6 +3935,8 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
"/usr/include/sparc-linux-gnu"};
const StringRef Sparc64MultiarchIncludeDirs[] = {
"/usr/include/sparc64-linux-gnu"};
+ const StringRef SYSTEMZMultiarchIncludeDirs[] = {
+ "/usr/include/s390x-linux-gnu"};
ArrayRef<StringRef> MultiarchIncludeDirs;
switch (getTriple().getArch()) {
case llvm::Triple::x86_64:
@@ -3435,11 +3950,19 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
break;
case llvm::Triple::arm:
+ case llvm::Triple::thumb:
if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
MultiarchIncludeDirs = ARMHFMultiarchIncludeDirs;
else
MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
+ MultiarchIncludeDirs = ARMEBHFMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
+ break;
case llvm::Triple::mips:
MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
break;
@@ -3467,11 +3990,14 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
case llvm::Triple::sparcv9:
MultiarchIncludeDirs = Sparc64MultiarchIncludeDirs;
break;
+ case llvm::Triple::systemz:
+ MultiarchIncludeDirs = SYSTEMZMultiarchIncludeDirs;
+ break;
default:
break;
}
for (StringRef Dir : MultiarchIncludeDirs) {
- if (llvm::sys::fs::exists(SysRoot + Dir)) {
+ if (D.getVFS().exists(SysRoot + Dir)) {
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + Dir);
break;
}
@@ -3488,37 +4014,24 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
}
-/// \brief Helper to add the variant paths of a libstdc++ installation.
-/*static*/ bool Linux::addLibStdCXXIncludePaths(
- Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
- StringRef TargetMultiarchTriple, Twine IncludeSuffix,
- const ArgList &DriverArgs, ArgStringList &CC1Args) {
- if (!llvm::sys::fs::exists(Base + Suffix))
- return false;
-
- addSystemInclude(DriverArgs, CC1Args, Base + Suffix);
- // The vanilla GCC layout of libstdc++ headers uses a triple subdirectory. If
- // that path exists or we have neither a GCC nor target multiarch triple, use
- // this vanilla search path.
- if ((GCCMultiarchTriple.empty() && TargetMultiarchTriple.empty()) ||
- llvm::sys::fs::exists(Base + Suffix + "/" + GCCTriple + IncludeSuffix)) {
- addSystemInclude(DriverArgs, CC1Args,
- Base + Suffix + "/" + GCCTriple + IncludeSuffix);
- } else {
- // Otherwise try to use multiarch naming schemes which have normalized the
- // triples and put the triple before the suffix.
- //
- // GCC surprisingly uses *both* the GCC triple with a multilib suffix and
- // the target triple, so we support that here.
- addSystemInclude(DriverArgs, CC1Args,
- Base + "/" + GCCMultiarchTriple + Suffix + IncludeSuffix);
- addSystemInclude(DriverArgs, CC1Args,
- Base + "/" + TargetMultiarchTriple + Suffix);
+static std::string DetectLibcxxIncludePath(StringRef base) {
+ std::error_code EC;
+ int MaxVersion = 0;
+ std::string MaxVersionString = "";
+ for (llvm::sys::fs::directory_iterator LI(base, EC), LE; !EC && LI != LE;
+ LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ int Version;
+ if (VersionText[0] == 'v' &&
+ !VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
+ if (Version > MaxVersion) {
+ MaxVersion = Version;
+ MaxVersionString = VersionText;
+ }
+ }
}
-
- addSystemInclude(DriverArgs, CC1Args, Base + Suffix + "/backward");
- return true;
+ return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
}
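// A minimal standalone sketch of the version-directory scan above, using
// std::filesystem instead of llvm::sys::fs (a hypothetical rewrite, not the
// patch's code). It picks the highest-numbered "v<N>" subdirectory of Base.
#include <cstdlib>
#include <filesystem>
#include <string>
#include <system_error>

static std::string detectLibcxxIncludePath(const std::string &Base) {
  namespace fs = std::filesystem;
  int MaxVersion = 0;
  std::string Best;
  std::error_code EC;
  for (const auto &Entry : fs::directory_iterator(Base, EC)) {
    std::string Name = Entry.path().filename().string();
    // Crude parse: the real code uses getAsInteger, which also rejects
    // trailing garbage such as "v1x"; strtol would accept it.
    if (Name.size() > 1 && Name[0] == 'v') {
      int Version =
          static_cast<int>(std::strtol(Name.c_str() + 1, nullptr, 10));
      if (Version > MaxVersion) {
        MaxVersion = Version;
        Best = Base + "/" + Name;
      }
    }
  }
  return MaxVersion ? Best : std::string();
}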
void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
@@ -3530,17 +4043,14 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
// Check if libc++ has been enabled and provide its include paths if so.
if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) {
const std::string LibCXXIncludePathCandidates[] = {
- // The primary location is within the Clang installation.
- // FIXME: We shouldn't hard code 'v1' here to make Clang future proof to
- // newer ABI versions.
- getDriver().Dir + "/../include/c++/v1",
+ DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
// We also check the system, as for a long time this was the only place
// Clang looked.
// FIXME: We should really remove this. It doesn't make any sense.
- getDriver().SysRoot + "/usr/include/c++/v1"};
+ DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/include/c++")};
for (const auto &IncludePath : LibCXXIncludePathCandidates) {
- if (!llvm::sys::fs::exists(IncludePath))
+ if (IncludePath.empty() || !getVFS().exists(IncludePath))
continue;
// Add the first candidate that exists.
addSystemInclude(DriverArgs, CC1Args, IncludePath);
@@ -3561,10 +4071,10 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
StringRef InstallDir = GCCInstallation.getInstallPath();
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
- const std::string GCCMultiarchTriple =
- getMultiarchTriple(GCCInstallation.getTriple(), getDriver().SysRoot);
+ const std::string GCCMultiarchTriple = getMultiarchTriple(
+ getDriver(), GCCInstallation.getTriple(), getDriver().SysRoot);
const std::string TargetMultiarchTriple =
- getMultiarchTriple(getTriple(), getDriver().SysRoot);
+ getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
const GCCVersion &Version = GCCInstallation.getVersion();
// The primary search for libstdc++ supports multiarch variants.
@@ -3598,6 +4108,18 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
+void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nocudainc))
+ return;
+
+ if (CudaInstallation.isValid()) {
+ addSystemInclude(DriverArgs, CC1Args, CudaInstallation.getIncludePath());
+ CC1Args.push_back("-include");
+ CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
+ }
+}
+
bool Linux::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
SanitizerMask Linux::getSupportedSanitizers() const {
@@ -3607,24 +4129,39 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::mips64el;
const bool IsPowerPC64 = getTriple().getArch() == llvm::Triple::ppc64 ||
getTriple().getArch() == llvm::Triple::ppc64le;
+ const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::KernelAddress;
Res |= SanitizerKind::Vptr;
- if (IsX86_64 || IsMIPS64) {
+ Res |= SanitizerKind::SafeStack;
+ if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::DataFlow;
+ if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::Leak;
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
- }
- if (IsX86_64 || IsMIPS64 || IsPowerPC64)
+ if (IsX86_64 || IsMIPS64 || IsPowerPC64 || IsAArch64)
Res |= SanitizerKind::Memory;
if (IsX86 || IsX86_64) {
Res |= SanitizerKind::Function;
- Res |= SanitizerKind::SafeStack;
}
return Res;
}
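// Hedged illustration of the mask algebra above, assuming SanitizerMask acts
// as a plain bitmask (toy bit values; the real kinds live in SanitizerKind):
#include <cassert>
#include <cstdint>

void sanitizerMaskSketch() {
  enum : uint64_t { Address = 1u << 0, Thread = 1u << 1, Memory = 1u << 2 };
  bool IsX86_64 = true, IsMIPS64 = false, IsAArch64 = false,
       IsPowerPC64 = false;
  uint64_t Res = Address; // always supported on Linux
  if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
    Res |= Thread;        // mirrors the per-arch branches above
  if (IsX86_64 || IsMIPS64 || IsPowerPC64 || IsAArch64)
    Res |= Memory;
  assert((Res & Thread) && (Res & Memory));
}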
+void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (!needsProfileRT(Args)) return;
+
+ // Add the linker option -u__llvm_runtime_variable to cause the runtime
+ // initialization module to be linked in.
+ if (!Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
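// Sketch of the "-u<symbol>" trick used above: marking a symbol undefined on
// the link line forces the archive member that defines it (here the profile
// runtime's initialization module) to be pulled in. The hook name below is
// an assumption; the real one comes from llvm::getInstrProfRuntimeHookVarName().
#include <string>
#include <vector>

void addProfileHookArg(std::vector<std::string> &CmdArgs) {
  const std::string HookVar = "__llvm_profile_runtime"; // assumed name
  CmdArgs.push_back("-u" + HookVar);
}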
+
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
@@ -3638,10 +4175,7 @@ DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back("/usr/lib");
- if (llvm::sys::fs::exists("/usr/lib/gcc47"))
- getFilePaths().push_back("/usr/lib/gcc47");
- else
- getFilePaths().push_back("/usr/lib/gcc44");
+ getFilePaths().push_back("/usr/lib/gcc50");
}
Tool *DragonFly::buildAssembler() const {
@@ -3665,6 +4199,22 @@ CudaToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
Linux::addClangTargetOptions(DriverArgs, CC1Args);
CC1Args.push_back("-fcuda-is-device");
+
+ if (DriverArgs.hasArg(options::OPT_nocudalib))
+ return;
+
+ std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(
+ DriverArgs.getLastArgValue(options::OPT_march_EQ));
+ if (!LibDeviceFile.empty()) {
+ CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+
+ // Libdevice in CUDA-7.0 requires a PTX version more recent than the
+ // LLVM default. Use PTX 4.2, which is the PTX version that came with
+ // CUDA-7.0.
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+ptx42");
+ }
}
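// Hedged sketch of the libdevice lookup this hunk depends on, assuming a
// GPU-arch-to-path map like the CudaLibDeviceMap declared in ToolChains.h:
#include <map>
#include <string>

std::string lookupLibDeviceFile(const std::map<std::string, std::string> &Map,
                                const std::string &Gpu) {
  auto It = Map.find(Gpu); // e.g. Gpu == "sm_35" from -march=
  return It == Map.end() ? std::string() : It->second;
}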
llvm::opt::DerivedArgList *
@@ -3712,29 +4262,32 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
}
/// XCore tool chain
-XCore::XCore(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+XCoreToolChain::XCoreToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
: ToolChain(D, Triple, Args) {
// ProgramPaths are found via 'PATH' environment variable.
}
-Tool *XCore::buildAssembler() const {
+Tool *XCoreToolChain::buildAssembler() const {
return new tools::XCore::Assembler(*this);
}
-Tool *XCore::buildLinker() const { return new tools::XCore::Linker(*this); }
+Tool *XCoreToolChain::buildLinker() const {
+ return new tools::XCore::Linker(*this);
+}
-bool XCore::isPICDefault() const { return false; }
+bool XCoreToolChain::isPICDefault() const { return false; }
-bool XCore::isPIEDefault() const { return false; }
+bool XCoreToolChain::isPIEDefault() const { return false; }
-bool XCore::isPICDefaultForced() const { return false; }
+bool XCoreToolChain::isPICDefaultForced() const { return false; }
-bool XCore::SupportsProfiling() const { return false; }
+bool XCoreToolChain::SupportsProfiling() const { return false; }
-bool XCore::hasBlocksRuntime() const { return false; }
+bool XCoreToolChain::hasBlocksRuntime() const { return false; }
-void XCore::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void XCoreToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc) ||
DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
@@ -3747,13 +4300,13 @@ void XCore::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
}
}
-void XCore::addClangTargetOptions(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void XCoreToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
CC1Args.push_back("-nostdsysteminc");
}
-void XCore::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void XCoreToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc) ||
DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx))
@@ -3767,15 +4320,84 @@ void XCore::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
-void XCore::AddCXXStdlibLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+void XCoreToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
// We don't output any lib args. This is handled by xcc.
}
-// SHAVEToolChain does not call Clang's C compiler.
-// We override SelectTool to avoid testing ShouldUseClangCompiler().
-Tool *SHAVEToolChain::SelectTool(const JobAction &JA) const {
+MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+ // If a target of 'sparc-myriad-elf' is specified to clang, it wants to use
+ // 'sparc-myriad--elf' (note the unknown OS) as the canonical triple.
+ // This won't work to find gcc. Instead we give the installation detector an
+ // extra triple, which is preferable to further hacks of the logic that at
+ // present is based solely on getArch(). In particular, it would be wrong to
+ // choose the myriad installation when targeting a non-myriad sparc install.
+ switch (Triple.getArch()) {
+ default:
+ D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
+ << "myriad";
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::shave:
+ GCCInstallation.init(Triple, Args, {"sparc-myriad-elf"});
+ }
+
+ if (GCCInstallation.isValid()) {
+ // The contents of LibDir are independent of the version of gcc.
+ // This contains libc, libg (a superset of libc), libm, libstdc++, libssp.
+ SmallString<128> LibDir(GCCInstallation.getParentLibPath());
+ if (Triple.getArch() == llvm::Triple::sparcel)
+ llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib/le");
+ else
+ llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib");
+ addPathIfExists(D, LibDir, getFilePaths());
+
+ // This directory contains crt{i,n,begin,end}.o as well as libgcc.
+ // These files are tied to a particular version of gcc.
+ SmallString<128> CompilerSupportDir(GCCInstallation.getInstallPath());
+ // There are actually 4 choices: {le,be} x {fpu,nofpu}
+ // but as this toolchain is for LEON sparc, it can assume FPU.
+ if (Triple.getArch() == llvm::Triple::sparcel)
+ llvm::sys::path::append(CompilerSupportDir, "le");
+ addPathIfExists(D, CompilerSupportDir, getFilePaths());
+ }
+}
+
+MyriadToolChain::~MyriadToolChain() {}
+
+void MyriadToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (!DriverArgs.hasArg(options::OPT_nostdinc))
+ addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
+}
+
+void MyriadToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Only libstdc++, for now.
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+
+ addLibStdCXXIncludePaths(
+ LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
+ "", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
+}
+
+// MyriadToolChain handles several triples:
+// {shave,sparc{,el}}-myriad-{rtems,unknown}-elf
+Tool *MyriadToolChain::SelectTool(const JobAction &JA) const {
+ // The inherited method works fine if not targeting the SHAVE.
+ if (!isShaveCompilation(getTriple()))
+ return ToolChain::SelectTool(JA);
switch (JA.getKind()) {
+ case Action::PreprocessJobClass:
case Action::CompileJobClass:
if (!Compiler)
Compiler.reset(new tools::SHAVE::Compiler(*this));
@@ -3789,28 +4411,122 @@ Tool *SHAVEToolChain::SelectTool(const JobAction &JA) const {
}
}
-SHAVEToolChain::SHAVEToolChain(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Generic_GCC(D, Triple, Args) {}
+Tool *MyriadToolChain::buildLinker() const {
+ return new tools::Myriad::Linker(*this);
+}
+
+WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ // Use LLD by default.
+ DefaultLinker = "lld";
+}
+
+bool WebAssembly::IsMathErrnoDefault() const { return false; }
+
+bool WebAssembly::IsObjCNonFragileABIDefault() const { return true; }
+
+bool WebAssembly::UseObjCMixedDispatch() const { return true; }
+
+bool WebAssembly::isPICDefault() const { return false; }
+
+bool WebAssembly::isPIEDefault() const { return false; }
+
+bool WebAssembly::isPICDefaultForced() const { return false; }
+
+bool WebAssembly::IsIntegratedAssemblerDefault() const { return true; }
+
+// TODO: Support Objective C stuff.
+bool WebAssembly::SupportsObjCGC() const { return false; }
-SHAVEToolChain::~SHAVEToolChain() {}
+bool WebAssembly::hasBlocksRuntime() const { return false; }
-/// Following are methods necessary to avoid having moviClang be an abstract
-/// class.
+// TODO: Support profiling.
+bool WebAssembly::SupportsProfiling() const { return false; }
-Tool *SHAVEToolChain::getTool(Action::ActionClass AC) const {
- // SelectTool() must find a tool using the method in the superclass.
- // There's nothing we can do if that fails.
- llvm_unreachable("SHAVEToolChain can't getTool");
+bool WebAssembly::HasNativeLLVMSupport() const { return true; }
+
+void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, true))
+ CC1Args.push_back("-fuse-init-array");
}
-Tool *SHAVEToolChain::buildLinker() const {
- // SHAVEToolChain executables can not be linked except by the vendor tools.
- llvm_unreachable("SHAVEToolChain can't buildLinker");
+Tool *WebAssembly::buildLinker() const {
+ return new tools::wasm::Linker(*this);
}
-Tool *SHAVEToolChain::buildAssembler() const {
- // This one you'd think should be reachable since we expose an
- // assembler to the driver, except not the way it expects.
- llvm_unreachable("SHAVEToolChain can't buildAssembler");
+PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ if (Args.hasArg(options::OPT_static))
+ D.Diag(diag::err_drv_unsupported_opt_for_target) << "-static" << "PS4";
+
+ // Determine where to find the PS4 libraries. We use SCE_PS4_SDK_DIR
+ // if it exists; otherwise use the driver's installation path, which
+ // should be <SDK_DIR>/host_tools/bin.
+
+ SmallString<512> PS4SDKDir;
+ if (const char *EnvValue = getenv("SCE_PS4_SDK_DIR")) {
+ if (!llvm::sys::fs::exists(EnvValue))
+ getDriver().Diag(clang::diag::warn_drv_ps4_sdk_dir) << EnvValue;
+ PS4SDKDir = EnvValue;
+ } else {
+ PS4SDKDir = getDriver().Dir;
+ llvm::sys::path::append(PS4SDKDir, "/../../");
+ }
+
+ // By default, the driver won't report a warning if it can't find
+ // PS4's include or lib directories. This behavior could be changed if
+ // -Weverything or -Winvalid-or-nonexistent-directory options are passed.
+ // If -isysroot was passed, use that as the SDK base path.
+ std::string PrefixDir;
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ PrefixDir = A->getValue();
+ if (!llvm::sys::fs::exists(PrefixDir))
+ getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
+ } else
+ PrefixDir = PS4SDKDir.str();
+
+ SmallString<512> PS4SDKIncludeDir(PrefixDir);
+ llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
+ if (!Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nostdlibinc) &&
+ !Args.hasArg(options::OPT_isysroot) &&
+ !Args.hasArg(options::OPT__sysroot_EQ) &&
+ !llvm::sys::fs::exists(PS4SDKIncludeDir)) {
+ getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << "PS4 system headers" << PS4SDKIncludeDir;
+ }
+
+ SmallString<512> PS4SDKLibDir(PS4SDKDir);
+ llvm::sys::path::append(PS4SDKLibDir, "target/lib");
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs) &&
+ !Args.hasArg(options::OPT__sysroot_EQ) && !Args.hasArg(options::OPT_E) &&
+ !Args.hasArg(options::OPT_c) && !Args.hasArg(options::OPT_S) &&
+ !Args.hasArg(options::OPT_emit_ast) &&
+ !llvm::sys::fs::exists(PS4SDKLibDir)) {
+ getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << "PS4 system libraries" << PS4SDKLibDir;
+ return;
+ }
+ getFilePaths().push_back(PS4SDKLibDir.str());
+}
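// Standalone sketch of the SDK-root precedence implemented above: the
// SCE_PS4_SDK_DIR environment variable wins, else fall back to a path
// derived from the driver's install directory (hypothetical helper):
#include <cstdlib>
#include <string>

std::string resolvePS4SDKDir(const std::string &DriverDir) {
  if (const char *Env = std::getenv("SCE_PS4_SDK_DIR"))
    return Env; // the real code also warns when this path does not exist
  return DriverDir + "/../../";
}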
+
+Tool *PS4CPU::buildAssembler() const {
+ return new tools::PS4cpu::Assemble(*this);
+}
+
+Tool *PS4CPU::buildLinker() const { return new tools::PS4cpu::Link(*this); }
+
+bool PS4CPU::isPICDefault() const { return true; }
+
+bool PS4CPU::HasNativeLLVMSupport() const { return true; }
+
+SanitizerMask PS4CPU::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Vptr;
+ return Res;
}
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index 59eaade6b51c..f4b6b1529b30 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -78,6 +78,7 @@ public:
class GCCInstallationDetector {
bool IsValid;
llvm::Triple GCCTriple;
+ const Driver &D;
// FIXME: These might be better as path objects.
std::string GCCInstallPath;
@@ -99,9 +100,9 @@ public:
MultilibSet Multilibs;
public:
- GCCInstallationDetector() : IsValid(false) {}
- void init(const Driver &D, const llvm::Triple &TargetTriple,
- const llvm::opt::ArgList &Args);
+ explicit GCCInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
+ void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args,
+ ArrayRef<std::string> ExtraTripleAliases = None);
/// \brief Check whether we detected a valid GCC install.
bool isValid() const { return IsValid; }
@@ -145,11 +146,53 @@ public:
const std::string &LibDir,
StringRef CandidateTriple,
bool NeedsBiarchSuffix = false);
+
+ void scanLibDirForGCCTripleSolaris(const llvm::Triple &TargetArch,
+ const llvm::opt::ArgList &Args,
+ const std::string &LibDir,
+ StringRef CandidateTriple,
+ bool NeedsBiarchSuffix = false);
};
protected:
GCCInstallationDetector GCCInstallation;
+ /// \brief A class to find a viable CUDA installation.
+ class CudaInstallationDetector {
+ bool IsValid;
+ const Driver &D;
+ std::string CudaInstallPath;
+ std::string CudaLibPath;
+ std::string CudaLibDevicePath;
+ std::string CudaIncludePath;
+ llvm::StringMap<std::string> CudaLibDeviceMap;
+
+ public:
+ CudaInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
+ void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args);
+
+ /// \brief Check whether we detected a valid Cuda install.
+ bool isValid() const { return IsValid; }
+ /// \brief Print information about the detected CUDA installation.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Get the detected Cuda installation path.
+ StringRef getInstallPath() const { return CudaInstallPath; }
+ /// \brief Get the detected Cuda Include path.
+ StringRef getIncludePath() const { return CudaIncludePath; }
+ /// \brief Get the detected Cuda library path.
+ StringRef getLibPath() const { return CudaLibPath; }
+ /// \brief Get the detected Cuda device library path.
+ StringRef getLibDevicePath() const { return CudaLibDevicePath; }
+ /// \brief Get the libdevice file for the given architecture.
+ std::string getLibDeviceFile(StringRef Gpu) const {
+ return CudaLibDeviceMap.lookup(Gpu);
+ }
+ };
+
+ CudaInstallationDetector CudaInstallation;
+
public:
Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
@@ -177,6 +220,13 @@ protected:
/// \brief Check whether the target triple's architecture is 32-bits.
bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+ bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix, StringRef GCCTriple,
+ StringRef GCCMultiarchTriple,
+ StringRef TargetMultiarchTriple,
+ Twine IncludeSuffix,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
/// @}
private:
@@ -236,8 +286,8 @@ public:
/// Add any profiling runtime libraries that are needed. This is essentially a
/// MachO specific version of addProfileRT in Tools.cpp.
- virtual void addProfileRTLibs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const {
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override {
// There aren't any profiling libs for embedded targets currently.
}
@@ -293,7 +343,9 @@ public:
bool UseDwarfDebugFlags() const override;
- bool UseSjLjExceptions() const override { return false; }
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
/// }
};
@@ -308,7 +360,15 @@ public:
// the argument translation business.
mutable bool TargetInitialized;
- enum DarwinPlatformKind { MacOS, IPhoneOS, IPhoneOSSimulator };
+ enum DarwinPlatformKind {
+ MacOS,
+ IPhoneOS,
+ IPhoneOSSimulator,
+ TvOS,
+ TvOSSimulator,
+ WatchOS,
+ WatchOSSimulator
+ };
mutable DarwinPlatformKind TargetPlatform;
@@ -336,7 +396,8 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
bool isKernelStatic() const override {
- return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0);
+ return (!(isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) &&
+ !isTargetWatchOS());
}
void addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -365,12 +426,13 @@ protected:
bool isTargetIPhoneOS() const {
assert(TargetInitialized && "Target not initialized!");
- return TargetPlatform == IPhoneOS;
+ return TargetPlatform == IPhoneOS || TargetPlatform == TvOS;
}
bool isTargetIOSSimulator() const {
assert(TargetInitialized && "Target not initialized!");
- return TargetPlatform == IPhoneOSSimulator;
+ return TargetPlatform == IPhoneOSSimulator ||
+ TargetPlatform == TvOSSimulator;
}
bool isTargetIOSBased() const {
@@ -378,6 +440,36 @@ protected:
return isTargetIPhoneOS() || isTargetIOSSimulator();
}
+ bool isTargetTvOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOS;
+ }
+
+ bool isTargetTvOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOSSimulator;
+ }
+
+ bool isTargetTvOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == TvOS || TargetPlatform == TvOSSimulator;
+ }
+
+ bool isTargetWatchOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOS;
+ }
+
+ bool isTargetWatchOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOSSimulator;
+ }
+
+ bool isTargetWatchOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == WatchOS || TargetPlatform == WatchOSSimulator;
+ }
+
bool isTargetMacOS() const {
assert(TargetInitialized && "Target not initialized!");
return TargetPlatform == MacOS;
@@ -428,7 +520,7 @@ public:
unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
// Stack protectors default to on for user code on 10.5,
// and for everything in 10.6 and beyond
- if (isTargetIOSBased())
+ if (isTargetIOSBased() || isTargetWatchOSBased())
return 1;
else if (isTargetMacOS() && !isMacosxVersionLT(10, 6))
return 1;
@@ -442,7 +534,7 @@ public:
void CheckObjCARC() const override;
- bool UseSjLjExceptions() const override;
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override;
SanitizerMask getSupportedSanitizers() const override;
};
@@ -469,6 +561,15 @@ public:
void AddLinkARCArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+ // Until dtrace (via CTF) and LLDB can deal with distributed debug info,
+ // Darwin defaults to standalone/full debug info.
+ bool GetDefaultStandaloneDebug() const override { return true; }
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::LLDB;
+ }
+
/// }
private:
@@ -521,6 +622,12 @@ public:
bool IsIntegratedAssemblerDefault() const override { return true; }
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
@@ -572,6 +679,7 @@ public:
unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return 2;
}
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
protected:
Tool *buildAssembler() const override;
@@ -615,9 +723,16 @@ public:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- bool UseSjLjExceptions() const override;
+ bool UseSjLjExceptions(const llvm::opt::ArgList &Args) const override;
bool isPIEDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+ // Until dtrace (via CTF) and LLDB can deal with distributed debug info,
+ // FreeBSD defaults to standalone/full debug info.
+ bool GetDefaultStandaloneDebug() const override { return true; }
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::LLDB;
+ }
protected:
Tool *buildAssembler() const override;
@@ -679,26 +794,19 @@ public:
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
bool isPIEDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+ virtual std::string computeSysRoot() const;
- std::string Linker;
std::vector<std::string> ExtraOpts;
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
-
-private:
- static bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix,
- StringRef GCCTriple,
- StringRef GCCMultiarchTriple,
- StringRef TargetMultiarchTriple,
- Twine IncludeSuffix,
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args);
-
- std::string computeSysRoot() const;
};
class LLVM_LIBRARY_VISIBILITY CudaToolChain : public Linux {
@@ -713,16 +821,52 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
};
-class LLVM_LIBRARY_VISIBILITY Hexagon_TC : public Linux {
+class LLVM_LIBRARY_VISIBILITY MipsLLVMToolChain : public Linux {
+protected:
+ Tool *buildLinker() const override;
+
+public:
+ MipsLLVMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ bool Shared = false) const override;
+
+ std::string computeSysRoot() const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return GCCInstallation.isValid() ? RuntimeLibType::RLT_Libgcc
+ : RuntimeLibType::RLT_CompilerRT;
+ }
+
+private:
+ Multilib SelectedMultilib;
+ std::string LibSuffix;
+};
+
+class LLVM_LIBRARY_VISIBILITY HexagonToolChain : public Linux {
protected:
GCCVersion GCCLibAndIncVersion;
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
public:
- Hexagon_TC(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
- ~Hexagon_TC() override;
+ HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~HexagonToolChain() override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -733,21 +877,37 @@ public:
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
+ bool IsIntegratedAssemblerDefault() const override {
+ return true;
+ }
+
+ std::string getHexagonTargetDir(
+ const std::string &InstalledDir,
+ const SmallVectorImpl<std::string> &PrefixDirs) const;
+ void getHexagonLibraryPaths(const llvm::opt::ArgList &Args,
+ ToolChain::path_list &LibPaths) const;
- static std::string GetGnuDir(const std::string &InstalledDir,
- const llvm::opt::ArgList &Args);
+ static const StringRef GetDefaultCPU();
+ static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args);
- static StringRef GetTargetCPU(const llvm::opt::ArgList &Args);
+ static Optional<unsigned> getSmallDataThreshold(
+ const llvm::opt::ArgList &Args);
+};
- static const char *GetSmallDataThreshold(const llvm::opt::ArgList &Args);
+class LLVM_LIBRARY_VISIBILITY AMDGPUToolChain : public Generic_ELF {
+protected:
+ Tool *buildLinker() const override;
- static bool UsesG0(const char *smallDataThreshold);
+public:
+ AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ bool IsIntegratedAssemblerDefault() const override { return true; }
};
-class LLVM_LIBRARY_VISIBILITY NaCl_TC : public Generic_ELF {
+class LLVM_LIBRARY_VISIBILITY NaClToolChain : public Generic_ELF {
public:
- NaCl_TC(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ NaClToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -765,14 +925,13 @@ public:
return getTriple().getArch() == llvm::Triple::mipsel;
}
- // Get the path to the file containing NaCl's ARM macros. It lives in NaCl_TC
- // because the AssembleARM tool needs a const char * that it can pass around
- // and the toolchain outlives all the jobs.
+ // Get the path to the file containing NaCl's ARM macros.
+ // It lives in NaClToolChain because the ARMAssembler tool needs a
+ // const char * that it can pass around.
const char *GetNaClArmMacrosPath() const { return NaClArmMacrosPath.c_str(); }
std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
types::ID InputType) const override;
- std::string Linker;
protected:
Tool *buildLinker() const override;
@@ -801,6 +960,10 @@ public:
MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ const char *BoundArch) const override;
+
bool IsIntegratedAssemblerDefault() const override;
bool IsUnwindTablesDefault() const override;
bool isPICDefault() const override;
@@ -814,8 +977,14 @@ public:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- bool getWindowsSDKDir(std::string &path, int &major, int &minor) const;
+ bool getWindowsSDKDir(std::string &path, int &major,
+ std::string &windowsSDKIncludeVersion,
+ std::string &windowsSDKLibVersion) const;
bool getWindowsSDKLibraryPath(std::string &path) const;
+ /// \brief Check whether the Universal CRT should be used, if available.
+ bool useUniversalCRT(std::string &visualStudioDir) const;
+ bool getUniversalCRTSdkDir(std::string &path, std::string &ucrtVersion) const;
+ bool getUniversalCRTLibraryPath(std::string &path) const;
bool getVisualStudioInstallDir(std::string &path) const;
bool getVisualStudioBinariesFolder(const char *clangProgramPath,
std::string &path) const;
@@ -828,7 +997,9 @@ protected:
void AddSystemIncludeWithSubfolder(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
const std::string &folder,
- const char *subfolder) const;
+ const Twine &subfolder1,
+ const Twine &subfolder2 = "",
+ const Twine &subfolder3 = "") const;
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
@@ -858,15 +1029,17 @@ public:
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ SanitizerMask getSupportedSanitizers() const override;
+
protected:
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
};
-class LLVM_LIBRARY_VISIBILITY XCore : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY XCoreToolChain : public ToolChain {
public:
- XCore(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ XCoreToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
protected:
Tool *buildAssembler() const override;
@@ -890,29 +1063,84 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
};
-/// SHAVEToolChain - A tool chain using the compiler installed by the the
-// Movidius SDK into MV_TOOLS_DIR (which we assume will be copied to llvm's
-// installation dir) to perform all subcommands.
-class LLVM_LIBRARY_VISIBILITY SHAVEToolChain : public Generic_GCC {
+/// MyriadToolChain - A tool chain using either clang or the external compiler
+/// installed by the Movidius SDK to perform all subcommands.
+class LLVM_LIBRARY_VISIBILITY MyriadToolChain : public Generic_GCC {
public:
- SHAVEToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
- ~SHAVEToolChain() override;
+ MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~MyriadToolChain() override;
- virtual Tool *SelectTool(const JobAction &JA) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ Tool *SelectTool(const JobAction &JA) const override;
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
protected:
- Tool *getTool(Action::ActionClass AC) const override;
- Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+ bool isShaveCompilation(const llvm::Triple &T) const {
+ return T.getArch() == llvm::Triple::shave;
+ }
private:
mutable std::unique_ptr<Tool> Compiler;
mutable std::unique_ptr<Tool> Assembler;
};
+class LLVM_LIBRARY_VISIBILITY WebAssembly final : public ToolChain {
+public:
+ WebAssembly(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+private:
+ bool IsMathErrnoDefault() const override;
+ bool IsObjCNonFragileABIDefault() const override;
+ bool UseObjCMixedDispatch() const override;
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool IsIntegratedAssemblerDefault() const override;
+ bool hasBlocksRuntime() const override;
+ bool SupportsObjCGC() const override;
+ bool SupportsProfiling() const override;
+ bool HasNativeLLVMSupport() const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ Tool *buildLinker() const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY PS4CPU : public Generic_ELF {
+public:
+ PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+ bool HasNativeLLVMSupport() const override;
+ bool isPICDefault() const override;
+
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 2; // SSPStrong
+ }
+
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::SCE;
+ }
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_H
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index b801705a8f57..7a185dc0764d 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -1,4 +1,4 @@
-//===--- Tools.cpp - Tools Implementations --------------------------------===//
+//===--- Tools.cpp - Tools Implementations ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -32,7 +32,7 @@
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
@@ -41,6 +41,7 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TargetParser.h"
#ifdef LLVM_ON_UNIX
#include <unistd.h> // For getuid().
@@ -51,18 +52,51 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-static void addAssemblerKPIC(const ArgList &Args, ArgStringList &CmdArgs) {
- Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
- options::OPT_fpic, options::OPT_fno_pic,
- options::OPT_fPIE, options::OPT_fno_PIE,
- options::OPT_fpie, options::OPT_fno_pie);
- if (!LastPICArg)
- return;
- if (LastPICArg->getOption().matches(options::OPT_fPIC) ||
- LastPICArg->getOption().matches(options::OPT_fpic) ||
- LastPICArg->getOption().matches(options::OPT_fPIE) ||
- LastPICArg->getOption().matches(options::OPT_fpie)) {
- CmdArgs.push_back("-KPIC");
+static void handleTargetFeaturesGroup(const ArgList &Args,
+ std::vector<const char *> &Features,
+ OptSpecifier Group) {
+ for (const Arg *A : Args.filtered(Group)) {
+ StringRef Name = A->getOption().getName();
+ A->claim();
+
+ // Skip over "-m".
+ assert(Name.startswith("m") && "Invalid feature name.");
+ Name = Name.substr(1);
+
+ bool IsNegative = Name.startswith("no-");
+ if (IsNegative)
+ Name = Name.substr(3);
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
+}
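// Plain std::string sketch of the "-m[no-]feature" -> "+/-feature"
// translation above (hypothetical helper, not the patch's code):
#include <cassert>
#include <string>

std::string featureFromOptionName(std::string Name) {
  Name = Name.substr(1);                // drop the leading "m"
  bool Negative = Name.compare(0, 3, "no-") == 0;
  if (Negative)
    Name = Name.substr(3);
  return (Negative ? "-" : "+") + Name; // "mcrc" -> "+crc", "mno-crc" -> "-crc"
}

void featureFromOptionNameTest() {
  assert(featureFromOptionName("mcrc") == "+crc");
  assert(featureFromOptionName("mno-crc") == "-crc");
}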
+
+static const char *getSparcAsmModeForCPU(StringRef Name,
+ const llvm::Triple &Triple) {
+ if (Triple.getArch() == llvm::Triple::sparcv9) {
+ return llvm::StringSwitch<const char *>(Name)
+ .Case("niagara", "-Av9b")
+ .Case("niagara2", "-Av9b")
+ .Case("niagara3", "-Av9d")
+ .Case("niagara4", "-Av9d")
+ .Default("-Av9");
+ } else {
+ return llvm::StringSwitch<const char *>(Name)
+ .Case("v8", "-Av8")
+ .Case("supersparc", "-Av8")
+ .Case("sparclite", "-Asparclite")
+ .Case("f934", "-Asparclite")
+ .Case("hypersparc", "-Av8")
+ .Case("sparclite86x", "-Asparclite")
+ .Case("sparclet", "-Asparclet")
+ .Case("tsc701", "-Asparclet")
+ .Case("v9", "-Av8plus")
+ .Case("ultrasparc", "-Av8plus")
+ .Case("ultrasparc3", "-Av8plus")
+ .Case("niagara", "-Av8plusb")
+ .Case("niagara2", "-Av8plusb")
+ .Case("niagara3", "-Av8plusd")
+ .Case("niagara4", "-Av8plusd")
+ .Default("-Av8");
}
}
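// Quick usage sketch for the CPU -> assembler-mode mapping above (a test
// harness, under the assumption that getSparcAsmModeForCPU and llvm::Triple
// are visible at this point in Tools.cpp):
#include <cassert>
#include <cstring>

void sparcAsmModeSketch() {
  llvm::Triple V9("sparcv9-unknown-linux-gnu");
  assert(std::strcmp(getSparcAsmModeForCPU("niagara3", V9), "-Av9d") == 0);
  llvm::Triple V8("sparc-unknown-linux-gnu");
  assert(std::strcmp(getSparcAsmModeForCPU("ultrasparc", V8), "-Av8plus") == 0);
}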
@@ -199,13 +233,9 @@ static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
for (const auto &II : Inputs) {
- if (!TC.HasNativeLLVMSupport()) {
+ if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
// Don't try to pass LLVM inputs unless we have native support.
- if (II.getType() == types::TY_LLVM_IR ||
- II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
- D.Diag(diag::err_drv_no_linker_llvm_support) << TC.getTripleString();
- }
+ D.Diag(diag::err_drv_no_linker_llvm_support) << TC.getTripleString();
// Add filenames immediately.
if (II.isFilename()) {
@@ -262,7 +292,8 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfo &Output,
- const InputInfoList &Inputs) const {
+ const InputInfoList &Inputs,
+ const ToolChain *AuxToolChain) const {
Arg *A;
CheckPreprocessingOptions(D, Args);
@@ -412,11 +443,11 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
A->render(Args, CmdArgs);
}
- Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
- Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F,
- options::OPT_index_header_map);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_D, options::OPT_U, options::OPT_I_Group,
+ options::OPT_F, options::OPT_index_header_map});
- // Add -Wp, and -Xassembler if using the preprocessor.
+ // Add -Wp, and -Xpreprocessor if using the preprocessor.
// FIXME: There is a very unfortunate problem here, some troubled
// souls abuse -Wp, to pass preprocessor options in gcc syntax. To
@@ -454,12 +485,26 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// OBJCPLUS_INCLUDE_PATH - system includes enabled when compiling ObjC++.
addDirectoryList(Args, CmdArgs, "-objcxx-isystem", "OBJCPLUS_INCLUDE_PATH");
+ // Optional AuxToolChain indicates that we need to include headers
+ // for more than one target. If that's the case, add include paths
+ // from AuxToolChain right after include paths of the same kind for
+ // the current target.
+
// Add C++ include arguments, if needed.
- if (types::isCXX(Inputs[0].getType()))
+ if (types::isCXX(Inputs[0].getType())) {
getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ if (AuxToolChain)
+ AuxToolChain->AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ }
// Add system include arguments.
getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
+ if (AuxToolChain)
+ AuxToolChain->AddClangSystemIncludeArgs(Args, CmdArgs);
+
+ // Add CUDA include arguments, if needed.
+ if (types::isCuda(Inputs[0].getType()))
+ getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
}
// FIXME: Move to target hook.
@@ -498,6 +543,8 @@ static bool isNoCommonDefault(const llvm::Triple &Triple) {
return false;
case llvm::Triple::xcore:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
return true;
}
}
@@ -507,13 +554,13 @@ static bool isNoCommonDefault(const llvm::Triple &Triple) {
// Get SubArch (vN).
static int getARMSubArchVersionNumber(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
- return llvm::ARMTargetParser::parseArchVersion(Arch);
+ return llvm::ARM::parseArchVersion(Arch);
}
// True if M-profile.
static bool isARMMProfile(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
- unsigned Profile = llvm::ARMTargetParser::parseArchProfile(Arch);
+ unsigned Profile = llvm::ARM::parseArchProfile(Arch);
return Profile == llvm::ARM::PK_M;
}
@@ -542,19 +589,8 @@ static void getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef HWDiv,
std::vector<const char *> &Features) {
- if (HWDiv == "arm") {
- Features.push_back("+hwdiv-arm");
- Features.push_back("-hwdiv");
- } else if (HWDiv == "thumb") {
- Features.push_back("-hwdiv-arm");
- Features.push_back("+hwdiv");
- } else if (HWDiv == "arm,thumb" || HWDiv == "thumb,arm") {
- Features.push_back("+hwdiv-arm");
- Features.push_back("+hwdiv");
- } else if (HWDiv == "none") {
- Features.push_back("-hwdiv-arm");
- Features.push_back("-hwdiv");
- } else
+ unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
+ if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -562,82 +598,125 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
static void getARMFPUFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef FPU,
std::vector<const char *> &Features) {
- unsigned FPUID = llvm::ARMTargetParser::parseFPU(FPU);
- if (!llvm::ARMTargetParser::getFPUFeatures(FPUID, Features))
+ unsigned FPUID = llvm::ARM::parseFPU(FPU);
+ if (!llvm::ARM::getFPUFeatures(FPUID, Features))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
+// Decode ARM features from a string like +[no]featureA+[no]featureB+...
+static bool DecodeARMFeatures(const Driver &D, StringRef text,
+ std::vector<const char *> &Features) {
+ SmallVector<StringRef, 8> Split;
+ text.split(Split, StringRef("+"), -1, false);
+
+ for (StringRef Feature : Split) {
+ const char *FeatureName = llvm::ARM::getArchExtFeature(Feature);
+ if (FeatureName)
+ Features.push_back(FeatureName);
+ else
+ return false;
+ }
+ return true;
+}
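// std::string version of the '+'-splitting done by text.split(...) above,
// dropping empty pieces just as KeepEmpty=false does (illustrative only):
#include <string>
#include <vector>

std::vector<std::string> splitFeatures(const std::string &Text) {
  std::vector<std::string> Out; // "crc+nocrypto" -> {"crc", "nocrypto"}
  size_t Start = 0;
  while (Start <= Text.size()) {
    size_t End = Text.find('+', Start);
    if (End == std::string::npos)
      End = Text.size();
    if (End > Start)
      Out.push_back(Text.substr(Start, End - Start));
    Start = End + 1;
  }
  return Out;
}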
+
// Check if -march is valid by checking if it can be canonicalised and parsed.
// getARMArch is used here instead of just checking the -march value in order
// to handle -march=native correctly.
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName,
+ std::vector<const char *> &Features,
const llvm::Triple &Triple) {
+ std::pair<StringRef, StringRef> Split = ArchName.split("+");
+
std::string MArch = arm::getARMArch(ArchName, Triple);
- if (llvm::ARMTargetParser::parseArch(MArch) == llvm::ARM::AK_INVALID)
+ if (llvm::ARM::parseArch(MArch) == llvm::ARM::AK_INVALID ||
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
+ std::vector<const char *> &Features,
const llvm::Triple &Triple) {
+ std::pair<StringRef, StringRef> Split = CPUName.split("+");
+
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
- std::string Arch = arm::getARMArch(ArchName, Triple);
- if (strcmp(arm::getLLVMArchSuffixForARM(CPU, Arch), "") == 0)
+ if (arm::getLLVMArchSuffixForARM(CPU, ArchName, Triple).empty() ||
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, Features)))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
+static bool useAAPCSForMachO(const llvm::Triple &T) {
+ // The backend is hardwired to assume AAPCS for M-class processors; ensure
+ // the frontend matches that.
+ return T.getEnvironment() == llvm::Triple::EABI ||
+ T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
+}
+
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
-StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args,
- const llvm::Triple &Triple) {
- StringRef FloatABI;
+arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
+ const Driver &D = TC.getDriver();
+ const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(Args));
+ auto SubArch = getARMSubArchVersionNumber(Triple);
+ arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
options::OPT_mfloat_abi_EQ)) {
- if (A->getOption().matches(options::OPT_msoft_float))
- FloatABI = "soft";
- else if (A->getOption().matches(options::OPT_mhard_float))
- FloatABI = "hard";
- else {
- FloatABI = A->getValue();
- if (FloatABI != "soft" && FloatABI != "softfp" && FloatABI != "hard") {
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ABI = FloatABI::Soft;
+ } else if (A->getOption().matches(options::OPT_mhard_float)) {
+ ABI = FloatABI::Hard;
+ } else {
+ ABI = llvm::StringSwitch<arm::FloatABI>(A->getValue())
+ .Case("soft", FloatABI::Soft)
+ .Case("softfp", FloatABI::SoftFP)
+ .Case("hard", FloatABI::Hard)
+ .Default(FloatABI::Invalid);
+ if (ABI == FloatABI::Invalid && !StringRef(A->getValue()).empty()) {
D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
- FloatABI = "soft";
+ ABI = FloatABI::Soft;
}
}
+
+ // It is incorrect to select hard float ABI on MachO platforms if the ABI is
+ // "apcs-gnu".
+ if (Triple.isOSBinFormatMachO() && !useAAPCSForMachO(Triple) &&
+ ABI == FloatABI::Hard) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target) << A->getAsString(Args)
+ << Triple.getArchName();
+ }
}
// If unspecified, choose the default based on the platform.
- if (FloatABI.empty()) {
+ if (ABI == FloatABI::Invalid) {
switch (Triple.getOS()) {
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
- case llvm::Triple::IOS: {
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS: {
// Darwin defaults to "softfp" for v6 and v7.
- //
- if (getARMSubArchVersionNumber(Triple) == 6 ||
- getARMSubArchVersionNumber(Triple) == 7)
- FloatABI = "softfp";
- else
- FloatABI = "soft";
+ ABI = (SubArch == 6 || SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
break;
}
+ case llvm::Triple::WatchOS:
+ ABI = FloatABI::Hard;
+ break;
// FIXME: this is invalid for WindowsCE
case llvm::Triple::Win32:
- FloatABI = "hard";
+ ABI = FloatABI::Hard;
break;
case llvm::Triple::FreeBSD:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
- FloatABI = "hard";
+ ABI = FloatABI::Hard;
break;
default:
// FreeBSD defaults to soft float
- FloatABI = "soft";
+ ABI = FloatABI::Soft;
break;
}
break;
@@ -645,28 +724,20 @@ StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args,
default:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
- FloatABI = "hard";
- break;
- case llvm::Triple::GNUEABI:
- FloatABI = "softfp";
- break;
case llvm::Triple::EABIHF:
- FloatABI = "hard";
+ ABI = FloatABI::Hard;
break;
+ case llvm::Triple::GNUEABI:
case llvm::Triple::EABI:
// EABI is always AAPCS, and if it was not marked 'hard', it's softfp
- FloatABI = "softfp";
+ ABI = FloatABI::SoftFP;
break;
- case llvm::Triple::Android: {
- if (getARMSubArchVersionNumber(Triple) == 7)
- FloatABI = "softfp";
- else
- FloatABI = "soft";
+ case llvm::Triple::Android:
+ ABI = (SubArch == 7) ? FloatABI::SoftFP : FloatABI::Soft;
break;
- }
default:
// Assume "soft", but warn the user we are guessing.
- FloatABI = "soft";
+ ABI = FloatABI::Soft;
if (Triple.getOS() != llvm::Triple::UnknownOS ||
!Triple.isOSBinFormatMachO())
D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
@@ -675,16 +746,20 @@ StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args,
}
}
- return FloatABI;
+ assert(ABI != FloatABI::Invalid && "must select an ABI");
+ return ABI;
}
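// Plain-enum sketch of the string -> FloatABI mapping the StringSwitch above
// performs (illustrative; the real enum lives in the arm namespace):
#include <string>

enum class FloatABISketch { Invalid, Soft, SoftFP, Hard };

FloatABISketch parseFloatABI(const std::string &S) {
  if (S == "soft")
    return FloatABISketch::Soft;
  if (S == "softfp")
    return FloatABISketch::SoftFP;
  if (S == "hard")
    return FloatABISketch::Hard;
  return FloatABISketch::Invalid; // caller diagnoses, then falls back to Soft
}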
-static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+static void getARMTargetFeatures(const ToolChain &TC,
+ const llvm::Triple &Triple,
const ArgList &Args,
std::vector<const char *> &Features,
bool ForAS) {
+ const Driver &D = TC.getDriver();
+
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
- StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple);
+ arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
const Arg *WaCPU = nullptr, *WaFPU = nullptr;
const Arg *WaHDiv = nullptr, *WaArch = nullptr;
@@ -702,11 +777,11 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// assembler and the frontend behave the same.
// Use software floating point operations?
- if (FloatABI == "soft")
+ if (ABI == arm::FloatABI::Soft)
Features.push_back("+soft-float");
// Use software floating point argument passing?
- if (FloatABI != "hard")
+ if (ABI != arm::FloatABI::Hard)
Features.push_back("+soft-float-abi");
} else {
// Here, we make sure that -Wa,-mfpu/cpu/arch/hwdiv will be passed down
@@ -726,29 +801,6 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
}
- // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
- const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
- if (WaFPU) {
- if (FPUArg)
- D.Diag(clang::diag::warn_drv_unused_argument)
- << FPUArg->getAsString(Args);
- getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
- Features);
- } else if (FPUArg) {
- getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
- }
-
- // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
- const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
- if (WaHDiv) {
- if (HDivArg)
- D.Diag(clang::diag::warn_drv_unused_argument)
- << HDivArg->getAsString(Args);
- getARMHWDivFeatures(D, WaHDiv, Args,
- StringRef(WaHDiv->getValue()).substr(8), Features);
- } else if (HDivArg)
- getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
-
// Check -march. ClangAs gives preference to -Wa,-march=.
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
StringRef ArchName;
@@ -757,12 +809,12 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
D.Diag(clang::diag::warn_drv_unused_argument)
<< ArchArg->getAsString(Args);
ArchName = StringRef(WaArch->getValue()).substr(7);
- checkARMArchName(D, WaArch, Args, ArchName, Triple);
+ checkARMArchName(D, WaArch, Args, ArchName, Features, Triple);
// FIXME: Set Arch.
D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
} else if (ArchArg) {
ArchName = ArchArg->getValue();
- checkARMArchName(D, ArchArg, Args, ArchName, Triple);
+ checkARMArchName(D, ArchArg, Args, ArchName, Features, Triple);
}
// Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
@@ -773,15 +825,47 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
D.Diag(clang::diag::warn_drv_unused_argument)
<< CPUArg->getAsString(Args);
CPUName = StringRef(WaCPU->getValue()).substr(6);
- checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Triple);
+ checkARMCPUName(D, WaCPU, Args, CPUName, ArchName, Features, Triple);
} else if (CPUArg) {
CPUName = CPUArg->getValue();
- checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Triple);
+ checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, Features, Triple);
}
+ // Add the host CPU's features when targeting it via -mcpu=native.
+ if (CPUName == "native") {
+ llvm::StringMap<bool> HostFeatures;
+ if (llvm::sys::getHostCPUFeatures(HostFeatures))
+ for (auto &F : HostFeatures)
+ Features.push_back(
+ Args.MakeArgString((F.second ? "+" : "-") + F.first()));
+ }
+
+ // Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
+ const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
+ if (WaFPU) {
+ if (FPUArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << FPUArg->getAsString(Args);
+ getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
+ Features);
+ } else if (FPUArg) {
+ getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ }
+
+ // Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
+ const Arg *HDivArg = Args.getLastArg(options::OPT_mhwdiv_EQ);
+ if (WaHDiv) {
+ if (HDivArg)
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << HDivArg->getAsString(Args);
+ getARMHWDivFeatures(D, WaHDiv, Args,
+ StringRef(WaHDiv->getValue()).substr(8), Features);
+ } else if (HDivArg)
+ getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
+
// Setting -msoft-float effectively disables NEON because of the GCC
// implementation, although the same isn't true of VFP or VFP3.
- if (FloatABI == "soft") {
+ if (ABI == arm::FloatABI::Soft) {
Features.push_back("-neon");
// Also need to explicitly disable features which imply NEON.
Features.push_back("-crypto");
@@ -806,31 +890,72 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
options::OPT_mno_long_calls)) {
if (A->getOption().matches(options::OPT_mlong_calls))
Features.push_back("+long-calls");
- } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6))) {
+ } else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
+ !Triple.isWatchOS()) {
Features.push_back("+long-calls");
}
-}
-void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
- bool KernelOrKext) const {
- const Driver &D = getToolChain().getDriver();
- // Get the effective triple, which takes into account the deployment target.
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
- llvm::Triple Triple(TripleStr);
+  // Kernel code has stricter alignment requirements.
+ if (KernelOrKext)
+ Features.push_back("+strict-align");
+ else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
+ if (A->getOption().matches(options::OPT_munaligned_access)) {
+ // No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
+ if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
+ } else
+ Features.push_back("+strict-align");
+ } else {
+ // Assume pre-ARMv6 doesn't support unaligned accesses.
+ //
+ // ARMv6 may or may not support unaligned accesses depending on the
+ // SCTLR.U bit, which is architecture-specific. We assume ARMv6
+ // Darwin and NetBSD targets support unaligned accesses, and others don't.
+ //
+ // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
+ // which raises an alignment fault on unaligned accesses. Linux
+ // defaults this bit to 0 and handles it as a system-wide (not
+ // per-process) setting. It is therefore safe to assume that ARMv7+
+ // Linux targets support unaligned accesses. The same goes for NaCl.
+ //
+ // The above behavior is consistent with GCC.
+ int VersionNum = getARMSubArchVersionNumber(Triple);
+ if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
+ if (VersionNum < 6 ||
+ Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
+ Features.push_back("+strict-align");
+ } else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
+ if (VersionNum < 7)
+ Features.push_back("+strict-align");
+ } else
+ Features.push_back("+strict-align");
+ }
+ // llvm does not support reserving registers in general. There is support
+ // for reserving r9 on ARM though (defined as a platform-specific register
+ // in ARM EABI).
+ if (Args.hasArg(options::OPT_ffixed_r9))
+ Features.push_back("+reserve-r9");
+
+ // The kext linker doesn't know how to deal with movw/movt.
+ if (KernelOrKext)
+ Features.push_back("+no-movt");
+}
+
+void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
+ ArgStringList &CmdArgs, bool KernelOrKext) const {
// Select the ABI to use.
- //
// FIXME: Support -meabi.
// FIXME: Parts of this are duplicated in the backend, unify this somehow.
const char *ABIName = nullptr;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
ABIName = A->getValue();
} else if (Triple.isOSBinFormatMachO()) {
- // The backend is hardwired to assume AAPCS for M-class processors, ensure
- // the frontend matches that.
- if (Triple.getEnvironment() == llvm::Triple::EABI ||
- Triple.getOS() == llvm::Triple::UnknownOS || isARMMProfile(Triple)) {
+ if (useAAPCSForMachO(Triple)) {
ABIName = "aapcs";
+ } else if (Triple.isWatchOS()) {
+ ABIName = "aapcs16";
} else {
ABIName = "apcs-gnu";
}
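
The hard-wired Mach-O heuristic is folded into useAAPCSForMachO(), with watchOS picking the new aapcs16 ABI. A toy decision function with the same fallback order; the Triple struct here is a hypothetical stand-in for the real triple queries, and useAAPCSForMachO's body is not shown in this patch:

#include <iostream>

struct Triple {
  bool UseAAPCS;  // EABI environment, unknown OS, or M-profile
  bool IsWatchOS;
};

static const char *selectMachOABI(const Triple &T) {
  if (T.UseAAPCS)
    return "aapcs";
  if (T.IsWatchOS)
    return "aapcs16"; // new in this patch
  return "apcs-gnu";  // legacy Darwin default
}

int main() {
  std::cout << selectMachOABI({false, true}) << "\n"; // prints "aapcs16"
}
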
@@ -861,50 +986,24 @@ void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
CmdArgs.push_back(ABIName);
// Determine floating point ABI from the options & target defaults.
- StringRef FloatABI = tools::arm::getARMFloatABI(D, Args, Triple);
- if (FloatABI == "soft") {
+ arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
+ if (ABI == arm::FloatABI::Soft) {
// Floating point operations and argument passing are soft.
- //
// FIXME: This changes CPP defines, we need -target-soft-float.
CmdArgs.push_back("-msoft-float");
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("soft");
- } else if (FloatABI == "softfp") {
+ } else if (ABI == arm::FloatABI::SoftFP) {
// Floating point operations are hard, but argument passing is soft.
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("soft");
} else {
// Floating point operations and argument passing are hard.
- assert(FloatABI == "hard" && "Invalid float abi!");
+ assert(ABI == arm::FloatABI::Hard && "Invalid float abi!");
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("hard");
}
- // Kernel code has more strict alignment requirements.
- if (KernelOrKext) {
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-arm-strict-align");
-
- // The kext linker doesn't know how to deal with movw/movt.
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-arm-use-movt=0");
- }
-
- // -mkernel implies -mstrict-align; don't add the redundant option.
- if (!KernelOrKext) {
- if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
- CmdArgs.push_back("-backend-option");
- if (A->getOption().matches(options::OPT_mno_unaligned_access))
- CmdArgs.push_back("-arm-strict-align");
- else {
- if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
- D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
- CmdArgs.push_back("-arm-no-strict-align");
- }
- }
- }
-
// Forward the -mglobal-merge option for explicit control over the pass.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
options::OPT_mno_global_merge)) {
@@ -918,14 +1017,6 @@ void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
if (!Args.hasFlag(options::OPT_mimplicit_float,
options::OPT_mno_implicit_float, true))
CmdArgs.push_back("-no-implicit-float");
-
- // llvm does not support reserving registers in general. There is support
- // for reserving r9 on ARM though (defined as a platform-specific register
- // in ARM EABI).
- if (Args.hasArg(options::OPT_ffixed_r9)) {
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-arm-reserve-r9");
- }
}
// ARM tools end.
@@ -936,7 +1027,7 @@ static std::string getAArch64TargetCPU(const ArgList &Args) {
std::string CPU;
// If we have -mtune or -mcpu, use that.
if ((A = Args.getLastArg(options::OPT_mtune_EQ))) {
- CPU = A->getValue();
+ CPU = StringRef(A->getValue()).lower();
} else if ((A = Args.getLastArg(options::OPT_mcpu_EQ))) {
StringRef Mcpu = A->getValue();
CPU = Mcpu.split("+").first.lower();
@@ -981,15 +1072,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
- if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
- CmdArgs.push_back("-backend-option");
- if (A->getOption().matches(options::OPT_mno_unaligned_access))
- CmdArgs.push_back("-aarch64-strict-align");
- else
- CmdArgs.push_back("-aarch64-no-strict-align");
- }
-
if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
options::OPT_mno_fix_cortex_a53_835769)) {
CmdArgs.push_back("-backend-option");
@@ -997,7 +1079,7 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
else
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
- } else if (Triple.getEnvironment() == llvm::Triple::Android) {
+ } else if (Triple.isAndroid()) {
// Enabled A53 errata (835769) workaround by default on android
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
@@ -1012,11 +1094,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
else
CmdArgs.push_back("-aarch64-global-merge=true");
}
-
- if (Args.hasArg(options::OPT_ffixed_x18)) {
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-aarch64-reserve-x18");
- }
}
// Get CPU and ABI names. They are not independent
@@ -1034,6 +1111,10 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
DefMips64CPU = "mips64r6";
}
+ // MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
+ if (Triple.isAndroid())
+ DefMips64CPU = "mips64r6";
+
// MIPS3 is the default for mips64*-unknown-openbsd.
if (Triple.getOS() == llvm::Triple::OpenBSD)
DefMips64CPU = "mips3";
@@ -1087,6 +1168,16 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
// FIXME: Warn on inconsistent use of -march and -mabi.
}
+std::string mips::getMipsABILibSuffix(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ StringRef CPUName, ABIName;
+ tools::mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+ return llvm::StringSwitch<std::string>(ABIName)
+ .Case("o32", "")
+ .Case("n32", "32")
+ .Case("n64", "64");
+}
+
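
getMipsABILibSuffix maps the selected ABI to a multilib directory suffix, presumably so callers can form lib, lib32, or lib64 paths. A plain-C++ sketch of the same table; unlike the StringSwitch above, it spells out a fallback for unknown ABIs (an assumption on our part, since the patch provides no Default case):

#include <iostream>
#include <map>
#include <string>

static std::string mipsABILibSuffix(const std::string &ABI) {
  static const std::map<std::string, std::string> Suffixes = {
      {"o32", ""}, {"n32", "32"}, {"n64", "64"}};
  auto It = Suffixes.find(ABI);
  // Assumed fallback: treat unknown ABIs like o32 (empty suffix).
  return It != Suffixes.end() ? It->second : std::string();
}

int main() {
  std::cout << "lib" << mipsABILibSuffix("n64") << "\n"; // prints "lib64"
}
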
// Convert ABI name to the GNU tools acceptable variant.
static StringRef getGnuCompatibleMipsABIName(StringRef ABI) {
return llvm::StringSwitch<llvm::StringRef>(ABI)
@@ -1097,33 +1188,37 @@ static StringRef getGnuCompatibleMipsABIName(StringRef ABI) {
// Select the MIPS float ABI as determined by -msoft-float, -mhard-float,
// and -mfloat-abi=.
-static StringRef getMipsFloatABI(const Driver &D, const ArgList &Args) {
- StringRef FloatABI;
+static mips::FloatABI getMipsFloatABI(const Driver &D, const ArgList &Args) {
+ mips::FloatABI ABI = mips::FloatABI::Invalid;
if (Arg *A =
Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
options::OPT_mfloat_abi_EQ)) {
if (A->getOption().matches(options::OPT_msoft_float))
- FloatABI = "soft";
+ ABI = mips::FloatABI::Soft;
else if (A->getOption().matches(options::OPT_mhard_float))
- FloatABI = "hard";
+ ABI = mips::FloatABI::Hard;
else {
- FloatABI = A->getValue();
- if (FloatABI != "soft" && FloatABI != "hard") {
+ ABI = llvm::StringSwitch<mips::FloatABI>(A->getValue())
+ .Case("soft", mips::FloatABI::Soft)
+ .Case("hard", mips::FloatABI::Hard)
+ .Default(mips::FloatABI::Invalid);
+      if (ABI == mips::FloatABI::Invalid &&
+          !StringRef(A->getValue()).empty()) {
D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
- FloatABI = "hard";
+ ABI = mips::FloatABI::Hard;
}
}
}
// If unspecified, choose the default based on the platform.
- if (FloatABI.empty()) {
+ if (ABI == mips::FloatABI::Invalid) {
// Assume "hard", because it's a default value used by gcc.
// When we start to recognize specific target MIPS processors,
// we will be able to select the default more correctly.
- FloatABI = "hard";
+ ABI = mips::FloatABI::Hard;
}
- return FloatABI;
+ assert(ABI != mips::FloatABI::Invalid && "must select an ABI");
+ return ABI;
}
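
Both this function and the ppc:: variant below follow the same shape: parse into an Invalid sentinel, diagnose bad values, then fall back to the gcc-compatible default so callers never observe Invalid. A compact illustration of that pattern, simplified so a nullable string stands in for the Arg lookup and no diagnostic is emitted:

#include <cassert>
#include <string>

enum class FloatABI { Invalid, Soft, Hard };

static FloatABI selectFloatABI(const std::string *Flag) {
  FloatABI ABI = FloatABI::Invalid;
  if (Flag) {
    if (*Flag == "soft")
      ABI = FloatABI::Soft;
    else if (*Flag == "hard")
      ABI = FloatABI::Hard;
    // else: the real driver emits err_drv_invalid_mfloat_abi here.
  }
  if (ABI == FloatABI::Invalid)
    ABI = FloatABI::Hard; // gcc-compatible default
  assert(ABI != FloatABI::Invalid && "must select an ABI");
  return ABI;
}

int main() { return selectFloatABI(nullptr) == FloatABI::Hard ? 0 : 1; }
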
static void AddTargetFeature(const ArgList &Args,
@@ -1149,8 +1244,8 @@ static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
AddTargetFeature(Args, Features, options::OPT_mno_abicalls,
options::OPT_mabicalls, "noabicalls");
- StringRef FloatABI = getMipsFloatABI(D, Args);
- if (FloatABI == "soft") {
+ mips::FloatABI FloatABI = getMipsFloatABI(D, Args);
+ if (FloatABI == mips::FloatABI::Soft) {
// FIXME: Note, this is a hack. We need to pass the selected float
// mode to the MipsTargetInfoBase to define appropriate macros there.
// Now it is the only method.
@@ -1222,16 +1317,15 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName.data());
- StringRef FloatABI = getMipsFloatABI(D, Args);
-
- if (FloatABI == "soft") {
+ mips::FloatABI ABI = getMipsFloatABI(D, Args);
+ if (ABI == mips::FloatABI::Soft) {
// Floating point operations and argument passing are soft.
CmdArgs.push_back("-msoft-float");
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("soft");
} else {
// Floating point operations and argument passing are hard.
- assert(FloatABI == "hard" && "Invalid float abi!");
+ assert(ABI == mips::FloatABI::Hard && "Invalid float abi!");
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("hard");
}
@@ -1331,33 +1425,54 @@ static std::string getPPCTargetCPU(const ArgList &Args) {
return "";
}
-static void getPPCTargetFeatures(const ArgList &Args,
+static void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<const char *> &Features) {
- for (const Arg *A : Args.filtered(options::OPT_m_ppc_Features_Group)) {
- StringRef Name = A->getOption().getName();
- A->claim();
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_ppc_Features_Group);
- // Skip over "-m".
- assert(Name.startswith("m") && "Invalid feature name.");
- Name = Name.substr(1);
+ ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
+ if (FloatABI == ppc::FloatABI::Soft &&
+ !(Triple.getArch() == llvm::Triple::ppc64 ||
+ Triple.getArch() == llvm::Triple::ppc64le))
+ Features.push_back("+soft-float");
+ else if (FloatABI == ppc::FloatABI::Soft &&
+ (Triple.getArch() == llvm::Triple::ppc64 ||
+ Triple.getArch() == llvm::Triple::ppc64le))
+ D.Diag(diag::err_drv_invalid_mfloat_abi)
+ << "soft float is not supported for ppc64";
- bool IsNegative = Name.startswith("no-");
- if (IsNegative)
- Name = Name.substr(3);
+  // Altivec is a bit weird; allow overriding the Altivec feature here.
+ AddTargetFeature(Args, Features, options::OPT_faltivec,
+ options::OPT_fno_altivec, "altivec");
+}
- // Note that gcc calls this mfcrf and LLVM calls this mfocrf so we
- // pass the correct option to the backend while calling the frontend
- // option the same.
- // TODO: Change the LLVM backend option maybe?
- if (Name == "mfcrf")
- Name = "mfocrf";
+ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {
+ ppc::FloatABI ABI = ppc::FloatABI::Invalid;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ ABI = ppc::FloatABI::Soft;
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ ABI = ppc::FloatABI::Hard;
+ else {
+ ABI = llvm::StringSwitch<ppc::FloatABI>(A->getValue())
+ .Case("soft", ppc::FloatABI::Soft)
+ .Case("hard", ppc::FloatABI::Hard)
+ .Default(ppc::FloatABI::Invalid);
+      if (ABI == ppc::FloatABI::Invalid &&
+          !StringRef(A->getValue()).empty()) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = ppc::FloatABI::Hard;
+ }
+ }
+ }
- Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ // If unspecified, choose the default based on the platform.
+ if (ABI == ppc::FloatABI::Invalid) {
+ ABI = ppc::FloatABI::Hard;
}
- // Altivec is a bit weird, allow overriding of the Altivec feature here.
- AddTargetFeature(Args, Features, options::OPT_faltivec,
- options::OPT_fno_altivec, "altivec");
+ return ABI;
}
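
Several per-target loops that peeled "-m"/"-mno-" prefixes off option names are replaced by a single handleTargetFeaturesGroup helper, whose body is not shown in this hunk. The sketch below captures our assumed semantics: each option name arrives without the leading dash ("mfoo"/"mno-foo") and becomes a "+foo"/"-foo" backend feature string:

#include <iostream>
#include <string>
#include <vector>

static void handleFeature(const std::string &OptName,
                          std::vector<std::string> &Features) {
  std::string Name = OptName.substr(1); // drop the 'm'
  bool Negative = Name.rfind("no-", 0) == 0;
  if (Negative)
    Name = Name.substr(3);
  Features.push_back((Negative ? "-" : "+") + Name);
}

int main() {
  std::vector<std::string> F;
  handleFeature("mcrc", F);     // "+crc"
  handleFeature("mno-neon", F); // "-neon"
  for (auto &S : F)
    std::cout << S << "\n";
}
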
void Clang::AddPPCTargetArgs(const ArgList &Args,
@@ -1396,6 +1511,21 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
if (StringRef(A->getValue()) != "altivec")
ABIName = A->getValue();
+ ppc::FloatABI FloatABI =
+ ppc::getPPCFloatABI(getToolChain().getDriver(), Args);
+
+ if (FloatABI == ppc::FloatABI::Soft) {
+ // Floating point operations and argument passing are soft.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else {
+ // Floating point operations and argument passing are hard.
+ assert(FloatABI == ppc::FloatABI::Hard && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+
if (ABIName) {
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
@@ -1533,7 +1663,7 @@ static const char *getX86TargetCPU(const ArgList &Args,
return "btver2";
// On Android use targets compatible with gcc
- if (Triple.getEnvironment() == llvm::Triple::Android)
+ if (Triple.isAndroid())
return Is64Bit ? "x86-64" : "i686";
// Everything else goes to x86-64 in 64-bit mode.
@@ -1555,6 +1685,25 @@ static const char *getX86TargetCPU(const ArgList &Args,
}
}
+/// Get the (LLVM) name of the WebAssembly cpu we are targeting.
+static StringRef getWebAssemblyTargetCPU(const ArgList &Args) {
+ // If we have -mcpu=, use that.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef CPU = A->getValue();
+
+#ifdef __wasm__
+ // Handle "native" by examining the host. "native" isn't meaningful when
+ // cross compiling, so only support this when the host is also WebAssembly.
+ if (CPU == "native")
+ return llvm::sys::getHostCPUName();
+#endif
+
+ return CPU;
+ }
+
+ return "generic";
+}
+
static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
bool FromAs = false) {
switch (T.getArch()) {
@@ -1619,7 +1768,8 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
return getX86TargetCPU(Args, T);
case llvm::Triple::hexagon:
- return "hexagon" + toolchains::Hexagon_TC::GetTargetCPU(Args).str();
+ return "hexagon" +
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
case llvm::Triple::systemz:
return getSystemZTargetCPU(Args);
@@ -1627,11 +1777,15 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
return getR600TargetGPU(Args);
+
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return getWebAssemblyTargetCPU(Args);
}
}
static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs, bool IsThinLTO) {
// Tell the linker to load the plugin. This has to come before AddLinkerInputs
// as gold requires -plugin to come before any -plugin-opt that -Wl might
// forward.
@@ -1647,13 +1801,29 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
std::string CPU = getCPUName(Args, ToolChain.getTriple());
if (!CPU.empty())
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ StringRef OOpt;
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "3";
+ else if (A->getOption().matches(options::OPT_O))
+ OOpt = A->getValue();
+ else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ if (!OOpt.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
+ }
+
+ if (IsThinLTO)
+ CmdArgs.push_back("-plugin-opt=thinlto");
}
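
AddGoldPlugin now forwards the optimization level to the LTO plugin: -O4 and -Ofast collapse to O3, -O0 stays 0, and plain -O<n> passes n through. A standalone sketch of just that mapping; the option is assumed to be pre-isolated as a string, a simplification of the real Arg handling:

#include <iostream>
#include <string>

static std::string goldPluginOptLevel(const std::string &OOpt) {
  if (OOpt == "-O4" || OOpt == "-Ofast")
    return "3";
  if (OOpt == "-O0")
    return "0";
  if (OOpt.rfind("-O", 0) == 0 && OOpt.size() > 2)
    return OOpt.substr(2); // pass the value through, as the code above does
  return "";
}

int main() {
  std::cout << "-plugin-opt=O" << goldPluginOptLevel("-Ofast") << "\n"; // O3
}
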
/// This is a helper function for validating the optional refinement step
/// parameter in reciprocal argument strings. Return false if there is an error
/// parsing the refinement step. Otherwise, return true and set the Position
/// of the refinement step in the input string.
-static bool getRefinementStep(const StringRef &In, const Driver &D,
+static bool getRefinementStep(StringRef In, const Driver &D,
const Arg &A, size_t &Position) {
const char RefinementStepToken = ':';
Position = In.find(RefinementStepToken);
@@ -1807,7 +1977,7 @@ static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::Triple::ArchType ArchType = Triple.getArch();
// Add features to be compatible with gcc for Android.
- if (Triple.getEnvironment() == llvm::Triple::Android) {
+ if (Triple.isAndroid()) {
if (ArchType == llvm::Triple::x86_64) {
Features.push_back("+sse4.2");
Features.push_back("+popcnt");
@@ -1841,20 +2011,7 @@ static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
- for (const Arg *A : Args.filtered(options::OPT_m_x86_Features_Group)) {
- StringRef Name = A->getOption().getName();
- A->claim();
-
- // Skip over "-m".
- assert(Name.startswith("m") && "Invalid feature name.");
- Name = Name.substr(1);
-
- bool IsNegative = Name.startswith("no-");
- if (IsNegative)
- Name = Name.substr(3);
-
- Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
- }
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
}
void Clang::AddX86TargetArgs(const ArgList &Args,
@@ -1895,11 +2052,11 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
CmdArgs.push_back("-mqdsp6-compat");
CmdArgs.push_back("-Wreturn-type");
- if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) {
- std::string SmallDataThreshold = "-hexagon-small-data-threshold=";
- SmallDataThreshold += v;
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ std::string Opt = std::string("-hexagon-small-data-threshold=") + N;
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold));
+ CmdArgs.push_back(Args.MakeArgString(Opt));
}
if (!Args.hasArg(options::OPT_fno_short_enums))
@@ -1918,16 +2075,20 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
- for (const StringRef Feature : Split) {
+ for (StringRef Feature : Split) {
const char *result = llvm::StringSwitch<const char *>(Feature)
.Case("fp", "+fp-armv8")
.Case("simd", "+neon")
.Case("crc", "+crc")
.Case("crypto", "+crypto")
+ .Case("fp16", "+fullfp16")
+ .Case("profile", "+spe")
.Case("nofp", "-fp-armv8")
.Case("nosimd", "-neon")
.Case("nocrc", "-crc")
.Case("nocrypto", "-crypto")
+ .Case("nofp16", "-fullfp16")
+ .Case("noprofile", "-spe")
.Default(nullptr);
if (result)
Features.push_back(result);
@@ -1946,7 +2107,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" ||
- CPU == "cortex-a72") {
+ CPU == "cortex-a72" || CPU == "cortex-a35") {
Features.push_back("+neon");
Features.push_back("+crc");
Features.push_back("+crypto");
@@ -1973,6 +2134,8 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
// ok, no additional features.
} else if (Split.first == "armv8.1-a" || Split.first == "armv8.1a") {
Features.push_back("+v8.1a");
+ } else if (Split.first == "armv8.2-a" || Split.first == "armv8.2a" ) {
+ Features.push_back("+v8.2a");
} else {
return false;
}
@@ -1999,10 +2162,11 @@ static bool
getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
const ArgList &Args,
std::vector<const char *> &Features) {
+ std::string MtuneLowerCase = Mtune.lower();
// Handle CPU name is 'native'.
- if (Mtune == "native")
- Mtune = llvm::sys::getHostCPUName();
- if (Mtune == "cyclone") {
+ if (MtuneLowerCase == "native")
+ MtuneLowerCase = llvm::sys::getHostCPUName();
+ if (MtuneLowerCase == "cyclone") {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
@@ -2062,11 +2226,51 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
else
Features.push_back("-crc");
}
+
+ if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access))
+ if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ Features.push_back("+strict-align");
+
+ if (Args.hasArg(options::OPT_ffixed_x18))
+ Features.push_back("+reserve-x18");
}
-static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+static void getHexagonTargetFeatures(const ArgList &Args,
+ std::vector<const char *> &Features) {
+ bool HasHVX = false, HasHVXD = false;
+
+ // FIXME: This should be able to use handleTargetFeaturesGroup except it is
+ // doing dependent option handling here rather than in initFeatureMap or a
+ // similar handler.
+ for (auto &A : Args) {
+ auto &Opt = A->getOption();
+ if (Opt.matches(options::OPT_mhexagon_hvx))
+ HasHVX = true;
+ else if (Opt.matches(options::OPT_mno_hexagon_hvx))
+ HasHVXD = HasHVX = false;
+ else if (Opt.matches(options::OPT_mhexagon_hvx_double))
+ HasHVXD = HasHVX = true;
+ else if (Opt.matches(options::OPT_mno_hexagon_hvx_double))
+ HasHVXD = false;
+ else
+ continue;
+ A->claim();
+ }
+
+ Features.push_back(HasHVX ? "+hvx" : "-hvx");
+ Features.push_back(HasHVXD ? "+hvx-double" : "-hvx-double");
+}
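
The HVX flags are dependent: -mno-hvx also clears hvx-double, and -mhvx-double implies hvx, with the last flag on the command line winning. A self-contained sketch of that scan; the flag spellings here are assumed shorthands for the OPT_mhexagon_* options above:

#include <cassert>
#include <string>
#include <utility>
#include <vector>

static std::pair<bool, bool> scanHVX(const std::vector<std::string> &Args) {
  bool HVX = false, HVXD = false;
  for (const auto &A : Args) {
    if (A == "-mhvx")                HVX = true;
    else if (A == "-mno-hvx")        HVXD = HVX = false;
    else if (A == "-mhvx-double")    HVXD = HVX = true;
    else if (A == "-mno-hvx-double") HVXD = false;
  }
  return {HVX, HVXD};
}

int main() {
  auto R = scanHVX({"-mhvx-double", "-mno-hvx", "-mhvx"});
  assert(R.first && !R.second); // yields +hvx, -hvx-double
}
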
+
+static void getWebAssemblyTargetFeatures(const ArgList &Args,
+ std::vector<const char *> &Features) {
+ handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
+}
+
+static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
const ArgList &Args, ArgStringList &CmdArgs,
bool ForAS) {
+ const Driver &D = TC.getDriver();
std::vector<const char *> Features;
switch (Triple.getArch()) {
default:
@@ -2082,13 +2286,13 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- getARMTargetFeatures(D, Triple, Args, Features, ForAS);
+ getARMTargetFeatures(TC, Triple, Args, Features, ForAS);
break;
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- getPPCTargetFeatures(Args, Features);
+ getPPCTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::systemz:
getSystemZTargetFeatures(Args, Features);
@@ -2101,6 +2305,13 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::x86_64:
getX86TargetFeatures(D, Triple, Args, Features);
break;
+ case llvm::Triple::hexagon:
+ getHexagonTargetFeatures(Args, Features);
+ break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ getWebAssemblyTargetFeatures(Args, Features);
+ break;
}
// Find the last of each feature.
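
The hunk context cuts off at this comment, but the intent reads as last-wins deduplication: if the same feature is pushed with both '+' and '-', only the final occurrence should reach the backend. A sketch of that assumed semantics:

#include <iostream>
#include <map>
#include <string>
#include <vector>

static std::vector<std::string>
lastOfEachFeature(const std::vector<std::string> &Features) {
  std::map<std::string, std::string> Last;
  std::vector<std::string> Order; // remember first-seen order
  for (const auto &F : Features) {
    std::string Name = F.substr(1); // strip the '+'/'-'
    if (!Last.count(Name))
      Order.push_back(Name);
    Last[Name] = F;
  }
  std::vector<std::string> Out;
  for (const auto &N : Order)
    Out.push_back(Last[N]);
  return Out;
}

int main() {
  for (const auto &F : lastOfEachFeature({"+neon", "-neon", "+crc"}))
    std::cout << F << "\n"; // prints -neon, +crc
}
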
@@ -2272,6 +2483,67 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
RelaxDefault);
}
+// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
+// to the corresponding DebugInfoKind.
+static CodeGenOptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
+ assert(A.getOption().matches(options::OPT_gN_Group) &&
+ "Not a -g option that specifies a debug-info level");
+ if (A.getOption().matches(options::OPT_g0) ||
+ A.getOption().matches(options::OPT_ggdb0))
+ return CodeGenOptions::NoDebugInfo;
+ if (A.getOption().matches(options::OPT_gline_tables_only) ||
+ A.getOption().matches(options::OPT_ggdb1))
+ return CodeGenOptions::DebugLineTablesOnly;
+ return CodeGenOptions::LimitedDebugInfo;
+}
+
+// Extract the integer N from a string spelled "-gdwarf-N", returning 0
+// on mismatch. The StringRef input (rather than an Arg) allows
+// for use by the "-Xassembler" option parser.
+static unsigned DwarfVersionNum(StringRef ArgValue) {
+ return llvm::StringSwitch<unsigned>(ArgValue)
+ .Case("-gdwarf-2", 2)
+ .Case("-gdwarf-3", 3)
+ .Case("-gdwarf-4", 4)
+ .Case("-gdwarf-5", 5)
+ .Default(0);
+}
+
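
A plain-C++ mirror of DwarfVersionNum, useful for seeing the contract at a glance: only the exact -gdwarf-2 through -gdwarf-5 spellings produce a version, and 0 tells the caller to forward the value to cc1as untouched:

#include <cassert>
#include <string>

static unsigned dwarfVersionNum(const std::string &Arg) {
  if (Arg.rfind("-gdwarf-", 0) == 0 && Arg.size() == 9 &&
      Arg[8] >= '2' && Arg[8] <= '5')
    return Arg[8] - '0';
  return 0; // mismatch: caller passes the option through
}

int main() {
  assert(dwarfVersionNum("-gdwarf-4") == 4);
  assert(dwarfVersionNum("-gdwarf-99") == 0);
}
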
+static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ CodeGenOptions::DebugInfoKind DebugInfoKind,
+ unsigned DwarfVersion,
+ llvm::DebuggerKind DebuggerTuning) {
+ switch (DebugInfoKind) {
+ case CodeGenOptions::DebugLineTablesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-tables-only");
+ break;
+ case CodeGenOptions::LimitedDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=limited");
+ break;
+ case CodeGenOptions::FullDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=standalone");
+ break;
+ default:
+ break;
+ }
+ if (DwarfVersion > 0)
+ CmdArgs.push_back(
+ Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion)));
+ switch (DebuggerTuning) {
+ case llvm::DebuggerKind::GDB:
+ CmdArgs.push_back("-debugger-tuning=gdb");
+ break;
+ case llvm::DebuggerKind::LLDB:
+ CmdArgs.push_back("-debugger-tuning=lldb");
+ break;
+ case llvm::DebuggerKind::SCE:
+ CmdArgs.push_back("-debugger-tuning=sce");
+ break;
+ default:
+ break;
+ }
+}
+
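
To see the three orthogonal inputs in action, here is a simplified mirror of RenderDebugEnablingArgs with the LLVM types swapped for local enums. Feeding it (Limited, 4, GDB) prints the cc1 flags -debug-info-kind=limited, -dwarf-version=4, -debugger-tuning=gdb:

#include <iostream>
#include <string>
#include <vector>

enum class DebugInfoKind { None, LineTablesOnly, Limited, Full };
enum class DebuggerKind { Default, GDB, LLDB, SCE };

static std::vector<std::string> render(DebugInfoKind K, unsigned DwarfVersion,
                                       DebuggerKind Tuning) {
  std::vector<std::string> Out;
  if (K == DebugInfoKind::LineTablesOnly)
    Out.push_back("-debug-info-kind=line-tables-only");
  else if (K == DebugInfoKind::Limited)
    Out.push_back("-debug-info-kind=limited");
  else if (K == DebugInfoKind::Full)
    Out.push_back("-debug-info-kind=standalone");
  if (DwarfVersion > 0)
    Out.push_back("-dwarf-version=" + std::to_string(DwarfVersion));
  if (Tuning == DebuggerKind::GDB)
    Out.push_back("-debugger-tuning=gdb");
  else if (Tuning == DebuggerKind::LLDB)
    Out.push_back("-debugger-tuning=lldb");
  else if (Tuning == DebuggerKind::SCE)
    Out.push_back("-debugger-tuning=sce");
  return Out;
}

int main() {
  for (auto &S : render(DebugInfoKind::Limited, 4, DebuggerKind::GDB))
    std::cout << S << "\n";
}
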
static void CollectArgsForIntegratedAssembler(Compilation &C,
const ArgList &Args,
ArgStringList &CmdArgs,
@@ -2279,6 +2551,15 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
if (UseRelaxAll(C, Args))
CmdArgs.push_back("-mrelax-all");
+ // Only default to -mincremental-linker-compatible if we think we are
+ // targeting the MSVC linker.
+ bool DefaultIncrementalLinkerCompatible =
+ C.getDefaultToolChain().getTriple().isWindowsMSVCEnvironment();
+ if (Args.hasFlag(options::OPT_mincremental_linker_compatible,
+ options::OPT_mno_incremental_linker_compatible,
+ DefaultIncrementalLinkerCompatible))
+ CmdArgs.push_back("-mincremental-linker-compatible");
+
// When passing -I arguments to the assembler we sometimes need to
// unconditionally take the next argument. For example, when parsing
// '-Wa,-I -Wa,foo' we need to accept the -Wa,foo arg after seeing the
@@ -2293,13 +2574,43 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
A->claim();
- for (const StringRef Value : A->getValues()) {
+ for (StringRef Value : A->getValues()) {
if (TakeNextArg) {
CmdArgs.push_back(Value.data());
TakeNextArg = false;
continue;
}
+ switch (C.getDefaultToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ if (Value == "--trap") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+use-tcc-in-div");
+ continue;
+ }
+ if (Value == "--break") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-use-tcc-in-div");
+ continue;
+ }
+ if (Value.startswith("-msoft-float")) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ continue;
+ }
+ if (Value.startswith("-mhard-float")) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-soft-float");
+ continue;
+ }
+ break;
+ }
+
if (Value == "-force_cpusubtype_ALL") {
// Do nothing, this is the default and we don't support anything else.
} else if (Value == "-L") {
@@ -2321,7 +2632,15 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
if (Value == "-I")
TakeNextArg = true;
} else if (Value.startswith("-gdwarf-")) {
- CmdArgs.push_back(Value.data());
+ // "-gdwarf-N" options are not cc1as options.
+ unsigned DwarfVersion = DwarfVersionNum(Value);
+ if (DwarfVersion == 0) { // Send it onward, and let cc1as complain.
+ CmdArgs.push_back(Value.data());
+ } else {
+ RenderDebugEnablingArgs(
+ Args, CmdArgs, CodeGenOptions::LimitedDebugInfo, DwarfVersion,
+ llvm::DebuggerKind::Default);
+ }
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
Value.startswith("-mhwdiv") || Value.startswith("-march")) {
// Do nothing, we'll validate it later.
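
The MIPS cases earlier in this hunk translate GNU-as spellings into target features instead of passing them through. A sketch of that one-to-one mapping; the bool return, with false meaning "fall through to generic handling", is our framing rather than the patch's:

#include <iostream>
#include <string>
#include <vector>

static bool translateMipsAsOption(const std::string &V,
                                  std::vector<std::string> &CmdArgs) {
  std::string Feature;
  if (V == "--trap")
    Feature = "+use-tcc-in-div";
  else if (V == "--break")
    Feature = "-use-tcc-in-div";
  else if (V.rfind("-msoft-float", 0) == 0)
    Feature = "+soft-float";
  else if (V.rfind("-mhard-float", 0) == 0)
    Feature = "-soft-float";
  else
    return false; // not handled here
  CmdArgs.push_back("-target-feature");
  CmdArgs.push_back(Feature);
  return true;
}

int main() {
  std::vector<std::string> C;
  translateMipsAsOption("--trap", C);
  for (auto &S : C)
    std::cout << S << "\n"; // -target-feature, +use-tcc-in-div
}
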
@@ -2339,76 +2658,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
}
-// Until ARM libraries are build separately, we have them all in one library
-static StringRef getArchNameForCompilerRTLib(const ToolChain &TC) {
- if (TC.getTriple().isWindowsMSVCEnvironment() &&
- TC.getArch() == llvm::Triple::x86)
- return "i386";
- if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
- return "arm";
- return TC.getArchName();
-}
-
-static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) {
- // The runtimes are located in the OS-specific resource directory.
- SmallString<128> Res(TC.getDriver().ResourceDir);
- const llvm::Triple &Triple = TC.getTriple();
- // TC.getOS() yield "freebsd10.0" whereas "freebsd" is expected.
- StringRef OSLibName =
- (Triple.getOS() == llvm::Triple::FreeBSD) ? "freebsd" : TC.getOS();
- llvm::sys::path::append(Res, "lib", OSLibName);
- return Res;
-}
-
-SmallString<128> tools::getCompilerRT(const ToolChain &TC, StringRef Component,
- bool Shared) {
- const char *Env = TC.getTriple().getEnvironment() == llvm::Triple::Android
- ? "-android"
- : "";
-
- bool IsOSWindows = TC.getTriple().isOSWindows();
- bool IsITANMSVCWindows = TC.getTriple().isWindowsMSVCEnvironment() ||
- TC.getTriple().isWindowsItaniumEnvironment();
- StringRef Arch = getArchNameForCompilerRTLib(TC);
- const char *Prefix = IsITANMSVCWindows ? "" : "lib";
- const char *Suffix =
- Shared ? (IsOSWindows ? ".dll" : ".so") : (IsITANMSVCWindows ? ".lib" : ".a");
-
- SmallString<128> Path = getCompilerRTLibDir(TC);
- llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
- Arch + Env + Suffix);
-
- return Path;
-}
-
// This adds the static libclang_rt.builtins-arch.a directly to the command line
// FIXME: Make sure we can also emit shared objects if they're requested
// and available, check for possible errors, etc.
static void addClangRT(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "builtins")));
-
- if (!TC.getTriple().isOSWindows()) {
- // FIXME: why do we link against gcc when we are using compiler-rt?
- CmdArgs.push_back("-lgcc_s");
- if (TC.getDriver().CCCIsCXX())
- CmdArgs.push_back("-lgcc_eh");
- }
-}
-
-static void addProfileRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (!(Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
- Args.hasArg(options::OPT_fprofile_generate) ||
- Args.hasArg(options::OPT_fprofile_generate_EQ) ||
- Args.hasArg(options::OPT_fprofile_instr_generate) ||
- Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage)))
- return;
-
- CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, "profile")));
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "builtins"));
}
namespace {
@@ -2487,11 +2742,9 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
bool IsShared) {
// Static runtimes must be forced into executable, so we wrap them in
// whole-archive.
- if (!IsShared)
- CmdArgs.push_back("-whole-archive");
- CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Sanitizer, IsShared)));
- if (!IsShared)
- CmdArgs.push_back("-no-whole-archive");
+ if (!IsShared) CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Sanitizer, IsShared));
+ if (!IsShared) CmdArgs.push_back("-no-whole-archive");
}
// Tries to use a file with the list of dynamic symbols that need to be exported
@@ -2499,7 +2752,7 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs,
StringRef Sanitizer) {
- SmallString<128> SanRT = getCompilerRT(TC, Sanitizer);
+ SmallString<128> SanRT(TC.getCompilerRT(Args, Sanitizer));
if (llvm::sys::fs::exists(SanRT + ".syms")) {
CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms"));
return true;
@@ -2532,8 +2785,7 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
}
// Collect static runtimes.
- if (Args.hasArg(options::OPT_shared) ||
- (TC.getTriple().getEnvironment() == llvm::Triple::Android)) {
+ if (Args.hasArg(options::OPT_shared) || TC.getTriple().isAndroid()) {
// Don't link static runtimes into DSOs or if compiling for Android.
return;
}
@@ -2567,6 +2819,10 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
}
if (SanArgs.needsSafeStackRt())
StaticRuntimes.push_back("safestack");
+ if (SanArgs.needsCfiRt())
+ StaticRuntimes.push_back("cfi");
+ if (SanArgs.needsCfiDiagRt())
+ StaticRuntimes.push_back("cfi_diag");
}
// Should be called before we add system libraries (C++ ABI, libstdc++/libc++,
@@ -2603,9 +2859,15 @@ static bool areOptimizationsEnabled(const ArgList &Args) {
static bool shouldUseFramePointerForTarget(const ArgList &Args,
const llvm::Triple &Triple) {
- // XCore never wants frame pointers, regardless of OS.
- if (Triple.getArch() == llvm::Triple::xcore) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::xcore:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ // XCore never wants frame pointers, regardless of OS.
+ // WebAssembly never wants frame pointers.
return false;
+ default:
+ break;
}
if (Triple.isOSLinux()) {
@@ -2628,6 +2890,10 @@ static bool shouldUseFramePointerForTarget(const ArgList &Args,
switch (Triple.getArch()) {
case llvm::Triple::x86:
return !areOptimizationsEnabled(Args);
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+    // Windows on ARM builds with FPO disabled to aid fast stack walking.
+ return true;
default:
// All other supported Windows ISAs use xdata unwind information, so frame
// pointers are not generally useful.
@@ -2643,6 +2909,8 @@ static bool shouldUseFramePointer(const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
options::OPT_fomit_frame_pointer))
return A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+ if (Args.hasArg(options::OPT_pg))
+ return true;
return shouldUseFramePointerForTarget(Args, Triple);
}
@@ -2652,6 +2920,8 @@ static bool shouldUseLeafFramePointer(const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
options::OPT_momit_leaf_frame_pointer))
return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);
+ if (Args.hasArg(options::OPT_pg))
+ return true;
if (Triple.isPS4CPU())
return false;
@@ -2700,12 +2970,13 @@ static void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
ExtractArgs.push_back(OutFile);
const char *Exec = Args.MakeArgString(TC.GetProgramPath("objcopy"));
+ InputInfo II(Output.getFilename(), types::TY_Object, Output.getFilename());
// First extract the dwo sections.
- C.addCommand(llvm::make_unique<Command>(JA, T, Exec, ExtractArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, ExtractArgs, II));
// Then remove them from the original .o file.
- C.addCommand(llvm::make_unique<Command>(JA, T, Exec, StripArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, StripArgs, II));
}
/// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
@@ -2774,6 +3045,7 @@ static VersionTuple getMSCompatibilityVersion(unsigned Version) {
static void claimNoWarnArgs(const ArgList &Args) {
// Don't warn about unused -f(no-)?lto. This can happen when we're
// preprocessing, precompiling or assembling.
+ Args.ClaimAllArgs(options::OPT_flto_EQ);
Args.ClaimAllArgs(options::OPT_flto);
Args.ClaimAllArgs(options::OPT_fno_lto);
}
@@ -2863,42 +3135,53 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
auto *ProfileGenerateArg = Args.getLastArg(
options::OPT_fprofile_instr_generate,
options::OPT_fprofile_instr_generate_EQ, options::OPT_fprofile_generate,
- options::OPT_fprofile_generate_EQ);
+ options::OPT_fprofile_generate_EQ,
+ options::OPT_fno_profile_instr_generate);
+ if (ProfileGenerateArg &&
+ ProfileGenerateArg->getOption().matches(
+ options::OPT_fno_profile_instr_generate))
+ ProfileGenerateArg = nullptr;
auto *ProfileUseArg = Args.getLastArg(
options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
- options::OPT_fprofile_use, options::OPT_fprofile_use_EQ);
+ options::OPT_fprofile_use, options::OPT_fprofile_use_EQ,
+ options::OPT_fno_profile_instr_use);
+ if (ProfileUseArg &&
+ ProfileUseArg->getOption().matches(options::OPT_fno_profile_instr_use))
+ ProfileUseArg = nullptr;
if (ProfileGenerateArg && ProfileUseArg)
D.Diag(diag::err_drv_argument_not_allowed_with)
<< ProfileGenerateArg->getSpelling() << ProfileUseArg->getSpelling();
- if (ProfileGenerateArg &&
- ProfileGenerateArg->getOption().matches(
- options::OPT_fprofile_instr_generate_EQ))
- ProfileGenerateArg->render(Args, CmdArgs);
- else if (ProfileGenerateArg &&
- ProfileGenerateArg->getOption().matches(
- options::OPT_fprofile_generate_EQ)) {
- SmallString<128> Path(ProfileGenerateArg->getValue());
- llvm::sys::path::append(Path, "default.profraw");
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-fprofile-instr-generate=") + Path));
- } else
- Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate);
-
- if (ProfileUseArg &&
- ProfileUseArg->getOption().matches(options::OPT_fprofile_instr_use_EQ))
- ProfileUseArg->render(Args, CmdArgs);
- else if (ProfileUseArg &&
- (ProfileUseArg->getOption().matches(options::OPT_fprofile_use_EQ) ||
- ProfileUseArg->getOption().matches(
- options::OPT_fprofile_instr_use))) {
- SmallString<128> Path(
- ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
- if (Path.empty() || llvm::sys::fs::is_directory(Path))
- llvm::sys::path::append(Path, "default.profdata");
- CmdArgs.push_back(Args.MakeArgString(Twine("-fprofile-instr-use=") + Path));
+ if (ProfileGenerateArg) {
+ if (ProfileGenerateArg->getOption().matches(
+ options::OPT_fprofile_instr_generate_EQ))
+ ProfileGenerateArg->render(Args, CmdArgs);
+ else if (ProfileGenerateArg->getOption().matches(
+ options::OPT_fprofile_generate_EQ)) {
+ SmallString<128> Path(ProfileGenerateArg->getValue());
+ llvm::sys::path::append(Path, "default.profraw");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-instr-generate=") + Path));
+ } else
+ Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate);
+ }
+
+ if (ProfileUseArg) {
+ if (ProfileUseArg->getOption().matches(options::OPT_fprofile_instr_use_EQ))
+ ProfileUseArg->render(Args, CmdArgs);
+ else if ((ProfileUseArg->getOption().matches(
+ options::OPT_fprofile_use_EQ) ||
+ ProfileUseArg->getOption().matches(
+ options::OPT_fprofile_instr_use))) {
+ SmallString<128> Path(
+ ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
+ if (Path.empty() || llvm::sys::fs::is_directory(Path))
+ llvm::sys::path::append(Path, "default.profdata");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-instr-use=") + Path));
+ }
}
if (Args.hasArg(options::OPT_ftest_coverage) ||
@@ -2909,12 +3192,15 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
Args.hasArg(options::OPT_coverage))
CmdArgs.push_back("-femit-coverage-data");
- if (Args.hasArg(options::OPT_fcoverage_mapping) && !ProfileGenerateArg)
+ if (Args.hasFlag(options::OPT_fcoverage_mapping,
+ options::OPT_fno_coverage_mapping, false) &&
+ !ProfileGenerateArg)
D.Diag(diag::err_drv_argument_only_allowed_with)
<< "-fcoverage-mapping"
<< "-fprofile-instr-generate";
- if (Args.hasArg(options::OPT_fcoverage_mapping))
+ if (Args.hasFlag(options::OPT_fcoverage_mapping,
+ options::OPT_fno_coverage_mapping, false))
CmdArgs.push_back("-fcoverage-mapping");
if (C.getArgs().hasArg(options::OPT_c) ||
@@ -2939,9 +3225,192 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
}
}
+static void addPS4ProfileRTArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasFlag(options::OPT_fprofile_generate,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_generate_EQ,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate_EQ,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)))
+ CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
+}
+
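
addPS4ProfileRTArgs leans entirely on Args.hasFlag(positive, negative, default). For readers unfamiliar with it, the assumed semantics are last-flag-wins with a fallback default, roughly:

#include <cassert>
#include <string>
#include <vector>

static bool hasFlag(const std::vector<std::string> &Args,
                    const std::string &Pos, const std::string &Neg,
                    bool Default) {
  bool Result = Default;
  for (const auto &A : Args) {
    if (A == Pos)
      Result = true;
    else if (A == Neg)
      Result = false;
  }
  return Result;
}

int main() {
  std::vector<std::string> Args = {"-fprofile-arcs", "-fno-profile-arcs"};
  // The later negative flag wins, so the profile runtime is not linked.
  assert(!hasFlag(Args, "-fprofile-arcs", "-fno-profile-arcs", false));
}
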
+/// Parses the various -fpic/-fPIC/-fpie/-fPIE arguments. Then,
+/// smooshes them together with platform defaults, to decide whether
+/// this compile should be using PIC mode or not. Returns a tuple of
+/// (RelocationModel, PICLevel, IsPIE).
+static std::tuple<llvm::Reloc::Model, unsigned, bool>
+ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
+ const ArgList &Args) {
+ // FIXME: why does this code...and so much everywhere else, use both
+ // ToolChain.getTriple() and Triple?
+ bool PIE = ToolChain.isPIEDefault();
+ bool PIC = PIE || ToolChain.isPICDefault();
+ // The Darwin default to use PIC does not apply when using -static.
+ if (ToolChain.getTriple().isOSDarwin() && Args.hasArg(options::OPT_static))
+ PIE = PIC = false;
+ bool IsPICLevelTwo = PIC;
+
+ bool KernelOrKext =
+ Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
+
+ // Android-specific defaults for PIC/PIE
+ if (ToolChain.getTriple().isAndroid()) {
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ PIC = true; // "-fpic"
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ PIC = true; // "-fPIC"
+ IsPICLevelTwo = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // OpenBSD-specific defaults for PIE
+ if (ToolChain.getTriple().getOS() == llvm::Triple::OpenBSD) {
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ IsPICLevelTwo = false; // "-fpie"
+ break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ IsPICLevelTwo = true; // "-fPIE"
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // The last argument relating to either PIC or PIE wins, and no
+ // other argument is used. If the last argument is any flavor of the
+ // '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
+ // option implicitly enables PIC at the same level.
+ Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
+ options::OPT_fpic, options::OPT_fno_pic,
+ options::OPT_fPIE, options::OPT_fno_PIE,
+ options::OPT_fpie, options::OPT_fno_pie);
+  // Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
+  // is forced, then neither the PIC nor the PIE flags have any effect.
+ if (!ToolChain.isPICDefaultForced()) {
+ if (LastPICArg) {
+ Option O = LastPICArg->getOption();
+ if (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie)) {
+ PIE = O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie);
+ PIC =
+ PIE || O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic);
+ IsPICLevelTwo =
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
+ } else {
+ PIE = PIC = false;
+ if (Triple.isPS4CPU()) {
+ Arg *ModelArg = Args.getLastArg(options::OPT_mcmodel_EQ);
+ StringRef Model = ModelArg ? ModelArg->getValue() : "";
+ if (Model != "kernel") {
+ PIC = true;
+ ToolChain.getDriver().Diag(diag::warn_drv_ps4_force_pic)
+ << LastPICArg->getSpelling();
+ }
+ }
+ }
+ }
+ }
+
+ // Introduce a Darwin and PS4-specific hack. If the default is PIC, but the
+ // PIC level would've been set to level 1, force it back to level 2 PIC
+ // instead.
+ if (PIC && (ToolChain.getTriple().isOSDarwin() || Triple.isPS4CPU()))
+ IsPICLevelTwo |= ToolChain.isPICDefault();
+
+  // These kernel flags are a trump card: they disable PIC/PIE
+  // generation regardless of the argument order.
+ if (KernelOrKext && ((!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
+ !Triple.isWatchOS()))
+ PIC = PIE = false;
+
+ if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
+ // This is a very special mode. It trumps the other modes, almost no one
+ // uses it, and it isn't even valid on any OS but Darwin.
+ if (!ToolChain.getTriple().isOSDarwin())
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << ToolChain.getTriple().str();
+
+ // FIXME: Warn when this flag trumps some other PIC or PIE flag.
+
+ // Only a forced PIC mode can cause the actual compile to have PIC defines
+ // etc., no flags are sufficient. This behavior was selected to closely
+ // match that of llvm-gcc and Apple GCC before that.
+ PIC = ToolChain.isPICDefault() && ToolChain.isPICDefaultForced();
+
+ return std::make_tuple(llvm::Reloc::DynamicNoPIC, PIC ? 2 : 0, false);
+ }
+
+ if (PIC)
+ return std::make_tuple(llvm::Reloc::PIC_, IsPICLevelTwo ? 2 : 1, PIE);
+
+ return std::make_tuple(llvm::Reloc::Static, 0, false);
+}
+
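
Callers consume ParsePICArgs through std::tie, as AddAssemblerKPIC does below. A reduced model of the return contract follows; the decision inputs here are pre-digested booleans, a deliberate simplification of the real argument scan:

#include <iostream>
#include <tuple>

enum class RelocModel { Static, PIC, DynamicNoPIC };

static std::tuple<RelocModel, unsigned, bool>
parsePIC(bool PIE, bool PIC, bool LevelTwo) {
  if (PIC)
    return std::make_tuple(RelocModel::PIC, LevelTwo ? 2u : 1u, PIE);
  return std::make_tuple(RelocModel::Static, 0u, false);
}

int main() {
  RelocModel Model;
  unsigned Level;
  bool IsPIE;
  std::tie(Model, Level, IsPIE) =
      parsePIC(/*PIE=*/true, /*PIC=*/true, /*LevelTwo=*/true);
  std::cout << Level << (IsPIE ? " pie" : "") << "\n"; // prints "2 pie"
}
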
+static const char *RelocationModelName(llvm::Reloc::Model Model) {
+ switch (Model) {
+ case llvm::Reloc::Default:
+ return nullptr;
+ case llvm::Reloc::Static:
+ return "static";
+ case llvm::Reloc::PIC_:
+ return "pic";
+ case llvm::Reloc::DynamicNoPIC:
+ return "dynamic-no-pic";
+ }
+ llvm_unreachable("Unknown Reloc::Model kind");
+}
+
+static void AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(ToolChain, ToolChain.getTriple(), Args);
+
+ if (RelocationModel != llvm::Reloc::Static)
+ CmdArgs.push_back("-KPIC");
+}
+
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ const llvm::Triple Triple(TripleStr);
+
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
const Driver &D = getToolChain().getDriver();
@@ -2951,6 +3420,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsWindowsCygnus =
getToolChain().getTriple().isWindowsCygwinEnvironment();
bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment();
+ bool IsPS4CPU = getToolChain().getTriple().isPS4CPU();
// Check number of inputs for sanity. We need at least one input.
assert(Inputs.size() >= 1 && "Must have at least one input.");
@@ -2968,17 +3438,34 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add the "effective" target triple.
CmdArgs.push_back("-triple");
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
CmdArgs.push_back(Args.MakeArgString(TripleStr));
- const llvm::Triple TT(TripleStr);
- if (TT.isOSWindows() && (TT.getArch() == llvm::Triple::arm ||
- TT.getArch() == llvm::Triple::thumb)) {
- unsigned Offset = TT.getArch() == llvm::Triple::arm ? 4 : 6;
+ const ToolChain *AuxToolChain = nullptr;
+ if (IsCuda) {
+    // FIXME: We need a (better) way to pass information about the
+    // particular compilation pass we're constructing here. For now we
+ // can check which toolchain we're using and pick the other one to
+ // extract the triple.
+ if (&getToolChain() == C.getCudaDeviceToolChain())
+ AuxToolChain = C.getCudaHostToolChain();
+ else if (&getToolChain() == C.getCudaHostToolChain())
+ AuxToolChain = C.getCudaDeviceToolChain();
+ else
+ llvm_unreachable("Can't figure out CUDA compilation mode.");
+ assert(AuxToolChain != nullptr && "No aux toolchain.");
+ CmdArgs.push_back("-aux-triple");
+ CmdArgs.push_back(Args.MakeArgString(AuxToolChain->getTriple().str()));
+ CmdArgs.push_back("-fcuda-target-overloads");
+ CmdArgs.push_back("-fcuda-disable-target-call-checks");
+ }
+
+ if (Triple.isOSWindows() && (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb)) {
+ unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
unsigned Version;
- TT.getArchName().substr(Offset).getAsInteger(10, Version);
+ Triple.getArchName().substr(Offset).getAsInteger(10, Version);
if (Version < 7)
- D.Diag(diag::err_target_unsupported_arch) << TT.getArchName()
+ D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
<< TripleStr;
}
@@ -3026,10 +3513,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else {
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
-
- if (JA.getType() == types::TY_LTO_IR || JA.getType() == types::TY_LTO_BC) {
- CmdArgs.push_back("-flto");
- }
if (JA.getType() == types::TY_Nothing) {
CmdArgs.push_back("-fsyntax-only");
} else if (JA.getType() == types::TY_LLVM_IR ||
@@ -3060,6 +3543,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// the use-list order, since serialization to bitcode is part of the flow.
if (JA.getType() == types::TY_LLVM_BC)
CmdArgs.push_back("-emit-llvm-uselists");
+
+ if (D.isUsingLTO())
+ Args.AddLastArg(CmdArgs, options::OPT_flto, options::OPT_flto_EQ);
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_index_EQ)) {
+ if (!types::isLLVMIR(Input.getType()))
+ D.Diag(diag::err_drv_argument_only_allowed_with) << A->getAsString(Args)
+ << "-x ir";
+ Args.AddLastArg(CmdArgs, options::OPT_fthinlto_index_EQ);
}
// We normally speed up the clang process a bit by skipping destructors at
@@ -3115,6 +3608,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
+
+ // Default nullability checks.
+ CmdArgs.push_back("-analyzer-checker=nullability.NullPassedToNonnull");
+ CmdArgs.push_back(
+ "-analyzer-checker=nullability.NullReturnedFromNonnull");
}
// Set the output format. The default is plist, for (lame) historical
@@ -3136,136 +3634,29 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CheckCodeGenerationOptions(D, Args);
- bool PIE = getToolChain().isPIEDefault();
- bool PIC = PIE || getToolChain().isPICDefault();
- bool IsPICLevelTwo = PIC;
-
- // Android-specific defaults for PIC/PIE
- if (getToolChain().getTriple().getEnvironment() == llvm::Triple::Android) {
- switch (getToolChain().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::aarch64:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- PIC = true; // "-fpic"
- break;
-
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- PIC = true; // "-fPIC"
- IsPICLevelTwo = true;
- break;
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
- default:
- break;
- }
+ const char *RMName = RelocationModelName(RelocationModel);
+ if (RMName) {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(RMName);
}
-
- // OpenBSD-specific defaults for PIE
- if (getToolChain().getTriple().getOS() == llvm::Triple::OpenBSD) {
- switch (getToolChain().getArch()) {
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::sparcel:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- IsPICLevelTwo = false; // "-fpie"
- break;
-
- case llvm::Triple::ppc:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- IsPICLevelTwo = true; // "-fPIE"
- break;
-
- default:
- break;
+ if (PICLevel > 0) {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
+ if (IsPIE) {
+ CmdArgs.push_back("-pie-level");
+ CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
}
}
- // For the PIC and PIE flag options, this logic is different from the
- // legacy logic in very old versions of GCC, as that logic was just
- // a bug no one had ever fixed. This logic is both more rational and
- // consistent with GCC's new logic now that the bugs are fixed. The last
- // argument relating to either PIC or PIE wins, and no other argument is
- // used. If the last argument is any flavor of the '-fno-...' arguments,
- // both PIC and PIE are disabled. Any PIE option implicitly enables PIC
- // at the same level.
- Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
- options::OPT_fpic, options::OPT_fno_pic,
- options::OPT_fPIE, options::OPT_fno_PIE,
- options::OPT_fpie, options::OPT_fno_pie);
- // Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
- // is forced, then neither PIC nor PIE flags will have no effect.
- if (!getToolChain().isPICDefaultForced()) {
- if (LastPICArg) {
- Option O = LastPICArg->getOption();
- if (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
- O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie)) {
- PIE = O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie);
- PIC =
- PIE || O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic);
- IsPICLevelTwo =
- O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
- } else {
- PIE = PIC = false;
- }
- }
- }
-
- // Introduce a Darwin-specific hack. If the default is PIC but the flags
- // specified while enabling PIC enabled level 1 PIC, just force it back to
- // level 2 PIC instead. This matches the behavior of Darwin GCC (based on my
- // informal testing).
- if (PIC && getToolChain().getTriple().isOSDarwin())
- IsPICLevelTwo |= getToolChain().isPICDefault();
-
- // Note that these flags are trump-cards. Regardless of the order w.r.t. the
- // PIC or PIE options above, if these show up, PIC is disabled.
- llvm::Triple Triple(TripleStr);
- if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)))
- PIC = PIE = false;
- if (Args.hasArg(options::OPT_static))
- PIC = PIE = false;
-
- if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
- // This is a very special mode. It trumps the other modes, almost no one
- // uses it, and it isn't even valid on any OS but Darwin.
- if (!getToolChain().getTriple().isOSDarwin())
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << getToolChain().getTriple().str();
-
- // FIXME: Warn when this flag trumps some other PIC or PIE flag.
-
- CmdArgs.push_back("-mrelocation-model");
- CmdArgs.push_back("dynamic-no-pic");
-
- // Only a forced PIC mode can cause the actual compile to have PIC defines
- // etc., no flags are sufficient. This behavior was selected to closely
- // match that of llvm-gcc and Apple GCC before that.
- if (getToolChain().isPICDefault() && getToolChain().isPICDefaultForced()) {
- CmdArgs.push_back("-pic-level");
- CmdArgs.push_back("2");
- }
- } else {
- // Currently, LLVM only knows about PIC vs. static; the PIE differences are
- // handled in Clang's IRGen by the -pie-level flag.
- CmdArgs.push_back("-mrelocation-model");
- CmdArgs.push_back(PIC ? "pic" : "static");
-
- if (PIC) {
- CmdArgs.push_back("-pic-level");
- CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
- if (PIE) {
- CmdArgs.push_back("-pie-level");
- CmdArgs.push_back(IsPICLevelTwo ? "2" : "1");
- }
- }
+ if (Arg *A = Args.getLastArg(options::OPT_meabi)) {
+ CmdArgs.push_back("-meabi");
+ CmdArgs.push_back(A->getValue());
}
CmdArgs.push_back("-mthread-model");
@@ -3343,6 +3734,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
false))
CmdArgs.push_back("-fstrict-enums");
+ if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
+ options::OPT_fno_strict_vtable_pointers,
+ false))
+ CmdArgs.push_back("-fstrict-vtable-pointers");
if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
options::OPT_fno_optimize_sibling_calls))
CmdArgs.push_back("-mdisable-tail-calls");
@@ -3520,7 +3915,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (KernelOrKext && getToolChain().getTriple().isOSDarwin())
CmdArgs.push_back("-fforbid-guard-variables");
- if (Args.hasArg(options::OPT_mms_bitfields)) {
+ if (Args.hasFlag(options::OPT_mms_bitfields, options::OPT_mno_ms_bitfields,
+ false)) {
CmdArgs.push_back("-mms-bitfields");
}
@@ -3565,7 +3961,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the target features
- getTargetFeatures(D, Triple, Args, CmdArgs, false);
+ getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, false);
// Add target specific flags.
switch (getToolChain().getArch()) {
@@ -3576,7 +3972,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
+ // Use the effective triple, which takes into account the deployment target.
+ AddARMTargetArgs(Triple, Args, CmdArgs, KernelOrKext);
break;
case llvm::Triple::aarch64:
@@ -3613,9 +4010,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
+ // The 'g' group options involve a somewhat intricate sequence of decisions
+ // about what to pass from the driver to the frontend, but by the time they
+ // reach cc1 they've been factored into three well-defined orthogonal choices:
+ // * what level of debug info to generate
+ // * what dwarf version to write
+ // * what debugger tuning to use
+ // This avoids having to monkey around further in cc1 other than to disable
+ // codeview if not running in a Windows environment. Perhaps even that
+ // decision should be made in the driver as well though.
+ unsigned DwarfVersion = 0;
+ llvm::DebuggerKind DebuggerTuning = getToolChain().getDefaultDebuggerTuning();
+ // These two are potentially updated by AddClangCLArgs.
+ enum CodeGenOptions::DebugInfoKind DebugInfoKind =
+ CodeGenOptions::NoDebugInfo;
+ bool EmitCodeView = false;
+
// Add clang-cl arguments.
if (getToolChain().getDriver().IsCLMode())
- AddClangCLArgs(Args, CmdArgs);
+ AddClangCLArgs(Args, CmdArgs, &DebugInfoKind, &EmitCodeView);
// Pass the linker version in use.
if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
@@ -3656,57 +4069,86 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
: "-");
}
- // Use the last option from "-g" group. "-gline-tables-only" and "-gdwarf-x"
- // are preserved, all other debug options are substituted with "-g".
Args.ClaimAllArgs(options::OPT_g_Group);
+ Arg *SplitDwarfArg = Args.getLastArg(options::OPT_gsplit_dwarf);
if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- if (A->getOption().matches(options::OPT_gline_tables_only) ||
- A->getOption().matches(options::OPT_g1)) {
- // FIXME: we should support specifying dwarf version with
- // -gline-tables-only.
- CmdArgs.push_back("-gline-tables-only");
- // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
- const llvm::Triple &Triple = getToolChain().getTriple();
- if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
- Triple.getOS() == llvm::Triple::FreeBSD ||
- Triple.getOS() == llvm::Triple::Solaris)
- CmdArgs.push_back("-gdwarf-2");
- } else if (A->getOption().matches(options::OPT_gdwarf_2))
- CmdArgs.push_back("-gdwarf-2");
- else if (A->getOption().matches(options::OPT_gdwarf_3))
- CmdArgs.push_back("-gdwarf-3");
- else if (A->getOption().matches(options::OPT_gdwarf_4))
- CmdArgs.push_back("-gdwarf-4");
- else if (!A->getOption().matches(options::OPT_g0) &&
- !A->getOption().matches(options::OPT_ggdb0)) {
- // Default is dwarf-2 for Darwin, OpenBSD, FreeBSD and Solaris.
- const llvm::Triple &Triple = getToolChain().getTriple();
- if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::OpenBSD ||
- Triple.getOS() == llvm::Triple::FreeBSD ||
- Triple.getOS() == llvm::Triple::Solaris)
- CmdArgs.push_back("-gdwarf-2");
- else
- CmdArgs.push_back("-g");
- }
+ // If the last option explicitly specified a debug-info level, use it.
+ if (A->getOption().matches(options::OPT_gN_Group)) {
+ DebugInfoKind = DebugLevelToInfoKind(*A);
+ // If you say "-gsplit-dwarf -gline-tables-only", -gsplit-dwarf loses.
+ // But -gsplit-dwarf is not a g_group option, hence we have to check the
+ // order explicitly. (If -gsplit-dwarf wins, we fix DebugInfoKind later.)
+ if (SplitDwarfArg && DebugInfoKind < CodeGenOptions::LimitedDebugInfo &&
+ A->getIndex() > SplitDwarfArg->getIndex())
+ SplitDwarfArg = nullptr;
+ } else
+ // For any other 'g' option, use Limited.
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
+ }
+
+ // If a debugger tuning argument appeared, remember it.
+ if (Arg *A = Args.getLastArg(options::OPT_gTune_Group,
+ options::OPT_ggdbN_Group)) {
+ if (A->getOption().matches(options::OPT_glldb))
+ DebuggerTuning = llvm::DebuggerKind::LLDB;
+ else if (A->getOption().matches(options::OPT_gsce))
+ DebuggerTuning = llvm::DebuggerKind::SCE;
+ else
+ DebuggerTuning = llvm::DebuggerKind::GDB;
+ }
+
+ // If a -gdwarf argument appeared, remember it.
+ if (Arg *A = Args.getLastArg(options::OPT_gdwarf_2, options::OPT_gdwarf_3,
+ options::OPT_gdwarf_4, options::OPT_gdwarf_5))
+ DwarfVersion = DwarfVersionNum(A->getSpelling());
+
+ // Forward -gcodeview.
+ // 'EmitCodeView' might have been set by CL-compatibility argument parsing.
+ if (Args.hasArg(options::OPT_gcodeview) || EmitCodeView) {
+ // DwarfVersion remains at 0 if no explicit choice was made.
+ CmdArgs.push_back("-gcodeview");
+ } else if (DwarfVersion == 0 &&
+ DebugInfoKind != CodeGenOptions::NoDebugInfo) {
+ DwarfVersion = getToolChain().GetDefaultDwarfVersion();
}
// We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
Args.ClaimAllArgs(options::OPT_g_flags_Group);
+
+ // PS4 defaults to no column info
if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- /*Default*/ true))
+ /*Default=*/ !IsPS4CPU))
CmdArgs.push_back("-dwarf-column-info");
// FIXME: Move backend command line options to the module.
+ if (Args.hasArg(options::OPT_gmodules)) {
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
+ CmdArgs.push_back("-dwarf-ext-refs");
+ CmdArgs.push_back("-fmodule-format=obj");
+ }
+
// -gsplit-dwarf should turn on -g and enable the backend dwarf
// splitting and extraction.
// FIXME: Currently only works on Linux.
- if (getToolChain().getTriple().isOSLinux() &&
- Args.hasArg(options::OPT_gsplit_dwarf)) {
- CmdArgs.push_back("-g");
+ if (getToolChain().getTriple().isOSLinux() && SplitDwarfArg) {
+ DebugInfoKind = CodeGenOptions::LimitedDebugInfo;
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-split-dwarf=Enable");
}
+ // After we've dealt with all combinations of things that could
+ // make DebugInfoKind be other than None or DebugLineTablesOnly,
+ // figure out if we need to "upgrade" it to standalone debug info.
+ // We parse these two '-f' options whether or not they will be used,
+ // to claim them even if you wrote "-fstandalone-debug -gline-tables-only".
+ bool NeedFullDebug = Args.hasFlag(options::OPT_fstandalone_debug,
+ options::OPT_fno_standalone_debug,
+ getToolChain().GetDefaultStandaloneDebug());
+ if (DebugInfoKind == CodeGenOptions::LimitedDebugInfo && NeedFullDebug)
+ DebugInfoKind = CodeGenOptions::FullDebugInfo;
+ RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DwarfVersion,
+ DebuggerTuning);
+
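
    A few worked examples of how the three orthogonal choices resolve under
    this scheme (a sketch; defaults such as the DWARF version come from the
    toolchain):

        clang -g                 ->  LimitedDebugInfo, default DWARF version, default tuning
        clang -g -glldb          ->  LimitedDebugInfo, tuned for LLDB
        clang -gline-tables-only -gdwarf-2      ->  line tables only, DWARF v2
        clang -gsplit-dwarf -gline-tables-only  ->  line tables only; split DWARF is dropped
        clang -g -fstandalone-debug             ->  upgraded to FullDebugInfo
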
// -ggnu-pubnames turns on gnu style pubnames in the backend.
if (Args.hasArg(options::OPT_ggnu_pubnames)) {
CmdArgs.push_back("-backend-option");
@@ -3715,7 +4157,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -gdwarf-aranges turns on the emission of the aranges section in the
// backend.
- if (Args.hasArg(options::OPT_gdwarf_aranges)) {
+ // Always enabled on the PS4.
+ if (Args.hasArg(options::OPT_gdwarf_aranges) || IsPS4CPU) {
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-generate-arange-section");
}
@@ -3747,6 +4190,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);
+ // Add runtime flag for PS4 when PGO or Coverage are enabled.
+ if (getToolChain().getTriple().isPS4CPU())
+ addPS4ProfileRTArgs(getToolChain(), Args, CmdArgs);
+
// Pass options for controlling the default header search paths.
if (Args.hasArg(options::OPT_nostdinc)) {
CmdArgs.push_back("-nostdsysteminc");
@@ -3839,7 +4286,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
//
// FIXME: Support -fpreprocessed
if (types::getPreprocessedType(InputType) != types::TY_INVALID)
- AddPreprocessingOptions(C, JA, D, Args, CmdArgs, Output, Inputs);
+ AddPreprocessingOptions(C, JA, D, Args, CmdArgs, Output, Inputs,
+ AuxToolChain);
// Don't warn about "clang -c -DPIC -fPIC test.i" because libtool.m4 assumes
// that "The compiler can only warn and ignore the option if not recognized".
@@ -3861,6 +4309,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
for (const Arg *A :
Args.filtered(options::OPT_clang_ignored_gcc_optimization_f_Group)) {
D.Diag(diag::warn_ignored_gcc_optimization) << A->getAsString(Args);
+ A->claim();
}
claimNoWarnArgs(Args);
@@ -3956,6 +4405,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add in -fdebug-compilation-dir if necessary.
addDebugCompDirArg(Args, CmdArgs);
+ for (const Arg *A : Args.filtered(options::OPT_fdebug_prefix_map_EQ)) {
+ StringRef Map = A->getValue();
+ if (Map.find('=') == StringRef::npos)
+ D.Diag(diag::err_drv_invalid_argument_to_fdebug_prefix_map) << Map;
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
+ A->claim();
+ }
+
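
    For example (the paths here are hypothetical), this strips a build
    directory out of the emitted debug info, while a value lacking '=' is
    rejected with the new diagnostic:

        clang -g -fdebug-prefix-map=/tmp/build=/usr/src -c t.c    (accepted)
        clang -g -fdebug-prefix-map=/tmp/build -c t.c             (error)
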
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
options::OPT_ftemplate_depth_EQ)) {
CmdArgs.push_back("-ftemplate-depth");
@@ -4068,9 +4526,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -f (flag) options which we can pass directly.
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
- Args.AddLastArg(CmdArgs, options::OPT_fstandalone_debug);
- Args.AddLastArg(CmdArgs, options::OPT_fno_standalone_debug);
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
+ // Emulated TLS is enabled by default on Android, and can be enabled manually
+ // with -femulated-tls.
+ bool EmulatedTLSDefault = Triple.isAndroid();
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ EmulatedTLSDefault))
+ CmdArgs.push_back("-femulated-tls");
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm) {
Args.AddLastArg(CmdArgs, options::OPT_faltivec);
@@ -4092,10 +4554,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// given, decide a default based on the target. Otherwise rely on the
// options and pass the right information to the frontend.
if (!Args.hasFlag(options::OPT_fopenmp_use_tls,
- options::OPT_fnoopenmp_use_tls,
- getToolChain().getArch() == llvm::Triple::ppc ||
- getToolChain().getArch() == llvm::Triple::ppc64 ||
- getToolChain().getArch() == llvm::Triple::ppc64le))
+ options::OPT_fnoopenmp_use_tls, /*Default=*/true))
CmdArgs.push_back("-fnoopenmp-use-tls");
break;
default:
@@ -4134,7 +4593,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_lax_vector_conversions))
CmdArgs.push_back("-fno-lax-vector-conversions");
- if (Args.getLastArg(options::OPT_fapple_kext))
+ if (Args.getLastArg(options::OPT_fapple_kext) ||
+ (Args.hasArg(options::OPT_mkernel) && types::isCXX(InputType)))
CmdArgs.push_back("-fapple-kext");
Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
@@ -4215,14 +4675,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Translate -mstackrealign
if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
- false)) {
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back("-force-align-stack");
- }
- if (!Args.hasFlag(options::OPT_mno_stackrealign, options::OPT_mstackrealign,
- false)) {
+ false))
CmdArgs.push_back(Args.MakeArgString("-mstackrealign"));
- }
if (Args.hasArg(options::OPT_mstack_alignment)) {
StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
@@ -4238,9 +4692,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mstack-probe-size=0");
}
- if (getToolChain().getArch() == llvm::Triple::aarch64 ||
- getToolChain().getArch() == llvm::Triple::aarch64_be)
+ switch (getToolChain().getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
CmdArgs.push_back("-fallow-half-arguments-and-returns");
+ break;
+
+ default:
+ break;
+ }
if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
options::OPT_mno_restrict_it)) {
@@ -4251,8 +4715,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-arm-no-restrict-it");
}
- } else if (TT.isOSWindows() && (TT.getArch() == llvm::Triple::arm ||
- TT.getArch() == llvm::Triple::thumb)) {
+ } else if (Triple.isOSWindows() &&
+ (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb)) {
// Windows on ARM expects restricted IT blocks
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-arm-restrict-it");
@@ -4268,15 +4733,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->render(Args, CmdArgs);
}
- if (Args.hasArg(options::OPT_mkernel)) {
- if (!Args.hasArg(options::OPT_fapple_kext) && types::isCXX(InputType))
- CmdArgs.push_back("-fapple-kext");
- if (!Args.hasArg(options::OPT_fbuiltin))
- CmdArgs.push_back("-fno-builtin");
- Args.ClaimAllArgs(options::OPT_fno_builtin);
- }
- // -fbuiltin is default.
- else if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ // -fbuiltin is default unless -mkernel is used
+ if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
+ !Args.hasArg(options::OPT_mkernel)))
CmdArgs.push_back("-fno-builtin");
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -4345,7 +4804,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_fmodule_map_file);
// -fmodule-file can be used to specify files containing precompiled modules.
- Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
+ if (HaveModules)
+ Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
+ else
+ Args.ClaimAllArgs(options::OPT_fmodule_file);
// -fmodule-cache-path specifies where our implicitly-built module files
// should be written.
@@ -4453,11 +4915,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// -fuse-cxa-atexit is default.
- if (!Args.hasFlag(options::OPT_fuse_cxa_atexit,
- options::OPT_fno_use_cxa_atexit,
- !IsWindowsCygnus && !IsWindowsGNU &&
- getToolChain().getArch() != llvm::Triple::hexagon &&
- getToolChain().getArch() != llvm::Triple::xcore) ||
+ if (!Args.hasFlag(
+ options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
+ !IsWindowsCygnus && !IsWindowsGNU &&
+ getToolChain().getTriple().getOS() != llvm::Triple::Solaris &&
+ getToolChain().getArch() != llvm::Triple::hexagon &&
+ getToolChain().getArch() != llvm::Triple::xcore &&
+ ((getToolChain().getTriple().getVendor() !=
+ llvm::Triple::MipsTechnologies) ||
+ getToolChain().getTriple().hasEnvironment())) ||
KernelOrKext)
CmdArgs.push_back("-fno-use-cxa-atexit");
@@ -4499,6 +4965,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_borland_extensions, false))
CmdArgs.push_back("-fborland-extensions");
+ // -fno-declspec is default, except for PS4.
+ if (Args.hasFlag(options::OPT_fdeclspec, options::OPT_fno_declspec,
+ getToolChain().getTriple().isPS4()))
+ CmdArgs.push_back("-fdeclspec");
+ else if (Args.hasArg(options::OPT_fno_declspec))
+ CmdArgs.push_back("-fno-declspec"); // Explicitly disabling __declspec.
+
// -fthreadsafe-static is default, except for MSVC compatibility versions less
// than 19.
if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
@@ -4585,6 +5058,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_objc_arc_exceptions,
/*default*/ types::isCXX(InputType)))
CmdArgs.push_back("-fobjc-arc-exceptions");
+
}
// -fobjc-infer-related-result-type is the default, except in the Objective-C
@@ -4608,6 +5082,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Pass down -fobjc-weak or -fno-objc-weak if present.
+ if (types::isObjC(InputType)) {
+ auto WeakArg = Args.getLastArg(options::OPT_fobjc_weak,
+ options::OPT_fno_objc_weak);
+ if (!WeakArg) {
+ // nothing to do
+ } else if (GCArg) {
+ if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
+ D.Diag(diag::err_objc_weak_with_gc);
+ } else if (!objcRuntime.allowsWeak()) {
+ if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
+ D.Diag(diag::err_objc_weak_unsupported);
+ } else {
+ WeakArg->render(Args, CmdArgs);
+ }
+ }
+
if (Args.hasFlag(options::OPT_fapplication_extension,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-fapplication-extension");
@@ -4617,7 +5108,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addExceptionArgs(Args, InputType, getToolChain(), KernelOrKext, objcRuntime,
CmdArgs);
- if (getToolChain().UseSjLjExceptions())
+ if (getToolChain().UseSjLjExceptions(Args))
CmdArgs.push_back("-fsjlj-exceptions");
// C++ "sane" operator new.
@@ -4676,14 +5167,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (KernelOrKext || isNoCommonDefault(getToolChain().getTriple())) {
- if (!Args.hasArg(options::OPT_fcommon))
- CmdArgs.push_back("-fno-common");
- Args.ClaimAllArgs(options::OPT_fno_common);
- }
-
- // -fcommon is default, only pass non-default.
- else if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common))
+ // -fcommon is the default unless compiling kernel code or the target disables it.
+ bool NoCommonDefault =
+ KernelOrKext || isNoCommonDefault(getToolChain().getTriple());
+ if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common,
+ !NoCommonDefault))
CmdArgs.push_back("-fno-common");
// -fsigned-bitfields is default, and clang doesn't yet support
@@ -4918,10 +5406,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -fparse-all-comments to -cc1.
Args.AddAllArgs(CmdArgs, options::OPT_fparse_all_comments);
+ // Turn -fplugin=name.so into -load name.so
+ for (const Arg *A : Args.filtered(options::OPT_fplugin_EQ)) {
+ CmdArgs.push_back("-load");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
+
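
    For instance (plugin name hypothetical), the driver spelling maps
    directly onto cc1's existing -load option:

        clang -fplugin=./MyPlugin.so -c t.c  ->  cc1 ... -load ./MyPlugin.so
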
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
- bool OptDisabled = false;
for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
A->claim();
@@ -4929,17 +5423,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// it and developers have been trained to spell it with -mllvm.
if (StringRef(A->getValue(0)) == "-disable-llvm-optzns") {
CmdArgs.push_back("-disable-llvm-optzns");
- OptDisabled = true;
} else
A->render(Args, CmdArgs);
}
// With -save-temps, we want to save the unoptimized bitcode output from the
- // CompileJobAction, so disable optimizations if they are not already
- // disabled.
- if (C.getDriver().isSaveTempsEnabled() && !OptDisabled &&
- isa<CompileJobAction>(JA))
- CmdArgs.push_back("-disable-llvm-optzns");
+ // CompileJobAction, use -disable-llvm-passes to get pristine IR generated
+ // by the frontend.
+ if (C.getDriver().isSaveTempsEnabled() && isa<CompileJobAction>(JA))
+ CmdArgs.push_back("-disable-llvm-passes");
if (Output.getType() == types::TY_Dependencies) {
// Handled with other dependency code.
@@ -4982,8 +5474,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
- bool SplitDwarf = Args.hasArg(options::OPT_gsplit_dwarf) &&
- getToolChain().getTriple().isOSLinux() &&
+ bool SplitDwarf = SplitDwarfArg && getToolChain().getTriple().isOSLinux() &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
const char *SplitDwarfOut;
@@ -4996,11 +5487,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Host-side cuda compilation receives device-side outputs as Inputs[1...].
// Include them with -fcuda-include-gpubinary.
if (IsCuda && Inputs.size() > 1)
- for (InputInfoList::const_iterator it = std::next(Inputs.begin()),
- ie = Inputs.end();
- it != ie; ++it) {
+ for (auto I = std::next(Inputs.begin()), E = Inputs.end(); I != E; ++I) {
CmdArgs.push_back("-fcuda-include-gpubinary");
- CmdArgs.push_back(it->getFilename());
+ CmdArgs.push_back(I->getFilename());
}
// Finally add the compile command to the compilation.
@@ -5009,10 +5498,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
(InputType == types::TY_C || InputType == types::TY_CXX)) {
auto CLCommand =
getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
- C.addCommand(llvm::make_unique<FallbackCommand>(JA, *this, Exec, CmdArgs,
- std::move(CLCommand)));
+ C.addCommand(llvm::make_unique<FallbackCommand>(
+ JA, *this, Exec, CmdArgs, Inputs, std::move(CLCommand)));
} else {
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
// Handle the debug info splitting at object creation time if we're
@@ -5153,7 +5642,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
// -fgnu-runtime
} else {
assert(runtimeArg->getOption().matches(options::OPT_fgnu_runtime));
- // Legacy behaviour is to target the gnustep runtime if we are i
+ // Legacy behaviour is to target the gnustep runtime if we are in
// non-fragile mode or the GCC runtime in fragile mode.
if (isNonFragile)
runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(1, 6));
@@ -5172,12 +5661,14 @@ static bool maybeConsumeDash(const std::string &EH, size_t &I) {
return !HaveDash;
}
+namespace {
struct EHFlags {
EHFlags() : Synch(false), Asynch(false), NoExceptC(false) {}
bool Synch;
bool Asynch;
bool NoExceptC;
};
+} // end anonymous namespace
/// /EH controls whether to run destructor cleanups when exceptions are
/// thrown. There are three modifiers:
@@ -5212,14 +5703,12 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
}
}
- // FIXME: Disable C++ EH completely, until it becomes more reliable. Users
- // can use -Xclang to manually enable C++ EH until then.
- EH = EHFlags();
-
return EH;
}
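
    With the blanket reset gone, the usual cl.exe spellings take effect;
    roughly, given the modifier parsing above (a trailing '-' negates the
    preceding modifier):

        /EHsc    ->  Synch = true, NoExceptC = true
        /EHa     ->  Asynch = true
        /EHs-c-  ->  all three false
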
-void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
+void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ enum CodeGenOptions::DebugInfoKind *DebugInfoKind,
+ bool *EmitCodeView) const {
unsigned RTOptionID = options::OPT__SLASH_MT;
if (Args.hasArg(options::OPT__SLASH_LDd))
@@ -5230,39 +5719,46 @@ void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
if (Arg *A = Args.getLastArg(options::OPT__SLASH_M_Group))
RTOptionID = A->getOption().getID();
+ StringRef FlagForCRT;
switch (RTOptionID) {
case options::OPT__SLASH_MD:
if (Args.hasArg(options::OPT__SLASH_LDd))
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("-D_MT");
CmdArgs.push_back("-D_DLL");
- CmdArgs.push_back("--dependent-lib=msvcrt");
+ FlagForCRT = "--dependent-lib=msvcrt";
break;
case options::OPT__SLASH_MDd:
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("-D_MT");
CmdArgs.push_back("-D_DLL");
- CmdArgs.push_back("--dependent-lib=msvcrtd");
+ FlagForCRT = "--dependent-lib=msvcrtd";
break;
case options::OPT__SLASH_MT:
if (Args.hasArg(options::OPT__SLASH_LDd))
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("--dependent-lib=libcmt");
+ FlagForCRT = "--dependent-lib=libcmt";
break;
case options::OPT__SLASH_MTd:
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("--dependent-lib=libcmtd");
+ FlagForCRT = "--dependent-lib=libcmtd";
break;
default:
llvm_unreachable("Unexpected option ID.");
}
- // This provides POSIX compatibility (maps 'open' to '_open'), which most
- // users want. The /Za flag to cl.exe turns this off, but it's not
- // implemented in clang.
- CmdArgs.push_back("--dependent-lib=oldnames");
+ if (Args.hasArg(options::OPT__SLASH_Zl)) {
+ CmdArgs.push_back("-D_VC_NODEFAULTLIB");
+ } else {
+ CmdArgs.push_back(FlagForCRT.data());
+
+ // This provides POSIX compatibility (maps 'open' to '_open'), which most
+ // users want. The /Za flag to cl.exe turns this off, but it's not
+ // implemented in clang.
+ CmdArgs.push_back("--dependent-lib=oldnames");
+ }
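
    A sketch of what each CRT choice now sends to cc1 (the /Zl path
    suppresses the default-library directives entirely):

        clang-cl /MD t.c      ->  -D_MT -D_DLL --dependent-lib=msvcrt --dependent-lib=oldnames
        clang-cl /MTd t.c     ->  -D_DEBUG -D_MT --dependent-lib=libcmtd --dependent-lib=oldnames
        clang-cl /MT /Zl t.c  ->  -D_MT -D_VC_NODEFAULTLIB
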
// Both /showIncludes and /E (and /EP) write to stdout. Allowing both
// would produce interleaved output, so ignore /showIncludes in such cases.
@@ -5275,6 +5771,16 @@ void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
/*default=*/false))
CmdArgs.push_back("-fno-rtti-data");
+ // Emit CodeView if -Z7 is present.
+ *EmitCodeView = Args.hasArg(options::OPT__SLASH_Z7);
+ bool EmitDwarf = Args.hasArg(options::OPT_gdwarf);
+ // If we are emitting CV but not DWARF, don't build information that LLVM
+ // can't yet process.
+ if (*EmitCodeView && !EmitDwarf)
+ *DebugInfoKind = CodeGenOptions::DebugLineTablesOnly;
+ if (*EmitCodeView)
+ CmdArgs.push_back("-gcodeview");
+
const Driver &D = getToolChain().getDriver();
EHFlags EH = parseClangCLEHFlags(D, Args);
// FIXME: Do something with NoExceptC.
@@ -5366,6 +5872,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
assert(Inputs.size() == 1 && "Unexpected number of inputs.");
const InputInfo &Input = Inputs[0];
+ std::string TripleStr =
+ getToolChain().ComputeEffectiveClangTriple(Args, Input.getType());
+ const llvm::Triple Triple(TripleStr);
+
// Don't warn about "clang -w -c foo.s"
Args.ClaimAllArgs(options::OPT_w);
// and "clang -emit-llvm -c foo.s"
@@ -5380,8 +5890,6 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Add the "effective" target triple.
CmdArgs.push_back("-triple");
- std::string TripleStr =
- getToolChain().ComputeEffectiveClangTriple(Args, Input.getType());
CmdArgs.push_back(Args.MakeArgString(TripleStr));
// Set the output mode, we currently only expect to be used as a real
@@ -5395,7 +5903,6 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Clang::getBaseInputName(Args, Input));
// Add the target cpu
- const llvm::Triple Triple(TripleStr);
std::string CPU = getCPUName(Args, Triple, /*FromAs*/ true);
if (!CPU.empty()) {
CmdArgs.push_back("-target-cpu");
@@ -5403,8 +5910,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the target features
- const Driver &D = getToolChain().getDriver();
- getTargetFeatures(D, Triple, Args, CmdArgs, true);
+ getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, true);
// Ignore explicit -force_cpusubtype_ALL option.
(void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
@@ -5423,17 +5929,21 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// with an actual assembly file.
if (SourceAction->getType() == types::TY_Asm ||
SourceAction->getType() == types::TY_PP_Asm) {
+ bool WantDebug = false;
+ unsigned DwarfVersion = 0;
Args.ClaimAllArgs(options::OPT_g_Group);
- if (Arg *A = Args.getLastArg(options::OPT_g_Group))
- if (!A->getOption().matches(options::OPT_g0))
- CmdArgs.push_back("-g");
-
- if (Args.hasArg(options::OPT_gdwarf_2))
- CmdArgs.push_back("-gdwarf-2");
- if (Args.hasArg(options::OPT_gdwarf_3))
- CmdArgs.push_back("-gdwarf-3");
- if (Args.hasArg(options::OPT_gdwarf_4))
- CmdArgs.push_back("-gdwarf-4");
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ WantDebug = !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_ggdb0);
+ if (WantDebug)
+ DwarfVersion = DwarfVersionNum(A->getSpelling());
+ }
+ if (DwarfVersion == 0)
+ DwarfVersion = getToolChain().GetDefaultDwarfVersion();
+ RenderDebugEnablingArgs(Args, CmdArgs,
+ (WantDebug ? CodeGenOptions::LimitedDebugInfo
+ : CodeGenOptions::NoDebugInfo),
+ DwarfVersion, llvm::DebuggerKind::Default);
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs);
@@ -5442,6 +5952,23 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// assembler on assembly source files.
CmdArgs.push_back("-dwarf-debug-producer");
CmdArgs.push_back(Args.MakeArgString(getClangFullVersion()));
+
+ // And pass along -I options
+ Args.AddAllArgs(CmdArgs, options::OPT_I);
+ }
+
+ // Handle -fPIC et al -- the relocation-model affects the assembler
+ // for some targets.
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
+
+ const char *RMName = RelocationModelName(RelocationModel);
+ if (RMName) {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(RMName);
}
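
    So assembly inputs now see the same relocation model as C sources; a
    sketch (model name as used elsewhere in this file):

        clang -fPIC -c foo.s  ->  cc1as ... -mrelocation-model pic
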
// Optionally embed the -cc1as level arguments into the debug info, for build
@@ -5500,7 +6027,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -5555,12 +6082,22 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
//
// FIXME: The triple class should directly provide the information we want
// here.
- const llvm::Triple::ArchType Arch = getToolChain().getArch();
- if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::ppc)
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::ppc:
CmdArgs.push_back("-m32");
- else if (Arch == llvm::Triple::x86_64 || Arch == llvm::Triple::ppc64 ||
- Arch == llvm::Triple::ppc64le)
+ break;
+ case llvm::Triple::x86_64:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
CmdArgs.push_back("-m64");
+ break;
+ case llvm::Triple::sparcel:
+ CmdArgs.push_back("-EL");
+ break;
+ }
if (Output.isFilename()) {
CmdArgs.push_back("-o");
@@ -5582,8 +6119,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
// inputs into '-Wl,' options?
for (const auto &II : Inputs) {
// Don't try to pass LLVM or AST inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ if (types::isLLVMIR(II.getType()))
D.Diag(diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
else if (II.getType() == types::TY_AST)
@@ -5623,7 +6159,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
GCCName = "gcc";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void gcc::Preprocessor::RenderExtraToolArgs(const JobAction &JA,
@@ -5661,7 +6197,9 @@ void gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// Hexagon tools start.
void hexagon::Assembler::RenderExtraToolArgs(const JobAction &JA,
- ArgStringList &CmdArgs) const {}
+ ArgStringList &CmdArgs) const {
+}
+
void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -5669,15 +6207,21 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
claimNoWarnArgs(Args);
- const Driver &D = getToolChain().getDriver();
+ auto &HTC = static_cast<const toolchains::HexagonToolChain&>(getToolChain());
+ const Driver &D = HTC.getDriver();
ArgStringList CmdArgs;
- std::string MarchString = "-march=";
- MarchString += toolchains::Hexagon_TC::GetTargetCPU(Args);
- CmdArgs.push_back(Args.MakeArgString(MarchString));
+ std::string MArchString = "-march=hexagon";
+ CmdArgs.push_back(Args.MakeArgString(MArchString));
RenderExtraToolArgs(JA, CmdArgs);
+ std::string AsName = "hexagon-llvm-mc";
+ std::string MCpuString = "-mcpu=hexagon" +
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
+ CmdArgs.push_back("-filetype=obj");
+ CmdArgs.push_back(Args.MakeArgString(MCpuString));
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -5686,8 +6230,10 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fsyntax-only");
}
- if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args))
- CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v));
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ CmdArgs.push_back(Args.MakeArgString(std::string("-gpsize=") + N));
+ }
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -5701,61 +6247,56 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// inputs into '-Wl,' options?
for (const auto &II : Inputs) {
// Don't try to pass LLVM or AST inputs to a generic gcc.
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ if (types::isLLVMIR(II.getType()))
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
+ << HTC.getTripleString();
else if (II.getType() == types::TY_AST)
D.Diag(clang::diag::err_drv_no_ast_support)
- << getToolChain().getTripleString();
+ << HTC.getTripleString();
else if (II.getType() == types::TY_ModuleFile)
D.Diag(diag::err_drv_no_module_support)
- << getToolChain().getTripleString();
+ << HTC.getTripleString();
if (II.isFilename())
CmdArgs.push_back(II.getFilename());
else
// Don't render as input, we need gcc to do the translations.
- // FIXME: Pranav: What is this ?
+ // FIXME: What is this?
II.getInputArg().render(Args, CmdArgs);
}
- const char *GCCName = "hexagon-as";
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ auto *Exec = Args.MakeArgString(HTC.GetProgramPath(AsName.c_str()));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA,
ArgStringList &CmdArgs) const {
- // The types are (hopefully) good enough.
}
-static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
- const toolchains::Hexagon_TC &ToolChain,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- ArgStringList &CmdArgs,
- const char *LinkingOutput) {
+static void
+constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
+ const toolchains::HexagonToolChain &HTC,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ const char *LinkingOutput) {
- const Driver &D = ToolChain.getDriver();
+ const Driver &D = HTC.getDriver();
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
- bool hasStaticArg = Args.hasArg(options::OPT_static);
- bool buildingLib = Args.hasArg(options::OPT_shared);
- bool buildPIE = Args.hasArg(options::OPT_pie);
- bool incStdLib = !Args.hasArg(options::OPT_nostdlib);
- bool incStartFiles = !Args.hasArg(options::OPT_nostartfiles);
- bool incDefLibs = !Args.hasArg(options::OPT_nodefaultlibs);
- bool useG0 = false;
- bool useShared = buildingLib && !hasStaticArg;
+ bool IsStatic = Args.hasArg(options::OPT_static);
+ bool IsShared = Args.hasArg(options::OPT_shared);
+ bool IsPIE = Args.hasArg(options::OPT_pie);
+ bool IncStdLib = !Args.hasArg(options::OPT_nostdlib);
+ bool IncStartFiles = !Args.hasArg(options::OPT_nostartfiles);
+ bool IncDefLibs = !Args.hasArg(options::OPT_nodefaultlibs);
+ bool UseG0 = false;
+ bool UseShared = IsShared && !IsStatic;
//----------------------------------------------------------------------------
// Silence warnings for various options
//----------------------------------------------------------------------------
-
Args.ClaimAllArgs(options::OPT_g_Group);
Args.ClaimAllArgs(options::OPT_emit_llvm);
Args.ClaimAllArgs(options::OPT_w); // Other warning options are already
@@ -5765,27 +6306,37 @@ static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
- for (const auto &Opt : ToolChain.ExtraOpts)
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ if (Args.hasArg(options::OPT_r))
+ CmdArgs.push_back("-r");
+
+ for (const auto &Opt : HTC.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
- std::string MarchString = toolchains::Hexagon_TC::GetTargetCPU(Args);
- CmdArgs.push_back(Args.MakeArgString("-m" + MarchString));
+ CmdArgs.push_back("-march=hexagon");
+ std::string CpuVer =
+ toolchains::HexagonToolChain::GetTargetCPUVersion(Args).str();
+ std::string MCpuString = "-mcpu=hexagon" + CpuVer;
+ CmdArgs.push_back(Args.MakeArgString(MCpuString));
- if (buildingLib) {
+ if (IsShared) {
CmdArgs.push_back("-shared");
- CmdArgs.push_back("-call_shared"); // should be the default, but doing as
- // hexagon-gcc does
+ // The following should be the default, but we do as hexagon-gcc does.
+ CmdArgs.push_back("-call_shared");
}
- if (hasStaticArg)
+ if (IsStatic)
CmdArgs.push_back("-static");
- if (buildPIE && !buildingLib)
+ if (IsPIE && !IsShared)
CmdArgs.push_back("-pie");
- if (const char *v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) {
- CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v));
- useG0 = toolchains::Hexagon_TC::UsesG0(v);
+ if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
+ std::string N = llvm::utostr(G.getValue());
+ CmdArgs.push_back(Args.MakeArgString(std::string("-G") + N));
+ UseG0 = G.getValue() == 0;
}
//----------------------------------------------------------------------------
@@ -5794,77 +6345,85 @@ static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- const std::string MarchSuffix = "/" + MarchString;
- const std::string G0Suffix = "/G0";
- const std::string MarchG0Suffix = MarchSuffix + G0Suffix;
- const std::string RootDir =
- toolchains::Hexagon_TC::GetGnuDir(D.InstalledDir, Args) + "/";
- const std::string StartFilesDir =
- RootDir + "hexagon/lib" + (useG0 ? MarchG0Suffix : MarchSuffix);
-
//----------------------------------------------------------------------------
// moslib
//----------------------------------------------------------------------------
- std::vector<std::string> oslibs;
- bool hasStandalone = false;
+ std::vector<std::string> OsLibs;
+ bool HasStandalone = false;
for (const Arg *A : Args.filtered(options::OPT_moslib_EQ)) {
A->claim();
- oslibs.emplace_back(A->getValue());
- hasStandalone = hasStandalone || (oslibs.back() == "standalone");
+ OsLibs.emplace_back(A->getValue());
+ HasStandalone = HasStandalone || (OsLibs.back() == "standalone");
}
- if (oslibs.empty()) {
- oslibs.push_back("standalone");
- hasStandalone = true;
+ if (OsLibs.empty()) {
+ OsLibs.push_back("standalone");
+ HasStandalone = true;
}
//----------------------------------------------------------------------------
// Start Files
//----------------------------------------------------------------------------
- if (incStdLib && incStartFiles) {
-
- if (!buildingLib) {
- if (hasStandalone) {
- CmdArgs.push_back(
- Args.MakeArgString(StartFilesDir + "/crt0_standalone.o"));
+ const std::string MCpuSuffix = "/" + CpuVer;
+ const std::string MCpuG0Suffix = MCpuSuffix + "/G0";
+ const std::string RootDir =
+ HTC.getHexagonTargetDir(D.InstalledDir, D.PrefixDirs) + "/";
+ const std::string StartSubDir =
+ "hexagon/lib" + (UseG0 ? MCpuG0Suffix : MCpuSuffix);
+
+ auto Find = [&HTC] (const std::string &RootDir, const std::string &SubDir,
+ const char *Name) -> std::string {
+ std::string RelName = SubDir + Name;
+ std::string P = HTC.GetFilePath(RelName.c_str());
+ if (llvm::sys::fs::exists(P))
+ return P;
+ return RootDir + RelName;
+ };
+
+ if (IncStdLib && IncStartFiles) {
+ if (!IsShared) {
+ if (HasStandalone) {
+ std::string Crt0SA = Find(RootDir, StartSubDir, "/crt0_standalone.o");
+ CmdArgs.push_back(Args.MakeArgString(Crt0SA));
}
- CmdArgs.push_back(Args.MakeArgString(StartFilesDir + "/crt0.o"));
+ std::string Crt0 = Find(RootDir, StartSubDir, "/crt0.o");
+ CmdArgs.push_back(Args.MakeArgString(Crt0));
}
- std::string initObj = useShared ? "/initS.o" : "/init.o";
- CmdArgs.push_back(Args.MakeArgString(StartFilesDir + initObj));
+ std::string Init = UseShared
+ ? Find(RootDir, StartSubDir + "/pic", "/initS.o")
+ : Find(RootDir, StartSubDir, "/init.o");
+ CmdArgs.push_back(Args.MakeArgString(Init));
}
//----------------------------------------------------------------------------
// Library Search Paths
//----------------------------------------------------------------------------
- const ToolChain::path_list &LibPaths = ToolChain.getFilePaths();
+ const ToolChain::path_list &LibPaths = HTC.getFilePaths();
for (const auto &LibPath : LibPaths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(HTC, Inputs, Args, CmdArgs);
//----------------------------------------------------------------------------
// Libraries
//----------------------------------------------------------------------------
- if (incStdLib && incDefLibs) {
+ if (IncStdLib && IncDefLibs) {
if (D.CCCIsCXX()) {
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
CmdArgs.push_back("--start-group");
- if (!buildingLib) {
- for (const std::string &Lib : oslibs)
+ if (!IsShared) {
+ for (const std::string &Lib : OsLibs)
CmdArgs.push_back(Args.MakeArgString("-l" + Lib));
CmdArgs.push_back("-lc");
}
@@ -5876,9 +6435,11 @@ static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
//----------------------------------------------------------------------------
// End files
//----------------------------------------------------------------------------
- if (incStdLib && incStartFiles) {
- std::string finiObj = useShared ? "/finiS.o" : "/fini.o";
- CmdArgs.push_back(Args.MakeArgString(StartFilesDir + finiObj));
+ if (IncStdLib && IncStartFiles) {
+ std::string Fini = UseShared
+ ? Find(RootDir, StartSubDir + "/pic", "/finiS.o")
+ : Find(RootDir, StartSubDir, "/fini.o");
+ CmdArgs.push_back(Args.MakeArgString(Fini));
}
}
@@ -5887,60 +6448,101 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
-
- const toolchains::Hexagon_TC &ToolChain =
- static_cast<const toolchains::Hexagon_TC &>(getToolChain());
+ auto &HTC = static_cast<const toolchains::HexagonToolChain&>(getToolChain());
ArgStringList CmdArgs;
- constructHexagonLinkArgs(C, JA, ToolChain, Output, Inputs, Args, CmdArgs,
+ constructHexagonLinkArgs(C, JA, HTC, Output, Inputs, Args, CmdArgs,
LinkingOutput);
- std::string Linker = ToolChain.GetProgramPath("hexagon-ld");
+ std::string Linker = HTC.GetProgramPath("hexagon-link");
C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs));
+ CmdArgs, Inputs));
}
// Hexagon tools end.
+void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ std::string Linker = getToolChain().GetProgramPath(getShortName());
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("old-gnu");
+ CmdArgs.push_back("-target");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
+ CmdArgs, Inputs));
+}
+// AMDGPU tools end.
+
+wasm::Linker::Linker(const ToolChain &TC)
+ : GnuTool("wasm::Linker", "lld", TC) {}
+
+bool wasm::Linker::isLinkJob() const {
+ return true;
+}
+
+bool wasm::Linker::hasIntegratedCPP() const {
+ return false;
+}
+
+void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const char *Linker = Args.MakeArgString(getToolChain().GetLinkerPath());
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("ld");
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Linker, CmdArgs, Inputs));
+}
+
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
if (!Arch.empty())
MArch = Arch;
else
MArch = Triple.getArchName();
- MArch = StringRef(MArch).lower();
+ MArch = StringRef(MArch).split("+").first.lower();
// Handle -march=native.
if (MArch == "native") {
std::string CPU = llvm::sys::getHostCPUName();
if (CPU != "generic") {
// Translate the native cpu into the architecture suffix for that CPU.
- const char *Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch);
+ StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
// If there is no valid architecture suffix for this CPU we don't know how
// to handle it, so return no architecture.
- if (strcmp(Suffix, "") == 0)
+ if (Suffix.empty())
MArch = "";
else
- MArch = std::string("arm") + Suffix;
+ MArch = std::string("arm") + Suffix.str();
}
}
return MArch;
}
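
    The new split('+') strips architecture-extension suffixes before
    matching, so, for example:

        -march=armv8-a+crc  ->  MArch = "armv8-a"
        -march=ARMV7-A      ->  MArch = "armv7-a" (lowering as before)
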
+
/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
-const char *arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
+StringRef arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch = getARMArch(Arch, Triple);
// getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch
// here means an -march=native that we can't handle, so instead return no CPU.
if (MArch.empty())
- return "";
+ return StringRef();
// We need to return an empty string here on invalid MArch values as the
// various places that call this function can't cope with a null result.
- const char *result = Triple.getARMCPUForArch(MArch);
- if (result)
- return result;
- else
- return "";
+ return Triple.getARMCPUForArch(MArch);
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
@@ -5949,7 +6551,7 @@ std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
if (!CPU.empty()) {
- std::string MCPU = StringRef(CPU).lower();
+ std::string MCPU = StringRef(CPU).split("+").first.lower();
// Handle -mcpu=native.
if (MCPU == "native")
return llvm::sys::getHostCPUName();
@@ -5963,15 +6565,26 @@ std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
/// CPU (or Arch, if CPU is generic).
// FIXME: This is redundant with -mcpu, why does LLVM use this.
-const char *arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch) {
- if (CPU == "generic" &&
- llvm::ARMTargetParser::parseArch(Arch) == llvm::ARM::AK_ARMV8_1A)
- return "v8.1a";
-
- unsigned ArchKind = llvm::ARMTargetParser::parseCPUArch(CPU);
+StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple) {
+ unsigned ArchKind;
+ if (CPU == "generic") {
+ std::string ARMArch = tools::arm::getARMArch(Arch, Triple);
+ ArchKind = llvm::ARM::parseArch(ARMArch);
+ if (ArchKind == llvm::ARM::AK_INVALID)
+ // In case of generic Arch, i.e. "arm",
+ // extract arch from default cpu of the Triple
+ ArchKind = llvm::ARM::parseCPUArch(Triple.getARMCPUForArch(ARMArch));
+ } else {
+ // FIXME: horrible hack to get around the fact that Cortex-A7 is only an
+ // armv7k triple if it's actually been specified via "-arch armv7k".
+ ArchKind = (Arch == "armv7k" || Arch == "thumbv7k")
+ ? (unsigned)llvm::ARM::AK_ARMV7K
+ : llvm::ARM::parseCPUArch(CPU);
+ }
if (ArchKind == llvm::ARM::AK_INVALID)
return "";
- return llvm::ARMTargetParser::getSubArch(ArchKind);
+ return llvm::ARM::getSubArch(ArchKind);
}
void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
@@ -5986,6 +6599,9 @@ void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
}
mips::NanEncoding mips::getSupportedNanEncoding(StringRef &CPU) {
+ // Strictly speaking, mips32r2 and mips64r2 are NanLegacy-only since Nan2008
+ // was first introduced in Release 3. However, other compilers have
+ // traditionally allowed it for Release 2, so we should do the same.
return (NanEncoding)llvm::StringSwitch<int>(CPU)
.Case("mips1", NanLegacy)
.Case("mips2", NanLegacy)
@@ -5993,12 +6609,12 @@ mips::NanEncoding mips::getSupportedNanEncoding(StringRef &CPU) {
.Case("mips4", NanLegacy)
.Case("mips5", NanLegacy)
.Case("mips32", NanLegacy)
- .Case("mips32r2", NanLegacy)
+ .Case("mips32r2", NanLegacy | Nan2008)
.Case("mips32r3", NanLegacy | Nan2008)
.Case("mips32r5", NanLegacy | Nan2008)
.Case("mips32r6", Nan2008)
.Case("mips64", NanLegacy)
- .Case("mips64r2", NanLegacy)
+ .Case("mips64r2", NanLegacy | Nan2008)
.Case("mips64r3", NanLegacy | Nan2008)
.Case("mips64r5", NanLegacy | Nan2008)
.Case("mips64r6", Nan2008)
@@ -6031,7 +6647,7 @@ bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
}
bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
- StringRef ABIName, StringRef FloatABI) {
+ StringRef ABIName, mips::FloatABI FloatABI) {
if (Triple.getVendor() != llvm::Triple::ImaginationTechnologies &&
Triple.getVendor() != llvm::Triple::MipsTechnologies)
return false;
@@ -6041,7 +6657,7 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
// FPXX shouldn't be used if either -msoft-float or -mfloat-abi=soft is
// present.
- if (FloatABI == "soft")
+ if (FloatABI == mips::FloatABI::Soft)
return false;
return llvm::StringSwitch<bool>(CPUName)
@@ -6053,7 +6669,7 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple,
StringRef CPUName, StringRef ABIName,
- StringRef FloatABI) {
+ mips::FloatABI FloatABI) {
bool UseFPXX = isFPXXDefault(Triple, CPUName, ABIName, FloatABI);
// FPXX shouldn't be used if -msingle-float is present.
@@ -6173,42 +6789,34 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
- const ToolChain::path_list &Paths = ToolChain.getFilePaths();
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
- if (D.IsUsingLTO(Args))
- AddGoldPlugin(ToolChain, Args, CmdArgs);
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX())
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lc");
CmdArgs.push_back("-lcompiler_rt");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles))
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6228,11 +6836,11 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
SourceAction = SourceAction->getInputs()[0];
}
- // If -fno_integrated_as is used add -Q to the darwin assember driver to make
+ // If -fno-integrated-as is used, add -Q to the darwin assembler driver to make
// sure it runs its system assembler not clang's integrated assembler.
// Applicable to darwin11+ and Xcode 4+. darwin<10 lacked integrated-as.
// FIXME: at run-time detect assembler capabilities or rely on version
- // information forwarded by -target-assembler-version (future)
+ // information forwarded by -target-assembler-version.
if (Args.hasArg(options::OPT_fno_integrated_as)) {
const llvm::Triple &T(getToolChain().getTriple());
if (!(T.isMacOSX() && T.isMacOSXVersionLT(10, 7)))
@@ -6276,7 +6884,7 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// asm_final spec is empty.
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void darwin::MachOTool::anchor() {}
@@ -6334,15 +6942,34 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-application_extension");
- // If we are using LTO, then automatically create a temporary file path for
- // the linker to use, so that it's lifetime will extend past a possible
- // dsymutil step.
- if (Version[0] >= 116 && D.IsUsingLTO(Args) && NeedsTempPath(Inputs)) {
- const char *TmpPath = C.getArgs().MakeArgString(
- D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
- C.addTempFile(TmpPath);
- CmdArgs.push_back("-object_path_lto");
- CmdArgs.push_back(TmpPath);
+ if (D.isUsingLTO()) {
+ // If we are using LTO, then automatically create a temporary file path for
+ // the linker to use, so that its lifetime will extend past a possible
+ // dsymutil step.
+ if (Version[0] >= 116 && NeedsTempPath(Inputs)) {
+ const char *TmpPath = C.getArgs().MakeArgString(
+ D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
+ C.addTempFile(TmpPath);
+ CmdArgs.push_back("-object_path_lto");
+ CmdArgs.push_back(TmpPath);
+ }
+
+ // Use the -lto_library option to specify the libLTO.dylib path. Try to find
+ // it among the clang installed libraries. If not found, the option is not
+ // used and 'ld' will use its default mechanism to search for libLTO.dylib.
+ if (Version[0] >= 133) {
+ // Search for libLTO in <InstalledDir>/../lib/libLTO.dylib
+ StringRef P = llvm::sys::path::parent_path(D.getInstalledDir());
+ SmallString<128> LibLTOPath(P);
+ llvm::sys::path::append(LibLTOPath, "lib");
+ llvm::sys::path::append(LibLTOPath, "libLTO.dylib");
+ if (llvm::sys::fs::exists(LibLTOPath)) {
+ CmdArgs.push_back("-lto_library");
+ CmdArgs.push_back(C.getArgs().MakeArgString(LibLTOPath));
+ } else {
+ D.Diag(diag::warn_drv_lto_libpath);
+ }
+ }
}
// Derived from the "link" spec.
@@ -6509,7 +7136,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, None));
return;
}
@@ -6517,13 +7144,11 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// we follow suit for ease of comparison.
AddLinkArgs(C, Args, CmdArgs, Inputs);
- Args.AddAllArgs(CmdArgs, options::OPT_d_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
- Args.AddLastArg(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ // It seems that the 'e' option is completely ignored for dynamic executables
+ // (the default), and with static executables, the last one wins, as expected.
+ Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_u_Group,
+ options::OPT_e, options::OPT_r});
// Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
// members of static archive libraries which implement Objective-C classes or
@@ -6534,8 +7159,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles))
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
getMachOToolChain().addStartObjectFileArgs(Args, CmdArgs);
// SafeStack requires its own runtime libraries
@@ -6567,12 +7191,11 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
InputFileList.push_back(II.getFilename());
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs))
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
addOpenMPRuntime(CmdArgs, getToolChain(), Args);
- if (isObjCRuntimeLinked(Args) && !Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (isObjCRuntimeLinked(Args) &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
// We use arclite library for both ARC and subscripting support.
getMachOToolChain().AddLinkARCArgs(Args, CmdArgs);
@@ -6591,13 +7214,9 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fnested_functions))
CmdArgs.push_back("-allow_stack_execute");
- // TODO: It would be nice to use addProfileRT() here, but darwin's compiler-rt
- // paths are different enough from other toolchains that this needs a fair
- // amount of refactoring done first.
getMachOToolChain().addProfileRTLibs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (getToolChain().getDriver().CCCIsCXX())
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -6607,8 +7226,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getMachOToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
// endfile_spec is empty.
}
@@ -6619,8 +7237,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
for (const Arg *A : Args.filtered(options::OPT_iframework))
CmdArgs.push_back(Args.MakeArgString(std::string("-F") + A->getValue()));
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (Arg *A = Args.getLastArg(options::OPT_fveclib)) {
if (A->getValue() == StringRef("Accelerate")) {
CmdArgs.push_back("-framework");
@@ -6631,7 +7248,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
std::unique_ptr<Command> Cmd =
- llvm::make_unique<Command>(JA, *this, Exec, CmdArgs);
+ llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs);
Cmd->setInputFileList(std::move(InputFileList));
C.addCommand(std::move(Cmd));
}
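The new -lto_library handling looks for libLTO.dylib relative to the installed driver before letting ld fall back to its own search. A self-contained sketch of the same path computation using std::filesystem (directory layout as in the hunk; everything else is illustrative):

    #include <filesystem>
    #include <iostream>
    #include <optional>

    namespace fs = std::filesystem;

    // Mirrors the search: <InstalledDir>/../lib/libLTO.dylib. An empty
    // result means the caller omits -lto_library and warns, as above.
    std::optional<fs::path> findLibLTO(const fs::path &InstalledDir) {
      fs::path P = InstalledDir.parent_path() / "lib" / "libLTO.dylib";
      if (fs::exists(P))
        return P;
      return std::nullopt;
    }

    int main() {
      if (auto P = findLibLTO("/usr/local/llvm/bin"))
        std::cout << "-lto_library " << *P << '\n';
      else
        std::cout << "libLTO.dylib not found next to the driver\n";
    }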
@@ -6655,7 +7272,7 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6675,7 +7292,7 @@ void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6698,7 +7315,7 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6718,7 +7335,7 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6726,32 +7343,12 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- // FIXME: Find a real GCC, don't hard-code versions here
- std::string GCCLibPath = "/usr/gcc/4.5/lib/gcc/";
- const llvm::Triple &T = getToolChain().getTriple();
- std::string LibPath = "/usr/lib/";
- const llvm::Triple::ArchType Arch = T.getArch();
- switch (Arch) {
- case llvm::Triple::x86:
- GCCLibPath +=
- ("i386-" + T.getVendorName() + "-" + T.getOSName()).str() + "/4.5.2/";
- break;
- case llvm::Triple::x86_64:
- GCCLibPath += ("i386-" + T.getVendorName() + "-" + T.getOSName()).str();
- GCCLibPath += "/4.5.2/amd64/";
- LibPath += "amd64/";
- break;
- default:
- llvm_unreachable("Unsupported architecture");
- }
-
ArgStringList CmdArgs;
// Demangle C++ names in errors
CmdArgs.push_back("-C");
- if ((!Args.hasArg(options::OPT_nostdlib)) &&
- (!Args.hasArg(options::OPT_shared))) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
CmdArgs.push_back("-e");
CmdArgs.push_back("_start");
}
@@ -6765,7 +7362,8 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-shared");
} else {
CmdArgs.push_back("--dynamic-linker");
- CmdArgs.push_back(Args.MakeArgString(LibPath + "ld.so.1"));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("ld.so.1")));
}
}
@@ -6776,53 +7374,46 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(LibPath + "crt1.o"));
- CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
- CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
- CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
- } else {
- CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
- CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
- CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
- }
- if (getToolChain().getDriver().CCCIsCXX())
- CmdArgs.push_back(Args.MakeArgString(LibPath + "cxa_finalize.o"));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("values-Xa.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
}
- CmdArgs.push_back(Args.MakeArgString("-L" + GCCLibPath));
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_r});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (getToolChain().getDriver().CCCIsCXX())
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("-lc");
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-lgcc");
- CmdArgs.push_back("-lc");
CmdArgs.push_back("-lm");
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtend.o"));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
}
- CmdArgs.push_back(Args.MakeArgString(LibPath + "crtn.o"));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
- addProfileRT(getToolChain(), Args, CmdArgs);
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
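The Solaris linker job above stops hard-coding /usr/gcc/4.5 paths and resolves crt files through the toolchain's file search paths instead. A minimal model of that first-match lookup (simplified; the real ToolChain::GetFilePath also honors the sysroot):

    #include <filesystem>
    #include <string>
    #include <vector>

    // Return the first existing <dir>/<name> across the search paths,
    // falling back to the bare name so a miss still surfaces at link time.
    std::string getFilePath(const std::vector<std::string> &Dirs,
                            const std::string &Name) {
      for (const auto &D : Dirs) {
        auto P = std::filesystem::path(D) / Name;
        if (std::filesystem::exists(P))
          return P.string();
      }
      return Name;
    }

    int main() {
      std::vector<std::string> Dirs = {"/usr/lib/amd64", "/usr/lib"};
      std::string Crt = getFilePath(Dirs, "crt1.o"); // e.g. /usr/lib/crt1.o
      (void)Crt;
    }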
void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6832,7 +7423,6 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
claimNoWarnArgs(Args);
ArgStringList CmdArgs;
- bool NeedsKPIC = false;
switch (getToolChain().getArch()) {
case llvm::Triple::x86:
@@ -6847,16 +7437,21 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
+ case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
- NeedsKPIC = true;
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
+ }
- case llvm::Triple::sparcv9:
+ case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- CmdArgs.push_back("-Av9a");
- NeedsKPIC = true;
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
+ }
case llvm::Triple::mips64:
case llvm::Triple::mips64el: {
@@ -6872,7 +7467,7 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-EL");
- NeedsKPIC = true;
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
@@ -6880,9 +7475,6 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
- if (NeedsKPIC)
- addAssemblerKPIC(Args, CmdArgs);
-
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
CmdArgs.push_back("-o");
@@ -6892,7 +7484,7 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -6916,8 +7508,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else if (getToolChain().getArch() == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
- if ((!Args.hasArg(options::OPT_nostdlib)) &&
- (!Args.hasArg(options::OPT_shared))) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
CmdArgs.push_back("-e");
CmdArgs.push_back("__start");
}
@@ -6947,8 +7538,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back(
@@ -6970,18 +7560,13 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_pg))
@@ -7011,8 +7596,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lgcc");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
@@ -7022,7 +7606,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void bitrig::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7042,7 +7626,7 @@ void bitrig::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7053,8 +7637,7 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
- if ((!Args.hasArg(options::OPT_nostdlib)) &&
- (!Args.hasArg(options::OPT_shared))) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
CmdArgs.push_back("-e");
CmdArgs.push_back("__start");
}
@@ -7081,8 +7664,7 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back(
@@ -7098,14 +7680,12 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_pg))
@@ -7145,8 +7725,7 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString("-lclang_rt." + MyArch));
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
@@ -7156,7 +7735,7 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7169,14 +7748,19 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on FreeBSD/amd64, we have to explicitly
// instruct 'as' in the base system to assemble 32-bit code.
- if (getToolChain().getArch() == llvm::Triple::x86)
+ switch (getToolChain().getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
CmdArgs.push_back("--32");
- else if (getToolChain().getArch() == llvm::Triple::ppc)
+ break;
+ case llvm::Triple::ppc:
CmdArgs.push_back("-a32");
- else if (getToolChain().getArch() == llvm::Triple::mips ||
- getToolChain().getArch() == llvm::Triple::mipsel ||
- getToolChain().getArch() == llvm::Triple::mips64 ||
- getToolChain().getArch() == llvm::Triple::mips64el) {
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el: {
StringRef CPUName;
StringRef ABIName;
mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
@@ -7193,20 +7777,25 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-EL");
- addAssemblerKPIC(Args, CmdArgs);
- } else if (getToolChain().getArch() == llvm::Triple::arm ||
- getToolChain().getArch() == llvm::Triple::armeb ||
- getToolChain().getArch() == llvm::Triple::thumb ||
- getToolChain().getArch() == llvm::Triple::thumbeb) {
- const Driver &D = getToolChain().getDriver();
- const llvm::Triple &Triple = getToolChain().getTriple();
- StringRef FloatABI = arm::getARMFloatABI(D, Args, Triple);
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-G" + v));
+ A->claim();
+ }
+
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
- if (FloatABI == "hard") {
+ if (ABI == arm::FloatABI::Hard)
CmdArgs.push_back("-mfpu=vfp");
- } else {
+ else
CmdArgs.push_back("-mfpu=softvfp");
- }
switch (getToolChain().getTriple().getEnvironment()) {
case llvm::Triple::GNUEABIHF:
@@ -7218,15 +7807,16 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
default:
CmdArgs.push_back("-matpcs");
}
- } else if (getToolChain().getArch() == llvm::Triple::sparc ||
- getToolChain().getArch() == llvm::Triple::sparcel ||
- getToolChain().getArch() == llvm::Triple::sparcv9) {
- if (getToolChain().getArch() == llvm::Triple::sparc)
- CmdArgs.push_back("-Av8plusa");
- else
- CmdArgs.push_back("-Av9a");
-
- addAssemblerKPIC(Args, CmdArgs);
+ break;
+ }
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9: {
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ break;
+ }
}
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -7238,7 +7828,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
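Across the BSD and GNU assembler jobs, the hard-coded -Av8plusa/-Av9a flags are replaced by getSparcAsmModeForCPU, which derives the GNU as architecture flag from the selected CPU. A deliberately simplified stand-in that models only the 32-/64-bit defaults visible in the removed lines (the real helper maps many -mcpu values):

    #include <string>

    // Simplified: default V8+ mode for 32-bit SPARC, V9A for sparcv9.
    // The real mapping inspects the CPU name; this sketch ignores it.
    std::string sparcAsmModeForCPU(bool Is64Bit) {
      return Is64Bit ? "-Av9a" : "-Av8plusa";
    }

    int main() { return sparcAsmModeForCPU(true) == "-Av9a" ? 0 : 1; }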
void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7302,6 +7892,17 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf32ppc_fbsd");
}
+ if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ if (ToolChain.getArch() == llvm::Triple::mips ||
+ ToolChain.getArch() == llvm::Triple::mipsel ||
+ ToolChain.getArch() == llvm::Triple::mips64 ||
+ ToolChain.getArch() == llvm::Triple::mips64el) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-G" + v));
+ A->claim();
+ }
+ }
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -7309,8 +7910,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
@@ -7337,9 +7937,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
- const ToolChain::path_list &Paths = ToolChain.getFilePaths();
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_s);
@@ -7347,14 +7945,13 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_r);
- if (D.IsUsingLTO(Args))
- AddGoldPlugin(ToolChain, Args, CmdArgs);
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
addOpenMPRuntime(CmdArgs, ToolChain, Args);
if (D.CCCIsCXX()) {
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -7410,8 +8007,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || IsPIE)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
else
@@ -7419,10 +8015,10 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- addProfileRT(ToolChain, Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
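The FreeBSD jobs now forward the last -G<size> (MIPS small-data threshold) to both the assembler and the linker, claiming it so the driver does not warn about an unused option. The last-one-wins extraction, modeled on plain strings (toy code; getLastArg is clang's real API):

    #include <optional>
    #include <string>
    #include <vector>

    // Toy version of: if (Arg *A = Args.getLastArg(OPT_G)) forward "-G<val>".
    std::optional<std::string> lastGValue(const std::vector<std::string> &Argv) {
      std::optional<std::string> V;
      for (const auto &A : Argv)
        if (A.rfind("-G", 0) == 0 && A.size() > 2)
          V = A.substr(2); // later occurrences override earlier ones
      return V;
    }

    int main() {
      std::vector<std::string> CmdArgs;
      if (auto V = lastGValue({"-G4", "-G8"}))
        CmdArgs.push_back("-G" + *V); // forwards -G8
    }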
void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7471,21 +8067,26 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-EL");
- addAssemblerKPIC(Args, CmdArgs);
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
+ case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
- addAssemblerKPIC(Args, CmdArgs);
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
+ }
- case llvm::Triple::sparcv9:
+ case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- CmdArgs.push_back("-Av9");
- addAssemblerKPIC(Args, CmdArgs);
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
+ }
default:
break;
@@ -7500,7 +8101,7 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7620,8 +8221,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
@@ -7669,8 +8269,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
addOpenMPRuntime(CmdArgs, getToolChain(), Args);
if (D.CCCIsCXX()) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -7697,8 +8296,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
@@ -7708,10 +8306,10 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7721,8 +8319,16 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
claimNoWarnArgs(Args);
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple = llvm::Triple(TripleStr);
+
ArgStringList CmdArgs;
- bool NeedsKPIC = false;
+
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Triple, Args);
switch (getToolChain().getArch()) {
default:
@@ -7755,22 +8361,26 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mlittle-endian");
break;
case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
+ case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
- CmdArgs.push_back("-Av8plusa");
- NeedsKPIC = true;
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
- case llvm::Triple::sparcv9:
+ }
+ case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- CmdArgs.push_back("-Av9a");
- NeedsKPIC = true;
+ std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ CmdArgs.push_back(getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
+ }
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
- const llvm::Triple &Triple = getToolChain().getTriple();
- switch (Triple.getSubArch()) {
+ const llvm::Triple &Triple2 = getToolChain().getTriple();
+ switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
break;
@@ -7781,10 +8391,18 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
- StringRef ARMFloatABI = tools::arm::getARMFloatABI(
- getToolChain().getDriver(), Args,
- llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args)));
- CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=" + ARMFloatABI));
+ switch (arm::getARMFloatABI(getToolChain(), Args)) {
+ case arm::FloatABI::Invalid: llvm_unreachable("must have an ABI!");
+ case arm::FloatABI::Soft:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=soft"));
+ break;
+ case arm::FloatABI::SoftFP:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=softfp"));
+ break;
+ case arm::FloatABI::Hard:
+ CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=hard"));
+ break;
+ }
Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
@@ -7817,18 +8435,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// -mno-shared should be emitted unless -fpic, -fpie, -fPIC, -fPIE,
// or -mshared (not implemented) is in effect.
- bool IsPicOrPie = false;
- if (Arg *A = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
- options::OPT_fpic, options::OPT_fno_pic,
- options::OPT_fPIE, options::OPT_fno_PIE,
- options::OPT_fpie, options::OPT_fno_pie)) {
- if (A->getOption().matches(options::OPT_fPIC) ||
- A->getOption().matches(options::OPT_fpic) ||
- A->getOption().matches(options::OPT_fPIE) ||
- A->getOption().matches(options::OPT_fpie))
- IsPicOrPie = true;
- }
- if (!IsPicOrPie)
+ if (RelocationModel == llvm::Reloc::Static)
CmdArgs.push_back("-mno-shared");
// LLVM doesn't support -mplt yet and acts as if it is always given.
@@ -7847,13 +8454,13 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the last -mfp32/-mfpxx/-mfp64 or -mfpxx if it is enabled by default.
- StringRef MIPSFloatABI = getMipsFloatABI(getToolChain().getDriver(), Args);
if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx,
options::OPT_mfp64)) {
A->claim();
A->render(Args, CmdArgs);
- } else if (mips::shouldUseFPXX(Args, getToolChain().getTriple(), CPUName,
- ABIName, MIPSFloatABI))
+ } else if (mips::shouldUseFPXX(
+ Args, getToolChain().getTriple(), CPUName, ABIName,
+ getMipsFloatABI(getToolChain().getDriver(), Args)))
CmdArgs.push_back("-mfpxx");
// Pass on -mmips16 or -mno-mips16. However, the assembler equivalent of
@@ -7890,7 +8497,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_modd_spreg,
options::OPT_mno_odd_spreg);
- NeedsKPIC = true;
+ AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::systemz: {
@@ -7902,9 +8509,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (NeedsKPIC)
- addAssemblerKPIC(Args, CmdArgs);
-
+ Args.AddAllArgs(CmdArgs, options::OPT_I);
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
CmdArgs.push_back("-o");
@@ -7914,7 +8519,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -7927,7 +8532,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
- bool isAndroid = Triple.getEnvironment() == llvm::Triple::Android;
+ bool isAndroid = Triple.isAndroid();
bool isCygMing = Triple.isOSCygMing();
bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
Args.hasArg(options::OPT_static);
@@ -7963,7 +8568,7 @@ static std::string getLinuxDynamicLinker(const ArgList &Args,
const toolchains::Linux &ToolChain) {
const llvm::Triple::ArchType Arch = ToolChain.getArch();
- if (ToolChain.getTriple().getEnvironment() == llvm::Triple::Android) {
+ if (ToolChain.getTriple().isAndroid()) {
if (ToolChain.getTriple().isArch64Bit())
return "/system/bin/linker64";
else
@@ -7977,33 +8582,30 @@ static std::string getLinuxDynamicLinker(const ArgList &Args,
return "/lib/ld-linux-aarch64_be.so.1";
else if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
- tools::arm::getARMFloatABI(ToolChain.getDriver(), Args, ToolChain.getTriple()) == "hard")
+ arm::getARMFloatABI(ToolChain, Args) == arm::FloatABI::Hard)
return "/lib/ld-linux-armhf.so.3";
else
return "/lib/ld-linux.so.3";
} else if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb) {
// TODO: check which dynamic linker name to use.
if (ToolChain.getTriple().getEnvironment() == llvm::Triple::GNUEABIHF ||
- tools::arm::getARMFloatABI(ToolChain.getDriver(), Args, ToolChain.getTriple()) == "hard")
+ arm::getARMFloatABI(ToolChain, Args) == arm::FloatABI::Hard)
return "/lib/ld-linux-armhf.so.3";
else
return "/lib/ld-linux.so.3";
} else if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel ||
Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el) {
- StringRef CPUName;
- StringRef ABIName;
- mips::getMipsCPUAndABI(Args, ToolChain.getTriple(), CPUName, ABIName);
- bool IsNaN2008 = mips::isNaN2008(Args, ToolChain.getTriple());
-
- StringRef LibDir = llvm::StringSwitch<llvm::StringRef>(ABIName)
- .Case("o32", "/lib")
- .Case("n32", "/lib32")
- .Case("n64", "/lib64")
- .Default("/lib");
+ std::string LibDir =
+ "/lib" + mips::getMipsABILibSuffix(Args, ToolChain.getTriple());
StringRef LibName;
+ bool IsNaN2008 = mips::isNaN2008(Args, ToolChain.getTriple());
if (mips::isUCLibc(Args))
LibName = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
- else
+ else if (!ToolChain.getTriple().hasEnvironment()) {
+ bool LE = (ToolChain.getTriple().getArch() == llvm::Triple::mipsel) ||
+ (ToolChain.getTriple().getArch() == llvm::Triple::mips64el);
+ LibName = LE ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
+ } else
LibName = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
return (LibDir + "/" + LibName).str();
@@ -8018,7 +8620,7 @@ static std::string getLinuxDynamicLinker(const ArgList &Args,
return "/lib64/ld64.so.1";
return "/lib64/ld64.so.2";
} else if (Arch == llvm::Triple::systemz)
- return "/lib64/ld64.so.1";
+ return "/lib/ld64.so.1";
else if (Arch == llvm::Triple::sparcv9)
return "/lib64/ld-linux.so.2";
else if (Arch == llvm::Triple::x86_64 &&
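The MIPS dynamic-linker selection above now covers three C libraries — uClibc, musl (chosen when the triple has no environment component), and glibc — and derives the lib directory from the ABI suffix instead of a StringSwitch. Condensed into one standalone function (loader names copied from the hunk; the helper name is a stand-in):

    #include <string>

    // Condensed MIPS loader choice; ABILibSuffix is "", "32" or "64".
    std::string mipsDynamicLinker(bool IsUClibc, bool HasEnvironment,
                                  bool IsLittleEndian, bool IsNaN2008,
                                  const std::string &ABILibSuffix) {
      std::string LibDir = "/lib" + ABILibSuffix;
      std::string Name;
      if (IsUClibc)
        Name = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
      else if (!HasEnvironment) // bare triple -> musl
        Name = IsLittleEndian ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
      else
        Name = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
      return LibDir + "/" + Name;
    }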
@@ -8106,12 +8708,18 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const toolchains::Linux &ToolChain =
static_cast<const toolchains::Linux &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ llvm::Triple Triple = llvm::Triple(TripleStr);
+
const llvm::Triple::ArchType Arch = ToolChain.getArch();
- const bool isAndroid =
- ToolChain.getTriple().getEnvironment() == llvm::Triple::Android;
+ const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsPIE =
!Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_static) &&
(Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
+ const bool HasCRTBeginEndFiles =
+ ToolChain.getTriple().hasEnvironment() ||
+ (ToolChain.getTriple().getVendor() != llvm::Triple::MipsTechnologies);
ArgStringList CmdArgs;
@@ -8123,6 +8731,14 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ if (llvm::sys::path::filename(Exec) == "lld") {
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("old-gnu");
+ CmdArgs.push_back("-target");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
+ }
+
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
@@ -8136,9 +8752,7 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-s");
if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
- arm::appendEBLinkFlags(
- Args, CmdArgs,
- llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args)));
+ arm::appendEBLinkFlags(Args, CmdArgs, Triple);
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
@@ -8172,8 +8786,7 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!isAndroid) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
@@ -8199,7 +8812,9 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
else
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+
+ if (HasCRTBeginEndFiles)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
// Add crtfastmath.o if available and fast math is enabled.
ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
@@ -8208,13 +8823,10 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
- const ToolChain::path_list &Paths = ToolChain.getFilePaths();
-
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- if (D.IsUsingLTO(Args))
- AddGoldPlugin(ToolChain, Args, CmdArgs);
+ if (D.isUsingLTO())
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
@@ -8222,10 +8834,10 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
// The profile runtime also needs access to system libraries.
- addProfileRT(getToolChain(), Args, CmdArgs);
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
- if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
if (OnlyLibstdcxxStatic)
@@ -8298,14 +8910,14 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else
crtend = isAndroid ? "crtend_android.o" : "crtend.o";
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ if (HasCRTBeginEndFiles)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
if (!isAndroid)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
}
- C.addCommand(
- llvm::make_unique<Command>(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
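The GNU linker job now special-cases a linker binary literally named lld, prepending the flavor and target selection before any other argument. The filename test, sketched with std::filesystem (flag spellings taken from the hunk):

    #include <filesystem>
    #include <string>
    #include <vector>

    // If the configured linker is lld, it must be told to emulate the old
    // GNU driver and which target to link for; other linkers need nothing.
    std::vector<std::string> lldPrefixArgs(const std::string &Exec,
                                           const std::string &Triple) {
      if (std::filesystem::path(Exec).filename() == "lld")
        return {"-flavor", "old-gnu", "-target", Triple};
      return {};
    }

    int main() {
      auto P = lldPrefixArgs("/usr/bin/lld", "x86_64-linux-gnu");
      return P.size() == 4 ? 0 : 1;
    }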
// NaCl ARM assembly (inline or standalone) can be written with a set of macros
@@ -8317,8 +8929,8 @@ void nacltools::AssemblerARM::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::NaCl_TC &ToolChain =
- static_cast<const toolchains::NaCl_TC &>(getToolChain());
+ const toolchains::NaClToolChain &ToolChain =
+ static_cast<const toolchains::NaClToolChain &>(getToolChain());
InputInfo NaClMacros(ToolChain.GetNaClArmMacrosPath(), types::TY_PP_Asm,
"nacl-arm-macros.s");
InputInfoList NewInputs;
@@ -8338,8 +8950,8 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::NaCl_TC &ToolChain =
- static_cast<const toolchains::NaCl_TC &>(getToolChain());
+ const toolchains::NaClToolChain &ToolChain =
+ static_cast<const toolchains::NaClToolChain &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool IsStatic =
@@ -8364,8 +8976,8 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
- // NaCl_TC doesn't have ExtraOpts like Linux; the only relevant flag from
- // there is --build-id, which we do want.
+ // NaClToolChain doesn't have ExtraOpts like Linux; the only relevant flag
+ // from there is --build-id, which we do want.
CmdArgs.push_back("--build-id");
if (!IsStatic)
@@ -8391,8 +9003,7 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o")));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
@@ -8410,18 +9021,15 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
- const ToolChain::path_list &Paths = ToolChain.getFilePaths();
-
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
- if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
bool OnlyLibstdcxxStatic =
Args.hasArg(options::OPT_static_libstdcxx) && !IsStatic;
if (OnlyLibstdcxxStatic)
@@ -8480,8 +9088,8 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- C.addCommand(
- llvm::make_unique<Command>(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -8501,7 +9109,7 @@ void minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -8519,8 +9127,7 @@ void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
CmdArgs.push_back(
@@ -8528,24 +9135,21 @@ void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
- addProfileRT(getToolChain(), Args, CmdArgs);
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
CmdArgs.push_back("-lc");
@@ -8556,7 +9160,7 @@ void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
/// DragonFly Tools
@@ -8585,7 +9189,7 @@ void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -8595,7 +9199,6 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
- bool UseGCC47 = llvm::sys::fs::exists("/usr/lib/gcc47");
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
@@ -8612,7 +9215,8 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/usr/libexec/ld-elf.so.2");
}
- CmdArgs.push_back("--hash-style=both");
+ CmdArgs.push_back("--hash-style=gnu");
+ CmdArgs.push_back("--enable-new-dtags");
}
// When building 32-bit code on DragonFly/pc64, we have to explicitly
@@ -8629,8 +9233,7 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back(
@@ -8653,29 +9256,17 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_e});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
- // FIXME: GCC passes on -lgcc, -lgcc_pic and a whole lot of
- // rpaths
- if (UseGCC47)
- CmdArgs.push_back("-L/usr/lib/gcc47");
- else
- CmdArgs.push_back("-L/usr/lib/gcc44");
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ CmdArgs.push_back("-L/usr/lib/gcc50");
if (!Args.hasArg(options::OPT_static)) {
- if (UseGCC47) {
- CmdArgs.push_back("-rpath");
- CmdArgs.push_back("/usr/lib/gcc47");
- } else {
- CmdArgs.push_back("-rpath");
- CmdArgs.push_back("/usr/lib/gcc44");
- }
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib/gcc50");
}
if (D.CCCIsCXX()) {
@@ -8690,34 +9281,25 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- if (UseGCC47) {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_static_libgcc)) {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_static_libgcc)) {
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("-lgcc_eh");
- } else {
- if (Args.hasArg(options::OPT_shared_libgcc)) {
+ } else {
+ if (Args.hasArg(options::OPT_shared_libgcc)) {
CmdArgs.push_back("-lgcc_pic");
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-lgcc");
- } else {
+ } else {
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("--as-needed");
CmdArgs.push_back("-lgcc_pic");
CmdArgs.push_back("--no-as-needed");
- }
- }
- } else {
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-lgcc_pic");
- } else {
- CmdArgs.push_back("-lgcc");
}
}
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
@@ -8727,10 +9309,10 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
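DragonFly drops the filesystem probe that chose between /usr/lib/gcc47 and /usr/lib/gcc44 and hard-wires a single GCC library directory, adding -rpath only for dynamic links. The resulting argument shape, as a compact sketch:

    #include <string>
    #include <vector>

    // One library dir feeds both -L and, for dynamic links, -rpath.
    std::vector<std::string> dragonflyGccLibArgs(bool StaticLink) {
      const std::string Dir = "/usr/lib/gcc50";
      std::vector<std::string> Out = {"-L" + Dir};
      if (!StaticLink) {
        Out.push_back("-rpath");
        Out.push_back(Dir);
      }
      return Out;
    }

    int main() { return dragonflyGccLibArgs(false).size() == 3 ? 0 : 1; }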
// Try to find Exe from a Visual Studio distribution. This first tries to find
@@ -8766,8 +9348,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles) && !C.getDriver().IsCLMode())
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
+ !C.getDriver().IsCLMode())
CmdArgs.push_back("-defaultlib:libcmt");
if (!llvm::sys::Process::GetEnv("LIB")) {
@@ -8795,6 +9377,13 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back(
Args.MakeArgString(std::string("-libpath:") + LibDir.c_str()));
+
+ if (MSVC.useUniversalCRT(VisualStudioDir)) {
+ std::string UniversalCRTLibPath;
+ if (MSVC.getUniversalCRTLibraryPath(UniversalCRTLibPath))
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ UniversalCRTLibPath.c_str()));
+ }
}
std::string WindowsSdkLibPath;
@@ -8805,7 +9394,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-nologo");
- if (Args.hasArg(options::OPT_g_Group))
+ if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
CmdArgs.push_back("-debug");
bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd,
@@ -8822,28 +9411,42 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString("-debug"));
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
if (Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
- static const char *CompilerRTComponents[] = {
- "asan_dynamic", "asan_dynamic_runtime_thunk",
- };
- for (const auto &Component : CompilerRTComponents)
- CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component)));
+ for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
// Make sure the dynamic runtime thunk is not optimized out at link time
// to ensure proper SEH handling.
CmdArgs.push_back(Args.MakeArgString("-include:___asan_seh_interceptor"));
} else if (DLL) {
- CmdArgs.push_back(
- Args.MakeArgString(getCompilerRT(TC, "asan_dll_thunk")));
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
} else {
- static const char *CompilerRTComponents[] = {
- "asan", "asan_cxx",
- };
- for (const auto &Component : CompilerRTComponents)
- CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Component)));
+ for (const auto &Lib : {"asan", "asan_cxx"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
}
}
Args.AddAllArgValues(CmdArgs, options::OPT__SLASH_link);
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ CmdArgs.push_back("-nodefaultlib:vcomp.lib");
+ CmdArgs.push_back("-nodefaultlib:vcompd.lib");
+ CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
+ TC.getDriver().Dir + "/../lib"));
+ switch (getOpenMPRuntime(getToolChain(), Args)) {
+ case OMPRT_OMP:
+ CmdArgs.push_back("-defaultlib:libomp.lib");
+ break;
+ case OMPRT_IOMP5:
+ CmdArgs.push_back("-defaultlib:libiomp5md.lib");
+ break;
+ case OMPRT_GOMP:
+ break;
+ case OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
+
// Add filenames, libraries, and other linker inputs.
for (const auto &Input : Inputs) {
if (Input.isFilename()) {
@@ -8891,7 +9494,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(linkPath);
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
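When -fopenmp is active, the MSVC link above first suppresses the vcomp default libraries and then injects the library for the runtime selected by -fopenmp=. The mapping, isolated as a sketch (the enum is a stand-in for clang's runtime kind; library names are from the hunk):

    #include <string>

    enum class OMPRuntime { OMP, IOMP5, GOMP, Unknown }; // stand-in enum

    // GOMP has no MSVC library; Unknown was already diagnosed upstream,
    // so both map to "no extra defaultlib".
    std::string openmpDefaultLib(OMPRuntime RT) {
      switch (RT) {
      case OMPRuntime::OMP:
        return "-defaultlib:libomp.lib";
      case OMPRuntime::IOMP5:
        return "-defaultlib:libiomp5md.lib";
      case OMPRuntime::GOMP:
      case OMPRuntime::Unknown:
        return "";
      }
      return "";
    }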
void visualstudio::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -8915,21 +9518,34 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
// any flag accepted by clang-cl.
// These are spelled the same way in clang and cl.exe.
- Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
- Args.AddAllArgs(CmdArgs, options::OPT_I);
+ Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I});
// Optimization level.
+ if (Arg *A = Args.getLastArg(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_fbuiltin ? "/Oi"
+ : "/Oi-");
if (Arg *A = Args.getLastArg(options::OPT_O, options::OPT_O0)) {
if (A->getOption().getID() == options::OPT_O0) {
CmdArgs.push_back("/Od");
} else {
+ CmdArgs.push_back("/Og");
+
StringRef OptLevel = A->getValue();
- if (OptLevel == "1" || OptLevel == "2" || OptLevel == "s")
- A->render(Args, CmdArgs);
- else if (OptLevel == "3")
- CmdArgs.push_back("/Ox");
+ if (OptLevel == "s" || OptLevel == "z")
+ CmdArgs.push_back("/Os");
+ else
+ CmdArgs.push_back("/Ot");
+
+ CmdArgs.push_back("/Ob2");
}
}
+ if (Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
+ options::OPT_fno_omit_frame_pointer))
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_fomit_frame_pointer
+ ? "/Oy"
+ : "/Oy-");
+ if (!Args.hasArg(options::OPT_fwritable_strings))
+ CmdArgs.push_back("/GF");
// Flags for which clang-cl has an alias.
// FIXME: How can we ensure this stays in sync with relevant clang-cl options?
@@ -8948,7 +9564,8 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
A->getOption().getID() == options::OPT_fdata_sections ? "/Gw" : "/Gw-");
if (Args.hasArg(options::OPT_fsyntax_only))
CmdArgs.push_back("/Zs");
- if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only))
+ if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only,
+ options::OPT__SLASH_Z7))
CmdArgs.push_back("/Z7");
std::vector<std::string> Includes =
@@ -8960,6 +9577,7 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LD);
Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LDd);
Args.AddAllArgs(CmdArgs, options::OPT__SLASH_EH);
+ Args.AddAllArgs(CmdArgs, options::OPT__SLASH_Zl);
// The order of these flags is relevant, so pick the last one.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd,
@@ -8986,7 +9604,7 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe",
D.getClangProgramPath());
return llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs);
+ CmdArgs, Inputs);
}
/// MinGW Tools
@@ -9013,7 +9631,7 @@ void MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
@@ -9026,8 +9644,24 @@ void MinGW::Linker::AddLibGCC(const ArgList &Args,
CmdArgs.push_back("-lmingwthrd");
CmdArgs.push_back("-lmingw32");
- // Add libgcc or compiler-rt.
- AddRunTimeLibs(getToolChain(), getToolChain().getDriver(), CmdArgs, Args);
+ // Use compiler-rt when --rtlib=compiler-rt is given; otherwise use libgcc.
+ ToolChain::RuntimeLibType RLT = getToolChain().GetRuntimeLibType(Args);
+ if (RLT == ToolChain::RLT_Libgcc) {
+ bool Static = Args.hasArg(options::OPT_static_libgcc) ||
+ Args.hasArg(options::OPT_static);
+ bool Shared = Args.hasArg(options::OPT_shared);
+ bool CXX = getToolChain().getDriver().CCCIsCXX();
+
+ if (Static || (!CXX && !Shared)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("-lgcc");
+ }
+ } else {
+ AddRunTimeLibs(getToolChain(), getToolChain().getDriver(), CmdArgs, Args);
+ }
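// A minimal sketch of the libgcc spelling chosen above, given the same three
// inputs (names are illustrative): static links, and plain C non-shared
// links, take the static archive plus the static unwinder; everything else
// takes the shared libgcc_s.
#include <string>
#include <vector>

std::vector<std::string> mingwLibgccArgs(bool Static, bool Shared, bool CXX) {
  if (Static || (!CXX && !Shared))
    return {"-lgcc", "-lgcc_eh"};
  return {"-lgcc_s", "-lgcc"};
}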
CmdArgs.push_back("-lmoldname");
CmdArgs.push_back("-lmingwex");
@@ -9057,6 +9691,8 @@ void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (LinkerName.equals_lower("lld")) {
CmdArgs.push_back("-flavor");
CmdArgs.push_back("gnu");
+ } else if (!LinkerName.equals_lower("ld")) {
+ D.Diag(diag::err_drv_unsupported_linker) << LinkerName;
}
if (!D.SysRoot.empty())
@@ -9110,8 +9746,7 @@ void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
Args.AddLastArg(CmdArgs, options::OPT_Z_Flag);
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_mdll)) {
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("dllcrt2.o")));
} else {
@@ -9126,18 +9761,15 @@ void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
- const ToolChain::path_list Paths = TC.getFilePaths();
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
-
+ TC.AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(TC, Inputs, Args, CmdArgs);
// TODO: Add ASan stuff here
// TODO: Add profile stuff here
- if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
if (OnlyLibstdcxxStatic)
@@ -9193,7 +9825,7 @@ void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
const char *Exec = Args.MakeArgString(TC.GetProgramPath(LinkerName.data()));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
/// XCore Tools
@@ -9229,7 +9861,7 @@ void XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -9257,7 +9889,7 @@ void XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void CrossWindows::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -9296,7 +9928,7 @@ void CrossWindows::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const std::string Assembler = TC.GetProgramPath("as");
Exec = Args.MakeArgString(Assembler);
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -9375,8 +10007,7 @@ void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.hasArg(options::OPT_static) ? "-Bstatic"
: "-Bdynamic");
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
CmdArgs.push_back("--entry");
CmdArgs.push_back(Args.MakeArgString(EntryPoint));
}
@@ -9398,8 +10029,7 @@ void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ImpLib));
}
- if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
const std::string CRTPath(D.SysRoot + "/usr/lib/");
const char *CRTBegin;
@@ -9409,11 +10039,7 @@ void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
-
- const auto &Paths = TC.getFilePaths();
- for (const auto &Path : Paths)
- CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path));
-
+ TC.AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(TC, Inputs, Args, CmdArgs);
if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
@@ -9435,10 +10061,25 @@ void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- const std::string Linker = TC.GetProgramPath("ld");
- Exec = Args.MakeArgString(Linker);
+ if (TC.getSanitizerArgs().needsAsanRt()) {
+ // TODO: Handle /MT[d] and /MD[d].
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
+ } else {
+ for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
+ // Make sure the dynamic runtime thunk is not optimized out at link time
+ // to ensure proper SEH handling.
+ CmdArgs.push_back(Args.MakeArgString("--undefined"));
+ CmdArgs.push_back(Args.MakeArgString(TC.getArch() == llvm::Triple::x86
+ ? "___asan_seh_interceptor"
+ : "__asan_seh_interceptor"));
+ }
+ }
+
+ Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
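// A sketch of the per-arch symbol spelling used above: i386 Windows
// decorates C symbols with an extra leading underscore at the object level,
// so the forced-undefined ASan interceptor is spelled differently per
// target. Function name is hypothetical.
#include <string>

std::string sehInterceptorSymbol(bool IsX86_32) {
  return IsX86_32 ? "___asan_seh_interceptor" : "__asan_seh_interceptor";
}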
void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -9446,35 +10087,46 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
-
ArgStringList CmdArgs;
-
assert(Inputs.size() == 1);
const InputInfo &II = Inputs[0];
- assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX);
- assert(Output.getType() == types::TY_PP_Asm); // Require preprocessed asm.
-
- // Append all -I, -iquote, -isystem paths.
- Args.AddAllArgs(CmdArgs, options::OPT_clang_i_Group);
- // These are spelled the same way in clang and moviCompile.
- Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
+ assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX ||
+ II.getType() == types::TY_PP_CXX);
- CmdArgs.push_back("-DMYRIAD2");
+ if (JA.getKind() == Action::PreprocessJobClass) {
+ Args.ClaimAllArgs();
+ CmdArgs.push_back("-E");
+ } else {
+ assert(Output.getType() == types::TY_PP_Asm); // Require preprocessed asm.
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back("-fno-exceptions"); // Always do this even if unspecified.
+ }
CmdArgs.push_back("-mcpu=myriad2");
- CmdArgs.push_back("-S");
-
- // Any -O option passes through without translation. What about -Ofast ?
- if (Arg *A = Args.getLastArg(options::OPT_O_Group))
- A->render(Args, CmdArgs);
+ CmdArgs.push_back("-DMYRIAD2");
- if (Args.hasFlag(options::OPT_ffunction_sections,
- options::OPT_fno_function_sections)) {
- CmdArgs.push_back("-ffunction-sections");
+ // Append all -I, -iquote, -isystem paths, defines/undefines,
+ // 'f' flags, optimization flags, and warning options.
+ // These are spelled the same way in clang and moviCompile.
+ Args.AddAllArgs(CmdArgs, {options::OPT_I_Group, options::OPT_clang_i_Group,
+ options::OPT_std_EQ, options::OPT_D, options::OPT_U,
+ options::OPT_f_Group, options::OPT_f_clang_Group,
+ options::OPT_g_Group, options::OPT_M_Group,
+ options::OPT_O_Group, options::OPT_W_Group});
+
+ // If we're producing a dependency file, and assembly is the final action,
+ // then the name of the target in the dependency file should be the '.o'
+ // file, not the '.s' file produced by this step. For example, instead of
+ // /tmp/mumble.s: mumble.c .../someheader.h
+ // the filename on the left-hand side should be "mumble.o".
+ if (Args.getLastArg(options::OPT_MF) && !Args.getLastArg(options::OPT_MT) &&
+ C.getActions().size() == 1 &&
+ C.getActions()[0]->getKind() == Action::AssembleJobClass) {
+ Arg *A = Args.getLastArg(options::OPT_o);
+ if (A) {
+ CmdArgs.push_back("-MT");
+ CmdArgs.push_back(Args.MakeArgString(A->getValue()));
+ }
}
- if (Args.hasArg(options::OPT_fno_inline_functions))
- CmdArgs.push_back("-fno-inline-functions");
-
- CmdArgs.push_back("-fno-exceptions"); // Always do this even if unspecified.
CmdArgs.push_back(II.getFilename());
CmdArgs.push_back("-o");
@@ -9482,8 +10134,8 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
- C.addCommand(
- llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec), CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
}
void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -9499,13 +10151,14 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.getType() == types::TY_Object);
CmdArgs.push_back("-no6thSlotCompression");
- CmdArgs.push_back("-cv:myriad2"); // Chip Version ?
+ CmdArgs.push_back("-cv:myriad2"); // Chip Version
CmdArgs.push_back("-noSPrefixing");
CmdArgs.push_back("-a"); // Mystery option.
- for (auto Arg : Args.filtered(options::OPT_I)) {
- Arg->claim();
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+ for (const Arg *A : Args.filtered(options::OPT_I, options::OPT_isystem)) {
+ A->claim();
CmdArgs.push_back(
- Args.MakeArgString(std::string("-i:") + Arg->getValue(0)));
+ Args.MakeArgString(std::string("-i:") + A->getValue(0)));
}
CmdArgs.push_back("-elf"); // Output format.
CmdArgs.push_back(II.getFilename());
@@ -9514,6 +10167,378 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
- C.addCommand(
- llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec), CmdArgs));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
+}
+
+void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const auto &TC =
+ static_cast<const toolchains::MyriadToolChain &>(getToolChain());
+ const llvm::Triple &T = TC.getTriple();
+ ArgStringList CmdArgs;
+ bool UseStartfiles =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
+ bool UseDefaultLibs =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs);
+
+ if (T.getArch() == llvm::Triple::sparc)
+ CmdArgs.push_back("-EB");
+ else // SHAVE assumes little-endian, and sparcel is expressly so.
+ CmdArgs.push_back("-EL");
+
+ // The remaining logic is mostly like gnutools::Linker::ConstructJob,
+ // except that we never pass through a --sysroot option, among other
+ // differences. For example, there is no sanitizer (yet) or gold-linker
+ // support.
+
+ // Eat some arguments that may be present but have no effect.
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ Args.ClaimAllArgs(options::OPT_w);
+ Args.ClaimAllArgs(options::OPT_static_libgcc);
+
+ if (Args.hasArg(options::OPT_s)) // Pass the 'strip' option.
+ CmdArgs.push_back("-s");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (UseStartfiles) {
+ // If you want startfiles, it means you want the builtin crti and crtbegin,
+ // but not crt0. Myriad link commands provide their own crt0.o as needed.
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_e, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
+
+ TC.AddFilePathLibArgs(Args, CmdArgs);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (UseDefaultLibs) {
+ if (C.getDriver().CCCIsCXX())
+ CmdArgs.push_back("-lstdc++");
+ if (T.getOS() == llvm::Triple::RTEMS) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ // You must provide your own "-L" option to enable finding these.
+ CmdArgs.push_back("-lrtemscpu");
+ CmdArgs.push_back("-lrtemsbsp");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lgcc");
+ }
+ if (UseStartfiles) {
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtn.o")));
+ }
+
+ std::string Exec =
+ Args.MakeArgString(TC.GetProgramPath("sparc-myriad-elf-ld"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
+ CmdArgs, Inputs));
+}
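// A sketch of the resulting link-line shape under the default options
// (function name and "a.out" are illustrative): startfiles open and close
// the line, user inputs and default libraries sit in between, and crt0 is
// deliberately absent.
#include <string>
#include <vector>

std::vector<std::string>
myriadLinkSkeleton(const std::vector<std::string> &Inputs, bool UseStartfiles,
                   bool UseDefaultLibs) {
  std::vector<std::string> Line = {"-EL", "-o", "a.out"};
  if (UseStartfiles)
    Line.insert(Line.end(), {"crti.o", "crtbegin.o"});
  Line.insert(Line.end(), Inputs.begin(), Inputs.end());
  if (UseDefaultLibs)
    Line.insert(Line.end(), {"-lc", "-lgcc"});
  if (UseStartfiles)
    Line.insert(Line.end(), {"crtend.o", "crtn.o"});
  return Line;
}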
+
+void PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ps4-as"));
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
+static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.needsUbsanRt()) {
+ CmdArgs.push_back("-lSceDbgUBSanitizer_stub_weak");
+ }
+ if (SanArgs.needsAsanRt()) {
+ CmdArgs.push_back("-lSceDbgAddressSanitizer_stub_weak");
+ }
+}
+
+static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
+ const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled elsewhere.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("--oformat=so");
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ CmdArgs.push_back("-lpthread");
+ }
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld"));
+
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
+}
+
+static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
+ const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld-elf.so.1");
+ }
+ CmdArgs.push_back("--enable-new-dtags");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crt1 = nullptr;
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ crt1 = "gcrt1.o";
+ else if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin = nullptr;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
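// A self-contained sketch of the startup-object choice above (FreeBSD
// heritage; function name is illustrative): crt1 varies with -pg/-pie and
// vanishes for shared links, while crtbegin varies with -static/-shared/-pie.
#include <string>
#include <utility>

std::pair<std::string, std::string>
goldStartFiles(bool Shared, bool PIE, bool Profiling, bool Static) {
  std::string Crt1 =
      Shared ? "" : Profiling ? "gcrt1.o" : PIE ? "Scrt1.o" : "crt1.o";
  std::string CrtBegin =
      Static ? "crtbeginT.o" : (Shared || PIE) ? "crtbeginS.o" : "crtbegin.o";
  return {Crt1, CrtBegin};
}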
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ // For PS4, we always want to pass libm, libstdc++ and libkernel
+ // libraries for both C and C++ compilations.
+ CmdArgs.push_back("-lkernel");
+ if (D.CCCIsCXX()) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgcc_p");
+ else
+ CmdArgs.push_back("-lcompiler_rt");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lstdc++");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ else {
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc_p");
+ CmdArgs.push_back("-lpthread_p");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc_p");
+ }
+ }
+ CmdArgs.push_back("-lgcc_p");
+ } else {
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("--end-group");
+ } else {
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lcompiler_rt");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lstdc++");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ const char *Exec =
+#ifdef LLVM_ON_WIN32
+ Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld.gold"));
+#else
+ Args.MakeArgString(ToolChain.GetProgramPath("ps4-ld"));
+#endif
+
+ C.addCommand(llvm::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
+}
+
+void PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::FreeBSD &ToolChain =
+ static_cast<const toolchains::FreeBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ bool PS4Linker;
+ StringRef LinkerOptName;
+ if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
+ LinkerOptName = A->getValue();
+ if (LinkerOptName != "ps4" && LinkerOptName != "gold")
+ D.Diag(diag::err_drv_unsupported_linker) << LinkerOptName;
+ }
+
+ if (LinkerOptName == "gold")
+ PS4Linker = false;
+ else if (LinkerOptName == "ps4")
+ PS4Linker = true;
+ else
+ PS4Linker = !Args.hasArg(options::OPT_shared);
+
+ if (PS4Linker)
+ ConstructPS4LinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
+ else
+ ConstructGoldLinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
}
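// A minimal sketch of the selection just implemented (function name is
// hypothetical): an explicit -fuse-ld=ps4|gold wins; otherwise shared links
// default to gold and everything else to the PS4 linker.
#include <string>

bool usePS4Linker(const std::string &FuseLd, bool Shared) {
  if (FuseLd == "gold")
    return false;
  if (FuseLd == "ps4")
    return true;
  return !Shared; // no valid -fuse-ld=: choose by link kind
}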
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 651ddc8ff572..314315dea006 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -14,19 +14,20 @@
#include "clang/Driver/Tool.h"
#include "clang/Driver/Types.h"
#include "clang/Driver/Util.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Compiler.h"
namespace clang {
- class ObjCRuntime;
+class ObjCRuntime;
namespace driver {
- class Command;
- class Driver;
+class Command;
+class Driver;
namespace toolchains {
- class MachO;
+class MachO;
}
namespace tools {
@@ -37,162 +38,161 @@ class Compiler;
using llvm::opt::ArgStringList;
-SmallString<128> getCompilerRT(const ToolChain &TC, StringRef Component,
- bool Shared = false);
-
- /// \brief Clang compiler tool.
- class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
- public:
- static const char *getBaseInputName(const llvm::opt::ArgList &Args,
- const InputInfo &Input);
- static const char *getBaseInputStem(const llvm::opt::ArgList &Args,
- const InputInfoList &Inputs);
- static const char *getDependencyFileName(const llvm::opt::ArgList &Args,
- const InputInfoList &Inputs);
-
- private:
- void AddPreprocessingOptions(Compilation &C, const JobAction &JA,
- const Driver &D,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- const InputInfo &Output,
- const InputInfoList &Inputs) const;
-
- void AddAArch64TargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- void AddARMTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- bool KernelOrKext) const;
- void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
+SmallString<128> getCompilerRT(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ StringRef Component, bool Shared = false);
+
+/// \brief Clang compiler tool.
+class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
+public:
+ static const char *getBaseInputName(const llvm::opt::ArgList &Args,
+ const InputInfo &Input);
+ static const char *getBaseInputStem(const llvm::opt::ArgList &Args,
+ const InputInfoList &Inputs);
+ static const char *getDependencyFileName(const llvm::opt::ArgList &Args,
+ const InputInfoList &Inputs);
+
+private:
+ void AddPreprocessingOptions(Compilation &C, const JobAction &JA,
+ const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ToolChain *AuxToolChain) const;
+
+ void AddAArch64TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
- void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- void AddPPCTargetArgs(const llvm::opt::ArgList &Args,
+ void AddARMTargetArgs(const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ bool KernelOrKext) const;
+ void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
- void AddR600TargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- void AddSparcTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- void AddSystemZTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- void AddX86TargetArgs(const llvm::opt::ArgList &Args,
+ void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddPPCTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddR600TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddSparcTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
- void AddHexagonTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
+ void AddSystemZTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddX86TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void AddHexagonTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
- enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
+ enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
- ObjCRuntime AddObjCRuntimeArgs(const llvm::opt::ArgList &args,
- llvm::opt::ArgStringList &cmdArgs,
- RewriteKind rewrite) const;
+ ObjCRuntime AddObjCRuntimeArgs(const llvm::opt::ArgList &args,
+ llvm::opt::ArgStringList &cmdArgs,
+ RewriteKind rewrite) const;
- void AddClangCLArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
+ void AddClangCLArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ enum CodeGenOptions::DebugInfoKind *DebugInfoKind,
+ bool *EmitCodeView) const;
- visualstudio::Compiler *getCLFallback() const;
-
- mutable std::unique_ptr<visualstudio::Compiler> CLFallback;
-
- public:
- // CAUTION! The first constructor argument ("clang") is not arbitrary,
- // as it is for other tools. Some operations on a Tool actually test
- // whether that tool is Clang based on the Tool's Name as a string.
- Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC, RF_Full) {}
-
- bool hasGoodDiagnostics() const override { return true; }
- bool hasIntegratedAssembler() const override { return true; }
- bool hasIntegratedCPP() const override { return true; }
- bool canEmitIR() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- /// \brief Clang integrated assembler tool.
- class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
- public:
- ClangAs(const ToolChain &TC) : Tool("clang::as",
- "clang integrated assembler", TC,
- RF_Full) {}
- void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
- bool hasGoodDiagnostics() const override { return true; }
- bool hasIntegratedAssembler() const override { return false; }
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- /// \brief Base class for all GNU tools that provide the same behavior when
- /// it comes to response files support
- class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
- virtual void anchor();
-
- public:
- GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
- : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
- };
-
- /// gcc - Generic GCC tool implementations.
+ visualstudio::Compiler *getCLFallback() const;
+
+ mutable std::unique_ptr<visualstudio::Compiler> CLFallback;
+
+public:
+ // CAUTION! The first constructor argument ("clang") is not arbitrary,
+ // as it is for other tools. Some operations on a Tool actually test
+ // whether that tool is Clang based on the Tool's Name as a string.
+ Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC, RF_Full) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedAssembler() const override { return true; }
+ bool hasIntegratedCPP() const override { return true; }
+ bool canEmitIR() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+/// \brief Clang integrated assembler tool.
+class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
+public:
+ ClangAs(const ToolChain &TC)
+ : Tool("clang::as", "clang integrated assembler", TC, RF_Full) {}
+ void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedAssembler() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+/// \brief Base class for all GNU tools that provide the same behavior when
+/// it comes to response files support
+class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
+ virtual void anchor();
+
+public:
+ GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
+ : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
+};
+
+/// gcc - Generic GCC tool implementations.
namespace gcc {
- class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
- public:
- Common(const char *Name, const char *ShortName,
- const ToolChain &TC) : GnuTool(Name, ShortName, TC) {}
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-
- /// RenderExtraToolArgs - Render any arguments necessary to force
- /// the particular tool mode.
- virtual void
- RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const = 0;
- };
-
- class LLVM_LIBRARY_VISIBILITY Preprocessor : public Common {
- public:
- Preprocessor(const ToolChain &TC)
- : Common("gcc::Preprocessor", "gcc preprocessor", TC) {}
-
- bool hasGoodDiagnostics() const override { return true; }
- bool hasIntegratedCPP() const override { return false; }
-
- void RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY Compiler : public Common {
- public:
- Compiler(const ToolChain &TC)
- : Common("gcc::Compiler", "gcc frontend", TC) {}
-
- bool hasGoodDiagnostics() const override { return true; }
- bool hasIntegratedCPP() const override { return true; }
-
- void RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY Linker : public Common {
- public:
- Linker(const ToolChain &TC)
- : Common("gcc::Linker", "linker (via gcc)", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const override;
- };
+class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
+public:
+ Common(const char *Name, const char *ShortName, const ToolChain &TC)
+ : GnuTool(Name, ShortName, TC) {}
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+ /// RenderExtraToolArgs - Render any arguments necessary to force
+ /// the particular tool mode.
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const = 0;
+};
+
+class LLVM_LIBRARY_VISIBILITY Preprocessor : public Common {
+public:
+ Preprocessor(const ToolChain &TC)
+ : Common("gcc::Preprocessor", "gcc preprocessor", TC) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Compiler : public Common {
+public:
+ Compiler(const ToolChain &TC) : Common("gcc::Compiler", "gcc frontend", TC) {}
+
+ bool hasGoodDiagnostics() const override { return true; }
+ bool hasIntegratedCPP() const override { return true; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Common {
+public:
+ Linker(const ToolChain &TC) : Common("gcc::Linker", "linker (via gcc)", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+};
} // end namespace gcc
namespace hexagon {
@@ -206,12 +206,12 @@ public:
bool hasIntegratedCPP() const override { return false; }
- void RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const;
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
+ void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
@@ -219,50 +219,88 @@ public:
Linker(const ToolChain &TC) : GnuTool("hexagon::Linker", "hexagon-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
+ bool isLinkJob() const override { return true; }
- virtual void RenderExtraToolArgs(const JobAction &JA,
- llvm::opt::ArgStringList &CmdArgs) const;
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ llvm::opt::ArgStringList &CmdArgs) const;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace hexagon.
-namespace arm {
- std::string getARMTargetCPU(StringRef CPU, StringRef Arch,
- const llvm::Triple &Triple);
- const std::string getARMArch(StringRef Arch,
- const llvm::Triple &Triple);
- const char* getARMCPUForMArch(StringRef Arch,
- const llvm::Triple &Triple);
- const char* getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch);
+namespace amdgpu {
- void appendEBLinkFlags(const llvm::opt::ArgList &Args, ArgStringList &CmdArgs, const llvm::Triple &Triple);
-}
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("amdgpu::Linker", "lld", TC) {}
+ bool isLinkJob() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace amdgpu
+
+namespace wasm {
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ explicit Linker(const ToolChain &TC);
+ bool isLinkJob() const override;
+ bool hasIntegratedCPP() const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace wasm
+
+namespace arm {
+std::string getARMTargetCPU(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple);
+const std::string getARMArch(StringRef Arch,
+ const llvm::Triple &Triple);
+StringRef getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple);
+StringRef getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
+ const llvm::Triple &Triple);
+
+void appendEBLinkFlags(const llvm::opt::ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple);
+} // end namespace arm
namespace mips {
- typedef enum {
- NanLegacy = 1,
- Nan2008 = 2
- } NanEncoding;
- NanEncoding getSupportedNanEncoding(StringRef &CPU);
- void getMipsCPUAndABI(const llvm::opt::ArgList &Args,
- const llvm::Triple &Triple, StringRef &CPUName,
- StringRef &ABIName);
- bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
- bool isUCLibc(const llvm::opt::ArgList &Args);
- bool isNaN2008(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
- bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
- StringRef ABIName, StringRef FloatABI);
- bool shouldUseFPXX(const llvm::opt::ArgList &Args, const llvm::Triple &Triple,
- StringRef CPUName, StringRef ABIName, StringRef FloatABI);
-}
+typedef enum { NanLegacy = 1, Nan2008 = 2 } NanEncoding;
+
+enum class FloatABI {
+ Invalid,
+ Soft,
+ Hard,
+};
+
+NanEncoding getSupportedNanEncoding(StringRef &CPU);
+void getMipsCPUAndABI(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple, StringRef &CPUName,
+ StringRef &ABIName);
+std::string getMipsABILibSuffix(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+bool isUCLibc(const llvm::opt::ArgList &Args);
+bool isNaN2008(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
+bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
+ StringRef ABIName, mips::FloatABI FloatABI);
+bool shouldUseFPXX(const llvm::opt::ArgList &Args, const llvm::Triple &Triple,
+ StringRef CPUName, StringRef ABIName,
+ mips::FloatABI FloatABI);
+} // end namespace mips
namespace ppc {
- bool hasPPCAbiArg(const llvm::opt::ArgList &Args, const char *Value);
-}
+bool hasPPCAbiArg(const llvm::opt::ArgList &Args, const char *Value);
+} // end namespace ppc
/// cloudabi -- Directly call GNU Binutils linker
namespace cloudabi {
@@ -281,103 +319,102 @@ public:
} // end namespace cloudabi
namespace darwin {
- llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
- void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
-
- class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
- virtual void anchor();
- protected:
- void AddMachOArch(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const;
-
- const toolchains::MachO &getMachOToolChain() const {
- return reinterpret_cast<const toolchains::MachO&>(getToolChain());
- }
-
- public:
- MachOTool(
- const char *Name, const char *ShortName, const ToolChain &TC,
- ResponseFileSupport ResponseSupport = RF_None,
- llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
- const char *ResponseFlag = "@")
- : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
- ResponseFlag) {}
- };
-
- class LLVM_LIBRARY_VISIBILITY Assembler : public MachOTool {
- public:
- Assembler(const ToolChain &TC)
- : MachOTool("darwin::Assembler", "assembler", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
- bool NeedsTempPath(const InputInfoList &Inputs) const;
- void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs) const;
-
- public:
- Linker(const ToolChain &TC)
- : MachOTool("darwin::Linker", "linker", TC, RF_FileList,
- llvm::sys::WEM_UTF8, "-filelist") {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY Lipo : public MachOTool {
- public:
- Lipo(const ToolChain &TC) : MachOTool("darwin::Lipo", "lipo", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY Dsymutil : public MachOTool {
- public:
- Dsymutil(const ToolChain &TC) : MachOTool("darwin::Dsymutil",
- "dsymutil", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isDsymutilJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-
- class LLVM_LIBRARY_VISIBILITY VerifyDebug : public MachOTool {
- public:
- VerifyDebug(const ToolChain &TC) : MachOTool("darwin::VerifyDebug",
- "dwarfdump", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
+void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
-}
+class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
+ virtual void anchor();
+
+protected:
+ void AddMachOArch(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ const toolchains::MachO &getMachOToolChain() const {
+ return reinterpret_cast<const toolchains::MachO &>(getToolChain());
+ }
+
+public:
+ MachOTool(
+ const char *Name, const char *ShortName, const ToolChain &TC,
+ ResponseFileSupport ResponseSupport = RF_None,
+ llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
+ const char *ResponseFlag = "@")
+ : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
+ ResponseFlag) {}
+};
+
+class LLVM_LIBRARY_VISIBILITY Assembler : public MachOTool {
+public:
+ Assembler(const ToolChain &TC)
+ : MachOTool("darwin::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
+ bool NeedsTempPath(const InputInfoList &Inputs) const;
+ void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const;
+
+public:
+ Linker(const ToolChain &TC)
+ : MachOTool("darwin::Linker", "linker", TC, RF_FileList,
+ llvm::sys::WEM_UTF8, "-filelist") {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Lipo : public MachOTool {
+public:
+ Lipo(const ToolChain &TC) : MachOTool("darwin::Lipo", "lipo", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Dsymutil : public MachOTool {
+public:
+ Dsymutil(const ToolChain &TC)
+ : MachOTool("darwin::Dsymutil", "dsymutil", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isDsymutilJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY VerifyDebug : public MachOTool {
+public:
+ VerifyDebug(const ToolChain &TC)
+ : MachOTool("darwin::VerifyDebug", "dwarfdump", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace darwin
/// openbsd -- Directly call GNU Binutils assembler and linker
namespace openbsd {
@@ -393,6 +430,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("openbsd::Linker", "linker", TC) {}
@@ -400,11 +438,11 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace openbsd
/// bitrig -- Directly call GNU Binutils assembler and linker
@@ -421,6 +459,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("bitrig::Linker", "linker", TC) {}
@@ -428,11 +467,11 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace bitrig
/// freebsd -- Directly call GNU Binutils assembler and linker
@@ -449,6 +488,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("freebsd::Linker", "linker", TC) {}
@@ -456,17 +496,16 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace freebsd
/// netbsd -- Directly call GNU Binutils assembler and linker
namespace netbsd {
class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
-
public:
Assembler(const ToolChain &TC)
: GnuTool("netbsd::Assembler", "assembler", TC) {}
@@ -478,19 +517,19 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("netbsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace netbsd
/// Directly call GNU Binutils' assembler and linker.
@@ -506,6 +545,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("GNU::Linker", "linker", TC) {}
@@ -513,38 +553,37 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
- }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace gnutools
- namespace nacltools {
- class LLVM_LIBRARY_VISIBILITY AssemblerARM : public gnutools::Assembler {
- public:
- AssemblerARM(const ToolChain &TC) : gnutools::Assembler(TC) {}
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
- class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
- public:
- Linker(const ToolChain &TC) : Tool("NaCl::Linker", "linker", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
-}
+namespace nacltools {
+class LLVM_LIBRARY_VISIBILITY AssemblerARM : public gnutools::Assembler {
+public:
+ AssemblerARM(const ToolChain &TC) : gnutools::Assembler(TC) {}
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("NaCl::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace nacltools
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
@@ -560,6 +599,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("minix::Linker", "linker", TC) {}
@@ -567,12 +607,11 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace minix
/// solaris -- Directly call Solaris assembler and linker
@@ -589,6 +628,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const ToolChain &TC) : Tool("solaris::Linker", "linker", TC) {}
@@ -596,11 +636,11 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace solaris
/// dragonfly -- Directly call GNU Binutils assembler and linker
@@ -617,6 +657,7 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+
class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
public:
Linker(const ToolChain &TC) : GnuTool("dragonfly::Linker", "linker", TC) {}
@@ -624,12 +665,11 @@ public:
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
} // end namespace dragonfly
/// Visual studio tools.
@@ -644,12 +684,12 @@ public:
llvm::sys::WEM_UTF16) {}
bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
+ bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
@@ -659,20 +699,20 @@ public:
llvm::sys::WEM_UTF16) {}
bool hasIntegratedAssembler() const override { return true; }
- bool hasIntegratedCPP() const override { return true; }
- bool isLinkJob() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-
- std::unique_ptr<Command> GetCommand(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const;
- };
+ bool hasIntegratedCPP() const override { return true; }
+ bool isLinkJob() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+ std::unique_ptr<Command> GetCommand(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const;
+};
} // end namespace visualstudio
/// MinGW -- Directly call GNU Binutils assembler and linker
@@ -707,9 +747,26 @@ private:
} // end namespace MinGW
namespace arm {
- StringRef getARMFloatABI(const Driver &D, const llvm::opt::ArgList &Args,
- const llvm::Triple &Triple);
-}
+enum class FloatABI {
+ Invalid,
+ Soft,
+ SoftFP,
+ Hard,
+};
+
+FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
+} // end namespace arm
+
+namespace ppc {
+enum class FloatABI {
+ Invalid,
+ Soft,
+ Hard,
+};
+
+FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+} // end namespace ppc
+
namespace XCore {
// For XCore, we do not need to instantiate tools for PreProcess, PreCompile and
// Compile.
@@ -730,42 +787,41 @@ public:
Linker(const ToolChain &TC) : Tool("XCore::Linker", "XCore-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
- } // end namespace XCore.
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace XCore
- namespace CrossWindows {
- class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
- public:
- Assembler(const ToolChain &TC)
- : Tool("CrossWindows::Assembler", "as", TC) {}
+namespace CrossWindows {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("CrossWindows::Assembler", "as", TC) {}
- bool hasIntegratedCPP() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
- };
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
- class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
- public:
- Linker(const ToolChain &TC)
- : Tool("CrossWindows::Linker", "ld", TC, RF_Full) {}
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC)
+ : Tool("CrossWindows::Linker", "ld", TC, RF_Full) {}
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-}
+} // end namespace CrossWindows
/// SHAVE tools -- Directly call moviCompile and moviAsm
namespace SHAVE {
@@ -794,8 +850,55 @@ public:
};
} // end namespace SHAVE
+/// The Myriad toolchain uses tools that are in two different namespaces.
+/// The Compiler and Assembler as defined above are in the SHAVE namespace,
+/// whereas the linker, which accepts code for a mixture of Sparc and SHAVE,
+/// is in the Myriad namespace.
+namespace Myriad {
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("shave::Linker", "ld", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace Myriad
+
+namespace PS4cpu {
+class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+public:
+ Assemble(const ToolChain &TC)
+ : Tool("PS4cpu::Assemble", "assembler", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+public:
+ Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC, RF_Full) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace PS4cpu
+
} // end namespace tools
} // end namespace driver
} // end namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLS_H
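The Tools.h changes above replace the string-valued ARM float-ABI query with typed enums and add a PPC counterpart. A minimal caller-side sketch of the new shape (hypothetical: `TC`, `Args`, and `CmdArgs` stand in for the usual driver-tool locals):

    // Branch on the typed ABI value instead of comparing StringRefs.
    arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
    if (ABI == arm::FloatABI::Hard)
      CmdArgs.push_back("-mfloat-abi=hard");
    else if (ABI == arm::FloatABI::SoftFP)
      CmdArgs.push_back("-mfloat-abi=softfp");
    else
      CmdArgs.push_back("-mfloat-abi=soft");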
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index 2085b0124298..c29ce9462a07 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -128,6 +128,19 @@ bool types::isCXX(ID Id) {
}
}
+bool types::isLLVMIR(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ case TY_LTO_IR:
+ case TY_LTO_BC:
+ return true;
+ }
+}
+
bool types::isCuda(ID Id) {
switch (Id) {
default:
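The new types::isLLVMIR predicate follows the same switch-on-ID pattern as the neighboring isCXX/isCuda helpers. A usage sketch (hypothetical `InputType` variable; a driver caller would typically use this to skip compile phases for IR inputs):

    // Inputs that are already LLVM IR or bitcode need no compile phase.
    if (types::isLLVMIR(InputType))
      return; // nothing to compile for IR/bitcode inputs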
diff --git a/lib/Edit/Commit.cpp b/lib/Edit/Commit.cpp
index 9c08cc28ac9b..cb7a784a41af 100644
--- a/lib/Edit/Commit.cpp
+++ b/lib/Edit/Commit.cpp
@@ -183,7 +183,7 @@ void Commit::addInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
data.Kind = Act_Insert;
data.OrigLoc = OrigLoc;
data.Offset = Offs;
- data.Text = copyString(text);
+ data.Text = text.copy(StrAlloc);
data.BeforePrev = beforePreviousInsertions;
CachedEdits.push_back(data);
}
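The Commit.cpp change drops a local copyString helper in favor of llvm::StringRef::copy, which allocates from the given allocator and returns a StringRef over the stable duplicate. The same idiom in isolation:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Allocator.h"

    llvm::BumpPtrAllocator StrAlloc;

    llvm::StringRef stableCopy(llvm::StringRef Text) {
      // copy() allocates Text.size() bytes from StrAlloc and memcpys the
      // contents, so the result outlives the caller's temporary buffer.
      return Text.copy(StrAlloc);
    }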
diff --git a/lib/Edit/EditedSource.cpp b/lib/Edit/EditedSource.cpp
index e557de92410e..5292a58a9ccd 100644
--- a/lib/Edit/EditedSource.cpp
+++ b/lib/Edit/EditedSource.cpp
@@ -23,6 +23,36 @@ void EditsReceiver::remove(CharSourceRange range) {
replace(range, StringRef());
}
+void EditedSource::deconstructMacroArgLoc(SourceLocation Loc,
+ SourceLocation &ExpansionLoc,
+ IdentifierInfo *&II) {
+ assert(SourceMgr.isMacroArgExpansion(Loc));
+ SourceLocation DefArgLoc = SourceMgr.getImmediateExpansionRange(Loc).first;
+ ExpansionLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ SmallString<20> Buf;
+ StringRef ArgName = Lexer::getSpelling(SourceMgr.getSpellingLoc(DefArgLoc),
+ Buf, SourceMgr, LangOpts);
+ II = nullptr;
+ if (!ArgName.empty()) {
+ II = &IdentTable.get(ArgName);
+ }
+}
+
+void EditedSource::startingCommit() {}
+
+void EditedSource::finishedCommit() {
+ for (auto &ExpArg : CurrCommitMacroArgExps) {
+ SourceLocation ExpLoc;
+ IdentifierInfo *II;
+ std::tie(ExpLoc, II) = ExpArg;
+ auto &ArgNames = ExpansionToArgMap[ExpLoc.getRawEncoding()];
+ if (std::find(ArgNames.begin(), ArgNames.end(), II) == ArgNames.end()) {
+ ArgNames.push_back(II);
+ }
+ }
+ CurrCommitMacroArgExps.clear();
+}
+
StringRef EditedSource::copyString(const Twine &twine) {
SmallString<128> Data;
return copyString(twine.toStringRef(Data));
@@ -36,15 +66,27 @@ bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
}
if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
- SourceLocation
- DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
- SourceLocation
- ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
- llvm::DenseMap<unsigned, SourceLocation>::iterator
- I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
- if (I != ExpansionToArgMap.end() && I->second != DefArgLoc)
- return false; // Trying to write in a macro argument input that has
- // already been written for another argument of the same macro.
+ IdentifierInfo *II;
+ SourceLocation ExpLoc;
+ deconstructMacroArgLoc(OrigLoc, ExpLoc, II);
+ auto I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
+ if (I != ExpansionToArgMap.end() &&
+ std::find(I->second.begin(), I->second.end(), II) != I->second.end()) {
+ // Trying to write in a macro argument input that has already been
+ // written by a previous commit for another expansion of the same macro
+ // argument name. For example:
+ //
+ // \code
+ // #define MAC(x) ((x)+(x))
+ // MAC(a)
+ // \endcode
+ //
+ // A commit modified the macro argument 'a' due to the first '(x)'
+ // expansion inside the macro definition, and a subsequent commit tried
+ // to modify 'a' again for the second '(x)' expansion. The edits of the
+ // second commit will be rejected.
+ return false;
+ }
}
return true;
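The map now records, per macro expansion, the set of argument names already edited, so a second commit touching another expansion of the same argument is rejected while edits to distinct arguments still go through. A toy model of that bookkeeping (standalone sketch, not the clang types):

    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    struct MacroArgEdits {
      // Committed edits: expansion location -> argument names touched.
      std::map<unsigned, std::set<std::string>> ExpansionToArgs;
      // Edits staged by the commit currently being applied.
      std::vector<std::pair<unsigned, std::string>> Staged;

      bool canEdit(unsigned ExpLoc, const std::string &Arg) const {
        auto I = ExpansionToArgs.find(ExpLoc);
        return I == ExpansionToArgs.end() || !I->second.count(Arg);
      }
      void stage(unsigned ExpLoc, std::string Arg) {
        Staged.emplace_back(ExpLoc, std::move(Arg));
      }
      void finishCommit() { // fold staged edits into the committed map
        for (auto &E : Staged)
          ExpansionToArgs[E.first].insert(E.second);
        Staged.clear();
      }
    };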
@@ -59,11 +101,11 @@ bool EditedSource::commitInsert(SourceLocation OrigLoc,
return true;
if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
- SourceLocation
- DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
- SourceLocation
- ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
- ExpansionToArgMap[ExpLoc.getRawEncoding()] = DefArgLoc;
+ IdentifierInfo *II;
+ SourceLocation ExpLoc;
+ deconstructMacroArgLoc(OrigLoc, ExpLoc, II);
+ if (II)
+ CurrCommitMacroArgExps.emplace_back(ExpLoc, II);
}
FileEdit &FA = FileEdits[Offs];
@@ -219,6 +261,16 @@ bool EditedSource::commit(const Commit &commit) {
if (!commit.isCommitable())
return false;
+ struct CommitRAII {
+ EditedSource &Editor;
+ CommitRAII(EditedSource &Editor) : Editor(Editor) {
+ Editor.startingCommit();
+ }
+ ~CommitRAII() {
+ Editor.finishedCommit();
+ }
+ } CommitRAII(*this);
+
for (edit::Commit::edit_iterator
I = commit.edit_begin(), E = commit.edit_end(); I != E; ++I) {
const edit::Commit::Edit &edit = *I;
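CommitRAII guarantees that finishedCommit() runs on every exit path of commit(), including early returns from the loop that follows. The same guard pattern reduced to a sketch (startingCommit, finishedCommit, precondition, and doWork are hypothetical free functions here):

    void startingCommit();
    void finishedCommit();
    bool precondition();
    void doWork();

    struct CommitGuard {
      void (*Finish)();
      ~CommitGuard() { Finish(); } // runs on every path out of scope
    };

    bool process() {
      startingCommit();
      CommitGuard Guard{finishedCommit};
      if (!precondition())
        return false; // finishedCommit() still fires here
      doWork();
      return true;
    }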
@@ -312,7 +364,7 @@ static void adjustRemoval(const SourceManager &SM, const LangOptions &LangOpts,
static void applyRewrite(EditsReceiver &receiver,
StringRef text, FileOffset offs, unsigned len,
const SourceManager &SM, const LangOptions &LangOpts) {
- assert(!offs.getFID().isInvalid());
+ assert(offs.getFID().isValid());
SourceLocation Loc = SM.getLocForStartOfFile(offs.getFID());
Loc = Loc.getLocWithOffset(offs.getOffset());
assert(Loc.isFileID());
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index dd56831a3bc5..41451b91f881 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -95,7 +95,7 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
assert(&Previous == Current.Previous);
if (!Current.CanBreakBefore &&
!(State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockTypeList(Style)))
+ Current.closesBlockOrBlockTypeList(Style)))
return false;
// The opening "{" of a braced list has to be on the same line as the first
// element if it is nested in another braced init list or function call.
@@ -125,10 +125,10 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// Don't break after very short return types (e.g. "void") as that is often
// unexpected.
- if (Current.is(TT_FunctionDeclarationName) &&
- Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_None &&
- State.Column < 6)
- return false;
+ if (Current.is(TT_FunctionDeclarationName) && State.Column < 6) {
+ if (Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None)
+ return false;
+ }
return !State.Stack.back().NoLineBreak;
}
@@ -139,11 +139,12 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
return true;
if (State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockTypeList(Style))
+ Current.closesBlockOrBlockTypeList(Style))
return true;
if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
return true;
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
+ (Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName)) ||
(Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
Previous.isNot(tok::question)) ||
(!Style.BreakBeforeTernaryOperators &&
@@ -158,6 +159,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
getColumnLimit(State))
return true;
if (Current.is(TT_CtorInitializerColon) &&
+ (State.Column + State.Line->Last->TotalLength - Current.TotalLength + 2 >
+ getColumnLimit(State) ||
+ State.Stack.back().BreakBeforeParameter) &&
((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) ||
Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
return true;
@@ -225,7 +229,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
// If the return type spans multiple lines, wrap before the function name.
- if (Current.isOneOf(TT_FunctionDeclarationName, tok::kw_operator) &&
+ if ((Current.is(TT_FunctionDeclarationName) ||
+ (Current.is(tok::kw_operator) && !Previous.is(tok::coloncolon))) &&
State.Stack.back().BreakBeforeParameter)
return true;
@@ -323,8 +328,17 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth;
}
- if (Style.AlignAfterOpenBracket && Previous.opensScope() &&
- Previous.isNot(TT_ObjCMethodExpr) &&
+ // In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
+ // disallowing any further line breaks if there is no line break after the
+ // opening parenthesis. Don't break if it doesn't conserve columns.
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak &&
+ Previous.is(tok::l_paren) && State.Column > getNewLineColumn(State) &&
+ (!Previous.Previous ||
+ !Previous.Previous->isOneOf(tok::kw_for, tok::kw_while, tok::kw_switch)))
+ State.Stack.back().NoLineBreak = true;
+
+ if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
+ Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
(Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
State.Stack.back().Indent = State.Column + Spaces;
if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
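The net effect of BAS_AlwaysBreak: if the line did not break immediately after an opening parenthesis, no later break inside it is allowed, so an over-long call is forced to wrap right after the paren. Illustrative output only (not taken from the test suite):

    // BAS_Align: continuation lines align under the open paren.
    someLongFunctionName(longArgumentOne,
                         longArgumentTwo);

    // BAS_AlwaysBreak: break directly after the paren instead.
    someLongFunctionName(
        longArgumentOne, longArgumentTwo);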
@@ -500,6 +514,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// Any break on this level means that the parent level has been broken
// and we need to avoid bin packing there.
bool NestedBlockSpecialCase =
+ Style.Language != FormatStyle::LK_Cpp &&
Current.is(tok::r_brace) && State.Stack.size() > 1 &&
State.Stack[State.Stack.size() - 2].NestedBlockInlined;
if (!NestedBlockSpecialCase)
@@ -560,7 +575,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return Current.NestingLevel == 0 ? State.FirstIndent
: State.Stack.back().Indent;
if (Current.isOneOf(tok::r_brace, tok::r_square) && State.Stack.size() > 1) {
- if (Current.closesBlockTypeList(Style))
+ if (Current.closesBlockOrBlockTypeList(Style))
return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
if (Current.MatchingParen &&
Current.MatchingParen->BlockKind == BK_BracedInit)
@@ -678,8 +693,13 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.isMemberAccess())
State.Stack.back().StartOfFunctionCall =
Current.LastOperator ? 0 : State.Column;
- if (Current.is(TT_SelectorName))
+ if (Current.is(TT_SelectorName)) {
State.Stack.back().ObjCSelectorNameFound = true;
+ if (Style.IndentWrappedFunctionNames) {
+ State.Stack.back().Indent =
+ State.FirstIndent + Style.ContinuationIndentWidth;
+ }
+ }
if (Current.is(TT_CtorInitializerColon)) {
// Indent 2 from the column, so:
// SomeClass::SomeClass()
@@ -784,8 +804,8 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(Style.AlignOperands || *I < prec::Assignment) &&
(!Previous || Previous->isNot(tok::kw_return) ||
(Style.Language != FormatStyle::LK_Java && *I > 0)) &&
- (Style.AlignAfterOpenBracket || *I != prec::Comma ||
- Current.NestingLevel == 0))
+ (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
+ *I != prec::Comma || Current.NestingLevel == 0))
NewParenState.Indent =
std::max(std::max(State.Column, NewParenState.Indent),
State.Stack.back().LastSpace);
@@ -865,7 +885,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall,
State.Stack.back().NestedBlockIndent);
if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
- if (Current.opensBlockTypeList(Style)) {
+ if (Current.opensBlockOrBlockTypeList(Style)) {
NewIndent = State.Stack.back().NestedBlockIndent + Style.IndentWidth;
NewIndent = std::min(State.Column + 2, NewIndent);
++NewIndentLevel;
@@ -923,9 +943,15 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
}
}
- bool NoLineBreak = State.Stack.back().NoLineBreak ||
- (Current.is(TT_TemplateOpener) &&
- State.Stack.back().ContainsUnwrappedBuilder);
+ // Generally inherit NoLineBreak from the current scope to nested scope.
+ // However, don't do this for non-empty nested blocks, dict literals and
+ // array literals as these follow different indentation rules.
+ bool NoLineBreak =
+ Current.Children.empty() &&
+ !Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
+ (State.Stack.back().NoLineBreak ||
+ (Current.is(TT_TemplateOpener) &&
+ State.Stack.back().ContainsUnwrappedBuilder));
State.Stack.push_back(ParenState(NewIndent, NewIndentLevel, LastSpace,
AvoidBinPacking, NoLineBreak));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
@@ -964,7 +990,7 @@ void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
State.Stack.push_back(ParenState(
NewIndent, /*NewIndentLevel=*/State.Stack.back().IndentLevel + 1,
State.Stack.back().LastSpace, /*AvoidBinPacking=*/true,
- State.Stack.back().NoLineBreak));
+ /*NoLineBreak=*/false));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = true;
}
@@ -1050,7 +1076,8 @@ unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
return 0;
}
} else if (Current.is(TT_BlockComment) && Current.isTrailingComment()) {
- if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
+ if (!Style.ReflowComments ||
+ CommentPragmasRegex.match(Current.TokenText.substr(2)))
return 0;
Token.reset(new BreakableBlockComment(
Current, State.Line->Level, StartColumn, Current.OriginalColumn,
@@ -1058,7 +1085,8 @@ unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
} else if (Current.is(TT_LineComment) &&
(Current.Previous == nullptr ||
Current.Previous->isNot(TT_ImplicitStringLiteral))) {
- if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
+ if (!Style.ReflowComments ||
+ CommentPragmasRegex.match(Current.TokenText.substr(2)))
return 0;
Token.reset(new BreakableLineComment(Current, State.Line->Level,
StartColumn, /*InPPDirective=*/false,
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index 766d29274ce6..592d7201a8ac 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -135,7 +135,7 @@ inline unsigned getEscapeSequenceLength(StringRef Text) {
++I;
return I;
}
- return 2;
+ return 1 + getNumBytesForUTF8(Text[1]);
}
}
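The Encoding.h fix sizes an escape sequence by the UTF-8 width of the escaped character rather than assuming one byte. An illustrative check (assuming clang::format::encoding is in scope; getNumBytesForUTF8 comes from LLVM's ConvertUTF.h):

    // 'ü' is two UTF-8 bytes (0xC3 0xBC), so the escape sequence "\ü"
    // measures 1 + 2 = 3 bytes instead of the old fixed 2.
    unsigned Len = getEscapeSequenceLength("\\\xC3\xBC");
    assert(Len == 3);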
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 382ae819ebfd..5068fca5c44d 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -13,6 +13,7 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/Format/Format.h"
#include "ContinuationIndenter.h"
#include "TokenAnnotator.h"
#include "UnwrappedLineFormatter.h"
@@ -21,7 +22,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
@@ -37,6 +37,7 @@
using clang::format::FormatStyle;
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(std::string)
+LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::IncludeCategory)
namespace llvm {
namespace yaml {
@@ -46,6 +47,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
IO.enumCase(Value, "Java", FormatStyle::LK_Java);
IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
+ IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
}
};
@@ -98,11 +100,27 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BraceBreakingStyle> {
IO.enumCase(Value, "Stroustrup", FormatStyle::BS_Stroustrup);
IO.enumCase(Value, "Allman", FormatStyle::BS_Allman);
IO.enumCase(Value, "GNU", FormatStyle::BS_GNU);
+ IO.enumCase(Value, "WebKit", FormatStyle::BS_WebKit);
+ IO.enumCase(Value, "Custom", FormatStyle::BS_Custom);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
+ static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::RTBS_None);
+ IO.enumCase(Value, "All", FormatStyle::RTBS_All);
+ IO.enumCase(Value, "TopLevel", FormatStyle::RTBS_TopLevel);
+ IO.enumCase(Value, "TopLevelDefinitions",
+ FormatStyle::RTBS_TopLevelDefinitions);
+ IO.enumCase(Value, "AllDefinitions", FormatStyle::RTBS_AllDefinitions);
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
- static void enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
+template <>
+struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::DRTBS_None);
IO.enumCase(Value, "All", FormatStyle::DRTBS_All);
IO.enumCase(Value, "TopLevel", FormatStyle::DRTBS_TopLevel);
@@ -123,6 +141,18 @@ struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::BracketAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracketAlignmentStyle &Value) {
+ IO.enumCase(Value, "Align", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "DontAlign", FormatStyle::BAS_DontAlign);
+ IO.enumCase(Value, "AlwaysBreak", FormatStyle::BAS_AlwaysBreak);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "false", FormatStyle::BAS_DontAlign);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
@@ -197,6 +227,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
+ IO.mapOptional("AlignConsecutiveDeclarations",
+ Style.AlignConsecutiveDeclarations);
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlinesLeft);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
@@ -214,12 +246,28 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowShortLoopsOnASingleLine);
IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
Style.AlwaysBreakAfterDefinitionReturnType);
+ IO.mapOptional("AlwaysBreakAfterReturnType",
+ Style.AlwaysBreakAfterReturnType);
+ // If AlwaysBreakAfterDefinitionReturnType was specified but
+ // AlwaysBreakAfterReturnType was not, initialize the latter from the
+ // former for backwards compatibility.
+ if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
+ Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
+ if (Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All)
+ Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ else if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_TopLevel)
+ Style.AlwaysBreakAfterReturnType =
+ FormatStyle::RTBS_TopLevelDefinitions;
+ }
+
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
Style.AlwaysBreakBeforeMultilineStrings);
IO.mapOptional("AlwaysBreakTemplateDeclarations",
Style.AlwaysBreakTemplateDeclarations);
IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
+ IO.mapOptional("BraceWrapping", Style.BraceWrapping);
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
@@ -240,6 +288,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ExperimentalAutoDetectBinPacking",
Style.ExperimentalAutoDetectBinPacking);
IO.mapOptional("ForEachMacros", Style.ForEachMacros);
+ IO.mapOptional("IncludeCategories", Style.IncludeCategories);
IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
@@ -264,6 +313,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
Style.PenaltyReturnTypeOnItsOwnLine);
IO.mapOptional("PointerAlignment", Style.PointerAlignment);
+ IO.mapOptional("ReflowComments", Style.ReflowComments);
+ IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
IO.mapOptional("SpaceBeforeAssignmentOperators",
Style.SpaceBeforeAssignmentOperators);
@@ -284,6 +335,29 @@ template <> struct MappingTraits<FormatStyle> {
}
};
+template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
+ static void mapping(IO &IO, FormatStyle::BraceWrappingFlags &Wrapping) {
+ IO.mapOptional("AfterClass", Wrapping.AfterClass);
+ IO.mapOptional("AfterControlStatement", Wrapping.AfterControlStatement);
+ IO.mapOptional("AfterEnum", Wrapping.AfterEnum);
+ IO.mapOptional("AfterFunction", Wrapping.AfterFunction);
+ IO.mapOptional("AfterNamespace", Wrapping.AfterNamespace);
+ IO.mapOptional("AfterObjCDeclaration", Wrapping.AfterObjCDeclaration);
+ IO.mapOptional("AfterStruct", Wrapping.AfterStruct);
+ IO.mapOptional("AfterUnion", Wrapping.AfterUnion);
+ IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
+ IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
+ IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::IncludeCategory> {
+ static void mapping(IO &IO, FormatStyle::IncludeCategory &Category) {
+ IO.mapOptional("Regex", Category.Regex);
+ IO.mapOptional("Priority", Category.Priority);
+ }
+};
+
// Allows reading vector<FormatStyle> while keeping default values.
// IO.getContext() should contain a pointer to the FormatStyle structure that
// will be used to get default values for missing keys.
@@ -309,8 +383,8 @@ template <> struct DocumentListTraits<std::vector<FormatStyle>> {
return Seq[Index];
}
};
-}
-}
+} // namespace yaml
+} // namespace llvm
namespace clang {
namespace format {
@@ -339,21 +413,71 @@ std::string ParseErrorCategory::message(int EV) const {
llvm_unreachable("unexpected parse error");
}
+static FormatStyle expandPresets(const FormatStyle &Style) {
+ if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
+ return Style;
+ FormatStyle Expanded = Style;
+ Expanded.BraceWrapping = {false, false, false, false, false, false,
+ false, false, false, false, false};
+ switch (Style.BreakBeforeBraces) {
+ case FormatStyle::BS_Linux:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterNamespace = true;
+ break;
+ case FormatStyle::BS_Mozilla:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterEnum = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterStruct = true;
+ Expanded.BraceWrapping.AfterUnion = true;
+ break;
+ case FormatStyle::BS_Stroustrup:
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.BeforeCatch = true;
+ Expanded.BraceWrapping.BeforeElse = true;
+ break;
+ case FormatStyle::BS_Allman:
+ Expanded.BraceWrapping.AfterClass = true;
+ Expanded.BraceWrapping.AfterControlStatement = true;
+ Expanded.BraceWrapping.AfterEnum = true;
+ Expanded.BraceWrapping.AfterFunction = true;
+ Expanded.BraceWrapping.AfterNamespace = true;
+ Expanded.BraceWrapping.AfterObjCDeclaration = true;
+ Expanded.BraceWrapping.AfterStruct = true;
+ Expanded.BraceWrapping.BeforeCatch = true;
+ Expanded.BraceWrapping.BeforeElse = true;
+ break;
+ case FormatStyle::BS_GNU:
+ Expanded.BraceWrapping = {true, true, true, true, true, true,
+ true, true, true, true, true};
+ break;
+ case FormatStyle::BS_WebKit:
+ Expanded.BraceWrapping.AfterFunction = true;
+ break;
+ default:
+ break;
+ }
+ return Expanded;
+}
+
FormatStyle getLLVMStyle() {
FormatStyle LLVMStyle;
LLVMStyle.Language = FormatStyle::LK_Cpp;
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlinesLeft = false;
- LLVMStyle.AlignAfterOpenBracket = true;
+ LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
LLVMStyle.AlignOperands = true;
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AlignConsecutiveAssignments = false;
+ LLVMStyle.AlignConsecutiveDeclarations = false;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = false;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
+ LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
LLVMStyle.AlwaysBreakTemplateDeclarations = false;
@@ -362,7 +486,10 @@ FormatStyle getLLVMStyle() {
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
+ LLVMStyle.BraceWrapping = {false, false, false, false, false, false,
+ false, false, false, false, false};
LLVMStyle.BreakConstructorInitializersBeforeComma = false;
+ LLVMStyle.BreakAfterJavaFieldAnnotations = false;
LLVMStyle.ColumnLimit = 80;
LLVMStyle.CommentPragmas = "^ IWYU pragma:";
LLVMStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = false;
@@ -374,6 +501,9 @@ FormatStyle getLLVMStyle() {
LLVMStyle.ForEachMacros.push_back("foreach");
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
+ LLVMStyle.IncludeCategories = {{"^\"(llvm|llvm-c|clang|clang-c)/", 2},
+ {"^(<|\"(gtest|isl|json)/)", 3},
+ {".*", 1}};
LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
@@ -388,6 +518,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.Standard = FormatStyle::LS_Cpp11;
LLVMStyle.UseTab = FormatStyle::UT_Never;
+ LLVMStyle.ReflowComments = true;
LLVMStyle.SpacesInParentheses = false;
LLVMStyle.SpacesInSquareBrackets = false;
LLVMStyle.SpaceInEmptyParentheses = false;
@@ -406,6 +537,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.DisableFormat = false;
+ LLVMStyle.SortIncludes = true;
return LLVMStyle;
}
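expandPresets lowers each BreakBeforeBraces preset to explicit BraceWrapping flags, while BS_Custom passes the user's flags through untouched. Reproducing the Linux preset by hand is therefore just (sketch):

    FormatStyle Style = getLLVMStyle();
    Style.BreakBeforeBraces = FormatStyle::BS_Custom;
    // Exactly the three flags expandPresets sets for BS_Linux.
    Style.BraceWrapping.AfterClass = true;
    Style.BraceWrapping.AfterFunction = true;
    Style.BraceWrapping.AfterNamespace = true;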
@@ -422,6 +554,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AlwaysBreakTemplateDeclarations = true;
GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
GoogleStyle.DerivePointerAlignment = true;
+ GoogleStyle.IncludeCategories = {{"^<.*\\.h>", 1}, {"^<.*", 2}, {".*", 3}};
GoogleStyle.IndentCaseLabels = true;
GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
GoogleStyle.ObjCSpaceAfterProperty = false;
@@ -434,7 +567,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
if (Language == FormatStyle::LK_Java) {
- GoogleStyle.AlignAfterOpenBracket = false;
+ GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
GoogleStyle.AlignOperands = false;
GoogleStyle.AlignTrailingComments = false;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
@@ -445,11 +578,13 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.SpaceAfterCStyleCast = true;
GoogleStyle.SpacesBeforeTrailingComments = 1;
} else if (Language == FormatStyle::LK_JavaScript) {
+ GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
+ GoogleStyle.AlignOperands = false;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.BreakBeforeTernaryOperators = false;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.SpacesInContainerLiterals = false;
- GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
- GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
GoogleStyle.SpacesInContainerLiterals = false;
@@ -462,8 +597,9 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
FormatStyle ChromiumStyle = getGoogleStyle(Language);
if (Language == FormatStyle::LK_Java) {
ChromiumStyle.AllowShortIfStatementsOnASingleLine = true;
- ChromiumStyle.IndentWidth = 4;
+ ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
ChromiumStyle.ContinuationIndentWidth = 8;
+ ChromiumStyle.IndentWidth = 4;
} else {
ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
@@ -472,8 +608,7 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
ChromiumStyle.BinPackParameters = false;
ChromiumStyle.DerivePointerAlignment = false;
}
- ChromiumStyle.MacroBlockBegin = "^IPC_BEGIN_MESSAGE_MAP$";
- ChromiumStyle.MacroBlockBegin = "^IPC_END_MESSAGE_MAP$";
+ ChromiumStyle.SortIncludes = false;
return ChromiumStyle;
}
@@ -481,6 +616,8 @@ FormatStyle getMozillaStyle() {
FormatStyle MozillaStyle = getLLVMStyle();
MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ MozillaStyle.AlwaysBreakAfterReturnType =
+ FormatStyle::RTBS_TopLevelDefinitions;
MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
FormatStyle::DRTBS_TopLevel;
MozillaStyle.AlwaysBreakTemplateDeclarations = true;
@@ -500,11 +637,11 @@ FormatStyle getMozillaStyle() {
FormatStyle getWebKitStyle() {
FormatStyle Style = getLLVMStyle();
Style.AccessModifierOffset = -4;
- Style.AlignAfterOpenBracket = false;
+ Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
Style.AlignOperands = false;
Style.AlignTrailingComments = false;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
- Style.BreakBeforeBraces = FormatStyle::BS_Stroustrup;
+ Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializersBeforeComma = true;
Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 0;
@@ -520,6 +657,7 @@ FormatStyle getWebKitStyle() {
FormatStyle getGNUStyle() {
FormatStyle Style = getLLVMStyle();
Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_All;
+ Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
@@ -533,6 +671,7 @@ FormatStyle getGNUStyle() {
FormatStyle getNoStyle() {
FormatStyle NoStyle = getLLVMStyle();
NoStyle.DisableFormat = true;
+ NoStyle.SortIncludes = false;
return NoStyle;
}
@@ -612,7 +751,7 @@ std::string configurationAsText(const FormatStyle &Style) {
llvm::yaml::Output Output(Stream);
// We use the same mapping method for input and output, so we need a non-const
// reference here.
- FormatStyle NonConstStyle = Style;
+ FormatStyle NonConstStyle = expandPresets(Style);
Output << NonConstStyle;
return Stream.str();
}
@@ -644,6 +783,8 @@ public:
assert(FirstInLineIndex == 0);
do {
Tokens.push_back(getNextToken());
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ tryParseJSRegexLiteral();
tryMergePreviousTokens();
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
@@ -663,10 +804,6 @@ private:
return;
if (Style.Language == FormatStyle::LK_JavaScript) {
- if (tryMergeJSRegexLiteral())
- return;
- if (tryMergeEscapeSequence())
- return;
if (tryMergeTemplateString())
return;
@@ -738,96 +875,97 @@ private:
return true;
}
- // Tries to merge an escape sequence, i.e. a "\\" and the following
- // character. Use e.g. inside JavaScript regex literals.
- bool tryMergeEscapeSequence() {
- if (Tokens.size() < 2)
- return false;
- FormatToken *Previous = Tokens[Tokens.size() - 2];
- if (Previous->isNot(tok::unknown) || Previous->TokenText != "\\")
- return false;
- ++Previous->ColumnWidth;
- StringRef Text = Previous->TokenText;
- Previous->TokenText = StringRef(Text.data(), Text.size() + 1);
- resetLexer(SourceMgr.getFileOffset(Tokens.back()->Tok.getLocation()) + 1);
- Tokens.resize(Tokens.size() - 1);
- Column = Previous->OriginalColumn + Previous->ColumnWidth;
- return true;
+ // Returns \c true if \p Tok can only be followed by an operand in JavaScript.
+ bool precedesOperand(FormatToken *Tok) {
+ // NB: This is not entirely correct, as an r_paren can introduce an operand
+ // location in e.g. `if (foo) /bar/.exec(...);`. That is a rare enough
+ // corner case to not matter in practice, though.
+ return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
+ tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
+ tok::colon, tok::question, tok::tilde) ||
+ Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
+ tok::kw_else, tok::kw_new, tok::kw_delete, tok::kw_void,
+ tok::kw_typeof, Keywords.kw_instanceof,
+ Keywords.kw_in) ||
+ Tok->isBinaryOperator();
}
- // Try to determine whether the current token ends a JavaScript regex literal.
- // We heuristically assume that this is a regex literal if we find two
- // unescaped slashes on a line and the token before the first slash is one of
- // "(;,{}![:?", a binary operator or 'return', as those cannot be followed by
- // a division.
- bool tryMergeJSRegexLiteral() {
- if (Tokens.size() < 2)
- return false;
+ bool canPrecedeRegexLiteral(FormatToken *Prev) {
+ if (!Prev)
+ return true;
- // If this is a string literal with a slash inside, compute the slash's
- // offset and try to find the beginning of the regex literal.
- // Also look at tok::unknown, as it can be an unterminated char literal.
- size_t SlashInStringPos = StringRef::npos;
- if (Tokens.back()->isOneOf(tok::string_literal, tok::char_constant,
- tok::unknown)) {
- // Start search from position 1 as otherwise, this is an unknown token
- // for an unterminated /*-comment which is handled elsewhere.
- SlashInStringPos = Tokens.back()->TokenText.find('/', 1);
- if (SlashInStringPos == StringRef::npos)
- return false;
- }
+ // Regex literals can only follow after prefix unary operators, not after
+ // postfix unary operators. If the '++' is followed by a non-operand
+ // introducing token, the slash here is the operand and not the start of a
+ // regex.
+ if (Prev->isOneOf(tok::plusplus, tok::minusminus))
+ return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));
- // If a regex literal ends in "\//", this gets represented by an unknown
- // token "\" and a comment.
- bool MightEndWithEscapedSlash =
- Tokens.back()->is(tok::comment) &&
- Tokens.back()->TokenText.startswith("//") &&
- Tokens[Tokens.size() - 2]->TokenText == "\\";
- if (!MightEndWithEscapedSlash && SlashInStringPos == StringRef::npos &&
- (Tokens.back()->isNot(tok::slash) ||
- (Tokens[Tokens.size() - 2]->is(tok::unknown) &&
- Tokens[Tokens.size() - 2]->TokenText == "\\")))
+ // The previous token must introduce an operand location where regex
+ // literals can occur.
+ if (!precedesOperand(Prev))
return false;
- unsigned TokenCount = 0;
+ return true;
+ }
+
+  // Tries to parse a JavaScript regex literal starting at the current token,
+  // if it begins with a slash and is in a location where JavaScript allows
+  // regex literals. Changes the current token to a regex literal and updates
+  // its text if successful.
+ void tryParseJSRegexLiteral() {
+ FormatToken *RegexToken = Tokens.back();
+ if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
+ return;
+
+ FormatToken *Prev = nullptr;
for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
- ++TokenCount;
- auto Prev = I + 1;
- while (Prev != E && Prev[0]->is(tok::comment))
- ++Prev;
- if (I[0]->isOneOf(tok::slash, tok::slashequal) &&
- (Prev == E ||
- ((Prev[0]->isOneOf(tok::l_paren, tok::semi, tok::l_brace,
- tok::r_brace, tok::exclaim, tok::l_square,
- tok::colon, tok::comma, tok::question,
- tok::kw_return) ||
- Prev[0]->isBinaryOperator())))) {
- unsigned LastColumn = Tokens.back()->OriginalColumn;
- SourceLocation Loc = Tokens.back()->Tok.getLocation();
- if (MightEndWithEscapedSlash) {
- // This regex literal ends in '\//'. Skip past the '//' of the last
- // token and re-start lexing from there.
- resetLexer(SourceMgr.getFileOffset(Loc) + 2);
- } else if (SlashInStringPos != StringRef::npos) {
- // This regex literal ends in a string_literal with a slash inside.
- // Calculate end column and reset lexer appropriately.
- resetLexer(SourceMgr.getFileOffset(Loc) + SlashInStringPos + 1);
- LastColumn += SlashInStringPos;
- }
- Tokens.resize(Tokens.size() - TokenCount);
- Tokens.back()->Tok.setKind(tok::unknown);
- Tokens.back()->Type = TT_RegexLiteral;
- // Treat regex literals like other string_literals.
- Tokens.back()->Tok.setKind(tok::string_literal);
- Tokens.back()->ColumnWidth += LastColumn - I[0]->OriginalColumn;
- return true;
+ // NB: Because previous pointers are not initialized yet, this cannot use
+ // Token.getPreviousNonComment.
+ if ((*I)->isNot(tok::comment)) {
+ Prev = *I;
+ break;
}
+ }
- // There can't be a newline inside a regex literal.
- if (I[0]->NewlinesBefore > 0)
- return false;
+ if (!canPrecedeRegexLiteral(Prev))
+ return;
+
+ // 'Manually' lex ahead in the current file buffer.
+ const char *Offset = Lex->getBufferLocation();
+ const char *RegexBegin = Offset - RegexToken->TokenText.size();
+ StringRef Buffer = Lex->getBuffer();
+ bool InCharacterClass = false;
+ bool HaveClosingSlash = false;
+ for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
+      // Regular expressions are terminated by a '/' unless it is escaped
+      // with '\' or appears inside a character class between '[' and ']'.
+      // See http://www.ecma-international.org/ecma-262/5.1/#sec-7.8.5.
+ switch (*Offset) {
+ case '\\':
+ // Skip the escaped character.
+ ++Offset;
+ break;
+ case '[':
+ InCharacterClass = true;
+ break;
+ case ']':
+ InCharacterClass = false;
+ break;
+ case '/':
+ if (!InCharacterClass)
+ HaveClosingSlash = true;
+ break;
+ }
}
- return false;
+
+ RegexToken->Type = TT_RegexLiteral;
+ // Treat regex literals like other string_literals.
+ RegexToken->Tok.setKind(tok::string_literal);
+ RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
+ RegexToken->ColumnWidth = RegexToken->TokenText.size();
+
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
bool tryMergeTemplateString() {
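The closing-slash scan is a small state machine: '\' skips the next character, and '/' only terminates when not inside a '[...]' character class. The same loop as a self-contained function (sketch):

    #include <cstddef>
    #include <string>

    // Returns the length of a JS regex literal starting at Text[0] == '/',
    // or std::string::npos if no unescaped closing slash is found.
    size_t regexLiteralLength(const std::string &Text) {
      bool InClass = false;
      for (size_t I = 1; I < Text.size(); ++I) {
        char C = Text[I];
        if (C == '\\')
          ++I; // skip the escaped character
        else if (C == '[')
          InClass = true;
        else if (C == ']')
          InClass = false;
        else if (C == '/' && !InClass)
          return I + 1; // include the closing slash
      }
      return std::string::npos;
    }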
@@ -1138,7 +1276,13 @@ private:
FormatTok->Tok.setIdentifierInfo(&Info);
FormatTok->Tok.setKind(Info.getTokenID());
if (Style.Language == FormatStyle::LK_Java &&
- FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete)) {
+ FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
+ tok::kw_operator)) {
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
+ tok::kw_operator)) {
FormatTok->Tok.setKind(tok::identifier);
FormatTok->Tok.setIdentifierInfo(nullptr);
}
@@ -1485,11 +1629,46 @@ private:
return Text.count('\r') * 2 > Text.count('\n');
}
+ bool
+ hasCpp03IncompatibleFormat(const SmallVectorImpl<AnnotatedLine *> &Lines) {
+ for (const AnnotatedLine* Line : Lines) {
+ if (hasCpp03IncompatibleFormat(Line->Children))
+ return true;
+ for (FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next) {
+ if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
+ if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
+ return true;
+ if (Tok->is(TT_TemplateCloser) &&
+ Tok->Previous->is(TT_TemplateCloser))
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ int countVariableAlignments(const SmallVectorImpl<AnnotatedLine *> &Lines) {
+ int AlignmentDiff = 0;
+ for (const AnnotatedLine* Line : Lines) {
+ AlignmentDiff += countVariableAlignments(Line->Children);
+ for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
+ if (!Tok->is(TT_PointerOrReference))
+ continue;
+ bool SpaceBefore =
+ Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
+ bool SpaceAfter = Tok->Next->WhitespaceRange.getBegin() !=
+ Tok->Next->WhitespaceRange.getEnd();
+ if (SpaceBefore && !SpaceAfter)
+ ++AlignmentDiff;
+ if (!SpaceBefore && SpaceAfter)
+ --AlignmentDiff;
+ }
+ }
+ return AlignmentDiff;
+ }
+
void
deriveLocalStyle(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
- unsigned CountBoundToVariable = 0;
- unsigned CountBoundToType = 0;
- bool HasCpp03IncompatibleFormat = false;
bool HasBinPackedFunction = false;
bool HasOnePerLineFunction = false;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
@@ -1497,25 +1676,6 @@ private:
continue;
FormatToken *Tok = AnnotatedLines[i]->First->Next;
while (Tok->Next) {
- if (Tok->is(TT_PointerOrReference)) {
- bool SpacesBefore =
- Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
- bool SpacesAfter = Tok->Next->WhitespaceRange.getBegin() !=
- Tok->Next->WhitespaceRange.getEnd();
- if (SpacesBefore && !SpacesAfter)
- ++CountBoundToVariable;
- else if (!SpacesBefore && SpacesAfter)
- ++CountBoundToType;
- }
-
- if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
- if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
- HasCpp03IncompatibleFormat = true;
- if (Tok->is(TT_TemplateCloser) &&
- Tok->Previous->is(TT_TemplateCloser))
- HasCpp03IncompatibleFormat = true;
- }
-
if (Tok->PackingKind == PPK_BinPacked)
HasBinPackedFunction = true;
if (Tok->PackingKind == PPK_OnePerLine)
@@ -1524,16 +1684,14 @@ private:
Tok = Tok->Next;
}
}
- if (Style.DerivePointerAlignment) {
- if (CountBoundToType > CountBoundToVariable)
- Style.PointerAlignment = FormatStyle::PAS_Left;
- else if (CountBoundToType < CountBoundToVariable)
- Style.PointerAlignment = FormatStyle::PAS_Right;
- }
- if (Style.Standard == FormatStyle::LS_Auto) {
- Style.Standard = HasCpp03IncompatibleFormat ? FormatStyle::LS_Cpp11
- : FormatStyle::LS_Cpp03;
- }
+ if (Style.DerivePointerAlignment)
+ Style.PointerAlignment = countVariableAlignments(AnnotatedLines) <= 0
+ ? FormatStyle::PAS_Left
+ : FormatStyle::PAS_Right;
+ if (Style.Standard == FormatStyle::LS_Auto)
+ Style.Standard = hasCpp03IncompatibleFormat(AnnotatedLines)
+ ? FormatStyle::LS_Cpp11
+ : FormatStyle::LS_Cpp03;
BinPackInconclusiveFunctions =
HasBinPackedFunction || !HasOnePerLineFunction;
}
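countVariableAlignments nets the two pointer styles over the whole file: "T *v" (space before the star, none after) votes +1, "T* v" votes -1, and DerivePointerAlignment picks PAS_Left for a total of zero or less, PAS_Right otherwise. For example:

    int* a; // -1: star binds to the type
    int* b; // -1
    int *c; // +1: star binds to the variable
    // AlignmentDiff = -1 <= 0, so the derived style is PAS_Left.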
@@ -1558,15 +1716,175 @@ private:
bool BinPackInconclusiveFunctions;
};
+struct IncludeDirective {
+ StringRef Filename;
+ StringRef Text;
+ unsigned Offset;
+ int Category;
+};
+
} // end anonymous namespace
+// Determines whether 'Ranges' intersects with ('Start', 'End').
+static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
+ unsigned End) {
+ for (auto Range : Ranges) {
+ if (Range.getOffset() < End &&
+ Range.getOffset() + Range.getLength() > Start)
+ return true;
+ }
+ return false;
+}
+
+// Sorts a block of includes given by 'Includes' alphabetically, adding the
+// necessary replacement to 'Replaces'. 'Includes' must be in strict source
+// order.
+static void sortIncludes(const FormatStyle &Style,
+ const SmallVectorImpl<IncludeDirective> &Includes,
+ ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ tooling::Replacements &Replaces, unsigned *Cursor) {
+ if (!affectsRange(Ranges, Includes.front().Offset,
+ Includes.back().Offset + Includes.back().Text.size()))
+ return;
+ SmallVector<unsigned, 16> Indices;
+ for (unsigned i = 0, e = Includes.size(); i != e; ++i)
+ Indices.push_back(i);
+ std::sort(Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
+ return std::tie(Includes[LHSI].Category, Includes[LHSI].Filename) <
+ std::tie(Includes[RHSI].Category, Includes[RHSI].Filename);
+ });
+
+ // If the #includes are out of order, we generate a single replacement fixing
+ // the entire block. Otherwise, no replacement is generated.
+ bool OutOfOrder = false;
+ for (unsigned i = 1, e = Indices.size(); i != e; ++i) {
+ if (Indices[i] != i) {
+ OutOfOrder = true;
+ break;
+ }
+ }
+ if (!OutOfOrder)
+ return;
+
+ std::string result;
+ bool CursorMoved = false;
+ for (unsigned Index : Indices) {
+ if (!result.empty())
+ result += "\n";
+ result += Includes[Index].Text;
+
+ if (Cursor && !CursorMoved) {
+ unsigned Start = Includes[Index].Offset;
+ unsigned End = Start + Includes[Index].Text.size();
+ if (*Cursor >= Start && *Cursor < End) {
+ *Cursor = Includes.front().Offset + result.size() + *Cursor - End;
+ CursorMoved = true;
+ }
+ }
+ }
+
+ // Sorting #includes shouldn't change their total number of characters.
+ // This would otherwise mess up 'Ranges'.
+ assert(result.size() ==
+ Includes.back().Offset + Includes.back().Text.size() -
+ Includes.front().Offset);
+
+ Replaces.insert(tooling::Replacement(FileName, Includes.front().Offset,
+ result.size(), result));
+}
+
+tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName, unsigned *Cursor) {
+ tooling::Replacements Replaces;
+ if (!Style.SortIncludes)
+ return Replaces;
+
+ unsigned Prev = 0;
+ unsigned SearchFrom = 0;
+ llvm::Regex IncludeRegex(
+ R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))");
+ SmallVector<StringRef, 4> Matches;
+ SmallVector<IncludeDirective, 16> IncludesInBlock;
+
+ // In compiled files, consider the first #include to be the main #include of
+ // the file if it is not a system #include. This ensures that the header
+ // doesn't have hidden dependencies
+ // (http://llvm.org/docs/CodingStandards.html#include-style).
+ //
+ // FIXME: Do some sanity checking, e.g. edit distance of the base name, to fix
+ // cases where the first #include is unlikely to be the main header.
+ bool IsSource = FileName.endswith(".c") || FileName.endswith(".cc") ||
+ FileName.endswith(".cpp") || FileName.endswith(".c++") ||
+ FileName.endswith(".cxx") || FileName.endswith(".m") ||
+ FileName.endswith(".mm");
+ StringRef FileStem = llvm::sys::path::stem(FileName);
+ bool FirstIncludeBlock = true;
+ bool MainIncludeFound = false;
+
+ // Create pre-compiled regular expressions for the #include categories.
+ SmallVector<llvm::Regex, 4> CategoryRegexs;
+ for (const auto &Category : Style.IncludeCategories)
+ CategoryRegexs.emplace_back(Category.Regex);
+
+ bool FormattingOff = false;
+
+ for (;;) {
+ auto Pos = Code.find('\n', SearchFrom);
+ StringRef Line =
+ Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
+
+ StringRef Trimmed = Line.trim();
+ if (Trimmed == "// clang-format off")
+ FormattingOff = true;
+ else if (Trimmed == "// clang-format on")
+ FormattingOff = false;
+
+ if (!FormattingOff && !Line.endswith("\\")) {
+ if (IncludeRegex.match(Line, &Matches)) {
+ StringRef IncludeName = Matches[2];
+ int Category = INT_MAX;
+ for (unsigned i = 0, e = CategoryRegexs.size(); i != e; ++i) {
+ if (CategoryRegexs[i].match(IncludeName)) {
+ Category = Style.IncludeCategories[i].Priority;
+ break;
+ }
+ }
+ if (IsSource && !MainIncludeFound && Category > 0 &&
+ FirstIncludeBlock && IncludeName.startswith("\"")) {
+ StringRef HeaderStem =
+ llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
+ if (FileStem.startswith(HeaderStem)) {
+ Category = 0;
+ MainIncludeFound = true;
+ }
+ }
+ IncludesInBlock.push_back({IncludeName, Line, Prev, Category});
+ } else if (!IncludesInBlock.empty()) {
+ sortIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces,
+ Cursor);
+ IncludesInBlock.clear();
+ FirstIncludeBlock = false;
+ }
+ Prev = Pos + 1;
+ }
+ if (Pos == StringRef::npos || Pos + 1 == Code.size())
+ break;
+ SearchFrom = Pos + 1;
+ }
+ if (!IncludesInBlock.empty())
+ sortIncludes(Style, IncludesInBlock, Ranges, FileName, Replaces, Cursor);
+ return Replaces;
+}
+
tooling::Replacements reformat(const FormatStyle &Style,
SourceManager &SourceMgr, FileID ID,
ArrayRef<CharSourceRange> Ranges,
bool *IncompleteFormat) {
- if (Style.DisableFormat)
+ FormatStyle Expanded = expandPresets(Style);
+ if (Expanded.DisableFormat)
return tooling::Replacements();
- Formatter formatter(Style, SourceMgr, ID, Ranges);
+ Formatter formatter(Expanded, SourceMgr, ID, Ranges);
return formatter.format(IncompleteFormat);
}
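The new sortIncludes entry point returns tooling::Replacements just as reformat does, so callers can compute and apply both in one pass. Minimal usage (sketch; applying the replacements via clang::tooling is left out):

    std::string Code = "#include \"b.h\"\n#include \"a.h\"\n";
    std::vector<tooling::Range> Ranges(1, tooling::Range(0, Code.size()));
    tooling::Replacements Replaces =
        sortIncludes(getLLVMStyle(), Code, Ranges, "input.cpp", nullptr);
    // One replacement now reorders the two includes alphabetically.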
@@ -1576,18 +1894,17 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
if (Style.DisableFormat)
return tooling::Replacements();
- FileManager Files((FileSystemOptions()));
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
new DiagnosticOptions);
SourceManager SourceMgr(Diagnostics, Files);
- std::unique_ptr<llvm::MemoryBuffer> Buf =
- llvm::MemoryBuffer::getMemBuffer(Code, FileName);
- const clang::FileEntry *Entry =
- Files.getVirtualFile(FileName, Buf->getBufferSize(), 0);
- SourceMgr.overrideFileContents(Entry, std::move(Buf));
- FileID ID =
- SourceMgr.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);
+ InMemoryFileSystem->addFile(FileName, 0,
+ llvm::MemoryBuffer::getMemBuffer(Code, FileName));
+ FileID ID = SourceMgr.createFileID(Files.getFile(FileName), SourceLocation(),
+ clang::SrcMgr::C_User);
SourceLocation StartOfFile = SourceMgr.getLocForStartOfFile(ID);
std::vector<CharSourceRange> CharRanges;
for (const tooling::Range &Range : Ranges) {
@@ -1610,6 +1927,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.ObjC1 = 1;
LangOpts.ObjC2 = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
+ LangOpts.DeclSpecKeyword = 1; // To get __declspec.
return LangOpts;
}
@@ -1625,15 +1943,15 @@ const char *StyleOptionHelpDescription =
" -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
- if (FileName.endswith(".java")) {
+ if (FileName.endswith(".java"))
return FormatStyle::LK_Java;
- } else if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts")) {
- // JavaScript or TypeScript.
- return FormatStyle::LK_JavaScript;
- } else if (FileName.endswith_lower(".proto") ||
- FileName.endswith_lower(".protodevel")) {
+ if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts"))
+ return FormatStyle::LK_JavaScript; // JavaScript or TypeScript.
+ if (FileName.endswith_lower(".proto") ||
+ FileName.endswith_lower(".protodevel"))
return FormatStyle::LK_Proto;
- }
+ if (FileName.endswith_lower(".td"))
+ return FormatStyle::LK_TableGen;
return FormatStyle::LK_Cpp;
}
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 6c244c316604..63af0d6088d1 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#include "FormatToken.h"
#include "ContinuationIndenter.h"
+#include "FormatToken.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
@@ -80,8 +80,8 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
// Ensure that we start on the opening brace.
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
- if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->BlockKind == BK_Block ||
- LBrace->Type == TT_DictLiteral ||
+ if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
+ LBrace->BlockKind == BK_Block || LBrace->Type == TT_DictLiteral ||
LBrace->Next->Type == TT_DesignatedInitializerPeriod)
return 0;
@@ -144,7 +144,8 @@ static unsigned CodePointsBetween(const FormatToken *Begin,
void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// FIXME: At some point we might want to do this for other lists, too.
- if (!Token->MatchingParen || Token->isNot(tok::l_brace))
+ if (!Token->MatchingParen ||
+ !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare))
return;
// In C++11 braced list style, we should not format in columns unless they
@@ -154,8 +155,14 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
Commas.size() < 19)
return;
+ // Limit column layout for JavaScript array initializers to 20 or more items
+ // for now to introduce it carefully. We can become more aggressive if this
+ // proves necessary.
+ if (Token->is(TT_ArrayInitializerLSquare) && Commas.size() < 19)
+ return;
+
// Column format doesn't really make sense if we don't align after brackets.
- if (!Style.AlignAfterOpenBracket)
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
return;
FormatToken *ItemBegin = Token->Next;
@@ -183,7 +190,8 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemEnd = Token->MatchingParen;
const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
- if (Style.Cpp11BracedListStyle) {
+ if (Style.Cpp11BracedListStyle &&
+ !ItemEnd->Previous->isTrailingComment()) {
// In Cpp11 braced list style, the } and possibly other subsequent
// tokens will need to stay on a line with the last element.
while (ItemEnd->Next && !ItemEnd->Next->CanBreakBefore)
@@ -212,7 +220,8 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// Don't use column layout for nested lists, lists with few elements and in
// presence of separating comments.
- if (Token->NestingLevel != 0 || Commas.size() < 5 || HasSeparatingComment)
+ if ((Token->NestingLevel != 0 && Token->is(tok::l_brace)) ||
+ Commas.size() < 5 || HasSeparatingComment)
return;
// We can never place more than ColumnLimit / 3 items in a row (because of the
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index f335eda086c0..78bc0edc45c0 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -283,6 +283,10 @@ struct FormatToken {
bool is(const IdentifierInfo *II) const {
return II && II == Tok.getIdentifierInfo();
}
+ bool is(tok::PPKeywordKind Kind) const {
+ return Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->getPPKeywordID() == Kind;
+ }
template <typename A, typename B> bool isOneOf(A K1, B K2) const {
return is(K1) || is(K2);
}
@@ -408,16 +412,16 @@ struct FormatToken {
/// \brief Returns \c true if this tokens starts a block-type list, i.e. a
/// list that should be indented with a block indent.
- bool opensBlockTypeList(const FormatStyle &Style) const {
+ bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
return is(TT_ArrayInitializerLSquare) ||
(is(tok::l_brace) &&
(BlockKind == BK_Block || is(TT_DictLiteral) ||
(!Style.Cpp11BracedListStyle && NestingLevel == 0)));
}
- /// \brief Same as opensBlockTypeList, but for the closing token.
- bool closesBlockTypeList(const FormatStyle &Style) const {
- return MatchingParen && MatchingParen->opensBlockTypeList(Style);
+ /// \brief Same as opensBlockOrBlockTypeList, but for the closing token.
+ bool closesBlockOrBlockTypeList(const FormatStyle &Style) const {
+ return MatchingParen && MatchingParen->opensBlockOrBlockTypeList(Style);
}
private:
@@ -521,6 +525,8 @@ private:
/// properly supported by Clang's lexer.
struct AdditionalKeywords {
AdditionalKeywords(IdentifierTable &IdentTable) {
+ kw_final = &IdentTable.get("final");
+ kw_override = &IdentTable.get("override");
kw_in = &IdentTable.get("in");
kw_CF_ENUM = &IdentTable.get("CF_ENUM");
kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
@@ -530,11 +536,13 @@ struct AdditionalKeywords {
kw_finally = &IdentTable.get("finally");
kw_function = &IdentTable.get("function");
kw_import = &IdentTable.get("import");
+ kw_is = &IdentTable.get("is");
+ kw_let = &IdentTable.get("let");
kw_var = &IdentTable.get("var");
kw_abstract = &IdentTable.get("abstract");
+ kw_assert = &IdentTable.get("assert");
kw_extends = &IdentTable.get("extends");
- kw_final = &IdentTable.get("final");
kw_implements = &IdentTable.get("implements");
kw_instanceof = &IdentTable.get("instanceof");
kw_interface = &IdentTable.get("interface");
@@ -546,6 +554,7 @@ struct AdditionalKeywords {
kw_mark = &IdentTable.get("mark");
+ kw_extend = &IdentTable.get("extend");
kw_option = &IdentTable.get("option");
kw_optional = &IdentTable.get("optional");
kw_repeated = &IdentTable.get("repeated");
@@ -553,11 +562,14 @@ struct AdditionalKeywords {
kw_returns = &IdentTable.get("returns");
kw_signals = &IdentTable.get("signals");
+ kw_qsignals = &IdentTable.get("Q_SIGNALS");
kw_slots = &IdentTable.get("slots");
kw_qslots = &IdentTable.get("Q_SLOTS");
}
// Context sensitive keywords.
+ IdentifierInfo *kw_final;
+ IdentifierInfo *kw_override;
IdentifierInfo *kw_in;
IdentifierInfo *kw_CF_ENUM;
IdentifierInfo *kw_CF_OPTIONS;
@@ -569,12 +581,14 @@ struct AdditionalKeywords {
IdentifierInfo *kw_finally;
IdentifierInfo *kw_function;
IdentifierInfo *kw_import;
+ IdentifierInfo *kw_is;
+ IdentifierInfo *kw_let;
IdentifierInfo *kw_var;
// Java keywords.
IdentifierInfo *kw_abstract;
+ IdentifierInfo *kw_assert;
IdentifierInfo *kw_extends;
- IdentifierInfo *kw_final;
IdentifierInfo *kw_implements;
IdentifierInfo *kw_instanceof;
IdentifierInfo *kw_interface;
@@ -587,6 +601,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_mark;
// Proto keywords.
+ IdentifierInfo *kw_extend;
IdentifierInfo *kw_option;
IdentifierInfo *kw_optional;
IdentifierInfo *kw_repeated;
@@ -595,6 +610,7 @@ struct AdditionalKeywords {
// QT keywords.
IdentifierInfo *kw_signals;
+ IdentifierInfo *kw_qsignals;
IdentifierInfo *kw_slots;
IdentifierInfo *kw_qslots;
};
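Because each keyword above is fetched exactly once from the IdentifierTable, which interns strings, the is(const IdentifierInfo *) overload can compare pointers instead of spellings. A sketch of the intended use (Tok is a hypothetical FormatToken):

    IdentifierTable Table(getFormattingLangOpts(Style));
    AdditionalKeywords Keywords(Table);
    // Pointer comparison against the interned IdentifierInfo.
    bool IsOverride = Tok.is(Keywords.kw_override);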
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index ea8b30de8dfb..c3ea935e727b 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -120,8 +120,9 @@ private:
}
if (Left->Previous &&
- (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_if,
- tok::kw_while, tok::l_paren, tok::comma) ||
+ (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_decltype,
+ tok::kw_if, tok::kw_while, tok::l_paren,
+ tok::comma) ||
Left->Previous->is(TT_BinaryOperator))) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
@@ -147,6 +148,10 @@ private:
} else if (Left->Previous && Left->Previous->MatchingParen &&
Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
Contexts.back().IsExpression = false;
+ } else if (!Line.MustBeDeclaration && !Line.InPPDirective) {
+ bool IsForOrCatch =
+ Left->Previous && Left->Previous->isOneOf(tok::kw_for, tok::kw_catch);
+ Contexts.back().IsExpression = !IsForOrCatch;
}
if (StartsObjCMethodExpr) {
@@ -154,7 +159,8 @@ private:
Left->Type = TT_ObjCMethodExpr;
}
- bool MightBeFunctionType = CurrentToken->is(tok::star);
+ bool MightBeFunctionType = CurrentToken->isOneOf(tok::star, tok::amp) &&
+ !Contexts[Contexts.size() - 2].IsExpression;
bool HasMultipleLines = false;
bool HasMultipleParametersOnALine = false;
bool MightBeObjCForRangeLoop =
@@ -188,7 +194,7 @@ private:
if (MightBeFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
(CurrentToken->Next->is(tok::l_square) &&
- !Contexts.back().IsExpression)))
+ Line.MustBeDeclaration)))
Left->Type = TT_FunctionTypeLParen;
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -371,9 +377,11 @@ private:
updateParameterCount(Left, CurrentToken);
if (CurrentToken->isOneOf(tok::colon, tok::l_brace)) {
FormatToken *Previous = CurrentToken->getPreviousNonComment();
- if ((CurrentToken->is(tok::colon) ||
+ if (((CurrentToken->is(tok::colon) &&
+ (!Contexts.back().ColonIsDictLiteral ||
+ Style.Language != FormatStyle::LK_Cpp)) ||
Style.Language == FormatStyle::LK_Proto) &&
- Previous->is(tok::identifier))
+ Previous->Tok.getIdentifierInfo())
Previous->Type = TT_SelectorName;
if (CurrentToken->is(tok::colon) ||
Style.Language == FormatStyle::LK_JavaScript)
@@ -387,12 +395,8 @@ private:
}
void updateParameterCount(FormatToken *Left, FormatToken *Current) {
- if (Current->is(TT_LambdaLSquare) ||
- (Current->is(tok::caret) && Current->is(TT_UnaryOperator)) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- Current->is(Keywords.kw_function))) {
+ if (Current->is(tok::l_brace) && !Current->is(TT_DictLiteral))
++Left->BlockParameterCount;
- }
if (Current->is(tok::comma)) {
++Left->ParameterCount;
if (!Left->Role)
@@ -500,6 +504,19 @@ private:
return false;
break;
case tok::l_paren:
+ // When faced with 'operator()()', the kw_operator handler incorrectly
+ // marks the first l_paren as an OverloadedOperatorLParen. Here, we make
+ // the first two parens OverloadedOperators and the second l_paren an
+ // OverloadedOperatorLParen.
+ if (Tok->Previous &&
+ Tok->Previous->is(tok::r_paren) &&
+ Tok->Previous->MatchingParen &&
+ Tok->Previous->MatchingParen->is(TT_OverloadedOperatorLParen)) {
+ Tok->Previous->Type = TT_OverloadedOperator;
+ Tok->Previous->MatchingParen->Type = TT_OverloadedOperator;
+ Tok->Type = TT_OverloadedOperatorLParen;
+ }
+
if (!parseParens())
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
@@ -715,7 +732,7 @@ public:
while (CurrentToken) {
if (CurrentToken->is(tok::kw_virtual))
KeywordVirtualFound = true;
- if (IsImportStatement(*CurrentToken))
+ if (isImportStatement(*CurrentToken))
ImportStatement = true;
if (!consumeToken())
return LT_Invalid;
@@ -736,14 +753,15 @@ public:
}
private:
- bool IsImportStatement(const FormatToken &Tok) {
+ bool isImportStatement(const FormatToken &Tok) {
// FIXME: Closure-library specific stuff should not be hard-coded but be
// configurable.
return Style.Language == FormatStyle::LK_JavaScript &&
Tok.TokenText == "goog" && Tok.Next && Tok.Next->is(tok::period) &&
Tok.Next->Next && (Tok.Next->Next->TokenText == "module" ||
+ Tok.Next->Next->TokenText == "provide" ||
Tok.Next->Next->TokenText == "require" ||
- Tok.Next->Next->TokenText == "provide") &&
+ Tok.Next->Next->TokenText == "setTestOnly") &&
Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
}
@@ -818,12 +836,13 @@ private:
void modifyContext(const FormatToken &Current) {
if (Current.getPrecedence() == prec::Assignment &&
- !Line.First->isOneOf(tok::kw_template, tok::kw_using) &&
+ !Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) &&
(!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
Contexts.back().IsExpression = true;
if (!Line.startsWith(TT_UnaryOperator)) {
for (FormatToken *Previous = Current.Previous;
- Previous && !Previous->isOneOf(tok::comma, tok::semi);
+ Previous && Previous->Previous &&
+ !Previous->Previous->isOneOf(tok::comma, tok::semi);
Previous = Previous->Previous) {
if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
Previous = Previous->MatchingParen;
@@ -845,19 +864,8 @@ private:
Contexts.back().IsExpression = true;
} else if (Current.is(TT_TrailingReturnArrow)) {
Contexts.back().IsExpression = false;
- } else if (Current.is(TT_LambdaArrow)) {
+ } else if (Current.is(TT_LambdaArrow) || Current.is(Keywords.kw_assert)) {
Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java;
- } else if (Current.is(tok::l_paren) && !Line.MustBeDeclaration &&
- !Line.InPPDirective &&
- (!Current.Previous ||
- Current.Previous->isNot(tok::kw_decltype))) {
- bool ParametersOfFunctionType =
- Current.Previous && Current.Previous->is(tok::r_paren) &&
- Current.Previous->MatchingParen &&
- Current.Previous->MatchingParen->is(TT_FunctionTypeLParen);
- bool IsForOrCatch = Current.Previous &&
- Current.Previous->isOneOf(tok::kw_for, tok::kw_catch);
- Contexts.back().IsExpression = !ParametersOfFunctionType && !IsForOrCatch;
} else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
for (FormatToken *Previous = Current.Previous;
Previous && Previous->isOneOf(tok::star, tok::amp);
@@ -891,7 +899,7 @@ private:
(!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
Contexts.back().FirstStartOfName = &Current;
Current.Type = TT_StartOfName;
- } else if (Current.is(tok::kw_auto)) {
+ } else if (Current.isOneOf(tok::kw_auto, tok::kw___auto_type)) {
AutoFound = true;
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
@@ -1035,82 +1043,101 @@ private:
PreviousNotConst->MatchingParen->Previous->is(tok::kw_decltype))
return true;
- return (!IsPPKeyword && PreviousNotConst->is(tok::identifier)) ||
+ return (!IsPPKeyword &&
+ PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto)) ||
PreviousNotConst->is(TT_PointerOrReference) ||
PreviousNotConst->isSimpleTypeSpecifier();
}
/// \brief Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
- FormatToken *LeftOfParens = nullptr;
- if (Tok.MatchingParen)
- LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
- if (LeftOfParens && LeftOfParens->is(tok::r_paren) &&
- LeftOfParens->MatchingParen)
- LeftOfParens = LeftOfParens->MatchingParen->Previous;
- if (LeftOfParens && LeftOfParens->is(tok::r_square) &&
- LeftOfParens->MatchingParen &&
- LeftOfParens->MatchingParen->is(TT_LambdaLSquare))
+ // C-style casts are only used in C++ and Java.
+ if (Style.Language != FormatStyle::LK_Cpp &&
+ Style.Language != FormatStyle::LK_Java)
+ return false;
+
+ // Empty parens aren't casts and there are no casts at the end of the line.
+ if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
return false;
- if (Tok.Next) {
- if (Tok.Next->is(tok::question))
+
+ FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
+ if (LeftOfParens) {
+ // If there is an opening parenthesis left of the current parentheses,
+ // look past it as these might be chained casts.
+ if (LeftOfParens->is(tok::r_paren)) {
+ if (!LeftOfParens->MatchingParen ||
+ !LeftOfParens->MatchingParen->Previous)
+ return false;
+ LeftOfParens = LeftOfParens->MatchingParen->Previous;
+ }
+
+ // If there is an identifier (or with a few exceptions a keyword) right
+ // before the parentheses, this is unlikely to be a cast.
+ if (LeftOfParens->Tok.getIdentifierInfo() &&
+ !LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
+ tok::kw_delete))
return false;
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Tok.Next->is(Keywords.kw_in))
+
+ // Certain other tokens right before the parentheses also signal that
+ // this cannot be a cast.
+ if (LeftOfParens->isOneOf(tok::at, tok::r_square, TT_OverloadedOperator,
+ TT_TemplateCloser))
return false;
- if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
- return true;
}
- bool IsCast = false;
- bool ParensAreEmpty = Tok.Previous == Tok.MatchingParen;
+
+ if (Tok.Next->is(tok::question))
+ return false;
+
+ // As Java has no function types, a "(" after the ")" likely means that this
+ // is a cast.
+ if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
+ return true;
+
+ // If a (non-string) literal follows, this is likely a cast.
+ if (Tok.Next->isNot(tok::string_literal) &&
+ (Tok.Next->Tok.isLiteral() ||
+ Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
+ return true;
+
+ // Heuristically try to determine whether the parentheses contain a type.
bool ParensAreType =
!Tok.Previous ||
Tok.Previous->isOneOf(TT_PointerOrReference, TT_TemplateCloser) ||
Tok.Previous->isSimpleTypeSpecifier();
bool ParensCouldEndDecl =
- Tok.Next && Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace);
- bool IsSizeOfOrAlignOf =
- LeftOfParens && LeftOfParens->isOneOf(tok::kw_sizeof, tok::kw_alignof);
- if (ParensAreType && !ParensCouldEndDecl && !IsSizeOfOrAlignOf &&
- (Contexts.size() > 1 && Contexts[Contexts.size() - 2].IsExpression))
- IsCast = true;
- else if (Tok.Next && Tok.Next->isNot(tok::string_literal) &&
- (Tok.Next->Tok.isLiteral() ||
- Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
- IsCast = true;
- // If there is an identifier after the (), it is likely a cast, unless
- // there is also an identifier before the ().
- else if (LeftOfParens && Tok.Next &&
- (LeftOfParens->Tok.getIdentifierInfo() == nullptr ||
- LeftOfParens->isOneOf(tok::kw_return, tok::kw_case)) &&
- !LeftOfParens->isOneOf(TT_OverloadedOperator, tok::at,
- TT_TemplateCloser)) {
- if (Tok.Next->isOneOf(tok::identifier, tok::numeric_constant)) {
- IsCast = true;
- } else {
- // Use heuristics to recognize c style casting.
- FormatToken *Prev = Tok.Previous;
- if (Prev && Prev->isOneOf(tok::amp, tok::star))
- Prev = Prev->Previous;
-
- if (Prev && Tok.Next && Tok.Next->Next) {
- bool NextIsUnary = Tok.Next->isUnaryOperator() ||
- Tok.Next->isOneOf(tok::amp, tok::star);
- IsCast =
- NextIsUnary && !Tok.Next->is(tok::plus) &&
- Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant);
- }
+ Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
+ if (ParensAreType && !ParensCouldEndDecl)
+ return true;
- for (; Prev != Tok.MatchingParen; Prev = Prev->Previous) {
- if (!Prev ||
- !Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon)) {
- IsCast = false;
- break;
- }
- }
- }
+ // At this point, we heuristically assume that there are no casts at the
+ // start of the line; the logic above should already have caught most cases
+ // where there are, e.g. "(void)x;".
+ if (!LeftOfParens)
+ return false;
+
+ // If the following token is an identifier, this is a cast. All cases where
+ // this can be something else are handled above.
+ if (Tok.Next->is(tok::identifier))
+ return true;
+
+ if (!Tok.Next->Next)
+ return false;
+
+ // If the next token after the parentheses is a unary operator, assume
+ // that this is a cast, unless there are unexpected tokens inside the
+ // parentheses.
+ bool NextIsUnary =
+ Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
+ if (!NextIsUnary || Tok.Next->is(tok::plus) ||
+ !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant))
+ return false;
+ // Search for unexpected tokens.
+ for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
+ Prev = Prev->Previous) {
+ if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon))
+ return false;
}
- return IsCast && !ParensAreEmpty;
+ return true;
}
/// \brief Return the type of the given token assuming it is * or &.
@@ -1124,9 +1151,11 @@ private:
return TT_UnaryOperator;
const FormatToken *NextToken = Tok.getNextNonComment();
- if (!NextToken || NextToken->is(tok::arrow) ||
+ if (!NextToken ||
+ NextToken->isOneOf(tok::arrow, Keywords.kw_final,
+ Keywords.kw_override) ||
(NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
- return TT_Unknown;
+ return TT_PointerOrReference;
if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
@@ -1140,7 +1169,9 @@ private:
if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
return TT_PointerOrReference;
- if (NextToken->isOneOf(tok::kw_operator, tok::comma, tok::semi))
+ if (NextToken->is(tok::kw_operator) && !IsExpression)
+ return TT_PointerOrReference;
+ if (NextToken->isOneOf(tok::comma, tok::semi))
return TT_PointerOrReference;
if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen &&
@@ -1460,25 +1491,56 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
// This function heuristically determines whether 'Current' starts the name of a
// function declaration.
static bool isFunctionDeclarationName(const FormatToken &Current) {
- if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
- return false;
- const FormatToken *Next = Current.Next;
- for (; Next; Next = Next->Next) {
- if (Next->is(TT_TemplateOpener)) {
- Next = Next->MatchingParen;
- } else if (Next->is(tok::coloncolon)) {
- Next = Next->Next;
- if (!Next || !Next->is(tok::identifier))
- return false;
- } else if (Next->is(tok::l_paren)) {
+ auto skipOperatorName = [](const FormatToken *Next) -> const FormatToken * {
+ for (; Next; Next = Next->Next) {
+ if (Next->is(TT_OverloadedOperatorLParen))
+ return Next;
+ if (Next->is(TT_OverloadedOperator))
+ continue;
+ if (Next->isOneOf(tok::kw_new, tok::kw_delete)) {
+ // For 'new[]' and 'delete[]'.
+ if (Next->Next && Next->Next->is(tok::l_square) &&
+ Next->Next->Next && Next->Next->Next->is(tok::r_square))
+ Next = Next->Next->Next;
+ continue;
+ }
+
break;
- } else {
+ }
+ return nullptr;
+ };
+
+ const FormatToken *Next = Current.Next;
+ if (Current.is(tok::kw_operator)) {
+ if (Current.Previous && Current.Previous->is(tok::coloncolon))
+ return false;
+ Next = skipOperatorName(Next);
+ } else {
+ if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
return false;
+ for (; Next; Next = Next->Next) {
+ if (Next->is(TT_TemplateOpener)) {
+ Next = Next->MatchingParen;
+ } else if (Next->is(tok::coloncolon)) {
+ Next = Next->Next;
+ if (!Next)
+ return false;
+ if (Next->is(tok::kw_operator)) {
+ Next = skipOperatorName(Next->Next);
+ break;
+ }
+ if (!Next->is(tok::identifier))
+ return false;
+ } else if (Next->is(tok::l_paren)) {
+ break;
+ } else {
+ return false;
+ }
}
}
- if (!Next)
+
+ if (!Next || !Next->is(tok::l_paren))
return false;
- assert(Next->is(tok::l_paren));
if (Next->Next == Next->MatchingParen)
return true;
for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
@@ -1493,6 +1555,29 @@ static bool isFunctionDeclarationName(const FormatToken &Current) {
return false;
}
+bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
+ assert(Line.MightBeFunctionDecl);
+
+ if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
+ Style.AlwaysBreakAfterReturnType ==
+ FormatStyle::RTBS_TopLevelDefinitions) &&
+ Line.Level > 0)
+ return false;
+
+ switch (Style.AlwaysBreakAfterReturnType) {
+ case FormatStyle::RTBS_None:
+ return false;
+ case FormatStyle::RTBS_All:
+ case FormatStyle::RTBS_TopLevel:
+ return true;
+ case FormatStyle::RTBS_AllDefinitions:
+ case FormatStyle::RTBS_TopLevelDefinitions:
+ return Line.mightBeFunctionDefinition();
+ }
+
+ return false;
+}
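The effect of the new AlwaysBreakAfterReturnType values, sketched on a top-level function (assuming the RTBS_* semantics implemented above):

    // RTBS_All: break in declarations and definitions.
    int
    f(int x);

    // RTBS_AllDefinitions: declarations keep the return type inline.
    int f(int x);
    int
    g(int x) { return x; }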
+
void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
E = Line.Children.end();
@@ -1544,15 +1629,9 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
Current->MustBreakBefore =
Current->MustBreakBefore || mustBreakBefore(Line, *Current);
- if ((Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All ||
- (Style.AlwaysBreakAfterDefinitionReturnType ==
- FormatStyle::DRTBS_TopLevel &&
- Line.Level == 0)) &&
- InFunctionDecl && Current->is(TT_FunctionDeclarationName) &&
- !Line.Last->isOneOf(tok::semi, tok::comment)) // Only for definitions.
- // FIXME: Line.Last points to other characters than tok::semi
- // and tok::lbrace.
- Current->MustBreakBefore = true;
+ if (!Current->MustBreakBefore && InFunctionDecl &&
+ Current->is(TT_FunctionDeclarationName))
+ Current->MustBreakBefore = mustBreakForReturnType(Line);
Current->CanBreakBefore =
Current->MustBreakBefore || canBreakBefore(Line, *Current);
@@ -1636,7 +1715,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
Right.Next->is(TT_DictLiteral)))
return 1;
if (Right.is(tok::l_square)) {
- if (Style.Language == FormatStyle::LK_Proto)
+ if (Style.Language == FormatStyle::LK_Proto || Left.is(tok::r_square))
return 1;
// Slightly prefer formatting local lambda definitions like functions.
if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
@@ -1674,10 +1753,20 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 2;
if (Right.isMemberAccess()) {
- if (Left.is(tok::r_paren) && Left.MatchingParen &&
- Left.MatchingParen->ParameterCount > 0)
- return 20; // Should be smaller than breaking at a nested comma.
- return 150;
+ // Breaking before the "./->" of a chained call/member access is reasonably
+ // cheap, as formatting those with one call per line is generally
+ // desirable. In particular, it should be cheaper to break before the call
+ // than it is to break inside a call's parameters, which could lead to weird
+ // "hanging" indents. The exception is the very last "./->" to support this
+ // frequent pattern:
+ //
+ // aaaaaaaa.aaaaaaaa.bbbbbbb().ccccccccccccccccccccc(
+ // dddddddd);
+ //
+ // which might otherwise be blown up onto many lines. Here, clang-format
+ // won't produce "hanging" indents anyway as there is no other trailing
+ // call.
+ return Right.LastOperator ? 150 : 40;
}
if (Right.is(TT_TrailingAnnotation) &&
@@ -1706,7 +1795,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
return Line.MightBeFunctionDecl ? 50 : 500;
- if (Left.is(tok::l_paren) && InFunctionDecl && Style.AlignAfterOpenBracket)
+ if (Left.is(tok::l_paren) && InFunctionDecl &&
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
return 100;
if (Left.is(tok::l_paren) && Left.Previous &&
Left.Previous->isOneOf(tok::kw_if, tok::kw_for))
@@ -1718,7 +1808,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(TT_TemplateOpener))
return 100;
if (Left.opensScope()) {
- if (!Style.AlignAfterOpenBracket)
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
return 0;
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
@@ -1794,11 +1884,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::l_square) && Right.is(tok::amp))
return false;
if (Right.is(TT_PointerOrReference))
- return !(Left.is(tok::r_paren) && Left.MatchingParen &&
- (Left.MatchingParen->is(TT_OverloadedOperatorLParen) ||
- (Left.MatchingParen->Previous &&
- Left.MatchingParen->Previous->is(
- TT_FunctionDeclarationName)))) &&
+ return (Left.is(tok::r_paren) && Left.MatchingParen &&
+ (Left.MatchingParen->is(TT_OverloadedOperatorLParen) ||
+ (Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(TT_FunctionDeclarationName)))) ||
(Left.Tok.isLiteral() ||
(!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
(Style.PointerAlignment != FormatStyle::PAS_Left ||
@@ -1809,7 +1898,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
!Line.IsMultiVariableDeclStmt)))
return true;
if (Left.is(TT_PointerOrReference))
- return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
+ return Right.Tok.isLiteral() ||
+ Right.isOneOf(TT_BlockComment, Keywords.kw_final,
+ Keywords.kw_override) ||
(Right.is(tok::l_brace) && Right.BlockKind == BK_Block) ||
(!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
tok::l_paren) &&
@@ -1849,8 +1940,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return true;
return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
- (Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while,
- tok::kw_switch, tok::kw_case, TT_ForEachMacro) ||
+ (Left.isOneOf(tok::kw_if, tok::pp_elif, tok::kw_for, tok::kw_while,
+ tok::kw_switch, tok::kw_case, TT_ForEachMacro,
+ TT_ObjCForIn) ||
(Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch,
tok::kw_new, tok::kw_delete) &&
(!Left.Previous || Left.Previous->isNot(tok::period))))) ||
@@ -1895,13 +1987,16 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
} else if (Style.Language == FormatStyle::LK_Proto) {
if (Right.is(tok::period) &&
Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
- Keywords.kw_repeated))
+ Keywords.kw_repeated, Keywords.kw_extend))
return true;
if (Right.is(tok::l_paren) &&
Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
return true;
} else if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Left.isOneOf(Keywords.kw_var, TT_JsFatArrow))
+ if (Left.isOneOf(Keywords.kw_let, Keywords.kw_var, TT_JsFatArrow,
+ Keywords.kw_in))
+ return true;
+ if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
return true;
if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
return false;
@@ -1952,7 +2047,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Right.isOneOf(TT_CtorInitializerColon, TT_ObjCBlockLParen))
return true;
if (Right.is(TT_OverloadedOperatorLParen))
- return false;
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
if (Right.is(tok::colon)) {
if (Line.First->isOneOf(tok::kw_case, tok::kw_default) ||
!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
@@ -1993,7 +2088,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return Style.SpacesInAngles;
if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
- Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr))
+ (Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
+ !Right.is(tok::r_paren)))
return true;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
Right.isNot(TT_FunctionTypeLParen))
@@ -2020,7 +2116,7 @@ static bool isAllmanBrace(const FormatToken &Tok) {
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
- if (Right.NewlinesBefore > 1)
+ if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
if (Style.Language == FormatStyle::LK_JavaScript) {
@@ -2032,8 +2128,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Left.Previous && Left.Previous->is(tok::equal) &&
Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
tok::kw_const) &&
- // kw_var is a pseudo-token that's a tok::identifier, so matches above.
- !Line.startsWith(Keywords.kw_var))
+ // kw_var/kw_let are pseudo-tokens that are tok::identifier, so match
+ // above.
+ !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let))
// Object literals on the top level of a file are treated as "enum-style".
// Each key/value pair is put on a separate line, instead of bin-packing.
return true;
@@ -2047,6 +2144,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
!Left.Children.empty())
// Support AllowShortFunctionsOnASingleLine for JavaScript.
return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
+ Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty ||
(Left.NestingLevel == 0 && Line.Level == 0 &&
Style.AllowShortFunctionsOnASingleLine ==
FormatStyle::SFS_Inline);
@@ -2107,10 +2205,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_InlineASMBrace))
return Right.HasUnescapedNewline;
if (isAllmanBrace(Left) || isAllmanBrace(Right))
- return Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
- (Style.BreakBeforeBraces == FormatStyle::BS_Mozilla &&
- Line.startsWith(tok::kw_enum));
+ return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
+ (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) ||
+ (Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct);
if (Style.Language == FormatStyle::LK_Proto && Left.isNot(tok::l_brace) &&
Right.is(TT_SelectorName))
return true;
@@ -2121,7 +2218,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Style.Language == FormatStyle::LK_JavaScript) &&
Left.is(TT_LeadingJavaAnnotation) &&
Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
- Line.Last->is(tok::l_brace))
+ (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
return true;
return false;
@@ -2144,6 +2241,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
if (Left.is(TT_JsTypeColon))
return true;
+ if (Right.NestingLevel == 0 && Right.is(Keywords.kw_is))
+ return false;
}
if (Left.is(tok::at))
@@ -2186,7 +2285,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
return true;
- if (Left.ClosesTemplateDeclaration)
+ if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
return true;
if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
TT_OverloadedOperator))
@@ -2199,7 +2298,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Left.is(tok::kw_operator))
return false;
if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
- Line.Type == LT_VirtualFunctionDecl)
+ Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
return false;
if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
return false;
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index b8a6be057a6c..5329f1f3f2fc 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -86,6 +86,15 @@ public:
return startsWith(First, Tokens...);
}
+ /// \c true if this line looks like a function definition instead of a
+ /// function declaration. Asserts MightBeFunctionDecl.
+ bool mightBeFunctionDefinition() const {
+ assert(MightBeFunctionDecl);
+ // FIXME: Line.Last may point to tokens other than tok::semi
+ // and tok::l_brace.
+ return !Last->isOneOf(tok::semi, tok::comment);
+ }
+
FormatToken *First;
FormatToken *Last;
@@ -156,6 +165,8 @@ private:
bool canBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool mustBreakForReturnType(const AnnotatedLine &Line) const;
+
void printDebugInfo(const AnnotatedLine &Line);
void calculateUnbreakableTailLengths(AnnotatedLine &Line);
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index b6784b369edf..f65056907963 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -90,8 +90,8 @@ private:
return 0;
if (RootToken.isAccessSpecifier(false) ||
RootToken.isObjCAccessSpecifier() ||
- (RootToken.is(Keywords.kw_signals) && RootToken.Next &&
- RootToken.Next->is(tok::colon)))
+ (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
+ RootToken.Next && RootToken.Next->is(tok::colon)))
return Style.AccessModifierOffset;
return 0;
}
@@ -199,12 +199,12 @@ private:
return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
}
if (TheLine->Last->is(tok::l_brace)) {
- return Style.BreakBeforeBraces == FormatStyle::BS_Attach
+ return !Style.BraceWrapping.AfterFunction
? tryMergeSimpleBlock(I, E, Limit)
: 0;
}
if (I[1]->First->is(TT_FunctionLBrace) &&
- Style.BreakBeforeBraces != FormatStyle::BS_Attach) {
+ Style.BraceWrapping.AfterFunction) {
if (I[1]->Last->is(TT_LineComment))
return 0;
@@ -263,8 +263,7 @@ private:
SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
if (Limit == 0)
return 0;
- if ((Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU) &&
+ if (Style.BraceWrapping.AfterControlStatement &&
(I[1]->First->is(tok::l_brace) && !Style.AllowShortBlocksOnASingleLine))
return 0;
if (I[1]->InPPDirective != (*I)->InPPDirective ||
@@ -305,7 +304,8 @@ private:
if (Line->First->isOneOf(tok::kw_case, tok::kw_default, tok::r_brace))
break;
if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
- tok::kw_while, tok::comment))
+ tok::kw_while, tok::comment) ||
+ Line->Last->is(tok::comment))
return 0;
Length += I[1 + NumStmts]->Last->TotalLength + 1; // 1 for the space.
}
@@ -606,7 +606,7 @@ public:
/// \brief Puts all tokens into a single line.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
- bool DryRun) {
+ bool DryRun) override {
unsigned Penalty = 0;
LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
while (State.NextToken) {
@@ -629,7 +629,7 @@ public:
/// \brief Formats the line by finding the best line breaks with line lengths
/// below the column limit.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
- bool DryRun) {
+ bool DryRun) override {
LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
// If the ObjC method declaration does not fit on a line, we should format
@@ -709,7 +709,7 @@ private:
// Cut off the analysis of certain solutions if the analysis gets too
// complex. See description of IgnoreStackForComparison.
- if (Count > 10000)
+ if (Count > 50000)
Node->State.IgnoreStackForComparison = true;
if (!Seen.insert(&Node->State).second)
@@ -791,7 +791,7 @@ private:
llvm::SpecificBumpPtrAllocator<StateNode> Allocator;
};
-} // namespace
+} // anonymous namespace
unsigned
UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
@@ -812,13 +812,26 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
AdditionalIndent);
const AnnotatedLine *PreviousLine = nullptr;
const AnnotatedLine *NextLine = nullptr;
+
+ // The minimum level of consecutive lines that have been formatted.
+ unsigned RangeMinLevel = UINT_MAX;
+
for (const AnnotatedLine *Line =
Joiner.getNextMergedLine(DryRun, IndentTracker);
Line; Line = NextLine) {
const AnnotatedLine &TheLine = *Line;
unsigned Indent = IndentTracker.getIndent();
- bool FixIndentation =
- FixBadIndentation && (Indent != TheLine.First->OriginalColumn);
+
+ // We continue formatting unchanged lines to adjust their indent, e.g. if a
+ // scope was added. However, we need to carefully stop doing this when we
+ // exit the scope of affected lines to prevent indenting the entire
+ // remaining file if it is currently missing a closing brace.
+ bool ContinueFormatting =
+ TheLine.Level > RangeMinLevel ||
+ (TheLine.Level == RangeMinLevel && !TheLine.startsWith(tok::r_brace));
+
+ bool FixIndentation = (FixBadIndentation || ContinueFormatting) &&
+ Indent != TheLine.First->OriginalColumn;
bool ShouldFormat = TheLine.Affected || FixIndentation;
// We cannot format this line; if the reason is that the line had a
// parsing error, remember that.
@@ -845,6 +858,7 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
else
Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, Indent, DryRun);
+ RangeMinLevel = std::min(RangeMinLevel, TheLine.Level);
} else {
// If no token in the current line is affected, we still need to format
// affected children.
@@ -875,6 +889,7 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
}
NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
+ RangeMinLevel = UINT_MAX;
}
if (!DryRun)
markFinalized(TheLine.First);
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 97fd98ecb947..94b849881941 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -154,12 +154,10 @@ public:
CompoundStatementIndenter(UnwrappedLineParser *Parser,
const FormatStyle &Style, unsigned &LineLevel)
: LineLevel(LineLevel), OldLineLevel(LineLevel) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman) {
- Parser->addUnwrappedLine();
- } else if (Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
+ if (Style.BraceWrapping.AfterControlStatement)
Parser->addUnwrappedLine();
+ if (Style.BraceWrapping.IndentBraces)
++LineLevel;
- }
}
~CompoundStatementIndenter() { LineLevel = OldLineLevel; }
@@ -284,6 +282,8 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
case tok::l_brace:
// FIXME: Add parameter whether this can happen - if this happens, we must
// be in a non-declaration context.
+ if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
+ continue;
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
break;
@@ -321,7 +321,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
SmallVector<FormatToken *, 8> LBraceStack;
assert(Tok->Tok.is(tok::l_brace));
do {
- // Get next none-comment token.
+ // Get next non-comment token.
FormatToken *NextTok;
unsigned ReadTokens = 0;
do {
@@ -357,7 +357,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
ProbablyBracedList =
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
- tok::l_paren, tok::ellipsis) ||
+ tok::l_square, tok::l_paren, tok::ellipsis) ||
(NextTok->is(tok::semi) &&
(!ExpectClassBody || LBraceStack.size() != 1)) ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
@@ -403,6 +403,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
"'{' or macro block token expected");
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
+ FormatTok->BlockKind = BK_Block;
unsigned InitialLevel = Line->Level;
nextToken();
@@ -421,6 +422,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
: !FormatTok->is(tok::r_brace)) {
Line->Level = InitialLevel;
+ FormatTok->BlockKind = BK_Block;
return;
}
@@ -454,17 +456,15 @@ static bool isGoogScope(const UnwrappedLine &Line) {
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
const FormatToken &InitialToken) {
- switch (Style.BreakBeforeBraces) {
- case FormatStyle::BS_Linux:
- return InitialToken.isOneOf(tok::kw_namespace, tok::kw_class);
- case FormatStyle::BS_Mozilla:
- return InitialToken.isOneOf(tok::kw_class, tok::kw_struct, tok::kw_union);
- case FormatStyle::BS_Allman:
- case FormatStyle::BS_GNU:
- return true;
- default:
- return false;
- }
+ if (InitialToken.is(tok::kw_namespace))
+ return Style.BraceWrapping.AfterNamespace;
+ if (InitialToken.is(tok::kw_class))
+ return Style.BraceWrapping.AfterClass;
+ if (InitialToken.is(tok::kw_union))
+ return Style.BraceWrapping.AfterUnion;
+ if (InitialToken.is(tok::kw_struct))
+ return Style.BraceWrapping.AfterStruct;
+ return false;
}
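With the switch over BreakBeforeBraces presets gone, each construct consults its own BraceWrapping flag. A hedged sketch of configuring this directly, assuming the BS_Custom value and the expandPresets() mechanism this patch introduces:

    FormatStyle S = getLLVMStyle();
    S.BreakBeforeBraces = FormatStyle::BS_Custom;
    S.BraceWrapping.AfterNamespace = true;  // wrap "namespace N\n{"
    S.BraceWrapping.AfterStruct = false;    // keep "struct S {"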
void UnwrappedLineParser::parseChildBlock() {
@@ -650,7 +650,15 @@ static bool tokenCanStartNewLine(const clang::Token &Tok) {
}
void UnwrappedLineParser::parseStructuralElement() {
- assert(!FormatTok->Tok.is(tok::l_brace));
+ assert(!FormatTok->is(tok::l_brace));
+ if (Style.Language == FormatStyle::LK_TableGen &&
+ FormatTok->is(tok::pp_include)) {
+ nextToken();
+ if (FormatTok->is(tok::string_literal))
+ nextToken();
+ addUnwrappedLine();
+ return;
+ }
switch (FormatTok->Tok.getKind()) {
case tok::at:
nextToken();
@@ -679,8 +687,7 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::objc_autoreleasepool:
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+ if (Style.BraceWrapping.AfterObjCDeclaration)
addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/false);
}
@@ -787,7 +794,8 @@ void UnwrappedLineParser::parseStructuralElement() {
parseJavaScriptEs6ImportExport();
return;
}
- if (FormatTok->is(Keywords.kw_signals)) {
+ if (FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
+ Keywords.kw_slots, Keywords.kw_qslots)) {
nextToken();
if (FormatTok->is(tok::colon)) {
nextToken();
@@ -810,10 +818,10 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_enum:
// parseEnum falls through and does not yet add an unwrapped line as an
// enum definition can start a structural element.
- parseEnum();
- // This does not apply for Java and JavaScript.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) {
+ if (!parseEnum())
+ break;
+ // This only applies for C++.
+ if (Style.Language != FormatStyle::LK_Cpp) {
addUnwrappedLine();
return;
}
@@ -843,6 +851,11 @@ void UnwrappedLineParser::parseStructuralElement() {
if (Style.Language == FormatStyle::LK_Java && FormatTok &&
FormatTok->is(tok::kw_class))
nextToken();
+ if (Style.Language == FormatStyle::LK_JavaScript && FormatTok &&
+ FormatTok->Tok.getIdentifierInfo())
+ // JavaScript only has pseudo keywords; all keywords are allowed to
+ // appear in "IdentifierName" positions. See http://es5.github.io/#x7.6
+ nextToken();
break;
case tok::semi:
nextToken();
@@ -854,6 +867,11 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::l_paren:
parseParens();
break;
+ case tok::kw_operator:
+ nextToken();
+ if (FormatTok->isBinaryOperator())
+ nextToken();
+ break;
case tok::caret:
nextToken();
if (FormatTok->Tok.isAnyIdentifier() ||
@@ -870,7 +888,7 @@ void UnwrappedLineParser::parseStructuralElement() {
// structural element.
// FIXME: Figure out cases where this is not true, and add projections
// for them (the one we know is missing are lambdas).
- if (Style.BreakBeforeBraces != FormatStyle::BS_Attach)
+ if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
FormatTok->Type = TT_FunctionLBrace;
parseBlock(/*MustBeDeclaration=*/false);
@@ -1001,6 +1019,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::less:
case tok::greater:
case tok::identifier:
+ case tok::numeric_constant:
case tok::coloncolon:
case tok::kw_mutable:
nextToken();
@@ -1257,12 +1276,10 @@ void UnwrappedLineParser::parseIfThenElse() {
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
+ if (Style.BraceWrapping.BeforeElse)
addUnwrappedLine();
- } else {
+ else
NeedsUnwrappedLine = true;
- }
} else {
addUnwrappedLine();
++Line->Level;
@@ -1270,8 +1287,6 @@ void UnwrappedLineParser::parseIfThenElse() {
--Line->Level;
}
if (FormatTok->Tok.is(tok::kw_else)) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup)
- addUnwrappedLine();
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
@@ -1312,9 +1327,7 @@ void UnwrappedLineParser::parseTryCatch() {
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
- Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup) {
+ if (Style.BraceWrapping.BeforeCatch) {
addUnwrappedLine();
} else {
NeedsUnwrappedLine = true;
@@ -1352,17 +1365,13 @@ void UnwrappedLineParser::parseTryCatch() {
NeedsUnwrappedLine = false;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
- Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup) {
+ if (Style.BraceWrapping.BeforeCatch)
addUnwrappedLine();
- } else {
+ else
NeedsUnwrappedLine = true;
- }
}
- if (NeedsUnwrappedLine) {
+ if (NeedsUnwrappedLine)
addUnwrappedLine();
- }
}
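A sketch of what BraceWrapping.BeforeCatch controls (illustrative output only):

    // BeforeCatch = true:
    try {
      f();
    }
    catch (...) {
    }
    // BeforeCatch = false: "} catch (...) {" stays on one line.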
void UnwrappedLineParser::parseNamespace() {
@@ -1370,7 +1379,7 @@ void UnwrappedLineParser::parseNamespace() {
const FormatToken &InitialToken = *FormatTok;
nextToken();
- if (FormatTok->Tok.is(tok::identifier))
+ while (FormatTok->isOneOf(tok::identifier, tok::coloncolon))
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
if (ShouldBreakBeforeBrace(Style, InitialToken))
@@ -1438,7 +1447,7 @@ void UnwrappedLineParser::parseDoWhile() {
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+ if (Style.BraceWrapping.IndentBraces)
addUnwrappedLine();
} else {
addUnwrappedLine();
@@ -1466,11 +1475,8 @@ void UnwrappedLineParser::parseLabel() {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
- // "break;" after "}" on its own line only for BS_Allman and BS_GNU
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
+ if (Style.BraceWrapping.AfterControlStatement)
addUnwrappedLine();
- }
parseStructuralElement();
}
addUnwrappedLine();
@@ -1519,11 +1525,17 @@ void UnwrappedLineParser::parseAccessSpecifier() {
addUnwrappedLine();
}
-void UnwrappedLineParser::parseEnum() {
+bool UnwrappedLineParser::parseEnum() {
// Won't be 'enum' for NS_ENUMs.
if (FormatTok->Tok.is(tok::kw_enum))
nextToken();
+ // In TypeScript, "enum" can also be used as a property name, e.g. in
+ // interface declarations. An "enum" keyword followed by a colon would be a
+ // syntax error, so we assume it is just an identifier.
+ if (Style.Language == FormatStyle::LK_JavaScript && FormatTok->is(tok::colon))
+ return false;
+
// Eat up enum class ...
if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
nextToken();
@@ -1541,19 +1553,23 @@ void UnwrappedLineParser::parseEnum() {
// return type. In Java, this can be "implements", etc.
if (Style.Language == FormatStyle::LK_Cpp &&
FormatTok->is(tok::identifier))
- return;
+ return false;
}
}
// Just a declaration or something is wrong.
if (FormatTok->isNot(tok::l_brace))
- return;
+ return true;
FormatTok->BlockKind = BK_Block;
if (Style.Language == FormatStyle::LK_Java) {
// Java enums are different.
parseJavaEnumBody();
- return;
+ return true;
+ }
+ if (Style.Language == FormatStyle::LK_Proto) {
+ parseBlock(/*MustBeDeclaration=*/true);
+ return true;
}
// Parse enum body.
@@ -1563,6 +1579,7 @@ void UnwrappedLineParser::parseEnum() {
nextToken();
addUnwrappedLine();
}
+ return true;
// There is no addUnwrappedLine() here so that we fall through to parsing a
// structural element afterwards. Thus, in "enum A {} n, m;",
@@ -1731,8 +1748,7 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
parseObjCProtocolList();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
- Style.BreakBeforeBraces == FormatStyle::BS_GNU)
+ if (Style.BraceWrapping.AfterObjCDeclaration)
addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/true);
}
@@ -1777,7 +1793,7 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
}
if (FormatTok->isOneOf(tok::kw_const, tok::kw_class, tok::kw_enum,
- Keywords.kw_var))
+ Keywords.kw_let, Keywords.kw_var))
return; // Fall through to parsing the corresponding structure.
if (FormatTok->is(tok::l_brace)) {
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index c2fa02957685..a13c03f94086 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -96,7 +96,7 @@ private:
void parseNamespace();
void parseNew();
void parseAccessSpecifier();
- void parseEnum();
+ bool parseEnum();
void parseJavaEnumBody();
void parseRecord();
void parseObjCProtocolList();
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 65395277f89a..725f05bcd8fc 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -26,16 +26,18 @@ operator()(const Change &C1, const Change &C2) const {
}
WhitespaceManager::Change::Change(
- bool CreateReplacement, const SourceRange &OriginalWhitespaceRange,
+ bool CreateReplacement, SourceRange OriginalWhitespaceRange,
unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
unsigned NewlinesBefore, StringRef PreviousLinePostfix,
- StringRef CurrentLinePrefix, tok::TokenKind Kind, bool ContinuesPPDirective)
+ StringRef CurrentLinePrefix, tok::TokenKind Kind, bool ContinuesPPDirective,
+ bool IsStartOfDeclName)
: CreateReplacement(CreateReplacement),
OriginalWhitespaceRange(OriginalWhitespaceRange),
StartOfTokenColumn(StartOfTokenColumn), NewlinesBefore(NewlinesBefore),
PreviousLinePostfix(PreviousLinePostfix),
CurrentLinePrefix(CurrentLinePrefix), Kind(Kind),
- ContinuesPPDirective(ContinuesPPDirective), IndentLevel(IndentLevel),
+ ContinuesPPDirective(ContinuesPPDirective),
+ IsStartOfDeclName(IsStartOfDeclName), IndentLevel(IndentLevel),
Spaces(Spaces), IsTrailingComment(false), TokenLength(0),
PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
StartOfBlockComment(nullptr), IndentationOffset(0) {}
@@ -52,19 +54,21 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
if (Tok.Finalized)
return;
Tok.Decision = (Newlines > 0) ? FD_Break : FD_Continue;
- Changes.push_back(Change(true, Tok.WhitespaceRange, IndentLevel, Spaces,
- StartOfTokenColumn, Newlines, "", "",
- Tok.Tok.getKind(), InPPDirective && !Tok.IsFirst));
+ Changes.push_back(
+ Change(true, Tok.WhitespaceRange, IndentLevel, Spaces, StartOfTokenColumn,
+ Newlines, "", "", Tok.Tok.getKind(), InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
}
void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
bool InPPDirective) {
if (Tok.Finalized)
return;
- Changes.push_back(Change(false, Tok.WhitespaceRange, /*IndentLevel=*/0,
- /*Spaces=*/0, Tok.OriginalColumn, Tok.NewlinesBefore,
- "", "", Tok.Tok.getKind(),
- InPPDirective && !Tok.IsFirst));
+ Changes.push_back(
+ Change(false, Tok.WhitespaceRange, /*IndentLevel=*/0,
+ /*Spaces=*/0, Tok.OriginalColumn, Tok.NewlinesBefore, "", "",
+ Tok.Tok.getKind(), InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
}
void WhitespaceManager::replaceWhitespaceInToken(
@@ -84,7 +88,8 @@ void WhitespaceManager::replaceWhitespaceInToken(
// calculate the new length of the comment and to calculate the changes
// for which to do the alignment when aligning comments.
Tok.is(TT_LineComment) && Newlines > 0 ? tok::comment : tok::unknown,
- InPPDirective && !Tok.IsFirst));
+ InPPDirective && !Tok.IsFirst,
+ Tok.is(TT_StartOfName) || Tok.is(TT_FunctionDeclarationName)));
}
const tooling::Replacements &WhitespaceManager::generateReplacements() {
@@ -93,6 +98,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
std::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
+ alignConsecutiveDeclarations();
alignConsecutiveAssignments();
alignTrailingComments();
alignEscapedNewlines();
@@ -142,94 +148,183 @@ void WhitespaceManager::calculateLineBreakInformation() {
}
}
-// Walk through all of the changes and find sequences of "=" to align. To do
-// so, keep track of the lines and whether or not an "=" was found on align. If
-// a "=" is found on a line, extend the current sequence. If the current line
-// cannot be part of a sequence, e.g. because there is an empty line before it
-// or it contains non-assignments, finalize the previous sequence.
-void WhitespaceManager::alignConsecutiveAssignments() {
- if (!Style.AlignConsecutiveAssignments)
- return;
+// Align a single sequence of tokens; see AlignTokens below.
+template <typename F>
+static void
+AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes) {
+ bool FoundMatchOnLine = false;
+ int Shift = 0;
+ for (unsigned i = Start; i != End; ++i) {
+ if (Changes[i].NewlinesBefore > 0) {
+ FoundMatchOnLine = false;
+ Shift = 0;
+ }
+
+ // If this is the first matching token to be aligned, remember by how many
+ // spaces it has to be shifted, so the rest of the changes on the line are
+    // shifted by the same amount.
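+    //
+    // For example, if the first matching token on a line has to move three
+    // columns to the right, every change that follows it on that line is
+    // moved three columns to the right as well, preserving the relative
+    // layout of the rest of the line.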
+ if (!FoundMatchOnLine && Matches(Changes[i])) {
+ FoundMatchOnLine = true;
+ Shift = Column - Changes[i].StartOfTokenColumn;
+ Changes[i].Spaces += Shift;
+ }
+
+ assert(Shift >= 0);
+ Changes[i].StartOfTokenColumn += Shift;
+ if (i + 1 != Changes.size())
+ Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+ }
+}
+// Walk through all of the changes and find sequences of matching tokens to
+// align. To do so, keep track of the lines and whether or not a matching token
+// was found on a line. If a matching token is found, extend the current
+// sequence. If the current line cannot be part of a sequence, e.g. because
+// there is an empty line before it or it contains only non-matching tokens,
+// finalize the previous sequence.
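+//
+// For example, with the assignment matcher used by
+// alignConsecutiveAssignments below, a sequence such as
+//   int a = 1;
+//   int somelongname = 2;
+// would be realigned to
+//   int a            = 1;
+//   int somelongname = 2;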
+template <typename F>
+static void AlignTokens(const FormatStyle &Style, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes) {
unsigned MinColumn = 0;
+ unsigned MaxColumn = UINT_MAX;
+
+ // Line number of the start and the end of the current token sequence.
unsigned StartOfSequence = 0;
unsigned EndOfSequence = 0;
- bool FoundAssignmentOnLine = false;
- bool FoundLeftParenOnLine = false;
- unsigned CurrentLine = 0;
- auto AlignSequence = [&] {
- alignConsecutiveAssignments(StartOfSequence, EndOfSequence, MinColumn);
+ // Keep track of the nesting level of matching tokens, i.e. the number of
+ // surrounding (), [], or {}. We will only align a sequence of matching
+  // tokens that share the same scope depth.
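+  //
+  // For example, the matches in
+  //   result    = f(x);
+  //   something = g(y, z);
+  // share scope depth 0; the parentheses in between are skipped over as long
+  // as they contain no matches of their own.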
+ //
+ // FIXME: This could use FormatToken::NestingLevel information, but there is
+ // an outstanding issue wrt the brace scopes.
+ unsigned NestingLevelOfLastMatch = 0;
+ unsigned NestingLevel = 0;
+
+  // Keep track of the number of commas before the matching tokens; we will
+  // only align a sequence of matching tokens if they are preceded by the same
+  // number of commas.
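+  //
+  // For example, in
+  //   f(a = 1,
+  //     b, c = 2);
+  // the two "=" are preceded by a different number of commas on their
+  // respective lines and are therefore not aligned with each other.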
+ unsigned CommasBeforeLastMatch = 0;
+ unsigned CommasBeforeMatch = 0;
+
+ // Whether a matching token has been found on the current line.
+ bool FoundMatchOnLine = false;
+
+  // Aligns a sequence of matching tokens at column MinColumn.
+ //
+ // Sequences start from the first matching token to align, and end at the
+ // first token of the first line that doesn't need to be aligned.
+ //
+  // We need to adjust the StartOfTokenColumn of each Change that is on a line
+  // containing a matching token to be aligned and that is located after that
+  // token.
+ auto AlignCurrentSequence = [&] {
+ if (StartOfSequence > 0 && StartOfSequence < EndOfSequence)
+ AlignTokenSequence(StartOfSequence, EndOfSequence, MinColumn, Matches,
+ Changes);
MinColumn = 0;
+ MaxColumn = UINT_MAX;
StartOfSequence = 0;
EndOfSequence = 0;
};
for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
if (Changes[i].NewlinesBefore != 0) {
- CurrentLine += Changes[i].NewlinesBefore;
- if (StartOfSequence > 0 &&
- (Changes[i].NewlinesBefore > 1 || !FoundAssignmentOnLine)) {
- EndOfSequence = i;
- AlignSequence();
- }
- FoundAssignmentOnLine = false;
- FoundLeftParenOnLine = false;
+ CommasBeforeMatch = 0;
+ EndOfSequence = i;
+ // If there is a blank line, or if the last line didn't contain any
+ // matching token, the sequence ends here.
+ if (Changes[i].NewlinesBefore > 1 || !FoundMatchOnLine)
+ AlignCurrentSequence();
+
+ FoundMatchOnLine = false;
}
- if ((Changes[i].Kind == tok::equal &&
- (FoundAssignmentOnLine || ((Changes[i].NewlinesBefore > 0 ||
- Changes[i + 1].NewlinesBefore > 0)))) ||
- (!FoundLeftParenOnLine && Changes[i].Kind == tok::r_paren)) {
- if (StartOfSequence > 0)
- AlignSequence();
- } else if (Changes[i].Kind == tok::l_paren) {
- FoundLeftParenOnLine = true;
- if (!FoundAssignmentOnLine && StartOfSequence > 0)
- AlignSequence();
- } else if (!FoundAssignmentOnLine && !FoundLeftParenOnLine &&
- Changes[i].Kind == tok::equal) {
- FoundAssignmentOnLine = true;
- EndOfSequence = i;
- if (StartOfSequence == 0)
- StartOfSequence = i;
+ if (Changes[i].Kind == tok::comma) {
+ ++CommasBeforeMatch;
+ } else if (Changes[i].Kind == tok::r_brace ||
+ Changes[i].Kind == tok::r_paren ||
+ Changes[i].Kind == tok::r_square) {
+ --NestingLevel;
+ } else if (Changes[i].Kind == tok::l_brace ||
+ Changes[i].Kind == tok::l_paren ||
+ Changes[i].Kind == tok::l_square) {
+ // We want sequences to skip over child scopes if possible, but not the
+ // other way around.
+ NestingLevelOfLastMatch = std::min(NestingLevelOfLastMatch, NestingLevel);
+ ++NestingLevel;
+ }
- unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- MinColumn = std::max(MinColumn, ChangeMinColumn);
+ if (!Matches(Changes[i]))
+ continue;
+
+    // If there is more than one matching token on this line, or if the number
+    // of preceding commas or the scope depth no longer match, end the
+    // sequence.
+ if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch ||
+ NestingLevel != NestingLevelOfLastMatch)
+ AlignCurrentSequence();
+
+ CommasBeforeLastMatch = CommasBeforeMatch;
+ NestingLevelOfLastMatch = NestingLevel;
+ FoundMatchOnLine = true;
+
+ if (StartOfSequence == 0)
+ StartOfSequence = i;
+
+ unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
+ int LineLengthAfter = -Changes[i].Spaces;
+ for (unsigned j = i; j != e && Changes[j].NewlinesBefore == 0; ++j)
+ LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
+ unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
+
+ // If we are restricted by the maximum column width, end the sequence.
+ if (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn ||
+ CommasBeforeLastMatch != CommasBeforeMatch) {
+ AlignCurrentSequence();
+ StartOfSequence = i;
}
- }
- if (StartOfSequence > 0) {
- EndOfSequence = Changes.size();
- AlignSequence();
+ MinColumn = std::max(MinColumn, ChangeMinColumn);
+ MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
+
+ EndOfSequence = Changes.size();
+ AlignCurrentSequence();
}
-void WhitespaceManager::alignConsecutiveAssignments(unsigned Start,
- unsigned End,
- unsigned Column) {
- bool AlignedAssignment = false;
- int PreviousShift = 0;
- for (unsigned i = Start; i != End; ++i) {
- int Shift = 0;
- if (Changes[i].NewlinesBefore > 0)
- AlignedAssignment = false;
- if (!AlignedAssignment && Changes[i].Kind == tok::equal) {
- Shift = Column - Changes[i].StartOfTokenColumn;
- AlignedAssignment = true;
- PreviousShift = Shift;
- }
- assert(Shift >= 0);
- Changes[i].Spaces += Shift;
- if (i + 1 != Changes.size())
- Changes[i + 1].PreviousEndOfTokenColumn += Shift;
- Changes[i].StartOfTokenColumn += Shift;
- if (AlignedAssignment) {
- Changes[i].StartOfTokenColumn += PreviousShift;
- if (i + 1 != Changes.size())
- Changes[i + 1].PreviousEndOfTokenColumn += PreviousShift;
- }
- }
+void WhitespaceManager::alignConsecutiveAssignments() {
+ if (!Style.AlignConsecutiveAssignments)
+ return;
+
+ AlignTokens(Style,
+ [&](const Change &C) {
+ // Do not align on equal signs that are first on a line.
+ if (C.NewlinesBefore > 0)
+ return false;
+
+ // Do not align on equal signs that are last on a line.
+ if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
+ return false;
+
+ return C.Kind == tok::equal;
+ },
+ Changes);
+}
+
+void WhitespaceManager::alignConsecutiveDeclarations() {
+ if (!Style.AlignConsecutiveDeclarations)
+ return;
+
+  // FIXME: Currently we don't properly handle PointerAlignment: Right. The *
+  // and & are not aligned and are left dangling. Something has to be done
+  // about it, but it raises the question of how to align code like:
+ // const char* const* v1;
+ // float const* v2;
+ // SomeVeryLongType const& v3;
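+  //
+  // For example, with this option enabled a snippet such as
+  //   int aaaa = 12;
+  //   float b = 23;
+  //   std::string ccc = 23;
+  // would be realigned along the lines of
+  //   int         aaaa = 12;
+  //   float       b = 23;
+  //   std::string ccc = 23;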
+
+ AlignTokens(Style, [](Change const &C) { return C.IsStartOfDeclName; },
+ Changes);
}
void WhitespaceManager::alignTrailingComments() {
@@ -377,7 +472,7 @@ void WhitespaceManager::generateChanges() {
}
}
-void WhitespaceManager::storeReplacement(const SourceRange &Range,
+void WhitespaceManager::storeReplacement(SourceRange Range,
StringRef Text) {
unsigned WhitespaceLength = SourceMgr.getFileOffset(Range.getEnd()) -
SourceMgr.getFileOffset(Range.getBegin());
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index d97383832981..f83971b4add6 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -81,7 +81,6 @@ public:
/// \brief Returns all the \c Replacements created during formatting.
const tooling::Replacements &generateReplacements();
-private:
/// \brief Represents a change before a token, a break inside a token,
/// or the layout of an unchanged token (or whitespace within).
struct Change {
@@ -106,11 +105,11 @@ private:
///
/// \p StartOfTokenColumn and \p InPPDirective will be used to lay out
/// trailing comments and escaped newlines.
- Change(bool CreateReplacement, const SourceRange &OriginalWhitespaceRange,
+ Change(bool CreateReplacement, SourceRange OriginalWhitespaceRange,
unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
unsigned NewlinesBefore, StringRef PreviousLinePostfix,
StringRef CurrentLinePrefix, tok::TokenKind Kind,
- bool ContinuesPPDirective);
+ bool ContinuesPPDirective, bool IsStartOfDeclName);
bool CreateReplacement;
// Changes might be in the middle of a token, so we cannot just keep the
@@ -126,6 +125,7 @@ private:
// the \c BreakableToken is still doing its own alignment.
tok::TokenKind Kind;
bool ContinuesPPDirective;
+ bool IsStartOfDeclName;
// The number of nested blocks the token is in. This is used to add tabs
// only for the indentation, and not for alignment, when
@@ -159,6 +159,7 @@ private:
int IndentationOffset;
};
+private:
/// \brief Calculate \c IsTrailingComment, \c TokenLength for the last tokens
/// or token parts in a line and \c PreviousEndOfTokenColumn and
/// \c EscapedNewlineColumn for the first tokens or token parts in a line.
@@ -167,11 +168,8 @@ private:
/// \brief Align consecutive assignments over all \c Changes.
void alignConsecutiveAssignments();
- /// \brief Align consecutive assignments from change \p Start to change \p End
- /// at
- /// the specified \p Column.
- void alignConsecutiveAssignments(unsigned Start, unsigned End,
- unsigned Column);
+ /// \brief Align consecutive declarations over all \c Changes.
+ void alignConsecutiveDeclarations();
/// \brief Align trailing comments over all \c Changes.
void alignTrailingComments();
@@ -191,7 +189,7 @@ private:
void generateChanges();
/// \brief Stores \p Text as the replacement for the whitespace in \p Range.
- void storeReplacement(const SourceRange &Range, StringRef Text);
+ void storeReplacement(SourceRange Range, StringRef Text);
void appendNewlineText(std::string &Text, unsigned Newlines);
void appendNewlineText(std::string &Text, unsigned Newlines,
unsigned PreviousEndOfTokenColumn,
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index 762c7a5da5e6..b499fa2b0e68 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -59,7 +59,6 @@ void ASTMergeAction::ExecuteAction() {
/*MinimalImport=*/false);
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
- CI.getASTConsumer().Initialize(CI.getASTContext());
for (auto *D : TU->decls()) {
// Don't re-import __va_list_tag, __builtin_va_list.
if (const auto *ND = dyn_cast<NamedDecl>(D))
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 1bb5c3ff279d..e6ba29201f85 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -1,4 +1,4 @@
-//===--- ASTUnit.cpp - ASTUnit utility ------------------------------------===//
+//===--- ASTUnit.cpp - ASTUnit utility --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -47,6 +47,7 @@
#include <atomic>
#include <cstdio>
#include <cstdlib>
+
using namespace clang;
using llvm::TimeRecord;
@@ -185,7 +186,7 @@ struct ASTUnit::ASTWriterData {
llvm::BitstreamWriter Stream;
ASTWriter Writer;
- ASTWriterData() : Stream(Buffer), Writer(Stream) { }
+ ASTWriterData() : Stream(Buffer), Writer(Stream, { }) { }
};
void ASTUnit::clearFileLevelDecls() {
@@ -649,12 +650,12 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
}
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
- const std::string &Filename,
- const PCHContainerReader &PCHContainerRdr,
+ const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts, bool OnlyLocalDecls,
- ArrayRef<RemappedFile> RemappedFiles, bool CaptureDiagnostics,
- bool AllowPCHWithCompilerErrors, bool UserFilesAreVolatile) {
+ const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
+ bool OnlyLocalDecls, ArrayRef<RemappedFile> RemappedFiles,
+ bool CaptureDiagnostics, bool AllowPCHWithCompilerErrors,
+ bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -708,7 +709,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
bool disableValid = false;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
disableValid = true;
- AST->Reader = new ASTReader(PP, Context, PCHContainerRdr,
+ AST->Reader = new ASTReader(PP, Context, PCHContainerRdr, { },
/*isysroot=*/"",
/*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors);
@@ -926,6 +927,7 @@ public:
const Preprocessor &PP, StringRef isysroot,
raw_ostream *Out)
: PCHGenerator(PP, "", nullptr, isysroot, std::make_shared<PCHBuffer>(),
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>>(),
/*AllowASTWithErrors=*/true),
Unit(Unit), Hash(Unit.getCurrentTopLevelHashValue()), Action(Action),
Out(Out) {
@@ -973,7 +975,7 @@ public:
}
};
-}
+} // anonymous namespace
std::unique_ptr<ASTConsumer>
PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
@@ -1076,11 +1078,10 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Configure the various subsystems.
LangOpts = Clang->getInvocation().LangOpts;
FileSystemOpts = Clang->getFileSystemOpts();
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
- createVFSFromCompilerInvocation(Clang->getInvocation(), getDiagnostics());
- if (!VFS)
- return true;
- FileMgr = new FileManager(FileSystemOpts, VFS);
+ if (!FileMgr) {
+ Clang->createFileManager();
+ FileMgr = &Clang->getFileManager();
+ }
SourceMgr = new SourceManager(getDiagnostics(), *FileMgr,
UserFilesAreVolatile);
TheSema.reset();
@@ -1724,9 +1725,10 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, ASTFrontendAction *Action,
ASTUnit *Unit, bool Persistent, StringRef ResourceFilesPath,
- bool OnlyLocalDecls, bool CaptureDiagnostics, bool PrecompilePreamble,
- bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
- bool UserFilesAreVolatile, std::unique_ptr<ASTUnit> *ErrAST) {
+ bool OnlyLocalDecls, bool CaptureDiagnostics,
+ unsigned PrecompilePreambleAfterNParses, bool CacheCodeCompletionResults,
+ bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile,
+ std::unique_ptr<ASTUnit> *ErrAST) {
assert(CI && "A CompilerInvocation is required");
std::unique_ptr<ASTUnit> OwnAST;
@@ -1745,8 +1747,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
}
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
- if (PrecompilePreamble)
- AST->PreambleRebuildCounter = 2;
+ if (PrecompilePreambleAfterNParses > 0)
+ AST->PreambleRebuildCounter = PrecompilePreambleAfterNParses;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->IncludeBriefCommentsInCodeCompletion
@@ -1863,7 +1865,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
bool ASTUnit::LoadFromCompilerInvocation(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- bool PrecompilePreamble) {
+ unsigned PrecompilePreambleAfterNParses) {
if (!Invocation)
return true;
@@ -1873,8 +1875,8 @@ bool ASTUnit::LoadFromCompilerInvocation(
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
- if (PrecompilePreamble) {
- PreambleRebuildCounter = 2;
+ if (PrecompilePreambleAfterNParses > 0) {
+ PreambleRebuildCounter = PrecompilePreambleAfterNParses;
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
}
@@ -1892,10 +1894,11 @@ bool ASTUnit::LoadFromCompilerInvocation(
std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
CompilerInvocation *CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags, bool OnlyLocalDecls,
- bool CaptureDiagnostics, bool PrecompilePreamble,
- TranslationUnitKind TUKind, bool CacheCodeCompletionResults,
- bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile) {
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags, FileManager *FileMgr,
+ bool OnlyLocalDecls, bool CaptureDiagnostics,
+ unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
+ bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
+ bool UserFilesAreVolatile) {
// Create the AST unit.
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
@@ -1907,12 +1910,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
AST->IncludeBriefCommentsInCodeCompletion
= IncludeBriefCommentsInCodeCompletion;
AST->Invocation = CI;
- AST->FileSystemOpts = CI->getFileSystemOpts();
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
- createVFSFromCompilerInvocation(*CI, *Diags);
- if (!VFS)
- return nullptr;
- AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
+ AST->FileSystemOpts = FileMgr->getFileSystemOpts();
+ AST->FileMgr = FileMgr;
AST->UserFilesAreVolatile = UserFilesAreVolatile;
// Recover resources if we crash before exiting this method.
@@ -1922,7 +1921,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
DiagCleanup(Diags.get());
- if (AST->LoadFromCompilerInvocation(PCHContainerOps, PrecompilePreamble))
+ if (AST->LoadFromCompilerInvocation(PCHContainerOps,
+ PrecompilePreambleAfterNParses))
return nullptr;
return AST;
}
@@ -1933,11 +1933,11 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
bool OnlyLocalDecls, bool CaptureDiagnostics,
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
- bool PrecompilePreamble, TranslationUnitKind TUKind,
+ unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
bool UserFilesAreVolatile, bool ForSerialization,
- std::unique_ptr<ASTUnit> *ErrAST) {
+ llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
@@ -1970,6 +1970,9 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
+ if (ModuleFormat)
+ CI->getHeaderSearchOpts().ModuleFormat = ModuleFormat.getValue();
+
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
@@ -2001,7 +2004,8 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
- if (AST->LoadFromCompilerInvocation(PCHContainerOps, PrecompilePreamble)) {
+ if (AST->LoadFromCompilerInvocation(PCHContainerOps,
+ PrecompilePreambleAfterNParses)) {
// Some error occurred, if caller wants to examine diagnostics, pass it the
// ASTUnit.
if (ErrAST) {
@@ -2043,6 +2047,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
// Clear out the diagnostics state.
+ FileMgr.reset();
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
if (OverrideMainBuffer)
@@ -2124,7 +2129,7 @@ namespace {
return Next.getCodeCompletionTUInfo();
}
};
-}
+} // anonymous namespace
/// \brief Helper function that computes which global names are hidden by the
/// local code-completion results.
@@ -2210,7 +2215,6 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
}
}
-
void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
CodeCompletionContext Context,
CodeCompletionResult *Results,
@@ -2504,7 +2508,7 @@ bool ASTUnit::serialize(raw_ostream &OS) {
SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
- ASTWriter Writer(Stream);
+ ASTWriter Writer(Stream, { });
return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
}
@@ -2782,39 +2786,29 @@ bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
return true;
}
-namespace {
-struct PCHLocatorInfo {
- serialization::ModuleFile *Mod;
- PCHLocatorInfo() : Mod(nullptr) {}
-};
-}
-
-static bool PCHLocator(serialization::ModuleFile &M, void *UserData) {
- PCHLocatorInfo &Info = *static_cast<PCHLocatorInfo*>(UserData);
- switch (M.Kind) {
- case serialization::MK_ImplicitModule:
- case serialization::MK_ExplicitModule:
- return true; // skip dependencies.
- case serialization::MK_PCH:
- Info.Mod = &M;
- return true; // found it.
- case serialization::MK_Preamble:
- return false; // look in dependencies.
- case serialization::MK_MainFile:
- return false; // look in dependencies.
- }
-
- return true;
-}
-
const FileEntry *ASTUnit::getPCHFile() {
if (!Reader)
return nullptr;
- PCHLocatorInfo Info;
- Reader->getModuleManager().visit(PCHLocator, &Info);
- if (Info.Mod)
- return Info.Mod->File;
+ serialization::ModuleFile *Mod = nullptr;
+ Reader->getModuleManager().visit([&Mod](serialization::ModuleFile &M) {
+ switch (M.Kind) {
+ case serialization::MK_ImplicitModule:
+ case serialization::MK_ExplicitModule:
+ return true; // skip dependencies.
+ case serialization::MK_PCH:
+ Mod = &M;
+ return true; // found it.
+ case serialization::MK_Preamble:
+ return false; // look in dependencies.
+ case serialization::MK_MainFile:
+ return false; // look in dependencies.
+ }
+
+ return true;
+ });
+ if (Mod)
+ return Mod->File;
return nullptr;
}
@@ -2854,9 +2848,9 @@ void ASTUnit::ConcurrencyState::finish() {
#else // NDEBUG
-ASTUnit::ConcurrencyState::ConcurrencyState() { Mutex = 0; }
+ASTUnit::ConcurrencyState::ConcurrencyState() { Mutex = nullptr; }
ASTUnit::ConcurrencyState::~ConcurrencyState() {}
void ASTUnit::ConcurrencyState::start() {}
void ASTUnit::ConcurrencyState::finish() {}
-#endif
+#endif // NDEBUG
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 9a3e459640a5..476812046241 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -35,6 +35,7 @@ add_clang_library(clangFrontend
PrintPreprocessedOutput.cpp
SerializedDiagnosticPrinter.cpp
SerializedDiagnosticReader.cpp
+ TestModuleFileExtension.cpp
TextDiagnostic.cpp
TextDiagnosticBuffer.cpp
TextDiagnosticPrinter.cpp
@@ -42,6 +43,7 @@ add_clang_library(clangFrontend
DEPENDS
ClangDriverOptions
+ intrinsics_gen
LINK_LIBS
clangAST
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 7d2a09cd7ca0..87f3d1725814 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
@@ -105,7 +106,7 @@ public:
}
unsigned getRepresentationLength() const {
- return Kind == IsNoExist ? 0 : 4 + 4 + 2 + 8 + 8;
+ return Kind == IsNoExist ? 0 : 4 * 8;
}
};
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index cc0504ba8b29..1c1081fbe08e 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -47,9 +47,9 @@ protected:
CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset) override;
bool FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) override;
- ExternalLoadResult
+ void
FindExternalLexicalDecls(const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
+ llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
SmallVectorImpl<Decl *> &Result) override;
void CompleteType(TagDecl *Tag) override;
void CompleteType(ObjCInterfaceDecl *Class) override;
@@ -82,6 +82,7 @@ createASTReader(CompilerInstance &CI, StringRef pchFile,
std::unique_ptr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, CI.getASTContext(),
CI.getPCHContainerReader(),
+ /*Extensions=*/{ },
/*isysroot=*/"", /*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
@@ -160,8 +161,10 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
Clang->createASTContext();
auto Buffer = std::make_shared<PCHBuffer>();
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions;
auto consumer = llvm::make_unique<PCHGenerator>(
- Clang->getPreprocessor(), "-", nullptr, /*isysroot=*/"", Buffer);
+ Clang->getPreprocessor(), "-", nullptr, /*isysroot=*/"", Buffer,
+ Extensions);
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
Clang->setASTConsumer(std::move(consumer));
@@ -169,7 +172,7 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
if (firstInclude) {
Preprocessor &PP = Clang->getPreprocessor();
- PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
+ PP.getBuiltinInfo().initializeBuiltins(PP.getIdentifierTable(),
PP.getLangOpts());
} else {
assert(!SerialBufs.empty());
@@ -246,11 +249,10 @@ ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) {
return getFinalReader().FindExternalVisibleDeclsByName(DC, Name);
}
-ExternalLoadResult
-ChainedIncludesSource::FindExternalLexicalDecls(const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
- SmallVectorImpl<Decl*> &Result) {
- return getFinalReader().FindExternalLexicalDecls(DC, isKindWeWant, Result);
+void ChainedIncludesSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {
+ return getFinalReader().FindExternalLexicalDecls(DC, IsKindWeWant, Result);
}
void ChainedIncludesSource::CompleteType(TagDecl *Tag) {
return getFinalReader().CompleteType(Tag);
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index c33b150e3047..3edcf5d654b9 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -78,9 +78,8 @@ void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
Diagnostics = Value;
}
-void CompilerInstance::setTarget(TargetInfo *Value) {
- Target = Value;
-}
+void CompilerInstance::setTarget(TargetInfo *Value) { Target = Value; }
+void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
@@ -96,7 +95,12 @@ void CompilerInstance::setSourceManager(SourceManager *Value) {
void CompilerInstance::setPreprocessor(Preprocessor *Value) { PP = Value; }
-void CompilerInstance::setASTContext(ASTContext *Value) { Context = Value; }
+void CompilerInstance::setASTContext(ASTContext *Value) {
+ Context = Value;
+
+ if (Context && Consumer)
+ getASTConsumer().Initialize(getASTContext());
+}
void CompilerInstance::setSema(Sema *S) {
TheSema.reset(S);
@@ -104,6 +108,9 @@ void CompilerInstance::setSema(Sema *S) {
void CompilerInstance::setASTConsumer(std::unique_ptr<ASTConsumer> Value) {
Consumer = std::move(Value);
+
+ if (Context && Consumer)
+ getASTConsumer().Initialize(getASTContext());
}
void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
@@ -148,7 +155,6 @@ static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
<< DiagOpts->DiagnosticLogFile << EC.message();
} else {
FileOS->SetUnbuffered();
- FileOS->SetUseAtomicWrites(true);
OS = FileOS.get();
StreamOwner = std::move(FileOS);
}
@@ -304,7 +310,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
PP = new Preprocessor(&getPreprocessorOpts(), getDiagnostics(), getLangOpts(),
getSourceManager(), *HeaderInfo, *this, PTHMgr,
/*OwnsHeaderSearch=*/true, TUKind);
- PP->Initialize(getTarget());
+ PP->Initialize(getTarget(), getAuxTarget());
// Note that this is different than passing PTHMgr to Preprocessor's ctor.
// That argument is used as the IdentifierInfoLookup argument to
@@ -331,7 +337,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
PP->setPreprocessedOutput(getPreprocessorOutputOpts().ShowCPP);
- if (PP->getLangOpts().Modules)
+ if (PP->getLangOpts().Modules && PP->getLangOpts().ImplicitModules)
PP->getHeaderSearchInfo().setModuleCachePath(getSpecificModuleCachePath());
// Handle generating dependencies, if requested.
@@ -354,17 +360,19 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
// Handle generating header include information, if requested.
if (DepOpts.ShowHeaderIncludes)
- AttachHeaderIncludeGen(*PP);
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps);
if (!DepOpts.HeaderIncludeOutputFile.empty()) {
StringRef OutputPath = DepOpts.HeaderIncludeOutputFile;
if (OutputPath == "-")
OutputPath = "";
- AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/true, OutputPath,
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps,
+ /*ShowAllHeaders=*/true, OutputPath,
/*ShowDepth=*/false);
}
if (DepOpts.PrintShowIncludes) {
- AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/false, /*OutputPath=*/"",
+ AttachHeaderIncludeGen(*PP, DepOpts.ExtraDeps,
+ /*ShowAllHeaders=*/false, /*OutputPath=*/"",
/*ShowDepth=*/true, /*MSStyle=*/true);
}
}
@@ -372,9 +380,8 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
std::string CompilerInstance::getSpecificModuleCachePath() {
// Set up the module path, including the hash for the
// module-creation options.
- SmallString<256> SpecificModuleCache(
- getHeaderSearchOpts().ModuleCachePath);
- if (!getHeaderSearchOpts().DisableModuleHash)
+ SmallString<256> SpecificModuleCache(getHeaderSearchOpts().ModuleCachePath);
+ if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache,
getInvocation().getModuleHash());
return SpecificModuleCache.str();
@@ -384,10 +391,11 @@ std::string CompilerInstance::getSpecificModuleCachePath() {
void CompilerInstance::createASTContext() {
Preprocessor &PP = getPreprocessor();
- Context = new ASTContext(getLangOpts(), PP.getSourceManager(),
- PP.getIdentifierTable(), PP.getSelectorTable(),
- PP.getBuiltinInfo());
- Context->InitBuiltinTypes(getTarget());
+ auto *Context = new ASTContext(getLangOpts(), PP.getSourceManager(),
+ PP.getIdentifierTable(), PP.getSelectorTable(),
+ PP.getBuiltinInfo());
+ Context->InitBuiltinTypes(getTarget(), getAuxTarget());
+ setASTContext(Context);
}
// ExternalASTSource
@@ -399,7 +407,9 @@ void CompilerInstance::createPCHExternalASTSource(
ModuleManager = createPCHExternalASTSource(
Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation,
AllowPCHWithCompilerErrors, getPreprocessor(), getASTContext(),
- getPCHContainerReader(), DeserializationListener,
+ getPCHContainerReader(),
+ getFrontendOpts().ModuleFileExtensions,
+ DeserializationListener,
OwnDeserializationListener, Preamble,
getFrontendOpts().UseGlobalModuleIndex);
}
@@ -408,15 +418,16 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
StringRef Path, StringRef Sysroot, bool DisablePCHValidation,
bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context,
const PCHContainerReader &PCHContainerRdr,
+ ArrayRef<IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
void *DeserializationListener, bool OwnDeserializationListener,
bool Preamble, bool UseGlobalModuleIndex) {
HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
IntrusiveRefCntPtr<ASTReader> Reader(new ASTReader(
- PP, Context, PCHContainerRdr, Sysroot.empty() ? "" : Sysroot.data(),
- DisablePCHValidation, AllowPCHWithCompilerErrors,
- /*AllowConfigurationMismatch*/ false, HSOpts.ModulesValidateSystemHeaders,
- UseGlobalModuleIndex));
+ PP, Context, PCHContainerRdr, Extensions,
+ Sysroot.empty() ? "" : Sysroot.data(), DisablePCHValidation,
+ AllowPCHWithCompilerErrors, /*AllowConfigurationMismatch*/ false,
+ HSOpts.ModulesValidateSystemHeaders, UseGlobalModuleIndex));
// We need the external source to be set up before we read the AST, because
// eagerly-deserialized declarations may use it.
@@ -631,8 +642,10 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
llvm::sys::fs::status(OutputPath, Status);
if (llvm::sys::fs::exists(Status)) {
// Fail early if we can't write to the final destination.
- if (!llvm::sys::fs::can_write(OutputPath))
+ if (!llvm::sys::fs::can_write(OutputPath)) {
+ Error = make_error_code(llvm::errc::operation_not_permitted);
return nullptr;
+ }
// Don't use a temporary if the output is a special file. This handles
// things like '-o /dev/null'
@@ -715,7 +728,7 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
if (Input.isBuffer()) {
SourceMgr.setMainFileID(SourceMgr.createFileID(
std::unique_ptr<llvm::MemoryBuffer>(Input.getBuffer()), Kind));
- assert(!SourceMgr.getMainFileID().isInvalid() &&
+ assert(SourceMgr.getMainFileID().isValid() &&
"Couldn't establish MainFileID!");
return true;
}
@@ -766,7 +779,7 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
SourceMgr.overrideFileContents(File, std::move(SB));
}
- assert(!SourceMgr.getMainFileID().isInvalid() &&
+ assert(SourceMgr.getMainFileID().isValid() &&
"Couldn't establish MainFileID!");
return true;
}
@@ -788,6 +801,13 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (!hasTarget())
return false;
+ // Create TargetInfo for the other side of CUDA compilation.
+ if (getLangOpts().CUDA && !getFrontendOpts().AuxTriple.empty()) {
+ std::shared_ptr<TargetOptions> TO(new TargetOptions);
+ TO->Triple = getFrontendOpts().AuxTriple;
+ setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
+ }
+
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
@@ -810,13 +830,13 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (getFrontendOpts().ShowStats)
llvm::EnableStatistics();
- for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
+ for (const FrontendInputFile &FIF : getFrontendOpts().Inputs) {
// Reset the ID tables if we are reusing the SourceManager and parsing
// regular files.
if (hasSourceManager() && !Act.isModelParsingAction())
getSourceManager().clearIDTables();
- if (Act.BeginSourceFile(*this, getFrontendOpts().Inputs[i])) {
+ if (Act.BeginSourceFile(*this, FIF)) {
Act.Execute();
Act.EndSourceFile();
}
@@ -912,6 +932,7 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
FrontendOpts.OutputFile = ModuleFileName.str();
FrontendOpts.DisableFree = false;
FrontendOpts.GenerateGlobalModuleIndex = false;
+ FrontendOpts.BuildingImplicitModule = true;
FrontendOpts.Inputs.clear();
InputKind IK = getSourceInputKindFromOptions(*Invocation->getLangOpts());
@@ -945,8 +966,10 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
FullSourceLoc(ImportLoc, ImportingInstance.getSourceManager()));
// If we're collecting module dependencies, we need to share a collector
- // between all of the module CompilerInstances.
+ // between all of the module CompilerInstances. Other than that, we don't
+ // want to produce any dependency output from the module build.
Instance.setModuleDepCollector(ImportingInstance.getModuleDepCollector());
+ Invocation->getDependencyOutputOpts() = DependencyOutputOptions();
// Get or create the module map that we'll use to build this module.
std::string InferredModuleMapContent;
@@ -1151,6 +1174,7 @@ static void pruneModuleCache(const HeaderSearchOptions &HSOpts) {
struct stat StatBuf;
llvm::SmallString<128> TimestampFile;
TimestampFile = HSOpts.ModuleCachePath;
+ assert(!TimestampFile.empty());
llvm::sys::path::append(TimestampFile, "modules.timestamp");
// Try to stat() the timestamp file.
@@ -1229,8 +1253,8 @@ void CompilerInstance::createModuleManager() {
// If we're implicitly building modules but not currently recursively
// building a module, check whether we need to prune the module cache.
- if (getLangOpts().ImplicitModules &&
- getSourceManager().getModuleBuildStack().empty() &&
+ if (getSourceManager().getModuleBuildStack().empty() &&
+ !getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty() &&
getHeaderSearchOpts().ModuleCachePruneInterval > 0 &&
getHeaderSearchOpts().ModuleCachePruneAfter > 0) {
pruneModuleCache(getHeaderSearchOpts());
@@ -1244,7 +1268,8 @@ void CompilerInstance::createModuleManager() {
ReadTimer = llvm::make_unique<llvm::Timer>("Reading modules",
*FrontendTimerGroup);
ModuleManager = new ASTReader(
- getPreprocessor(), *Context, getPCHContainerReader(),
+ getPreprocessor(), getASTContext(), getPCHContainerReader(),
+ getFrontendOpts().ModuleFileExtensions,
Sysroot.empty() ? "" : Sysroot.c_str(), PPOpts.DisablePCHValidation,
/*AllowASTWithCompilerErrors=*/false,
/*AllowConfigurationMismatch=*/false,
@@ -1262,6 +1287,13 @@ void CompilerInstance::createModuleManager() {
ModuleManager->InitializeSema(getSema());
if (hasASTConsumer())
ModuleManager->StartTranslationUnit(&getASTConsumer());
+
+ if (TheDependencyFileGenerator)
+ TheDependencyFileGenerator->AttachToASTReader(*ModuleManager);
+ if (ModuleDepCollector)
+ ModuleDepCollector->attachToASTReader(*ModuleManager);
+ for (auto &Listener : DependencyCollectors)
+ Listener->attachToASTReader(*ModuleManager);
}
}
@@ -1276,87 +1308,68 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
// the files we were handed.
struct ReadModuleNames : ASTReaderListener {
CompilerInstance &CI;
- std::vector<StringRef> ModuleFileStack;
- std::vector<StringRef> ModuleNameStack;
- bool Failed;
- bool TopFileIsModule;
-
- ReadModuleNames(CompilerInstance &CI)
- : CI(CI), Failed(false), TopFileIsModule(false) {}
+ llvm::SmallVector<IdentifierInfo*, 8> LoadedModules;
- bool needsImportVisitation() const override { return true; }
+ ReadModuleNames(CompilerInstance &CI) : CI(CI) {}
- void visitImport(StringRef FileName) override {
- if (!CI.ExplicitlyLoadedModuleFiles.insert(FileName).second) {
- if (ModuleFileStack.size() == 0)
- TopFileIsModule = true;
- return;
- }
+ void ReadModuleName(StringRef ModuleName) override {
+ LoadedModules.push_back(
+ CI.getPreprocessor().getIdentifierInfo(ModuleName));
+ }
- ModuleFileStack.push_back(FileName);
- ModuleNameStack.push_back(StringRef());
- if (ASTReader::readASTFileControlBlock(FileName, CI.getFileManager(),
- CI.getPCHContainerReader(),
- *this)) {
- CI.getDiagnostics().Report(
- SourceLocation(), CI.getFileManager().getBufferForFile(FileName)
- ? diag::err_module_file_invalid
- : diag::err_module_file_not_found)
- << FileName;
- for (int I = ModuleFileStack.size() - 2; I >= 0; --I)
- CI.getDiagnostics().Report(SourceLocation(),
- diag::note_module_file_imported_by)
- << ModuleFileStack[I]
- << !ModuleNameStack[I].empty() << ModuleNameStack[I];
- Failed = true;
+ void registerAll() {
+ for (auto *II : LoadedModules) {
+ CI.KnownModules[II] = CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .findModule(II->getName());
}
- ModuleNameStack.pop_back();
- ModuleFileStack.pop_back();
+ LoadedModules.clear();
}
- void ReadModuleName(StringRef ModuleName) override {
- if (ModuleFileStack.size() == 1)
- TopFileIsModule = true;
- ModuleNameStack.back() = ModuleName;
-
- auto &ModuleFile = CI.ModuleFileOverrides[ModuleName];
- if (!ModuleFile.empty() &&
- CI.getFileManager().getFile(ModuleFile) !=
- CI.getFileManager().getFile(ModuleFileStack.back()))
- CI.getDiagnostics().Report(SourceLocation(),
- diag::err_conflicting_module_files)
- << ModuleName << ModuleFile << ModuleFileStack.back();
- ModuleFile = ModuleFileStack.back();
+ void markAllUnavailable() {
+ for (auto *II : LoadedModules) {
+ if (Module *M = CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .findModule(II->getName()))
+ M->HasIncompatibleModuleFile = true;
+ }
+ LoadedModules.clear();
}
- } RMN(*this);
+ };
// If we don't already have an ASTReader, create one now.
if (!ModuleManager)
createModuleManager();
- // Tell the module manager about this module file.
- if (getModuleManager()->getModuleManager().addKnownModuleFile(FileName)) {
- getDiagnostics().Report(SourceLocation(), diag::err_module_file_not_found)
- << FileName;
- return false;
- }
+ auto Listener = llvm::make_unique<ReadModuleNames>(*this);
+ auto &ListenerRef = *Listener;
+ ASTReader::ListenerScope ReadModuleNamesListener(*ModuleManager,
+ std::move(Listener));
- // Build our mapping of module names to module files from this file
- // and its imports.
- RMN.visitImport(FileName);
+ // Try to load the module file.
+ switch (ModuleManager->ReadAST(FileName, serialization::MK_ExplicitModule,
+ SourceLocation(),
+ ASTReader::ARR_ConfigurationMismatch)) {
+ case ASTReader::Success:
+ // We successfully loaded the module file; remember the set of provided
+ // modules so that we don't try to load implicit modules for them.
+ ListenerRef.registerAll();
+ return true;
- if (RMN.Failed)
- return false;
+ case ASTReader::ConfigurationMismatch:
+ // Ignore unusable module files.
+ getDiagnostics().Report(SourceLocation(), diag::warn_module_config_mismatch)
+ << FileName;
+ // All modules provided by any files we tried and failed to load are now
+ // unavailable; includes of those modules should now be handled textually.
+ ListenerRef.markAllUnavailable();
+ return true;
- // If we never found a module name for the top file, then it's not a module,
- // it's a PCH or preamble or something.
- if (!RMN.TopFileIsModule) {
- getDiagnostics().Report(SourceLocation(), diag::err_module_file_not_module)
- << FileName;
+ default:
return false;
}
-
- return true;
}
ModuleLoadResult
@@ -1371,7 +1384,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// If we've already handled this import, just return the cached result.
// This one-element cache is important to eliminate redundant diagnostics
// when both the preprocessor and parser see the same import declaration.
- if (!ImportLoc.isInvalid() && LastModuleImportLoc == ImportLoc) {
+ if (ImportLoc.isValid() && LastModuleImportLoc == ImportLoc) {
// Make the named module visible.
if (LastModuleImportResult && ModuleName != getLangOpts().CurrentModule &&
ModuleName != getLangOpts().ImplementationOfModule)
@@ -1404,56 +1417,40 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
return ModuleLoadResult();
}
- auto Override = ModuleFileOverrides.find(ModuleName);
- bool Explicit = Override != ModuleFileOverrides.end();
- if (!Explicit && !getLangOpts().ImplicitModules) {
+ std::string ModuleFileName =
+ PP->getHeaderSearchInfo().getModuleFileName(Module);
+ if (ModuleFileName.empty()) {
+ if (Module->HasIncompatibleModuleFile) {
+ // We tried and failed to load a module file for this module. Fall
+ // back to textual inclusion for its headers.
+ return ModuleLoadResult(nullptr, /*missingExpected*/true);
+ }
+
getDiagnostics().Report(ModuleNameLoc, diag::err_module_build_disabled)
<< ModuleName;
ModuleBuildFailed = true;
return ModuleLoadResult();
}
- std::string ModuleFileName =
- Explicit ? Override->second
- : PP->getHeaderSearchInfo().getModuleFileName(Module);
-
// If we don't already have an ASTReader, create one now.
if (!ModuleManager)
createModuleManager();
- if (TheDependencyFileGenerator)
- TheDependencyFileGenerator->AttachToASTReader(*ModuleManager);
-
- if (ModuleDepCollector)
- ModuleDepCollector->attachToASTReader(*ModuleManager);
-
- for (auto &Listener : DependencyCollectors)
- Listener->attachToASTReader(*ModuleManager);
-
llvm::Timer Timer;
if (FrontendTimerGroup)
Timer.init("Loading " + ModuleFileName, *FrontendTimerGroup);
llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
// Try to load the module file.
- unsigned ARRFlags =
- Explicit ? 0 : ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
+ unsigned ARRFlags = ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
switch (ModuleManager->ReadAST(ModuleFileName,
- Explicit ? serialization::MK_ExplicitModule
- : serialization::MK_ImplicitModule,
+ serialization::MK_ImplicitModule,
ImportLoc, ARRFlags)) {
case ASTReader::Success:
break;
case ASTReader::OutOfDate:
case ASTReader::Missing: {
- if (Explicit) {
- // ReadAST has already complained for us.
- ModuleLoader::HadFatalFailure = true;
- KnownModules[Path[0].first] = nullptr;
- return ModuleLoadResult();
- }
-
// The module file is missing or out-of-date. Build it.
assert(Module && "missing module file");
// Check whether there is a cycle in the module graph.
@@ -1508,7 +1505,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
case ASTReader::ConfigurationMismatch:
case ASTReader::HadErrors:
ModuleLoader::HadFatalFailure = true;
- // FIXME: The ASTReader will already have complained, but can we showhorn
+ // FIXME: The ASTReader will already have complained, but can we shoehorn
// that diagnostic information into a more useful form?
KnownModules[Path[0].first] = nullptr;
return ModuleLoadResult();
@@ -1652,6 +1649,8 @@ void CompilerInstance::makeModuleVisible(Module *Mod,
GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
SourceLocation TriggerLoc) {
+ if (getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty())
+ return nullptr;
if (!ModuleManager)
createModuleManager();
// Can't do anything if we don't have the module manager.
@@ -1685,11 +1684,10 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
if (!Entry) {
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
Path.push_back(std::make_pair(
- getPreprocessor().getIdentifierInfo(TheModule->Name), TriggerLoc));
+ getPreprocessor().getIdentifierInfo(TheModule->Name), TriggerLoc));
std::reverse(Path.begin(), Path.end());
- // Load a module as hidden. This also adds it to the global index.
- loadModule(TheModule->DefinitionLoc, Path,
- Module::Hidden, false);
+ // Load a module as hidden. This also adds it to the global index.
+ loadModule(TheModule->DefinitionLoc, Path, Module::Hidden, false);
RecreateIndex = true;
}
}
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index fbeba09e1cf1..d3870424b6bb 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -1,4 +1,4 @@
-//===--- CompilerInvocation.cpp -------------------------------------------===//
+//===--- CompilerInvocation.cpp ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "TestModuleFileExtension.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
@@ -19,12 +20,14 @@
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ModuleFileExtension.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Linker/Linker.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
@@ -35,6 +38,7 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Target/TargetOptions.h"
#include <atomic>
#include <memory>
#include <sys/stat.h>
@@ -53,7 +57,7 @@ CompilerInvocationBase::CompilerInvocationBase()
CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
: RefCountedBase<CompilerInvocation>(),
- LangOpts(new LangOptions(*X.getLangOpts())),
+ LangOpts(new LangOptions(*X.getLangOpts())),
TargetOpts(new TargetOptions(X.getTargetOpts())),
DiagnosticOpts(new DiagnosticOptions(X.getDiagnosticOpts())),
HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
@@ -361,6 +365,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
const TargetOptions &TargetOpts) {
using namespace options;
bool Success = true;
+ llvm::Triple Triple = llvm::Triple(TargetOpts.Triple);
unsigned OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
// TODO: This could be done in Driver
@@ -393,39 +398,36 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
}
- if (Args.hasArg(OPT_gline_tables_only)) {
- Opts.setDebugInfo(CodeGenOptions::DebugLineTablesOnly);
- } else if (Args.hasArg(OPT_g_Flag) || Args.hasArg(OPT_gdwarf_2) ||
- Args.hasArg(OPT_gdwarf_3) || Args.hasArg(OPT_gdwarf_4)) {
- bool Default = false;
- // Until dtrace (via CTF) and LLDB can deal with distributed debug info,
- // Darwin and FreeBSD default to standalone/full debug info.
- if (llvm::Triple(TargetOpts.Triple).isOSDarwin() ||
- llvm::Triple(TargetOpts.Triple).isOSFreeBSD())
- Default = true;
-
- if (Args.hasFlag(OPT_fstandalone_debug, OPT_fno_standalone_debug, Default))
- Opts.setDebugInfo(CodeGenOptions::FullDebugInfo);
- else
- Opts.setDebugInfo(CodeGenOptions::LimitedDebugInfo);
+ if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
+ Opts.setDebugInfo(
+ llvm::StringSwitch<CodeGenOptions::DebugInfoKind>(A->getValue())
+ .Case("line-tables-only", CodeGenOptions::DebugLineTablesOnly)
+ .Case("limited", CodeGenOptions::LimitedDebugInfo)
+ .Case("standalone", CodeGenOptions::FullDebugInfo));
+ }
+ if (Arg *A = Args.getLastArg(OPT_debugger_tuning_EQ)) {
+ Opts.setDebuggerTuning(
+ llvm::StringSwitch<CodeGenOptions::DebuggerKind>(A->getValue())
+ .Case("gdb", CodeGenOptions::DebuggerKindGDB)
+ .Case("lldb", CodeGenOptions::DebuggerKindLLDB)
+ .Case("sce", CodeGenOptions::DebuggerKindSCE));
}
+ Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
+ Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
- if (Args.hasArg(OPT_gdwarf_2))
- Opts.DwarfVersion = 2;
- else if (Args.hasArg(OPT_gdwarf_3))
- Opts.DwarfVersion = 3;
- else if (Args.hasArg(OPT_gdwarf_4))
- Opts.DwarfVersion = 4;
- else if (Opts.getDebugInfo() != CodeGenOptions::NoDebugInfo)
- // Default Dwarf version is 4 if we are generating debug information.
- Opts.DwarfVersion = 4;
+ Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
+ Opts.DebugExplicitImport = Triple.isPS4CPU();
+
+ for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
+ Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
if (const Arg *A =
Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
Opts.EmitLLVMUseLists = A->getOption().getID() == OPT_emit_llvm_uselists;
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
+ Opts.DisableLLVMPasses = Args.hasArg(OPT_disable_llvm_passes);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
@@ -451,7 +453,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fprofile_instr_generate_EQ);
Opts.InstrProfileOutput = Args.getLastArgValue(OPT_fprofile_instr_generate_EQ);
Opts.InstrProfileInput = Args.getLastArgValue(OPT_fprofile_instr_use_EQ);
- Opts.CoverageMapping = Args.hasArg(OPT_fcoverage_mapping);
+ Opts.CoverageMapping =
+ Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
@@ -459,10 +462,25 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.CodeModel = getCodeModel(Args, Diags);
Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
- Opts.DisableFPElim = Args.hasArg(OPT_mdisable_fp_elim);
+ Opts.DisableFPElim =
+ (Args.hasArg(OPT_mdisable_fp_elim) || Args.hasArg(OPT_pg));
Opts.DisableFree = Args.hasArg(OPT_disable_free);
Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
+ if (Arg *A = Args.getLastArg(OPT_meabi)) {
+ StringRef Value = A->getValue();
+ llvm::EABI EABIVersion = llvm::StringSwitch<llvm::EABI>(Value)
+ .Case("default", llvm::EABI::Default)
+ .Case("4", llvm::EABI::EABI4)
+ .Case("5", llvm::EABI::EABI5)
+ .Case("gnu", llvm::EABI::GNU)
+ .Default(llvm::EABI::Unknown);
+ if (EABIVersion == llvm::EABI::Unknown)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << Value;
+ else
+ Opts.EABIVersion = Value;
+ }
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
@@ -481,11 +499,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
+ Opts.IncrementalLinkerCompatible =
+ Args.hasArg(OPT_mincremental_linker_compatible);
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
+ Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
@@ -508,7 +529,15 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
- Opts.PrepareForLTO = Args.hasArg(OPT_flto);
+ Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
+ const Arg *A = Args.getLastArg(OPT_flto, OPT_flto_EQ);
+ Opts.EmitFunctionSummary = A && A->containsValue("thin");
+ if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
+ if (IK != IK_LLVM_IR)
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-x ir";
+ Opts.ThinLTOIndexFile = Args.getLastArgValue(OPT_fthinlto_index_EQ);
+ }
Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
@@ -546,7 +575,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
Opts.CompressDebugSections = Args.hasArg(OPT_compress_debug_sections);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
- Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
+ for (auto A : Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_cuda_bitcode)) {
+ unsigned LinkFlags = llvm::Linker::Flags::None;
+ if (A->getOption().matches(OPT_mlink_cuda_bitcode))
+ LinkFlags = llvm::Linker::Flags::LinkOnlyNeeded |
+ llvm::Linker::Flags::InternalizeLinkedSymbols;
+ Opts.LinkBitcodeFiles.push_back(std::make_pair(LinkFlags, A->getValue()));
+ }
Opts.SanitizeCoverageType =
getLastArgIntValue(Args, OPT_fsanitize_coverage_type, 0, Diags);
Opts.SanitizeCoverageIndirectCalls =
@@ -559,6 +594,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeMemoryUseAfterDtor =
Args.hasArg(OPT_fsanitize_memory_use_after_dtor);
+ Opts.SanitizeCfiCrossDso = Args.hasArg(OPT_fsanitize_cfi_cross_dso);
Opts.SSPBufferSize =
getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
@@ -592,6 +628,9 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
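+ // Emulated TLS lowers thread_local accesses through the __emutls_* runtime
+ // helpers instead of native TLS relocations.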
+ Opts.EmulatedTLS =
+ Args.hasFlag(OPT_femulated_tls, OPT_fno_emulated_tls, false);
+
if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
StringRef Name = A->getValue();
unsigned Model = llvm::StringSwitch<unsigned>(Name)
@@ -695,6 +734,13 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
Args.getLastArgValue(OPT_module_dependency_dir);
if (Args.hasArg(OPT_MV))
Opts.OutputFormat = DependencyOutputFormat::NMake;
+ // Add sanitizer blacklists as extra dependencies.
+ // They won't be discovered by the regular preprocessor, so we let
+ // make / ninja know about these implicit dependencies.
+ Opts.ExtraDeps = Args.getAllArgValues(OPT_fdepfile_entry);
+ auto ModuleFiles = Args.getAllArgValues(OPT_fmodule_file);
+ Opts.ExtraDeps.insert(Opts.ExtraDeps.end(), ModuleFiles.begin(),
+ ModuleFiles.end());
}
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
@@ -822,6 +868,30 @@ static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
}
+/// Parse the argument to the -ftest-module-file-extension
+/// command-line argument.
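+/// The argument is expected to have the form
+/// "blockname:major:minor:hashed:user info".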
+///
+/// \returns true on error, false on success.
+static bool parseTestModuleFileExtensionArg(StringRef Arg,
+ std::string &BlockName,
+ unsigned &MajorVersion,
+ unsigned &MinorVersion,
+ bool &Hashed,
+ std::string &UserInfo) {
+ SmallVector<StringRef, 5> Args;
+ Arg.split(Args, ':', 5);
+ if (Args.size() < 5)
+ return true;
+
+ BlockName = Args[0];
+ if (Args[1].getAsInteger(10, MajorVersion)) return true;
+ if (Args[2].getAsInteger(10, MinorVersion)) return true;
+ if (Args[3].getAsInteger(2, Hashed)) return true;
+ if (Args.size() > 4)
+ UserInfo = Args[4];
+ return false;
+}
+
static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
using namespace options;
@@ -914,6 +984,26 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (A->getValue(0) == Opts.AddPluginActions[i])
Opts.AddPluginArgs[i].emplace_back(A->getValue(1));
+ for (const std::string &Arg :
+ Args.getAllArgValues(OPT_ftest_module_file_extension_EQ)) {
+ std::string BlockName;
+ unsigned MajorVersion;
+ unsigned MinorVersion;
+ bool Hashed;
+ std::string UserInfo;
+ if (parseTestModuleFileExtensionArg(Arg, BlockName, MajorVersion,
+ MinorVersion, Hashed, UserInfo)) {
+ Diags.Report(diag::err_test_module_file_extension_format) << Arg;
+
+ continue;
+ }
+
+ // Add the testing module file extension.
+ Opts.ModuleFileExtensions.push_back(
+ new TestModuleFileExtension(BlockName, MajorVersion, MinorVersion,
+ Hashed, UserInfo));
+ }
+
if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
Opts.CodeCompletionAt =
ParsedSourceLocation::FromString(A->getValue());
@@ -943,6 +1033,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
Opts.ModuleFiles = Args.getAllArgValues(OPT_fmodule_file);
+ Opts.ModulesEmbedFiles = Args.getAllArgValues(OPT_fmodules_embed_file_EQ);
+ Opts.ModulesEmbedAllFiles = Args.hasArg(OPT_fmodules_embed_all_files);
Opts.CodeCompleteOpts.IncludeMacros
= Args.hasArg(OPT_code_completion_macros);
@@ -955,6 +1047,9 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.OverrideRecordLayoutsFile
= Args.getLastArgValue(OPT_foverride_record_layout_EQ);
+ Opts.AuxTriple =
+ llvm::Triple::normalize(Args.getLastArgValue(OPT_aux_triple));
+
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
OPT_arcmt_modify,
OPT_arcmt_migrate)) {
@@ -1076,13 +1171,11 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
SmallString<128> P(Dir);
- if (ClangResourceDir != "") {
+ if (ClangResourceDir != "")
llvm::sys::path::append(P, ClangResourceDir);
- } else {
- StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
- llvm::sys::path::append(P, "..", Twine("lib") + ClangLibdirSuffix, "clang",
- CLANG_VERSION_STRING);
- }
+ else
+ llvm::sys::path::append(P, "..", Twine("lib") + CLANG_LIBDIR_SUFFIX,
+ "clang", CLANG_VERSION_STRING);
return P.str();
}
@@ -1291,7 +1384,7 @@ static Visibility parseVisibility(Arg *arg, ArgList &args,
StringRef value = arg->getValue();
if (value == "default") {
return DefaultVisibility;
- } else if (value == "hidden") {
+ } else if (value == "hidden" || value == "internal") {
return HiddenVisibility;
} else if (value == "protected") {
// FIXME: diagnose if target does not support protected visibility
@@ -1364,7 +1457,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
.Case("CL1.2", LangStandard::lang_opencl12)
.Case("CL2.0", LangStandard::lang_opencl20)
.Default(LangStandard::lang_unspecified);
-
+
if (OpenCLLangStd == LangStandard::lang_unspecified) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -1372,7 +1465,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
else
LangStd = OpenCLLangStd;
}
-
+
CompilerInvocation::setLangDefaults(Opts, IK, LangStd);
// We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
@@ -1395,6 +1488,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fcuda_disable_target_call_checks))
Opts.CUDADisableTargetCallChecks = 1;
+ if (Args.hasArg(OPT_fcuda_target_overloads))
+ Opts.CUDATargetOverloads = 1;
+
if (Opts.ObjC1) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
@@ -1410,22 +1506,41 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ObjCAutoRefCount = 1;
if (!Opts.ObjCRuntime.allowsARC())
Diags.Report(diag::err_arc_unsupported_on_runtime);
+ }
- // Only set ObjCARCWeak if ARC is enabled.
- if (Args.hasArg(OPT_fobjc_runtime_has_weak))
- Opts.ObjCARCWeak = 1;
- else
- Opts.ObjCARCWeak = Opts.ObjCRuntime.allowsWeak();
+ // ObjCWeakRuntime tracks whether the runtime supports __weak, not
+ // whether the feature is actually enabled. This is predominantly
+ // determined by -fobjc-runtime, but we allow it to be overridden
+ // from the command line for testing purposes.
+ if (Args.hasArg(OPT_fobjc_runtime_has_weak))
+ Opts.ObjCWeakRuntime = 1;
+ else
+ Opts.ObjCWeakRuntime = Opts.ObjCRuntime.allowsWeak();
+
+ // ObjCWeak determines whether __weak is actually enabled.
+ // Note that we allow -fno-objc-weak to disable this even in ARC mode.
+ if (auto weakArg = Args.getLastArg(OPT_fobjc_weak, OPT_fno_objc_weak)) {
+ if (!weakArg->getOption().matches(OPT_fobjc_weak)) {
+ assert(!Opts.ObjCWeak);
+ } else if (Opts.getGC() != LangOptions::NonGC) {
+ Diags.Report(diag::err_objc_weak_with_gc);
+ } else if (!Opts.ObjCWeakRuntime) {
+ Diags.Report(diag::err_objc_weak_unsupported);
+ } else {
+ Opts.ObjCWeak = 1;
+ }
+ } else if (Opts.ObjCAutoRefCount) {
+ Opts.ObjCWeak = Opts.ObjCWeakRuntime;
}
if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
Opts.ObjCInferRelatedResultType = 0;
-
+
if (Args.hasArg(OPT_fobjc_subscripting_legacy_runtime))
Opts.ObjCSubscriptingLegacyRuntime =
(Opts.ObjCRuntime.getKind() == ObjCRuntime::FragileMacOSX);
}
-
+
if (Args.hasArg(OPT_fgnu89_inline)) {
if (Opts.CPlusPlus)
Diags.Report(diag::err_drv_argument_not_allowed_with) << "-fgnu89-inline"
@@ -1525,14 +1640,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.RTTIData = Opts.RTTI && !Args.hasArg(OPT_fno_rtti_data);
Opts.Blocks = Args.hasArg(OPT_fblocks);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
+ Opts.Coroutines = Args.hasArg(OPT_fcoroutines);
Opts.Modules = Args.hasArg(OPT_fmodules);
Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse);
Opts.ModulesDeclUse =
Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
Opts.ModulesLocalVisibility =
Args.hasArg(OPT_fmodules_local_submodule_visibility);
- Opts.ModulesHideInternalLinkage =
- !Args.hasArg(OPT_fno_modules_hide_internal_linkage);
Opts.ModulesSearchAll = Opts.Modules &&
!Args.hasArg(OPT_fno_modules_search_all) &&
Args.hasArg(OPT_fmodules_search_all);
@@ -1578,7 +1692,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.PIELevel = getLastArgIntValue(Args, OPT_pie_level, 0, Diags);
Opts.Static = Args.hasArg(OPT_static_define);
Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
- Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
+ Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
|| Args.hasArg(OPT_fdump_record_layouts);
Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
@@ -1603,6 +1717,17 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns);
Opts.GNUAsm = !Args.hasArg(OPT_fno_gnu_inline_asm);
+ // __declspec is enabled by default for the PS4 by the driver, and also
+ // enabled for Microsoft Extensions or Borland Extensions, here.
+ //
+ // FIXME: __declspec is also currently enabled for CUDA, but isn't really a
+ // CUDA extension. It is required, however, to support cuda_builtin_vars.h,
+ // which uses __declspec(property). Once that header has been rewritten in
+ // terms of something more generic, remove the Opts.CUDA term here.
+ Opts.DeclSpecKeyword =
+ Args.hasFlag(OPT_fdeclspec, OPT_fno_declspec,
+ (Opts.MicrosoftExt || Opts.Borland || Opts.CUDA));
+
if (!Opts.CurrentModule.empty() && !Opts.ImplementationOfModule.empty() &&
Opts.CurrentModule != Opts.ImplementationOfModule) {
Diags.Report(diag::err_conflicting_module_names)
@@ -1622,7 +1747,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
.Case("yes", LangOptions::ASMM_On)
.Default(255)) {
default:
- Diags.Report(diag::err_drv_invalid_value)
+ Diags.Report(diag::err_drv_invalid_value)
<< "-faddress-space-map-mangling=" << A->getValue();
break;
case LangOptions::ASMM_Target:
@@ -1681,6 +1806,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
Args.hasArg(OPT_cl_finite_math_only) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.RetainCommentsFromSystemHeaders =
Args.hasArg(OPT_fretain_comments_from_system_headers);
@@ -1891,14 +2019,23 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags,
Res.getTargetOpts());
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args);
- if (DashX != IK_AST && DashX != IK_LLVM_IR) {
+ if (DashX == IK_AST || DashX == IK_LLVM_IR) {
+ // The ObjCAutoRefCount and Sanitize LangOpts are used to set up the
+ // PassManager in BackendUtil.cpp. They need to be initialized no matter
+ // what the input type is.
+ if (Args.hasArg(OPT_fobjc_arc))
+ Res.getLangOpts()->ObjCAutoRefCount = 1;
+ parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
+ Diags, Res.getLangOpts()->Sanitize);
+ } else {
+ // Other LangOpts are only initialized when the input is not AST or LLVM IR.
ParseLangArgs(*Res.getLangOpts(), Args, DashX, Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
Res.getLangOpts()->ObjCExceptions = 1;
}
// FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
// PCH file and find the original header name. Remove the need to do that in
- // ParsePreprocessorArgs and remove the FileManager
+ // ParsePreprocessorArgs and remove the FileManager
// parameters from the function and the "FileManager.h" #include.
FileManager FileMgr(Res.getFileSystemOpts());
ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, FileMgr, Diags);
@@ -1913,14 +2050,14 @@ namespace {
SmallVector<uint64_t, 16> Data;
unsigned CurBit;
uint64_t CurValue;
-
+
public:
ModuleSignature() : CurBit(0), CurValue(0) { }
-
+
void add(uint64_t Value, unsigned Bits);
void add(StringRef Value);
void flush();
-
+
llvm::APInt getAsInteger() const;
};
}
@@ -1931,10 +2068,10 @@ void ModuleSignature::add(uint64_t Value, unsigned int NumBits) {
CurBit += NumBits;
return;
}
-
+
// Add the current word.
Data.push_back(CurValue);
-
+
if (CurBit)
CurValue = Value >> (64-CurBit);
else
@@ -1945,15 +2082,15 @@ void ModuleSignature::add(uint64_t Value, unsigned int NumBits) {
void ModuleSignature::flush() {
if (CurBit == 0)
return;
-
+
Data.push_back(CurValue);
CurBit = 0;
CurValue = 0;
}
void ModuleSignature::add(StringRef Value) {
- for (StringRef::iterator I = Value.begin(), IEnd = Value.end(); I != IEnd;++I)
- add(*I, 8);
+ for (auto &c : Value)
+ add(c, 8);
}
llvm::APInt ModuleSignature::getAsInteger() const {
@@ -1983,7 +2120,7 @@ std::string CompilerInvocation::getModuleHash() const {
for (StringRef Feature : LangOpts->ModuleFeatures)
code = hash_combine(code, Feature);
-
+
// Extend the signature with the target options.
code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
TargetOpts->ABI);
@@ -1995,7 +2132,7 @@ std::string CompilerInvocation::getModuleHash() const {
const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
- for (std::vector<std::pair<std::string, bool/*isUndef*/> >::const_iterator
+ for (std::vector<std::pair<std::string, bool/*isUndef*/>>::const_iterator
I = getPreprocessorOpts().Macros.begin(),
IEnd = getPreprocessorOpts().Macros.end();
I != IEnd; ++I) {
@@ -2021,6 +2158,12 @@ std::string CompilerInvocation::getModuleHash() const {
// Extend the signature with the user build path.
code = hash_combine(code, hsOpts.ModuleUserBuildPath);
+ // Extend the signature with the module file extensions.
+ const FrontendOptions &frontendOpts = getFrontendOpts();
+ for (const auto &ext : frontendOpts.ModuleFileExtensions) {
+ code = ext->hashExtension(code);
+ }
+
// Darwin-specific hack: if we have a sysroot, use the contents and
// modification time of
// $sysroot/System/Library/CoreServices/SystemVersion.plist
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 2afd23fcb9e8..301916422564 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -39,15 +39,13 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions);
}
- SmallVector<const char *, 16> Args;
- Args.push_back("<clang>"); // FIXME: Remove dummy argument.
- Args.insert(Args.end(), ArgList.begin(), ArgList.end());
+ SmallVector<const char *, 16> Args(ArgList.begin(), ArgList.end());
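+ // Args[0] is expected to be the program path, which is handed to the
+ // Driver below instead of a hard-coded "clang".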
// FIXME: Find a cleaner way to force the driver into restricted modes.
Args.push_back("-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
- driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
+ driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(),
*Diags);
// Don't check that inputs exist, they may have been remapped.
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index 0995ab4bf077..93d4a8034696 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -18,6 +18,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
@@ -50,15 +51,8 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
if (!FE)
return;
- StringRef Filename = FE->getName();
-
- // Remove leading "./" (or ".//" or "././" etc.)
- while (Filename.size() > 2 && Filename[0] == '.' &&
- llvm::sys::path::is_separator(Filename[1])) {
- Filename = Filename.substr(1);
- while (llvm::sys::path::is_separator(Filename[0]))
- Filename = Filename.substr(1);
- }
+ StringRef Filename =
+ llvm::sys::path::remove_leading_dotslash(FE->getName());
DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
FileType != SrcMgr::C_User,
@@ -82,6 +76,20 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
}
};
+struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
+ DependencyCollector &DepCollector;
+ DepCollectorMMCallbacks(DependencyCollector &DC) : DepCollector(DC) {}
+
+ void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
+ bool IsSystem) override {
+ StringRef Filename = Entry.getName();
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
+ /*IsSystem*/IsSystem,
+ /*IsModuleFile*/false,
+ /*IsMissing*/false);
+ }
+};
+
struct DepCollectorASTListener : public ASTReaderListener {
DependencyCollector &DepCollector;
DepCollectorASTListener(DependencyCollector &L) : DepCollector(L) { }
@@ -89,14 +97,15 @@ struct DepCollectorASTListener : public ASTReaderListener {
bool needsSystemInputFileVisitation() override {
return DepCollector.needSystemDependencies();
}
- void visitModuleFile(StringRef Filename) override {
+ void visitModuleFile(StringRef Filename,
+ serialization::ModuleKind Kind) override {
DepCollector.maybeAddDependency(Filename, /*FromModule*/true,
/*IsSystem*/false, /*IsModuleFile*/true,
/*IsMissing*/false);
}
bool visitInputFile(StringRef Filename, bool IsSystem,
- bool IsOverridden) override {
- if (IsOverridden)
+ bool IsOverridden, bool IsExplicitModule) override {
+ if (IsOverridden || IsExplicitModule)
return true;
DepCollector.maybeAddDependency(Filename, /*FromModule*/true, IsSystem,
@@ -132,6 +141,8 @@ DependencyCollector::~DependencyCollector() { }
void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
PP.addPPCallbacks(
llvm::make_unique<DepCollectorPPCallbacks>(*this, PP.getSourceManager()));
+ PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
+ llvm::make_unique<DepCollectorMMCallbacks>(*this));
}
void DependencyCollector::attachToASTReader(ASTReader &R) {
R.addListener(llvm::make_unique<DepCollectorASTListener>(*this));
@@ -165,7 +176,11 @@ public:
AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
SeenMissingHeader(false),
IncludeModuleFiles(Opts.IncludeModuleFiles),
- OutputFormat(Opts.OutputFormat) {}
+ OutputFormat(Opts.OutputFormat) {
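+ // Seed the dependency list with the extra entries collected from
+ // -fdepfile-entry and -fmodule-file.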
+ for (const auto &ExtraDep : Opts.ExtraDeps) {
+ AddFilename(ExtraDep);
+ }
+ }
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -185,6 +200,17 @@ public:
bool includeModuleFiles() const { return IncludeModuleFiles; }
};
+class DFGMMCallback : public ModuleMapCallbacks {
+ DFGImpl &Parent;
+public:
+ DFGMMCallback(DFGImpl &Parent) : Parent(Parent) {}
+ void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
+ bool IsSystem) override {
+ if (!IsSystem || Parent.includeSystemHeaders())
+ Parent.AddFilename(Entry.getName());
+ }
+};
+
class DFGASTReaderListener : public ASTReaderListener {
DFGImpl &Parent;
public:
@@ -194,9 +220,10 @@ public:
bool needsSystemInputFileVisitation() override {
return Parent.includeSystemHeaders();
}
- void visitModuleFile(StringRef Filename) override;
+ void visitModuleFile(StringRef Filename,
+ serialization::ModuleKind Kind) override;
bool visitInputFile(StringRef Filename, bool isSystem,
- bool isOverridden) override;
+ bool isOverridden, bool isExplicitModule) override;
};
}
@@ -217,6 +244,8 @@ DependencyFileGenerator *DependencyFileGenerator::CreateAndAttachToPreprocessor(
DFGImpl *Callback = new DFGImpl(&PP, Opts);
PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Callback));
+ PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
+ llvm::make_unique<DFGMMCallback>(*Callback));
return new DependencyFileGenerator(Callback);
}
@@ -259,15 +288,7 @@ void DFGImpl::FileChanged(SourceLocation Loc,
if (!FileMatchesDepCriteria(Filename.data(), FileType))
return;
- // Remove leading "./" (or ".//" or "././" etc.)
- while (Filename.size() > 2 && Filename[0] == '.' &&
- llvm::sys::path::is_separator(Filename[1])) {
- Filename = Filename.substr(1);
- while (llvm::sys::path::is_separator(Filename[0]))
- Filename = Filename.substr(1);
- }
-
- AddFilename(Filename);
+ AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
}
void DFGImpl::InclusionDirective(SourceLocation HashLoc,
@@ -438,16 +459,18 @@ void DFGImpl::OutputDependencyFile() {
}
bool DFGASTReaderListener::visitInputFile(llvm::StringRef Filename,
- bool IsSystem, bool IsOverridden) {
+ bool IsSystem, bool IsOverridden,
+ bool IsExplicitModule) {
assert(!IsSystem || needsSystemInputFileVisitation());
- if (IsOverridden)
+ if (IsOverridden || IsExplicitModule)
return true;
Parent.AddFilename(Filename);
return true;
}
-void DFGASTReaderListener::visitModuleFile(llvm::StringRef Filename) {
+void DFGASTReaderListener::visitModuleFile(llvm::StringRef Filename,
+ serialization::ModuleKind Kind) {
if (Parent.includeModuleFiles())
Parent.AddFilename(Filename);
}
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index c63e98dbe4f1..caf1f0dce99f 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -169,9 +169,7 @@ void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
// If this location is within a macro, walk from UnexpandedLoc up to Loc
// and produce a macro backtrace.
if (UnexpandedLoc.isValid() && UnexpandedLoc.isMacroID()) {
- unsigned MacroDepth = 0;
- emitMacroExpansions(UnexpandedLoc, Level, MutableRanges, FixItHints, *SM,
- MacroDepth);
+ emitMacroExpansions(UnexpandedLoc, Level, MutableRanges, FixItHints, *SM);
}
}
@@ -247,7 +245,7 @@ void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc,
// import stack rather than the
// FIXME: We want submodule granularity here.
std::pair<SourceLocation, StringRef> Imported = SM.getModuleImportLoc(Loc);
- if (Imported.first.isValid()) {
+ if (!Imported.second.empty()) {
// This location was imported by a module. Emit the module import stack.
emitImportStackRecursively(Imported.first, Imported.second, SM);
return;
@@ -278,13 +276,11 @@ void DiagnosticRenderer::emitImportStack(SourceLocation Loc,
void DiagnosticRenderer::emitImportStackRecursively(SourceLocation Loc,
StringRef ModuleName,
const SourceManager &SM) {
- if (Loc.isInvalid()) {
+ if (ModuleName.empty()) {
return;
}
PresumedLoc PLoc = SM.getPresumedLoc(Loc, DiagOpts->ShowPresumedLoc);
- if (PLoc.isInvalid())
- return;
// Emit the other import frames first.
std::pair<SourceLocation, StringRef> NextImportLoc
@@ -310,6 +306,81 @@ void DiagnosticRenderer::emitModuleBuildStack(const SourceManager &SM) {
}
}
+/// A recursive function that traces all possible backtrace locations
+/// of \p Loc to find one whose FileID matches \p CaretFileID.
+static SourceLocation
+retrieveMacroLocation(SourceLocation Loc, FileID MacroFileID,
+ FileID CaretFileID,
+ const SmallVectorImpl<FileID> &CommonArgExpansions,
+ bool IsBegin, const SourceManager *SM) {
+ assert(SM->getFileID(Loc) == MacroFileID);
+ if (MacroFileID == CaretFileID)
+ return Loc;
+ if (!Loc.isMacroID())
+ return SourceLocation();
+
+ SourceLocation MacroLocation, MacroArgLocation;
+
+ if (SM->isMacroArgExpansion(Loc)) {
+ // Only look at the immediate spelling location of this macro argument if
+ // the other location in the source range is also present in that expansion.
+ if (std::binary_search(CommonArgExpansions.begin(),
+ CommonArgExpansions.end(), MacroFileID))
+ MacroLocation = SM->getImmediateSpellingLoc(Loc);
+ MacroArgLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
+ : SM->getImmediateExpansionRange(Loc).second;
+ } else {
+ MacroLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
+ : SM->getImmediateExpansionRange(Loc).second;
+ MacroArgLocation = SM->getImmediateSpellingLoc(Loc);
+ }
+
+ if (MacroLocation.isValid()) {
+ MacroFileID = SM->getFileID(MacroLocation);
+ MacroLocation =
+ retrieveMacroLocation(MacroLocation, MacroFileID, CaretFileID,
+ CommonArgExpansions, IsBegin, SM);
+ if (MacroLocation.isValid())
+ return MacroLocation;
+ }
+
+ MacroFileID = SM->getFileID(MacroArgLocation);
+ return retrieveMacroLocation(MacroArgLocation, MacroFileID, CaretFileID,
+ CommonArgExpansions, IsBegin, SM);
+}
+
+/// Walk up the chain of macro expansions and collect the FileIDs of the
+/// macro argument expansions encountered along the way.
+static void getMacroArgExpansionFileIDs(SourceLocation Loc,
+ SmallVectorImpl<FileID> &IDs,
+ bool IsBegin, const SourceManager *SM) {
+ while (Loc.isMacroID()) {
+ if (SM->isMacroArgExpansion(Loc)) {
+ IDs.push_back(SM->getFileID(Loc));
+ Loc = SM->getImmediateSpellingLoc(Loc);
+ } else {
+ auto ExpRange = SM->getImmediateExpansionRange(Loc);
+ Loc = IsBegin ? ExpRange.first : ExpRange.second;
+ }
+ }
+}
+
+/// Collect the expansions of the begin and end locations and compute the set
+/// intersection. Produces a sorted vector of FileIDs in CommonArgExpansions.
+static void computeCommonMacroArgExpansionFileIDs(
+ SourceLocation Begin, SourceLocation End, const SourceManager *SM,
+ SmallVectorImpl<FileID> &CommonArgExpansions) {
+ SmallVector<FileID, 4> BeginArgExpansions;
+ SmallVector<FileID, 4> EndArgExpansions;
+ getMacroArgExpansionFileIDs(Begin, BeginArgExpansions, /*IsBegin=*/true, SM);
+ getMacroArgExpansionFileIDs(End, EndArgExpansions, /*IsBegin=*/false, SM);
+ std::sort(BeginArgExpansions.begin(), BeginArgExpansions.end());
+ std::sort(EndArgExpansions.begin(), EndArgExpansions.end());
+ std::set_intersection(BeginArgExpansions.begin(), BeginArgExpansions.end(),
+ EndArgExpansions.begin(), EndArgExpansions.end(),
+ std::back_inserter(CommonArgExpansions));
+}
+
// Helper function to fix up source ranges. It takes in an array of ranges,
// and outputs an array of ranges where we want to draw the range highlighting
// around the location specified by CaretLoc.
@@ -327,9 +398,9 @@ static void mapDiagnosticRanges(
const SourceManager *SM) {
FileID CaretLocFileID = SM->getFileID(CaretLoc);
- for (ArrayRef<CharSourceRange>::const_iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I) {
+ for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
+ if (I->isInvalid()) continue;
+
SourceLocation Begin = I->getBegin(), End = I->getEnd();
bool IsTokenRange = I->isTokenRange();
@@ -358,27 +429,19 @@ static void mapDiagnosticRanges(
}
}
- while (Begin.isMacroID() && BeginFileID != CaretLocFileID) {
- if (SM->isMacroArgExpansion(Begin)) {
- Begin = SM->getImmediateSpellingLoc(Begin);
- End = SM->getImmediateSpellingLoc(End);
- } else {
- Begin = SM->getImmediateExpansionRange(Begin).first;
- End = SM->getImmediateExpansionRange(End).second;
- }
- BeginFileID = SM->getFileID(Begin);
- if (BeginFileID != SM->getFileID(End)) {
- // FIXME: Ugly hack to stop a crash; this code is making bad
- // assumptions and it's too complicated for me to reason
- // about.
- Begin = End = SourceLocation();
- break;
- }
- }
+ // Do the backtracking.
+ SmallVector<FileID, 4> CommonArgExpansions;
+ computeCommonMacroArgExpansionFileIDs(Begin, End, SM, CommonArgExpansions);
+ Begin = retrieveMacroLocation(Begin, BeginFileID, CaretLocFileID,
+ CommonArgExpansions, /*IsBegin=*/true, SM);
+ End = retrieveMacroLocation(End, BeginFileID, CaretLocFileID,
+ CommonArgExpansions, /*IsBegin=*/false, SM);
+ if (Begin.isInvalid() || End.isInvalid()) continue;
// Return the spelling location of the beginning and end of the range.
Begin = SM->getSpellingLoc(Begin);
End = SM->getSpellingLoc(End);
+
SpellingRanges.push_back(CharSourceRange(SourceRange(Begin, End),
IsTokenRange));
}
@@ -394,6 +457,96 @@ void DiagnosticRenderer::emitCaret(SourceLocation Loc,
emitCodeContext(Loc, Level, SpellingRanges, Hints, SM);
}
+/// \brief A helper function for emitMacroExpansions that prints the
+/// expansion note for a single macro expansion.
+void DiagnosticRenderer::emitSingleMacroExpansion(
+ SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
+ // Find the spelling location for the macro definition. We must use the
+ // spelling location here to avoid emitting a macro backtrace for the note.
+ SourceLocation SpellingLoc = SM.getSpellingLoc(Loc);
+
+ // Map the ranges into the FileID of the diagnostic location.
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+
+ SmallString<100> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ StringRef MacroName = getImmediateMacroName(Loc, SM, LangOpts);
+ if (MacroName.empty())
+ Message << "expanded from here";
+ else
+ Message << "expanded from macro '" << MacroName << "'";
+
+ emitDiagnostic(SpellingLoc, DiagnosticsEngine::Note, Message.str(),
+ SpellingRanges, None, &SM);
+}
+
+/// Check that the macro argument location of Loc starts with ArgumentLoc.
+/// The starting location of the macro expansion is used to differentiate
+/// between different macro expansions.
+static bool checkLocForMacroArgExpansion(SourceLocation Loc,
+ const SourceManager &SM,
+ SourceLocation ArgumentLoc) {
+ SourceLocation MacroLoc;
+ if (SM.isMacroArgExpansion(Loc, &MacroLoc)) {
+ if (ArgumentLoc == MacroLoc) return true;
+ }
+
+ return false;
+}
+
+/// Check if all the locations in the range have the same macro argument
+/// expansion, and that that expansion starts with ArgumentLoc.
+static bool checkRangeForMacroArgExpansion(CharSourceRange Range,
+ const SourceManager &SM,
+ SourceLocation ArgumentLoc) {
+ SourceLocation BegLoc = Range.getBegin(), EndLoc = Range.getEnd();
+ while (BegLoc != EndLoc) {
+ if (!checkLocForMacroArgExpansion(BegLoc, SM, ArgumentLoc))
+ return false;
+ BegLoc = BegLoc.getLocWithOffset(1);
+ }
+
+ return checkLocForMacroArgExpansion(BegLoc, SM, ArgumentLoc);
+}
+
+/// A helper function to check if the current ranges are all inside the same
+/// macro argument expansion as Loc.
+static bool checkRangesForMacroArgExpansion(SourceLocation Loc,
+ ArrayRef<CharSourceRange> Ranges,
+ const SourceManager &SM) {
+ assert(Loc.isMacroID() && "Must be a macro expansion!");
+
+ SmallVector<CharSourceRange, 4> SpellingRanges;
+ mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+
+ // Count all valid ranges.
+ unsigned ValidCount = 0;
+ for (auto I : Ranges)
+ if (I.isValid()) ValidCount++;
+
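+ // If mapping into the caret's FileID dropped one of the valid ranges, that
+ // range cannot lie in the same macro argument expansion as Loc.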
+ if (ValidCount > SpellingRanges.size())
+ return false;
+
+ // The source location at which the macro argument expansion begins.
+ SourceLocation ArgumentLoc;
+
+ // Set ArgumentLoc to the beginning location of the expansion of Loc so we
+ // can check whether all the ranges expand from that same location.
+ if (!SM.isMacroArgExpansion(Loc, &ArgumentLoc))
+ return false;
+
+ for (auto I = SpellingRanges.begin(), E = SpellingRanges.end(); I != E; ++I) {
+ if (!checkRangeForMacroArgExpansion(*I, SM, ArgumentLoc))
+ return false;
+ }
+
+ return true;
+}
+
/// \brief Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
@@ -405,71 +558,68 @@ void DiagnosticRenderer::emitCaret(SourceLocation Loc,
/// \param Level The diagnostic level currently being emitted.
/// \param Ranges The underlined ranges for this code snippet.
/// \param Hints The FixIt hints active for this diagnostic.
-/// \param OnMacroInst The current depth of the macro expansion stack.
void DiagnosticRenderer::emitMacroExpansions(SourceLocation Loc,
DiagnosticsEngine::Level Level,
ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> Hints,
- const SourceManager &SM,
- unsigned &MacroDepth,
- unsigned OnMacroInst) {
- assert(!Loc.isInvalid() && "must have a valid source location here");
-
- // Walk up to the caller of this macro, and produce a backtrace down to there.
- SourceLocation OneLevelUp = SM.getImmediateMacroCallerLoc(Loc);
- if (OneLevelUp.isMacroID())
- emitMacroExpansions(OneLevelUp, Level, Ranges, Hints, SM,
- MacroDepth, OnMacroInst + 1);
- else
- MacroDepth = OnMacroInst + 1;
-
- unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
- if (MacroDepth > DiagOpts->MacroBacktraceLimit &&
- DiagOpts->MacroBacktraceLimit != 0) {
- MacroSkipStart = DiagOpts->MacroBacktraceLimit / 2 +
- DiagOpts->MacroBacktraceLimit % 2;
- MacroSkipEnd = MacroDepth - DiagOpts->MacroBacktraceLimit / 2;
+ const SourceManager &SM) {
+ assert(Loc.isValid() && "must have a valid source location here");
+
+ // Produce a stack of macro backtraces.
+ SmallVector<SourceLocation, 8> LocationStack;
+ unsigned IgnoredEnd = 0;
+ while (Loc.isMacroID()) {
+ // If this is the expansion of a macro argument, point the caret at the
+ // use of the argument in the definition of the macro, not the expansion.
+ if (SM.isMacroArgExpansion(Loc))
+ LocationStack.push_back(SM.getImmediateExpansionRange(Loc).first);
+ else
+ LocationStack.push_back(Loc);
+
+ if (checkRangesForMacroArgExpansion(Loc, Ranges, SM))
+ IgnoredEnd = LocationStack.size();
+
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+
+ // Once the location no longer points into a macro, try stepping through
+ // the last found location. This sometimes produces additional useful
+ // backtraces.
+ if (Loc.isFileID())
+ Loc = SM.getImmediateMacroCallerLoc(LocationStack.back());
+ assert(Loc.isValid() && "must have a valid source location here");
}
- // Whether to suppress printing this macro expansion.
- bool Suppressed = (OnMacroInst >= MacroSkipStart &&
- OnMacroInst < MacroSkipEnd);
-
- if (Suppressed) {
- // Tell the user that we've skipped contexts.
- if (OnMacroInst == MacroSkipStart) {
- SmallString<200> MessageStorage;
- llvm::raw_svector_ostream Message(MessageStorage);
- Message << "(skipping " << (MacroSkipEnd - MacroSkipStart)
- << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
- "see all)";
- emitBasicNote(Message.str());
- }
+ LocationStack.erase(LocationStack.begin(),
+ LocationStack.begin() + IgnoredEnd);
+
+ unsigned MacroDepth = LocationStack.size();
+ unsigned MacroLimit = DiagOpts->MacroBacktraceLimit;
+ if (MacroDepth <= MacroLimit || MacroLimit == 0) {
+ for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
return;
}
- // Find the spelling location for the macro definition. We must use the
- // spelling location here to avoid emitting a macro bactrace for the note.
- SourceLocation SpellingLoc = Loc;
- // If this is the expansion of a macro argument, point the caret at the
- // use of the argument in the definition of the macro, not the expansion.
- if (SM.isMacroArgExpansion(Loc))
- SpellingLoc = SM.getImmediateExpansionRange(Loc).first;
- SpellingLoc = SM.getSpellingLoc(SpellingLoc);
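+ // When the backtrace is longer than the limit, print the first MacroLimit/2
+ // and the last MacroLimit/2 expansions (odd limits round the extra message
+ // to the tail) with a note in between summarizing how many were skipped.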
+ unsigned MacroStartMessages = MacroLimit / 2;
+ unsigned MacroEndMessages = MacroLimit / 2 + MacroLimit % 2;
- // Map the ranges into the FileID of the diagnostic location.
- SmallVector<CharSourceRange, 4> SpellingRanges;
- mapDiagnosticRanges(Loc, Ranges, SpellingRanges, &SM);
+ for (auto I = LocationStack.rbegin(),
+ E = LocationStack.rbegin() + MacroStartMessages;
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
- SmallString<100> MessageStorage;
+ SmallString<200> MessageStorage;
llvm::raw_svector_ostream Message(MessageStorage);
- StringRef MacroName = getImmediateMacroName(Loc, SM, LangOpts);
- if (MacroName.empty())
- Message << "expanded from here";
- else
- Message << "expanded from macro '" << MacroName << "'";
- emitDiagnostic(SpellingLoc, DiagnosticsEngine::Note, Message.str(),
- SpellingRanges, None, &SM);
+ Message << "(skipping " << (MacroDepth - MacroLimit)
+ << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
+ "see all)";
+ emitBasicNote(Message.str());
+
+ for (auto I = LocationStack.rend() - MacroEndMessages,
+ E = LocationStack.rend();
+ I != E; ++I)
+ emitSingleMacroExpansion(*I, Level, Ranges, SM);
}
DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
@@ -492,8 +642,11 @@ void DiagnosticNoteRenderer::emitImportLocation(SourceLocation Loc,
// Generate a note indicating the include location.
SmallString<200> MessageStorage;
llvm::raw_svector_ostream Message(MessageStorage);
- Message << "in module '" << ModuleName << "' imported from "
- << PLoc.getFilename() << ':' << PLoc.getLine() << ":";
+ Message << "in module '" << ModuleName;
+ if (PLoc.isValid())
+ Message << "' imported from " << PLoc.getFilename() << ':'
+ << PLoc.getLine();
+ Message << ":";
emitNote(Loc, Message.str(), &SM);
}
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 3e0f7a12c3b3..ecef92e0a7dd 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -190,9 +190,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
- std::unique_ptr<ASTUnit> AST =
- ASTUnit::LoadFromASTFile(InputFile, CI.getPCHContainerReader(),
- Diags, CI.getFileSystemOpts());
+ std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
+ InputFile, CI.getPCHContainerReader(), Diags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
goto failure;
@@ -284,7 +284,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!Found) {
CI.getDiagnostics().Report(diag::err_fe_no_pch_in_dir) << PCHInclude;
- return true;
+ goto failure;
}
}
}
@@ -375,7 +375,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (CI.getLangOpts().Modules)
CI.createModuleManager();
- PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
+ PP.getBuiltinInfo().initializeBuiltins(PP.getIdentifierTable(),
PP.getLangOpts());
} else {
// FIXME: If this is a problem, recover from it by creating a multiplex
@@ -442,9 +442,11 @@ bool FrontendAction::Execute() {
// there were any module-build failures.
if (CI.shouldBuildGlobalModuleIndex() && CI.hasFileManager() &&
CI.hasPreprocessor()) {
- GlobalModuleIndex::writeIndex(
- CI.getFileManager(), CI.getPCHContainerReader(),
- CI.getPreprocessor().getHeaderSearchInfo().getModuleCachePath());
+ StringRef Cache =
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleCachePath();
+ if (!Cache.empty())
+ GlobalModuleIndex::writeIndex(CI.getFileManager(),
+ CI.getPCHContainerReader(), Cache);
}
return true;
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 40277bdaa52d..d6c88d20fc2a 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -91,12 +91,10 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, nullptr, Sysroot, Buffer));
- Consumers.push_back(
- CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getTargetOpts(), CI.getLangOpts(),
- InFile, OutputFile, OS, Buffer));
+ CI.getPreprocessor(), OutputFile, nullptr, Sysroot,
+ Buffer, CI.getFrontendOpts().ModuleFileExtensions));
+ Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
+ CI, InFile, OutputFile, OS, Buffer));
return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -136,13 +134,15 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, Module, Sysroot, Buffer));
- Consumers.push_back(
- CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getTargetOpts(), CI.getLangOpts(),
- InFile, OutputFile, OS, Buffer));
+ CI.getPreprocessor(), OutputFile, Module, Sysroot,
+ Buffer, CI.getFrontendOpts().ModuleFileExtensions,
+ /*AllowASTWithErrors=*/false,
+ /*IncludeTimestamps=*/
+ +CI.getFrontendOpts().BuildingImplicitModule));
+ Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
+ CI, InFile, OutputFile, OS, Buffer));
return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -268,14 +268,26 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
StringRef Filename) {
- // Find the module map file.
- const FileEntry *ModuleMap = CI.getFileManager().getFile(Filename);
+ // Find the module map file.
+ const FileEntry *ModuleMap =
+ CI.getFileManager().getFile(Filename, /*openFile*/true);
if (!ModuleMap) {
CI.getDiagnostics().Report(diag::err_module_map_not_found)
<< Filename;
return false;
}
+ // Set up embedding for any specified files. Do this before we load any
+ // source files, including the primary module map for the compilation.
+ for (const auto &F : CI.getFrontendOpts().ModulesEmbedFiles) {
+ if (const auto *FE = CI.getFileManager().getFile(F, /*openFile*/true))
+ CI.getSourceManager().setFileIsTransient(FE);
+ else
+ CI.getDiagnostics().Report(diag::err_modules_embed_file_not_found) << F;
+ }
+ if (CI.getFrontendOpts().ModulesEmbedAllFiles)
+ CI.getSourceManager().setAllFilesAreTransient(true);
+
// Parse the module map file.
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
if (HS.loadModuleMapFile(ModuleMap, IsSystem))
@@ -416,6 +428,7 @@ void VerifyPCHAction::ExecuteAction() {
const std::string &Sysroot = CI.getHeaderSearchOpts().Sysroot;
std::unique_ptr<ASTReader> Reader(new ASTReader(
CI.getPreprocessor(), CI.getASTContext(), CI.getPCHContainerReader(),
+ CI.getFrontendOpts().ModuleFileExtensions,
Sysroot.empty() ? "" : Sysroot.c_str(),
/*DisableValidation*/ false,
/*AllowPCHWithCompilerErrors*/ false,
@@ -559,6 +572,20 @@ namespace {
}
return false;
}
+
+ /// Indicates that a particular module file extension has been read.
+ void readModuleFileExtension(
+ const ModuleFileExtensionMetadata &Metadata) override {
+ Out.indent(2) << "Module file extension '"
+ << Metadata.BlockName << "' " << Metadata.MajorVersion
+ << "." << Metadata.MinorVersion;
+ if (!Metadata.UserInfo.empty()) {
+ Out << ": ";
+ Out.write_escaped(Metadata.UserInfo);
+ }
+
+ Out << "\n";
+ }
#undef DUMP_BOOLEAN
};
}
@@ -578,7 +605,8 @@ void DumpModuleInfoAction::ExecuteAction() {
DumpModuleInfoListener Listener(Out);
ASTReader::readASTFileControlBlock(
getCurrentFile(), getCompilerInstance().getFileManager(),
- getCompilerInstance().getPCHContainerReader(), Listener);
+ getCompilerInstance().getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/true, Listener);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/HeaderIncludeGen.cpp b/lib/Frontend/HeaderIncludeGen.cpp
index 5732e5b3fb73..0bc1169ba0a9 100644
--- a/lib/Frontend/HeaderIncludeGen.cpp
+++ b/lib/Frontend/HeaderIncludeGen.cpp
@@ -46,7 +46,36 @@ public:
};
}
-void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
+static void PrintHeaderInfo(raw_ostream *OutputFile, const char* Filename,
+ bool ShowDepth, unsigned CurrentIncludeDepth,
+ bool MSStyle) {
+ // Write to a temporary string to avoid unnecessary flushing on errs().
+ SmallString<512> Pathname(Filename);
+ if (!MSStyle)
+ Lexer::Stringify(Pathname);
+
+ SmallString<256> Msg;
+ if (MSStyle)
+ Msg += "Note: including file:";
+
+ if (ShowDepth) {
+ // The main source file is at depth 1, so skip one dot.
+ for (unsigned i = 1; i != CurrentIncludeDepth; ++i)
+ Msg += MSStyle ? ' ' : '.';
+
+ if (!MSStyle)
+ Msg += ' ';
+ }
+ Msg += Pathname;
+ Msg += '\n';
+
+ OutputFile->write(Msg.data(), Msg.size());
+ OutputFile->flush();
+}
+
+void clang::AttachHeaderIncludeGen(Preprocessor &PP,
+ const std::vector<std::string> &ExtraHeaders,
+ bool ShowAllHeaders,
StringRef OutputPath, bool ShowDepth,
bool MSStyle) {
raw_ostream *OutputFile = MSStyle ? &llvm::outs() : &llvm::errs();
@@ -63,12 +92,19 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
delete OS;
} else {
OS->SetUnbuffered();
- OS->SetUseAtomicWrites(true);
OutputFile = OS;
OwnsOutputFile = true;
}
}
+ // Print header info for extra headers, pretending they were discovered
+ // by the regular preprocessor. The primary use case is to support
+ // proper generation of Make / Ninja file dependencies for implicit includes,
+ // such as sanitizer blacklists. It's only important for cl.exe
+ // compatibility; the GNU way to generate rules is -M / -MM / -MD / -MMD.
+ for (const auto &Header : ExtraHeaders) {
+ PrintHeaderInfo(OutputFile, Header.c_str(), ShowDepth, 2, MSStyle);
+ }
PP.addPPCallbacks(llvm::make_unique<HeaderIncludesCallback>(&PP,
ShowAllHeaders,
OutputFile,
@@ -112,27 +148,7 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
// Dump the header include information we are past the predefines buffer or
// are showing all headers.
if (ShowHeader && Reason == PPCallbacks::EnterFile) {
- // Write to a temporary string to avoid unnecessary flushing on errs().
- SmallString<512> Filename(UserLoc.getFilename());
- if (!MSStyle)
- Lexer::Stringify(Filename);
-
- SmallString<256> Msg;
- if (MSStyle)
- Msg += "Note: including file:";
-
- if (ShowDepth) {
- // The main source file is at depth 1, so skip one dot.
- for (unsigned i = 1; i != CurrentIncludeDepth; ++i)
- Msg += MSStyle ? ' ' : '.';
-
- if (!MSStyle)
- Msg += ' ';
- }
- Msg += Filename;
- Msg += '\n';
-
- OutputFile->write(Msg.data(), Msg.size());
- OutputFile->flush();
+ PrintHeaderInfo(OutputFile, UserLoc.getFilename(),
+ ShowDepth, CurrentIncludeDepth, MSStyle);
}
}
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index e3a17c922fb8..26bab0db5347 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Config/config.h" // C_INCLUDE_DIRS
+#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -215,6 +216,8 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::OpenBSD:
case llvm::Triple::Bitrig:
case llvm::Triple::NaCl:
+ case llvm::Triple::PS4:
+ case llvm::Triple::ELFIAMCU:
break;
case llvm::Triple::Win32:
if (triple.getEnvironment() != llvm::Triple::Cygnus)
@@ -246,10 +249,8 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
if (CIncludeDirs != "") {
SmallVector<StringRef, 5> dirs;
CIncludeDirs.split(dirs, ":");
- for (SmallVectorImpl<StringRef>::iterator i = dirs.begin();
- i != dirs.end();
- ++i)
- AddPath(*i, ExternCSystem, false);
+ for (StringRef dir : dirs)
+ AddPath(dir, ExternCSystem, false);
return;
}
@@ -319,7 +320,30 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::CloudABI:
case llvm::Triple::RTEMS:
case llvm::Triple::NaCl:
+ case llvm::Triple::ELFIAMCU:
break;
+ case llvm::Triple::PS4: {
+ // <isysroot> gets prepended later in AddPath().
+ std::string BaseSDKPath = "";
+ if (!HasSysroot) {
+ const char *envValue = getenv("SCE_PS4_SDK_DIR");
+ if (envValue)
+ BaseSDKPath = envValue;
+ else {
+ // HSOpts.ResourceDir variable contains the location of Clang's
+ // resource files.
+ // Assuming that Clang is configured for PS4 without
+ // --with-clang-resource-dir option, the location of Clang's resource
+ // files is <SDK_DIR>/host_tools/lib/clang
+ SmallString<128> P = StringRef(HSOpts.ResourceDir);
+ llvm::sys::path::append(P, "../../..");
+ BaseSDKPath = P.str();
+ }
+ }
+ AddPath(BaseSDKPath + "/target/include", System, false);
+ if (triple.isPS4CPU())
+ AddPath(BaseSDKPath + "/target/include_common", System, false);
+ }
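+ // No break: deliberately fall through so /usr/include is added below as
+ // well.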
default:
AddPath("/usr/include", ExternCSystem, false);
break;
@@ -387,10 +411,7 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
}
break;
case llvm::Triple::DragonFly:
- if (llvm::sys::fs::exists("/usr/lib/gcc47"))
- AddPath("/usr/include/c++/4.7", CXXSystem, false);
- else
- AddPath("/usr/include/c++/4.4", CXXSystem, false);
+ AddPath("/usr/include/c++/5.0", CXXSystem, false);
break;
case llvm::Triple::OpenBSD: {
std::string t = triple.getTriple();
@@ -404,10 +425,6 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
"", "", "", triple);
break;
- case llvm::Triple::Solaris:
- AddGnuCPlusPlusIncludePaths("/usr/gcc/4.5/include/c++/4.5.2/",
- "i386-pc-solaris2.11", "", "", triple);
- break;
default:
break;
}
@@ -453,11 +470,6 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
AddUnmappedPath(P, CXXSystem, false);
}
}
- // On Solaris, include the support directory for things like xlocale and
- // fudged system headers.
- if (triple.getOS() == llvm::Triple::Solaris)
- AddPath("/usr/include/c++/v1/support/solaris", CXXSystem, false);
-
AddPath("/usr/include/c++/v1", CXXSystem, false);
} else {
AddDefaultCPlusPlusIncludePaths(triple, HSOpts);
@@ -568,39 +580,33 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
SearchList.reserve(IncludePath.size());
// Quoted arguments go first.
- for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
- it != ie; ++it) {
- if (it->first == Quoted)
- SearchList.push_back(it->second);
- }
+ for (auto &Include : IncludePath)
+ if (Include.first == Quoted)
+ SearchList.push_back(Include.second);
+
// Deduplicate and remember index.
RemoveDuplicates(SearchList, 0, Verbose);
unsigned NumQuoted = SearchList.size();
- for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
- it != ie; ++it) {
- if (it->first == Angled || it->first == IndexHeaderMap)
- SearchList.push_back(it->second);
- }
+ for (auto &Include : IncludePath)
+ if (Include.first == Angled || Include.first == IndexHeaderMap)
+ SearchList.push_back(Include.second);
RemoveDuplicates(SearchList, NumQuoted, Verbose);
unsigned NumAngled = SearchList.size();
- for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
- it != ie; ++it) {
- if (it->first == System || it->first == ExternCSystem ||
- (!Lang.ObjC1 && !Lang.CPlusPlus && it->first == CSystem) ||
- (/*FIXME !Lang.ObjC1 && */Lang.CPlusPlus && it->first == CXXSystem) ||
- (Lang.ObjC1 && !Lang.CPlusPlus && it->first == ObjCSystem) ||
- (Lang.ObjC1 && Lang.CPlusPlus && it->first == ObjCXXSystem))
- SearchList.push_back(it->second);
- }
-
- for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
- it != ie; ++it) {
- if (it->first == After)
- SearchList.push_back(it->second);
- }
+ for (auto &Include : IncludePath)
+ if (Include.first == System || Include.first == ExternCSystem ||
+ (!Lang.ObjC1 && !Lang.CPlusPlus && Include.first == CSystem) ||
+ (/*FIXME !Lang.ObjC1 && */ Lang.CPlusPlus &&
+ Include.first == CXXSystem) ||
+ (Lang.ObjC1 && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
+ (Lang.ObjC1 && Lang.CPlusPlus && Include.first == ObjCXXSystem))
+ SearchList.push_back(Include.second);
+
+ for (auto &Include : IncludePath)
+ if (Include.first == After)
+ SearchList.push_back(Include.second);
// Remove duplicates across both the Angled and System directories. GCC does
// this and failing to remove duplicates across these two groups breaks
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 0791494f7919..15aa54607ced 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -20,6 +20,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
@@ -323,15 +324,17 @@ static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
Out << "template<typename _Tp> struct __is_scalar;\n"
<< "\n";
+
+ if (LangOpts.ObjCAutoRefCount) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
- Out << "template<typename _Tp>\n"
- << "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
- << " enum { __value = 0 };\n"
- << " typedef __false_type __type;\n"
- << "};\n"
- << "\n";
-
- if (LangOpts.ObjCARCWeak) {
+ if (LangOpts.ObjCWeak) {
Out << "template<typename _Tp>\n"
<< "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
<< " enum { __value = 0 };\n"
@@ -340,13 +343,15 @@ static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
<< "\n";
}
- Out << "template<typename _Tp>\n"
- << "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
- << " _Tp> {\n"
- << " enum { __value = 0 };\n"
- << " typedef __false_type __type;\n"
- << "};\n"
- << "\n";
+ if (LangOpts.ObjCAutoRefCount) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
+ << " _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
Out << "}\n";
}
@@ -406,6 +411,8 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
+ if (LangOpts.CUDA)
+ Builder.defineMacro("__CUDA__");
}
/// Initialize the predefined C++ language feature test macros defined in
@@ -456,6 +463,8 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_sized_deallocation", "201309");
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1");
+ if (LangOpts.Coroutines)
+ Builder.defineMacro("__cpp_coroutines", "1");
}
static void InitializePredefinedMacros(const TargetInfo &TI,
@@ -849,9 +858,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
Builder.defineMacro("__SSP_ALL__", "3");
- if (FEOpts.ProgramAction == frontend::RewriteObjC)
- Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
-
// Define a macro that exists only when using the static analyzer.
if (FEOpts.ProgramAction == frontend::RunAnalysis)
Builder.defineMacro("__clang_analyzer__");
@@ -859,7 +865,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
- if (LangOpts.ObjCAutoRefCount) {
+ if (FEOpts.ProgramAction == frontend::RewriteObjC ||
+ LangOpts.getGC() != LangOptions::NonGC) {
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+ Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
+ Builder.defineMacro("__autoreleasing", "");
+ Builder.defineMacro("__unsafe_unretained", "");
+ } else if (LangOpts.ObjC1) {
Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
Builder.defineMacro("__autoreleasing",
@@ -918,14 +930,19 @@ void clang::InitializePreprocessor(
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
if (InitOpts.UsePredefines) {
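+ // In CUDA mode, also install the predefines of the aux target (the host
+ // when compiling for device, and vice versa) so macros from both halves of
+ // the compilation are available.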
+ if (LangOpts.CUDA && PP.getAuxTargetInfo())
+ InitializePredefinedMacros(*PP.getAuxTargetInfo(), LangOpts, FEOpts,
+ Builder);
+
InitializePredefinedMacros(PP.getTargetInfo(), LangOpts, FEOpts, Builder);
// Install definitions to make Objective-C++ ARC work well with various
// C++ Standard Library implementations.
- if (LangOpts.ObjC1 && LangOpts.CPlusPlus && LangOpts.ObjCAutoRefCount) {
+ if (LangOpts.ObjC1 && LangOpts.CPlusPlus &&
+ (LangOpts.ObjCAutoRefCount || LangOpts.ObjCWeak)) {
switch (InitOpts.ObjCXXARCStandardLibrary) {
case ARCXX_nolib:
- case ARCXX_libcxx:
+ case ARCXX_libcxx:
break;
case ARCXX_libstdcxx:
diff --git a/lib/Frontend/LogDiagnosticPrinter.cpp b/lib/Frontend/LogDiagnosticPrinter.cpp
index c6a18e0d80d2..9998f65457cf 100644
--- a/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -118,7 +118,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (MainFilename.empty() && Info.hasSourceManager()) {
const SourceManager &SM = Info.getSourceManager();
FileID FID = SM.getMainFileID();
- if (!FID.isInvalid()) {
+ if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
MainFilename = FE->getName();
@@ -147,7 +147,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (PLoc.isInvalid()) {
// At least print the file name if available:
FileID FID = SM.getFileID(Info.getLocation());
- if (!FID.isInvalid()) {
+ if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
DE.Filename = FE->getName();
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index 67852dc02036..9768a164acbc 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -32,8 +32,8 @@ public:
: Collector(Collector) {}
bool needsInputFileVisitation() override { return true; }
bool needsSystemInputFileVisitation() override { return true; }
- bool visitInputFile(StringRef Filename, bool IsSystem,
- bool IsOverridden) override;
+ bool visitInputFile(StringRef Filename, bool IsSystem, bool IsOverridden,
+ bool IsExplicitModule) override;
};
}
@@ -67,7 +67,7 @@ std::error_code ModuleDependencyListener::copyToRoot(StringRef Src) {
path::native(AbsoluteSrc);
// TODO: We probably need to handle .. as well as . in order to have valid
// input to the YAMLVFSWriter.
- FileManager::removeDotPaths(AbsoluteSrc);
+ path::remove_dots(AbsoluteSrc);
// Build the destination path.
SmallString<256> Dest = Collector.getDest();
@@ -85,7 +85,8 @@ std::error_code ModuleDependencyListener::copyToRoot(StringRef Src) {
}
bool ModuleDependencyListener::visitInputFile(StringRef Filename, bool IsSystem,
- bool IsOverridden) {
+ bool IsOverridden,
+ bool IsExplicitModule) {
if (Collector.insertSeen(Filename))
if (copyToRoot(Filename))
Collector.setHasErrors();
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index 91ee100f6394..12c85240bd75 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -122,9 +122,6 @@ public:
void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) override;
void FunctionDefinitionInstantiated(const FunctionDecl *D) override;
- void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
- const ObjCPropertyDecl *OrigProp,
- const ObjCCategoryDecl *ClassExt) override;
void DeclarationMarkedUsed(const Decl *D) override;
void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override;
void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override;
@@ -207,13 +204,6 @@ void MultiplexASTMutationListener::FunctionDefinitionInstantiated(
for (auto &Listener : Listeners)
Listener->FunctionDefinitionInstantiated(D);
}
-void MultiplexASTMutationListener::AddedObjCPropertyInClassExtension(
- const ObjCPropertyDecl *Prop,
- const ObjCPropertyDecl *OrigProp,
- const ObjCCategoryDecl *ClassExt) {
- for (size_t i = 0, e = Listeners.size(); i != e; ++i)
- Listeners[i]->AddedObjCPropertyInClassExtension(Prop, OrigProp, ClassExt);
-}
void MultiplexASTMutationListener::DeclarationMarkedUsed(const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclarationMarkedUsed(D);
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Frontend/PCHContainerOperations.cpp
index cde3ba139bcf..5e1d77205098 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Frontend/PCHContainerOperations.cpp
@@ -16,6 +16,7 @@
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Support/raw_ostream.h"
#include "clang/Lex/ModuleLoader.h"
+
using namespace clang;
namespace {
@@ -26,17 +27,11 @@ class RawPCHContainerGenerator : public ASTConsumer {
raw_pwrite_stream *OS;
public:
- RawPCHContainerGenerator(DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HSO,
- const PreprocessorOptions &PPO,
- const TargetOptions &TO, const LangOptions &LO,
- const std::string &MainFileName,
- const std::string &OutputFileName,
- llvm::raw_pwrite_stream *OS,
+ RawPCHContainerGenerator(llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer)
: Buffer(Buffer), OS(OS) {}
- virtual ~RawPCHContainerGenerator() {}
+ ~RawPCHContainerGenerator() override = default;
void HandleTranslationUnit(ASTContext &Ctx) override {
if (Buffer->IsComplete) {
@@ -49,16 +44,14 @@ public:
Buffer->Data = std::move(Empty);
}
};
-}
+
+} // anonymous namespace
std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
- DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO,
- const PreprocessorOptions &PPO, const TargetOptions &TO,
- const LangOptions &LO, const std::string &MainFileName,
+ CompilerInstance &CI, const std::string &MainFileName,
const std::string &OutputFileName, llvm::raw_pwrite_stream *OS,
std::shared_ptr<PCHBuffer> Buffer) const {
- return llvm::make_unique<RawPCHContainerGenerator>(
- Diags, HSO, PPO, TO, LO, MainFileName, OutputFileName, OS, Buffer);
+ return llvm::make_unique<RawPCHContainerGenerator>(OS, Buffer);
}
void RawPCHContainerReader::ExtractPCH(
diff --git a/lib/Frontend/Rewrite/FrontendActions.cpp b/lib/Frontend/Rewrite/FrontendActions.cpp
index dbc661b71905..8cf8adf37ed6 100644
--- a/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -78,7 +78,7 @@ public:
std::string RewriteFilename(const std::string &Filename, int &fd) override {
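+    // path::extension() includes the leading '.', and createTemporaryFile
+    // inserts its own dot before the suffix, so drop the leading dot here.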
SmallString<128> Path;
llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
- llvm::sys::path::extension(Filename), fd,
+ llvm::sys::path::extension(Filename).drop_front(), fd,
Path);
return Path.str();
}
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 08d6cf1f92c2..ca8226251fd9 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -160,7 +160,7 @@ void InclusionRewriter::FileChanged(SourceLocation Loc,
void InclusionRewriter::FileSkipped(const FileEntry &/*SkippedFile*/,
const Token &/*FilenameTok*/,
SrcMgr::CharacteristicKind /*FileType*/) {
- assert(!LastInclusionLocation.isInvalid() &&
+ assert(LastInclusionLocation.isValid() &&
"A file, that wasn't found via an inclusion directive, was skipped");
LastInclusionLocation = SourceLocation();
}
@@ -389,9 +389,10 @@ bool InclusionRewriter::HandleHasInclude(
SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1>
Includers;
Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ // FIXME: Why don't we call PP.LookupFile here?
const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
Filename, SourceLocation(), isAngled, nullptr, CurDir, Includers, nullptr,
- nullptr, nullptr, false);
+ nullptr, nullptr, nullptr, false);
FileExists = File != nullptr;
return true;
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 2902ba78c4ef..be68d42affa1 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -412,7 +412,7 @@ namespace {
// Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
- Expr **args, unsigned nargs,
+ ArrayRef<Expr *> Args,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
@@ -2105,15 +2105,17 @@ Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size());
+ SelExprs);
ReplaceStmt(Exp, SelExp);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return SelExp;
}
-CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
- FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+CallExpr *
+RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = FD->getType();
@@ -2129,10 +2131,9 @@ CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *Exp =
- new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
- FT->getCallResultType(*Context),
- VK_RValue, EndLoc);
+ CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
return Exp;
}
@@ -2660,9 +2661,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
IdentifierInfo *clsName = BoxingClass->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
@@ -2672,8 +2671,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
SelExprs.push_back(
getStringLiteral(BoxingMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size(),
- StartLoc, EndLoc);
+ SelExprs, StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// User provided sub-expression is the 3rd, and last, argument.
@@ -2788,9 +2786,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
@@ -2801,8 +2797,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
SelExprs.push_back(
getStringLiteral(ArrayMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size(),
- StartLoc, EndLoc);
+ SelExprs, StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// (const id [])objects
@@ -2939,9 +2934,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
@@ -2951,8 +2944,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
SelExprs.push_back(getStringLiteral(DictMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size(),
- StartLoc, EndLoc);
+ SelExprs, StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// (const id [])objects
@@ -3298,14 +3290,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
// (Class)objc_getClass("CurrentClass")
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
- StartLoc,
- EndLoc);
+ ClsExprs, StartLoc, EndLoc);
ClsExprs.clear();
ClsExprs.push_back(Cls);
- Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
- &ClsExprs[0], ClsExprs.size(),
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
@@ -3366,9 +3354,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
= Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCIdType(),
@@ -3398,14 +3384,11 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
// (Class)objc_getClass("CurrentClass")
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
ClsExprs.clear();
ClsExprs.push_back(Cls);
- Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
- &ClsExprs[0], ClsExprs.size(),
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
@@ -3476,9 +3459,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size(),
- StartLoc,
- EndLoc);
+ SelExprs, StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// Now push any user supplied arguments.
@@ -4862,7 +4843,7 @@ void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
std::string Str = "(";
Str += TypeString;
Str += ")";
- InsertText(IC->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ InsertText(IC->getSubExpr()->getLocStart(), Str);
return;
}
@@ -5641,7 +5622,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// FIXME: Missing definition of
// InsertText(clang::SourceLocation, char const*, unsigned int).
- // InsertText(startLoc, messString.c_str(), messString.size());
+ // InsertText(startLoc, messString);
// Tried this, but it didn't work either...
// ReplaceText(startLoc, 0, messString.c_str(), messString.size());
#endif
@@ -5767,7 +5748,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
- InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ InsertText(ICE->getSubExpr()->getLocStart(), Str);
delete S;
return Replacement;
}
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 204820b3041a..e0ddadb12306 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -346,7 +346,7 @@ namespace {
// Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
- Expr **args, unsigned nargs,
+ ArrayRef<Expr *> Args,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
@@ -1997,15 +1997,17 @@ Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size());
+ SelExprs);
ReplaceStmt(Exp, SelExp);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return SelExp;
}
-CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
- FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+CallExpr *
+RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ ArrayRef<Expr *> Args,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = FD->getType();
@@ -2021,10 +2023,9 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *Exp =
- new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
- FT->getCallResultType(*Context),
- VK_RValue, EndLoc);
+ CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
return Exp;
}
@@ -2680,20 +2681,15 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
- StartLoc,
- EndLoc);
+ ClsExprs, StartLoc, EndLoc);
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
- Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
- &ClsExprs[0], ClsExprs.size(),
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
-
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
// To turn off a warning, type-cast to 'id'
InitExprs.push_back( // set 'super class', using class_getSuperclass().
@@ -2752,9 +2748,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
= Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
break;
@@ -2780,9 +2774,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
- CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
- &ClsExprs[0],
- ClsExprs.size(),
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
@@ -2790,8 +2782,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
- Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
- &ClsExprs[0], ClsExprs.size(),
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, ClsExprs,
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
@@ -2862,9 +2853,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size(),
- StartLoc,
- EndLoc);
+ SelExprs, StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// Now push any user supplied arguments.
@@ -4675,7 +4664,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// FIXME: Missing definition of
// InsertText(clang::SourceLocation, char const*, unsigned int).
- // InsertText(startLoc, messString.c_str(), messString.size());
+ // InsertText(startLoc, messString);
// Tried this, but it didn't work either...
// ReplaceText(startLoc, 0, messString.c_str(), messString.size());
#endif
@@ -4790,7 +4779,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
- InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ InsertText(ICE->getSubExpr()->getLocStart(), Str);
delete S;
return Replacement;
}
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index d31b12e87a4e..1bf10d276945 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -51,6 +51,7 @@ public:
typedef SmallVector<uint64_t, 64> RecordData;
typedef SmallVectorImpl<uint64_t> RecordDataImpl;
+typedef ArrayRef<uint64_t> RecordDataRef;
class SDiagsWriter;
@@ -393,13 +394,9 @@ unsigned SDiagsWriter::getEmitFile(const char *FileName){
// Lazily generate the record for the file.
entry = State->Files.size();
- RecordData Record;
- Record.push_back(RECORD_FILENAME);
- Record.push_back(entry);
- Record.push_back(0); // For legacy.
- Record.push_back(0); // For legacy.
StringRef Name(FileName);
- Record.push_back(Name.size());
+ RecordData::value_type Record[] = {RECORD_FILENAME, entry, 0 /* For legacy */,
+ 0 /* For legacy */, Name.size()};
State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_FILENAME), Record,
Name);
@@ -478,7 +475,7 @@ void SDiagsWriter::EmitBlockInfoBlock() {
AddSourceLocationAbbrev(Abbrev);
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Category.
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // Text size.
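+  // VBR, unlike Fixed(16), does not cap the encodable text size at 2^16-1.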
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Diagnostc text.
Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
@@ -531,14 +528,11 @@ void SDiagsWriter::EmitBlockInfoBlock() {
void SDiagsWriter::EmitMetaBlock() {
llvm::BitstreamWriter &Stream = State->Stream;
- RecordData &Record = State->Record;
AbbreviationMap &Abbrevs = State->Abbrevs;
Stream.EnterSubblock(BLOCK_META, 3);
- Record.clear();
- Record.push_back(RECORD_VERSION);
- Record.push_back(VersionNumber);
- Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
+ RecordData::value_type Record[] = {RECORD_VERSION, VersionNumber};
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
Stream.ExitBlock();
}
@@ -548,11 +542,8 @@ unsigned SDiagsWriter::getEmitCategory(unsigned int category) {
// We use a local version of 'Record' so that we can be generating
// another record when we lazily generate one for the category entry.
- RecordData Record;
- Record.push_back(RECORD_CATEGORY);
- Record.push_back(category);
StringRef catName = DiagnosticIDs::getCategoryNameFromID(category);
- Record.push_back(catName.size());
+ RecordData::value_type Record[] = {RECORD_CATEGORY, category, catName.size()};
State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_CATEGORY), Record,
catName);
@@ -581,10 +572,8 @@ unsigned SDiagsWriter::getEmitDiagnosticFlag(StringRef FlagName) {
entry.second = FlagName;
// Lazily emit the string in a separate record.
- RecordData Record;
- Record.push_back(RECORD_DIAG_FLAG);
- Record.push_back(entry.first);
- Record.push_back(FlagName.size());
+ RecordData::value_type Record[] = {RECORD_DIAG_FLAG, entry.first,
+ FlagName.size()};
State->Stream.EmitRecordWithBlob(State->Abbrevs.get(RECORD_DIAG_FLAG),
Record, FlagName);
}
@@ -844,17 +833,9 @@ std::error_code SDiagsMerger::visitEndOfDiagnostic() {
std::error_code
SDiagsMerger::visitSourceRangeRecord(const serialized_diags::Location &Start,
const serialized_diags::Location &End) {
- RecordData Record;
- Record.push_back(RECORD_SOURCE_RANGE);
- Record.push_back(FileLookup[Start.FileID]);
- Record.push_back(Start.Line);
- Record.push_back(Start.Col);
- Record.push_back(Start.Offset);
- Record.push_back(FileLookup[End.FileID]);
- Record.push_back(End.Line);
- Record.push_back(End.Col);
- Record.push_back(End.Offset);
-
+ RecordData::value_type Record[] = {
+ RECORD_SOURCE_RANGE, FileLookup[Start.FileID], Start.Line, Start.Col,
+ Start.Offset, FileLookup[End.FileID], End.Line, End.Col, End.Offset};
Writer.State->Stream.EmitRecordWithAbbrev(
Writer.State->Abbrevs.get(RECORD_SOURCE_RANGE), Record);
return std::error_code();
@@ -863,19 +844,13 @@ SDiagsMerger::visitSourceRangeRecord(const serialized_diags::Location &Start,
std::error_code SDiagsMerger::visitDiagnosticRecord(
unsigned Severity, const serialized_diags::Location &Location,
unsigned Category, unsigned Flag, StringRef Message) {
- RecordData MergedRecord;
- MergedRecord.push_back(RECORD_DIAG);
- MergedRecord.push_back(Severity);
- MergedRecord.push_back(FileLookup[Location.FileID]);
- MergedRecord.push_back(Location.Line);
- MergedRecord.push_back(Location.Col);
- MergedRecord.push_back(Location.Offset);
- MergedRecord.push_back(CategoryLookup[Category]);
- MergedRecord.push_back(Flag ? DiagFlagLookup[Flag] : 0);
- MergedRecord.push_back(Message.size());
+ RecordData::value_type Record[] = {
+ RECORD_DIAG, Severity, FileLookup[Location.FileID], Location.Line,
+ Location.Col, Location.Offset, CategoryLookup[Category],
+ Flag ? DiagFlagLookup[Flag] : 0, Message.size()};
Writer.State->Stream.EmitRecordWithBlob(
- Writer.State->Abbrevs.get(RECORD_DIAG), MergedRecord, Message);
+ Writer.State->Abbrevs.get(RECORD_DIAG), Record, Message);
return std::error_code();
}
@@ -883,17 +858,10 @@ std::error_code
SDiagsMerger::visitFixitRecord(const serialized_diags::Location &Start,
const serialized_diags::Location &End,
StringRef Text) {
- RecordData Record;
- Record.push_back(RECORD_FIXIT);
- Record.push_back(FileLookup[Start.FileID]);
- Record.push_back(Start.Line);
- Record.push_back(Start.Col);
- Record.push_back(Start.Offset);
- Record.push_back(FileLookup[End.FileID]);
- Record.push_back(End.Line);
- Record.push_back(End.Col);
- Record.push_back(End.Offset);
- Record.push_back(Text.size());
+ RecordData::value_type Record[] = {RECORD_FIXIT, FileLookup[Start.FileID],
+ Start.Line, Start.Col, Start.Offset,
+ FileLookup[End.FileID], End.Line, End.Col,
+ End.Offset, Text.size()};
Writer.State->Stream.EmitRecordWithBlob(
Writer.State->Abbrevs.get(RECORD_FIXIT), Record, Text);
diff --git a/lib/Frontend/TestModuleFileExtension.cpp b/lib/Frontend/TestModuleFileExtension.cpp
new file mode 100644
index 000000000000..d1b20c4a80b3
--- /dev/null
+++ b/lib/Frontend/TestModuleFileExtension.cpp
@@ -0,0 +1,123 @@
+//===-- TestModuleFileExtension.cpp - Module Extension Tester -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "TestModuleFileExtension.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+using namespace clang::serialization;
+
+TestModuleFileExtension::Writer::~Writer() { }
+
+void TestModuleFileExtension::Writer::writeExtensionContents(
+ Sema &SemaRef,
+ llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+
+ // Write an abbreviation for this record.
+ BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(FIRST_EXTENSION_RECORD_ID));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of characters
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // message
+ auto Abbrev = Stream.EmitAbbrev(Abv);
+
+ // Write a message into the extension block.
+ SmallString<64> Message;
+ {
+ auto Ext = static_cast<TestModuleFileExtension *>(getExtension());
+ raw_svector_ostream OS(Message);
+ OS << "Hello from " << Ext->BlockName << " v" << Ext->MajorVersion << "."
+ << Ext->MinorVersion;
+ }
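+  // The record carries the record ID and the message length; the message
+  // text itself travels in the blob (the Reader recovers it below as
+  // Blob.substr(0, Record[0])).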
+ SmallVector<uint64_t, 4> Record;
+ Record.push_back(FIRST_EXTENSION_RECORD_ID);
+ Record.push_back(Message.size());
+ Stream.EmitRecordWithBlob(Abbrev, Record, Message);
+}
+
+TestModuleFileExtension::Reader::Reader(ModuleFileExtension *Ext,
+ const llvm::BitstreamCursor &InStream)
+ : ModuleFileExtensionReader(Ext), Stream(InStream)
+{
+ // Read the extension block.
+ SmallVector<uint64_t, 4> Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ case llvm::BitstreamEntry::EndBlock:
+ case llvm::BitstreamEntry::Error:
+ return;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case FIRST_EXTENSION_RECORD_ID: {
+ StringRef Message = Blob.substr(0, Record[0]);
+ fprintf(stderr, "Read extension block message: %s\n",
+ Message.str().c_str());
+ break;
+ }
+ }
+ }
+}
+
+TestModuleFileExtension::Reader::~Reader() { }
+
+TestModuleFileExtension::~TestModuleFileExtension() { }
+
+ModuleFileExtensionMetadata
+TestModuleFileExtension::getExtensionMetadata() const {
+ return { BlockName, MajorVersion, MinorVersion, UserInfo };
+}
+
+llvm::hash_code TestModuleFileExtension::hashExtension(
+ llvm::hash_code Code) const {
+ if (Hashed) {
+ Code = llvm::hash_combine(Code, BlockName);
+ Code = llvm::hash_combine(Code, MajorVersion);
+ Code = llvm::hash_combine(Code, MinorVersion);
+ Code = llvm::hash_combine(Code, UserInfo);
+ }
+
+ return Code;
+}
+
+std::unique_ptr<ModuleFileExtensionWriter>
+TestModuleFileExtension::createExtensionWriter(ASTWriter &) {
+ return std::unique_ptr<ModuleFileExtensionWriter>(new Writer(this));
+}
+
+std::unique_ptr<ModuleFileExtensionReader>
+TestModuleFileExtension::createExtensionReader(
+ const ModuleFileExtensionMetadata &Metadata,
+ ASTReader &Reader, serialization::ModuleFile &Mod,
+ const llvm::BitstreamCursor &Stream)
+{
+ assert(Metadata.BlockName == BlockName && "Wrong block name");
+ if (std::make_pair(Metadata.MajorVersion, Metadata.MinorVersion) !=
+ std::make_pair(MajorVersion, MinorVersion)) {
+ Reader.getDiags().Report(Mod.ImportLoc,
+ diag::err_test_module_file_extension_version)
+ << BlockName << Metadata.MajorVersion << Metadata.MinorVersion
+ << MajorVersion << MinorVersion;
+ return nullptr;
+ }
+
+ return std::unique_ptr<ModuleFileExtensionReader>(
+ new TestModuleFileExtension::Reader(this, Stream));
+}
diff --git a/lib/Frontend/TestModuleFileExtension.h b/lib/Frontend/TestModuleFileExtension.h
new file mode 100644
index 000000000000..41f3ca9f05fc
--- /dev/null
+++ b/lib/Frontend/TestModuleFileExtension.h
@@ -0,0 +1,72 @@
+//===-- TestModuleFileExtension.h - Module Extension Tester -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
+#define LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
+
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include <string>
+
+namespace clang {
+
+/// A module file extension used for testing purposes.
+class TestModuleFileExtension : public ModuleFileExtension {
+ std::string BlockName;
+ unsigned MajorVersion;
+ unsigned MinorVersion;
+ bool Hashed;
+ std::string UserInfo;
+
+ class Writer : public ModuleFileExtensionWriter {
+ public:
+ Writer(ModuleFileExtension *Ext) : ModuleFileExtensionWriter(Ext) { }
+ ~Writer() override;
+
+ void writeExtensionContents(Sema &SemaRef,
+ llvm::BitstreamWriter &Stream) override;
+ };
+
+ class Reader : public ModuleFileExtensionReader {
+ llvm::BitstreamCursor Stream;
+
+ public:
+ ~Reader() override;
+
+ Reader(ModuleFileExtension *Ext, const llvm::BitstreamCursor &InStream);
+ };
+
+public:
+ TestModuleFileExtension(StringRef BlockName,
+ unsigned MajorVersion,
+ unsigned MinorVersion,
+ bool Hashed,
+ StringRef UserInfo)
+ : BlockName(BlockName),
+ MajorVersion(MajorVersion), MinorVersion(MinorVersion),
+ Hashed(Hashed), UserInfo(UserInfo) { }
+ ~TestModuleFileExtension() override;
+
+ ModuleFileExtensionMetadata getExtensionMetadata() const override;
+
+ llvm::hash_code hashExtension(llvm::hash_code Code) const override;
+
+ std::unique_ptr<ModuleFileExtensionWriter>
+ createExtensionWriter(ASTWriter &Writer) override;
+
+ std::unique_ptr<ModuleFileExtensionReader>
+ createExtensionReader(const ModuleFileExtensionMetadata &Metadata,
+ ASTReader &Reader, serialization::ModuleFile &Mod,
+ const llvm::BitstreamCursor &Stream) override;
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_FRONTEND_TESTMODULEFILEEXTENSION_H
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index aaf17a983371..d4e156d44582 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -777,7 +777,7 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
if (PLoc.isInvalid()) {
// At least print the file name if available:
FileID FID = SM.getFileID(Loc);
- if (!FID.isInvalid()) {
+ if (FID.isValid()) {
const FileEntry* FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid()) {
OS << FE->getName();
@@ -875,7 +875,7 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
PresumedLoc PLoc,
const SourceManager &SM) {
- if (DiagOpts->ShowLocation)
+ if (DiagOpts->ShowLocation && PLoc.getFilename())
OS << "In file included from " << PLoc.getFilename() << ':'
<< PLoc.getLine() << ":\n";
else
@@ -885,11 +885,11 @@ void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
void TextDiagnostic::emitImportLocation(SourceLocation Loc, PresumedLoc PLoc,
StringRef ModuleName,
const SourceManager &SM) {
- if (DiagOpts->ShowLocation)
+ if (DiagOpts->ShowLocation && PLoc.getFilename())
OS << "In module '" << ModuleName << "' imported from "
<< PLoc.getFilename() << ':' << PLoc.getLine() << ":\n";
else
- OS << "In module " << ModuleName << "':\n";
+ OS << "In module '" << ModuleName << "':\n";
}
void TextDiagnostic::emitBuildingModuleLocation(SourceLocation Loc,
@@ -1060,7 +1060,7 @@ void TextDiagnostic::emitSnippetAndCaret(
SmallVectorImpl<CharSourceRange>& Ranges,
ArrayRef<FixItHint> Hints,
const SourceManager &SM) {
- assert(!Loc.isInvalid() && "must have a valid source location here");
+ assert(Loc.isValid() && "must have a valid source location here");
assert(Loc.isFileID() && "must have a file location here");
// If caret diagnostics are enabled and we have location, we want to
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 55df9361b58e..7331d77d1c18 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -186,9 +186,7 @@ public:
Regex(RegexStr) { }
bool isValid(std::string &Error) override {
- if (Regex.isValid(Error))
- return true;
- return false;
+ return Regex.isValid(Error);
}
bool match(StringRef S) override {
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 7de5fbe8be0e..9393f69d41fa 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -15,6 +15,7 @@ set(files
avxintrin.h
bmi2intrin.h
bmiintrin.h
+ __clang_cuda_runtime_wrapper.h
cpuid.h
cuda_builtin_vars.h
emmintrin.h
@@ -66,6 +67,10 @@ set(files
x86intrin.h
xmmintrin.h
xopintrin.h
+ xsaveintrin.h
+ xsaveoptintrin.h
+ xsavecintrin.h
+ xsavesintrin.h
xtestintrin.h
)
@@ -97,5 +102,14 @@ set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
install(
FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
+ COMPONENT clang-headers
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
+
+if (NOT CMAKE_CONFIGURATION_TYPES) # don't add this for IDEs.
+ add_custom_target(install-clang-headers
+ DEPENDS
+ COMMAND "${CMAKE_COMMAND}"
+ -DCMAKE_INSTALL_COMPONENT=clang-headers
+ -P "${CMAKE_BINARY_DIR}/cmake_install.cmake")
+endif()
diff --git a/lib/Headers/Intrin.h b/lib/Headers/Intrin.h
index 24b3eae8bf86..6c1d0d16eabf 100644
--- a/lib/Headers/Intrin.h
+++ b/lib/Headers/Intrin.h
@@ -49,10 +49,7 @@ extern "C" {
#if defined(__MMX__)
/* And the random ones that aren't in those files. */
__m64 _m_from_float(float);
-__m64 _m_from_int(int _l);
-void _m_prefetch(void *);
float _m_to_float(__m64);
-int _m_to_int(__m64 _M);
#endif
/* Other assorted instruction intrinsics. */
@@ -292,9 +289,6 @@ void _xend(void);
static __inline__
#define _XCR_XFEATURE_ENABLED_MASK 0
unsigned __int64 __cdecl _xgetbv(unsigned int);
-void __cdecl _xrstor(void const *, unsigned __int64);
-void __cdecl _xsave(void *, unsigned __int64);
-void __cdecl _xsaveopt(void *, unsigned __int64);
void __cdecl _xsetbv(unsigned int, unsigned __int64);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
@@ -434,13 +428,21 @@ __umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
(unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
return _FullProduct >> 64;
}
-void __cdecl _xrstor64(void const *, unsigned __int64);
-void __cdecl _xsave64(void *, unsigned __int64);
-void __cdecl _xsaveopt64(void *, unsigned __int64);
#endif /* __x86_64__ */
/*----------------------------------------------------------------------------*\
+|* Multiplication
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+__emul(int __in1, int __in2) {
+ return (__int64)__in1 * (__int64)__in2;
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__emulu(unsigned int __in1, unsigned int __in2) {
+ return (unsigned __int64)__in1 * (unsigned __int64)__in2;
+}
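+/* For example, __emul(0x40000000, 4) yields 0x100000000, which would
+   overflow a plain 32-bit multiply. */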
+/*----------------------------------------------------------------------------*\
|* Bit Twiddling
\*----------------------------------------------------------------------------*/
static __inline__ unsigned char __DEFAULT_FN_ATTRS
@@ -770,27 +772,25 @@ _InterlockedCompareExchange64(__int64 volatile *_Destination,
/*----------------------------------------------------------------------------*\
|* Barriers
\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __DEFAULT_FN_ATTRS
__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
_ReadWriteBarrier(void) {
- __asm__ volatile ("" : : : "memory");
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
}
static __inline__ void __DEFAULT_FN_ATTRS
__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
_ReadBarrier(void) {
- __asm__ volatile ("" : : : "memory");
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
}
static __inline__ void __DEFAULT_FN_ATTRS
__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
_WriteBarrier(void) {
- __asm__ volatile ("" : : : "memory");
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
}
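+/* __atomic_signal_fence(__ATOMIC_SEQ_CST) is a compiler-only barrier: it
+   emits no instruction, matching the old empty asm, but it no longer relies
+   on GCC-style inline assembly, hence the dropped i386/x86_64 guard. */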
-#endif
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
__faststorefence(void) {
- __asm__ volatile("lock orq $0, (%%rsp)" : : : "memory");
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
}
#endif
/*----------------------------------------------------------------------------*\
@@ -851,7 +851,7 @@ __movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
}
static __inline__ void __DEFAULT_FN_ATTRS
__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
- __asm__("rep movsh" : : "D"(__dst), "S"(__src), "c"(__n)
+ __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
: "%edi", "%esi", "%ecx");
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -866,7 +866,7 @@ __stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
- __asm__("rep stosh" : : "D"(__dst), "a"(__x), "c"(__n)
+ __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
: "%edi", "%ecx");
}
#endif
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
new file mode 100644
index 000000000000..8e5f0331cb38
--- /dev/null
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -0,0 +1,216 @@
+/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ * CUDA headers are implemented in a way that currently makes it
+ * impossible for user code to #include them directly when compiling with
+ * Clang. They present a different view of CUDA-supplied functions
+ * depending on where in NVCC's compilation pipeline the headers are
+ * included. Neither of these modes provides function definitions with
+ * correct attributes, so we use the preprocessor to force the headers
+ * into a form that Clang can use.
+ *
+ * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
+ * this file during every CUDA compilation.
+ */
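+
+/* Illustrative note (the exact driver invocation is an assumption, not
+ * defined by this header): the effect is as if every CUDA compile were
+ * given "-include __clang_cuda_runtime_wrapper.h" on the command line.
+ */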
+
+#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
+#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
+
+#if defined(__CUDA__) && defined(__clang__)
+
+// Include some standard headers to avoid CUDA headers including them
+// while some required macros (like __THROW) are in a weird state.
+#include <stdlib.h>
+#include <cmath>
+
+// Preserve common macros that will be changed below by us or by CUDA
+// headers.
+#pragma push_macro("__THROW")
+#pragma push_macro("__CUDA_ARCH__")
+
+// WARNING: Preprocessor hacks below are based on specific details of
+// CUDA-7.x headers and are not expected to work with any other
+// version of CUDA headers.
+#include "cuda.h"
+#if !defined(CUDA_VERSION)
+#error "cuda.h did not define CUDA_VERSION"
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050
+#error "Unsupported CUDA version!"
+#endif
+
+// Make the largest subset of device functions available during host
+// compilation -- SM_35 for the time being.
+#ifndef __CUDA_ARCH__
+#define __CUDA_ARCH__ 350
+#endif
+
+#include "cuda_builtin_vars.h"
+
+// No need for device_launch_parameters.h as cuda_builtin_vars.h above
+// has taken care of builtin variables declared in the file.
+#define __DEVICE_LAUNCH_PARAMETERS_H__
+
+// {math,device}_functions.h only have declarations of the
+// functions. We don't need them as we're going to pull in their
+// definitions from .hpp files.
+#define __DEVICE_FUNCTIONS_H__
+#define __MATH_FUNCTIONS_H__
+
+#undef __CUDACC__
+#define __CUDABE__
+// Disables definitions of device-side runtime support stubs in
+// cuda_device_runtime_api.h
+#define __CUDADEVRT_INTERNAL__
+#include "host_config.h"
+#include "host_defines.h"
+#include "driver_types.h"
+#include "common_functions.h"
+#undef __CUDADEVRT_INTERNAL__
+
+#undef __CUDABE__
+#define __CUDACC__
+#include "cuda_runtime.h"
+
+#undef __CUDACC__
+#define __CUDABE__
+
+// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
+// not have at the moment. Emulate them with a builtin memcpy/memset.
+#define __nvvm_memcpy(s,d,n,a) __builtin_memcpy(s,d,n)
+#define __nvvm_memset(d,c,n,a) __builtin_memset(d,c,n)
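+// Note: the alignment argument (a) is deliberately dropped; the
+// __builtin_memcpy/__builtin_memset replacements take no alignment argument.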
+
+#include "crt/host_runtime.h"
+#include "crt/device_runtime.h"
+// device_runtime.h defines __cxa_* macros that will conflict with
+// cxxabi.h.
+// FIXME: redefine these as __device__ functions.
+#undef __cxa_vec_ctor
+#undef __cxa_vec_cctor
+#undef __cxa_vec_dtor
+#undef __cxa_vec_new2
+#undef __cxa_vec_new3
+#undef __cxa_vec_delete2
+#undef __cxa_vec_delete
+#undef __cxa_vec_delete3
+#undef __cxa_pure_virtual
+
+// We need decls for functions in CUDA's libdevice with the __device__
+// attribute only. Alas, they come either as __host__ __device__ or with
+// no attributes at all. To work around that, define __CUDACC_RTC__, which
+// produces the HD variants, and undef __host__, which gives us the desired
+// decls with the __device__ attribute.
+#pragma push_macro("__host__")
+#define __host__
+#define __CUDACC_RTC__
+#include "device_functions_decls.h"
+#undef __CUDACC_RTC__
+
+// Temporarily poison the __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
+// device_functions.hpp and math_functions*.hpp use 'static
+// __forceinline__' (with no __device__) for definitions of device
+// functions. Temporarily redefine __forceinline__ to include
+// __device__.
+#pragma push_macro("__forceinline__")
+#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
+#include "device_functions.hpp"
+#include "math_functions.hpp"
+#include "math_functions_dbl_ptx3.hpp"
+#pragma pop_macro("__forceinline__")
+
+// Pull in host-only functions that are only available when neither
+// __CUDACC__ nor __CUDABE__ is defined.
+#undef __MATH_FUNCTIONS_HPP__
+#undef __CUDABE__
+#include "math_functions.hpp"
+// Alas, additional overloads for these functions are hard to get to.
+// Considering that we only need these overloads for a few functions,
+// we can provide them here.
+static inline float rsqrt(float a) { return rsqrtf(a); }
+static inline float rcbrt(float a) { return rcbrtf(a); }
+static inline float sinpi(float a) { return sinpif(a); }
+static inline float cospi(float a) { return cospif(a); }
+static inline void sincospi(float a, float *b, float *c) {
+  return sincospif(a, b, c); // forward to the float variant, not to itself
+}
+static inline float erfcinv(float a) { return erfcinvf(a); }
+static inline float normcdfinv(float a) { return normcdfinvf(a); }
+static inline float normcdf(float a) { return normcdff(a); }
+static inline float erfcx(float a) { return erfcxf(a); }
+
+// For some reason the single-argument variant is not always declared by
+// the CUDA headers. Alas, device_functions.hpp included below needs it.
+static inline __device__ void __brkpt(int c) { __brkpt(); }
+
+// Now include *.hpp files with definitions of various GPU functions. Alas,
+// a lot of things get declared/defined with the __host__ attribute, which
+// we don't want, so we have to define it out. We also have to include
+// {device,math}_functions.hpp again in order to extract the other
+// branch of the #if/else inside.
+
+#define __host__
+#undef __CUDABE__
+#define __CUDACC__
+#undef __DEVICE_FUNCTIONS_HPP__
+#include "device_functions.hpp"
+#include "device_atomic_functions.hpp"
+#include "sm_20_atomic_functions.hpp"
+#include "sm_32_atomic_functions.hpp"
+#include "sm_20_intrinsics.hpp"
+// sm_30_intrinsics.h has declarations that use default arguments, so we
+// have to include it, and it will in turn include the .hpp file.
+#include "sm_30_intrinsics.h"
+#include "sm_32_intrinsics.hpp"
+#undef __MATH_FUNCTIONS_HPP__
+#include "math_functions.hpp"
+#pragma pop_macro("__host__")
+
+#include "texture_indirect_functions.h"
+
+// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
+#pragma pop_macro("__CUDA_ARCH__")
+#pragma pop_macro("__THROW")
+
+// Set up compiler macros expected to be seen during compilation.
+#undef __CUDABE__
+#define __CUDACC__
+#define __NVCC__
+
+#if defined(__CUDA_ARCH__)
+// We need to emit an IR declaration for the non-existent __nvvm_reflect() to
+// let the backend know that it should be treated as a const nothrow
+// function, which is what the NVVMReflect pass expects to see.
+extern "C" __device__ __attribute__((const)) int __nvvm_reflect(const void *);
+static __device__ __attribute__((used)) int __nvvm_reflect_anchor() {
+ return __nvvm_reflect("NONE");
+}
+#endif
+
+#endif // __CUDA__
+#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/lib/Headers/__wmmintrin_aes.h b/lib/Headers/__wmmintrin_aes.h
index 9f594ee56092..100799ebfdb8 100644
--- a/lib/Headers/__wmmintrin_aes.h
+++ b/lib/Headers/__wmmintrin_aes.h
@@ -25,12 +25,8 @@
#include <emmintrin.h>
-#if !defined (__AES__)
-# error "AES instructions not enabled"
-#else
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_aesenc_si128(__m128i __V, __m128i __R)
@@ -63,10 +59,8 @@ _mm_aesimc_si128(__m128i __V)
}
#define _mm_aeskeygenassist_si128(C, R) \
- __builtin_ia32_aeskeygenassist128((C), (R))
+ (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
#undef __DEFAULT_FN_ATTRS
-#endif
-
#endif /* _WMMINTRIN_AES_H */
diff --git a/lib/Headers/__wmmintrin_pclmul.h b/lib/Headers/__wmmintrin_pclmul.h
index 8d1f1b7c0868..68e944e92198 100644
--- a/lib/Headers/__wmmintrin_pclmul.h
+++ b/lib/Headers/__wmmintrin_pclmul.h
@@ -1,4 +1,4 @@
-/*===---- __wmmintrin_pclmul.h - AES intrinsics ----------------------------===
+/*===---- __wmmintrin_pclmul.h - PCLMUL intrinsics --------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -23,12 +23,8 @@
#ifndef _WMMINTRIN_PCLMUL_H
#define _WMMINTRIN_PCLMUL_H
-#if !defined (__PCLMUL__)
-# error "PCLMUL instruction is not enabled"
-#else
#define _mm_clmulepi64_si128(__X, __Y, __I) \
((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
(__v2di)(__m128i)(__Y), (char)(__I)))
-#endif
#endif /* _WMMINTRIN_PCLMUL_H */
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
index b8eb9cbf6ebb..ee347284178e 100644
--- a/lib/Headers/adxintrin.h
+++ b/lib/Headers/adxintrin.h
@@ -32,8 +32,7 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Intrinsics that are available only if __ADX__ defined */
-#ifdef __ADX__
-static __inline unsigned char __DEFAULT_FN_ATTRS
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
@@ -41,14 +40,13 @@ _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
}
#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
+static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
-#endif
/* Intrinsics that are also available if __ADX__ undefined */
static __inline unsigned char __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 5c8eb5640546..dc0dcbc7385c 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -27,7 +27,7 @@
#error "AltiVec support not enabled"
#endif
-/* constants for mapping CR6 bits to predicate result. */
+/* Constants for mapping CR6 bits to predicate result. */
#define __CR6_EQ 0
#define __CR6_EQ_REV 1
@@ -137,7 +137,7 @@ static vector double __ATTRS_o_ai vec_abs(vector double __a) {
}
#endif
-/* vec_abss */
+/* vec_abss */
#define __builtin_altivec_abss_v16qi vec_abss
#define __builtin_altivec_abss_v8hi vec_abss
#define __builtin_altivec_abss_v4si vec_abss
@@ -278,6 +278,38 @@ vec_add(vector double __a, vector double __b) {
}
#endif // __VSX__
+/* vec_adde */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_adde(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
+/* vec_addec */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif
+
/* vec_vaddubm */
#define __builtin_altivec_vaddubm vec_vaddubm
@@ -390,6 +422,12 @@ vec_vaddfp(vector float __a, vector float __b) {
/* vec_addc */
+static vector signed int __ATTRS_o_ai vec_addc(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
vector unsigned int __b) {
return __builtin_altivec_vaddcuw(__a, __b);
@@ -398,7 +436,9 @@ static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
static vector signed __int128 __ATTRS_o_ai
vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vaddcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a,
+ (vector unsigned __int128)__b);
}
static vector unsigned __int128 __ATTRS_o_ai
@@ -1512,48 +1552,6 @@ vec_cmpeq(vector double __a, vector double __b) {
}
#endif
-/* vec_cmpge */
-
-static vector bool int __ATTRS_o_ai
-vec_cmpge(vector float __a, vector float __b) {
-#ifdef __VSX__
- return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
- return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
- return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-/* Forwrad declarations as the functions are used here */
-static vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b);
-static vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b);
-
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
- return ~(vec_cmpgt(__b, __a));
-}
-
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
- return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
- return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
/* vec_cmpgt */
@@ -1613,6 +1611,74 @@ vec_cmpgt(vector double __a, vector double __b) {
return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
}
#endif
+
+/* vec_cmpge */
+
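+/* a >= b is computed as the elementwise complement of b > a: vec_cmpgt
+   yields all-ones or all-zeros per element, so ~vec_cmpgt(__b, __a) is the
+   correct bool vector. */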
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector signed char __a, vector signed char __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector signed short __a, vector signed short __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector signed int __a, vector signed int __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
+/* vec_vcmpgefp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+}
+
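The integer vec_cmpge overloads rely on the identity a >= b == ~(b > a),
which holds because the compare builtins produce all-ones or all-zeros
lanes; vec_cmple further below simply swaps the operands. A minimal sketch
(assumes -maltivec):

    #include <altivec.h>

    vector bool int cmpge_demo(vector signed int a, vector signed int b) {
      vector bool int ge  = vec_cmpge(a, b);
      vector bool int alt = ~vec_cmpgt(b, a);   /* same lanewise mask */
      (void)alt;
      return ge;
    }
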
/* vec_vcmpgtsb */
static vector bool char __attribute__((__always_inline__))
@@ -1664,6 +1730,36 @@ vec_vcmpgtfp(vector float __a, vector float __b) {
/* vec_cmple */
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector signed char __a, vector signed char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmple(vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector signed short __a, vector signed short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple(vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector signed int __a, vector signed int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple(vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpge(__b, __a);
+}
+
static vector bool int __ATTRS_o_ai
vec_cmple(vector float __a, vector float __b) {
return vec_cmpge(__b, __a);
@@ -1837,6 +1933,20 @@ vec_vctuxs(vector float __a, int __b) {
return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_double */
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_double(vector signed long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+
+static vector double __ATTRS_o_ai vec_double(vector unsigned long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+#endif
+
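vec_double converts each 64-bit integer lane to double by elementwise
initialization. A sketch (assumes a VSX-enabled PowerPC target):

    #include <altivec.h>

    vector double to_double_demo(void) {
      vector signed long long v = { -2, 40 };
      return vec_double(v);   /* { -2.0, 40.0 } */
    }
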
/* vec_div */
/* Integer vector divides (vectors are scalarized, elements divided
@@ -1942,34 +2052,16 @@ static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
(vector unsigned int)__b);
}
-static vector signed char __ATTRS_o_ai vec_eqv(vector bool char __a,
- vector signed char __b) {
- return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
- vector bool char __b) {
- return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
vector unsigned char __b) {
return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-static vector unsigned char __ATTRS_o_ai vec_eqv(vector bool char __a,
- vector unsigned char __b) {
- return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
- vector bool char __b) {
- return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+static vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+ vector bool char __b) {
+ return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
}
static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
@@ -1978,70 +2070,33 @@ static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
(vector unsigned int)__b);
}
-static vector signed short __ATTRS_o_ai vec_eqv(vector bool short __a,
- vector signed short __b) {
- return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
- vector bool short __b) {
- return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
vector unsigned short __b) {
return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-static vector unsigned short __ATTRS_o_ai vec_eqv(vector bool short __a,
- vector unsigned short __b) {
- return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
- vector bool short __b) {
- return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
- vector signed int __b) {
- return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed int __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector signed int __b) {
- return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+static vector bool short __ATTRS_o_ai vec_eqv(vector bool short __a,
+ vector bool short __b) {
+ return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
- vector bool int __b) {
+ vector signed int __b) {
return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
vector unsigned int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector unsigned int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_vsx_xxleqv(__a, __b);
}
-static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
- vector bool int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+static vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
}
static vector signed long long __ATTRS_o_ai
@@ -2050,33 +2105,15 @@ vec_eqv(vector signed long long __a, vector signed long long __b) {
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
-static vector signed long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector signed long long __b) {
- return (vector signed long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector bool long long __b) {
- return (vector signed long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
static vector unsigned long long __ATTRS_o_ai
vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
return (vector unsigned long long)
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
-static vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector bool long long __b) {
- return (vector unsigned long long)
+static vector bool long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
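
vec_eqv is a lanewise XNOR: bits are set where the operands agree. A sketch
(assumes a POWER8/VSX target, where xxleqv is available):

    #include <altivec.h>

    vector unsigned int eqv_demo(vector unsigned int a,
                                 vector unsigned int b) {
      vector unsigned int e   = vec_eqv(a, b);
      vector unsigned int alt = ~(a ^ b);   /* identical result */
      (void)alt;
      return e;
    }
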
@@ -2085,35 +2122,11 @@ static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
(vector unsigned int)__b);
}
-static vector float __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector float __b) {
- return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector float __ATTRS_o_ai vec_eqv(vector float __a,
- vector bool int __b) {
- return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector double __ATTRS_o_ai vec_eqv(vector double __a,
vector double __b) {
return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-
-static vector double __ATTRS_o_ai vec_eqv(vector bool long long __a,
- vector double __b) {
- return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector double __ATTRS_o_ai vec_eqv(vector double __a,
- vector bool long long __b) {
- return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
#endif
/* vec_expte */
@@ -2815,6 +2828,38 @@ static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const float *__b) {
#endif
/* vec_madd */
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector signed short, vector signed short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector signed short, vector signed short);
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
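
The new integer vec_madd overloads forward to vec_mladd, a modular
multiply-low-then-add on 16-bit lanes. A sketch (illustrative constants;
assumes -maltivec):

    #include <altivec.h>

    vector signed short madd_demo(void) {
      vector signed short a = vec_splats((signed short)3);
      vector signed short b = vec_splats((signed short)4);
      vector signed short c = vec_splats((signed short)5);
      return vec_madd(a, b, c);   /* every lane: 3 * 4 + 5 = 17 */
    }
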
static vector float __ATTRS_o_ai
vec_madd(vector float __a, vector float __b, vector float __c) {
@@ -3256,6 +3301,16 @@ vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17));
}
+
+static vector bool long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
vector double __b) {
return vec_perm(__a, __b,
@@ -3519,6 +3574,14 @@ vec_mergel(vector bool long long __a, vector unsigned long long __b) {
0x18, 0X19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F));
}
+static vector bool long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0X19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
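
The permute maps select bytes 0-7 of each operand for vec_mergeh and bytes
8-15 for vec_mergel, i.e. the high and low doublewords respectively. A
sketch (assumes a VSX target):

    #include <altivec.h>

    vector bool long long merge_demo(vector bool long long a,
                                     vector bool long long b) {
      vector bool long long hi = vec_mergeh(a, b);  /* { a[0], b[0] } */
      vector bool long long lo = vec_mergel(a, b);  /* { a[1], b[1] } */
      return hi & lo;
    }
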
static vector double __ATTRS_o_ai
vec_mergel(vector double __a, vector double __b) {
return vec_perm(__a, __b,
@@ -3651,21 +3714,21 @@ static vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
static vector bool int __ATTRS_o_ai
vec_mergee(vector bool int __a, vector bool int __b) {
return vec_perm(__a, __b, (vector unsigned char)
- (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
}
static vector signed int __ATTRS_o_ai
vec_mergee(vector signed int __a, vector signed int __b) {
return vec_perm(__a, __b, (vector unsigned char)
- (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
}
static vector unsigned int __ATTRS_o_ai
vec_mergee(vector unsigned int __a, vector unsigned int __b) {
return vec_perm(__a, __b, (vector unsigned char)
- (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B));
}
@@ -4439,6 +4502,11 @@ static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
return ~(__a & __b);
}
+static vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
vector signed short __b) {
return ~(__a & __b);
@@ -4465,8 +4533,8 @@ static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
}
-static vector unsigned short __ATTRS_o_ai vec_nand(vector bool short __a,
- vector unsigned short __b) {
+static vector bool short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector bool short __b) {
return ~(__a & __b);
}
@@ -4501,6 +4569,11 @@ static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
return ~(__a & __b);
}
+static vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
static vector signed long long __ATTRS_o_ai
vec_nand(vector signed long long __a, vector signed long long __b) {
return ~(__a & __b);
@@ -4531,6 +4604,11 @@ vec_nand(vector bool long long __a, vector unsigned long long __b) {
return ~(__a & __b);
}
+static vector bool long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
#endif
/* vec_nmadd */
@@ -4909,6 +4987,11 @@ static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
return __a | ~__b;
}
+static vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
vector signed short __b) {
return __a | ~__b;
@@ -4939,6 +5022,11 @@ vec_orc(vector bool short __a, vector unsigned short __b) {
return __a | ~__b;
}
+static vector bool short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
vector signed int __b) {
return __a | ~__b;
@@ -4969,6 +5057,11 @@ static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
return __a | ~__b;
}
+static vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
static vector signed long long __ATTRS_o_ai
vec_orc(vector signed long long __a, vector signed long long __b) {
return __a | ~__b;
@@ -4998,6 +5091,11 @@ static vector unsigned long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector unsigned long long __b) {
return __a | ~__b;
}
+
+static vector bool long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
#endif
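
The bool-vector overloads round out the POWER8 one-instruction logic ops:
vec_nand(a, b) == ~(a & b) and vec_orc(a, b) == a | ~b. A sketch (assumes
-mpower8-vector):

    #include <altivec.h>

    vector bool int nand_orc_demo(vector bool int a, vector bool int b) {
      return vec_nand(a, b) & vec_orc(a, b);
    }
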
/* vec_vor */
@@ -9191,17 +9289,27 @@ vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_sub(vector signed long long __a, vector signed long long __b) {
+ return __a - __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
return __a - __b;
}
-#ifdef __VSX__
static vector double __ATTRS_o_ai
vec_sub(vector double __a, vector double __b) {
return __a - __b;
}
#endif
+static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
/* vec_vsububm */
#define __builtin_altivec_vsububm vec_vsububm
@@ -10390,7 +10498,12 @@ static unsigned char __ATTRS_o_ai vec_extract(vector unsigned char __a,
return __a[__b];
}
-static short __ATTRS_o_ai vec_extract(vector short __a, int __b) {
+static unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed short __ATTRS_o_ai vec_extract(vector signed short __a, int __b) {
return __a[__b];
}
@@ -10399,7 +10512,12 @@ static unsigned short __ATTRS_o_ai vec_extract(vector unsigned short __a,
return __a[__b];
}
-static int __ATTRS_o_ai vec_extract(vector int __a, int __b) {
+static unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed int __ATTRS_o_ai vec_extract(vector signed int __a, int __b) {
return __a[__b];
}
@@ -10407,6 +10525,31 @@ static unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, int __b) {
return __a[__b];
}
+static unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, int __b) {
+ return __a[__b];
+}
+
+#ifdef __VSX__
+static signed long long __ATTRS_o_ai vec_extract(vector signed long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai
+vec_extract(vector unsigned long long __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai vec_extract(vector bool long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+ return __a[__b];
+}
+#endif
+
static float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
return __a[__b];
}
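
vec_extract is a plain indexed lane read, now also covering bool and 64-bit
element types. A sketch (assumes a VSX target for the double overload):

    #include <altivec.h>

    double extract_demo(vector double v) {
      return vec_extract(v, 1);   /* second lane */
    }
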
@@ -10427,8 +10570,16 @@ static vector unsigned char __ATTRS_o_ai vec_insert(unsigned char __a,
return __b;
}
-static vector short __ATTRS_o_ai vec_insert(short __a, vector short __b,
- int __c) {
+static vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector bool char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_insert(signed short __a,
+ vector signed short __b,
+ int __c) {
__b[__c] = __a;
return __b;
}
@@ -10440,7 +10591,16 @@ static vector unsigned short __ATTRS_o_ai vec_insert(unsigned short __a,
return __b;
}
-static vector int __ATTRS_o_ai vec_insert(int __a, vector int __b, int __c) {
+static vector bool short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector bool short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_insert(signed int __a,
+ vector signed int __b,
+ int __c) {
__b[__c] = __a;
return __b;
}
@@ -10452,6 +10612,38 @@ static vector unsigned int __ATTRS_o_ai vec_insert(unsigned int __a,
return __b;
}
+static vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector bool int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_insert(signed long long __a, vector signed long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+static vector double __ATTRS_o_ai vec_insert(double __a, vector double __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+#endif
+
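vec_insert overwrites one lane and returns the updated vector; paired with
vec_extract it round-trips a value. A sketch (assumes a VSX target):

    #include <altivec.h>

    long long insert_demo(vector signed long long v) {
      v = vec_insert(42LL, v, 0);   /* lane 0 := 42 */
      return vec_extract(v, 0);     /* -> 42 */
    }
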
static vector float __ATTRS_o_ai vec_insert(float __a, vector float __b,
int __c) {
__b[__c] = __a;
@@ -11376,6 +11568,33 @@ static vector unsigned int __ATTRS_o_ai vec_splats(unsigned int __a) {
return (vector unsigned int)(__a);
}
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai vec_splats(signed long long __a) {
+ return (vector signed long long)(__a);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+ return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_splats(signed __int128 __a) {
+ return (vector signed __int128)(__a);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+ return (vector unsigned __int128)(__a);
+}
+
+#endif
+
+static vector double __ATTRS_o_ai vec_splats(double __a) {
+ return (vector double)(__a);
+}
+#endif
+
static vector float __ATTRS_o_ai vec_splats(float __a) {
return (vector float)(__a);
}
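
vec_splats replicates a scalar into every lane; the VSX overloads extend it
to 64-bit integers, double and, on POWER8, __int128. A sketch (assumes a
VSX target):

    #include <altivec.h>

    vector double splat_demo(void) {
      return vec_splats(1.5);   /* { 1.5, 1.5 } */
    }
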
@@ -11546,8 +11765,18 @@ static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
}
+#endif
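
The vec_all_* and vec_any_* predicates reduce a lanewise compare to a
single int through the CR6 condition field set by the *_p builtins. A
sketch (assumes a VSX target for the double overload):

    #include <altivec.h>

    int all_eq_demo(vector double a, vector double b) {
      return vec_all_eq(a, b);   /* 1 iff every lane compares equal */
    }
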
/* vec_all_ge */
@@ -11698,8 +11927,18 @@ static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
}
+#endif
/* vec_all_gt */
@@ -11850,8 +12089,18 @@ static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
}
+#endif
/* vec_all_in */
@@ -12010,9 +12259,19 @@ static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
+}
+#endif
+
/* vec_all_lt */
static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
@@ -12163,15 +12422,35 @@ static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
}
+#endif
/* vec_all_nan */
-static int __attribute__((__always_inline__)) vec_all_nan(vector float __a) {
+static int __ATTRS_o_ai vec_all_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
+}
+#endif
+
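vec_all_nan exploits the fact that NaN is the only value unequal to itself:
with __CR6_EQ the predicate reports "no lane satisfied a == a", which is
exactly "every lane is NaN". A sketch (assumes a VSX target for the double
overload):

    #include <altivec.h>

    int all_nan_demo(vector double a) {
      return vec_all_nan(a);   /* scalar analogue: x != x detects NaN */
    }
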
/* vec_all_ne */
static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
@@ -12337,22 +12616,54 @@ static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
/* vec_all_nge */
-static int __attribute__((__always_inline__))
+static int __ATTRS_o_ai
vec_all_nge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_nge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
}
+#endif
/* vec_all_ngt */
-static int __attribute__((__always_inline__))
+static int __ATTRS_o_ai
vec_all_ngt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_ngt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
}
+#endif
/* vec_all_nle */
@@ -12540,8 +12851,18 @@ static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
}
+#endif
/* vec_any_ge */
@@ -12700,9 +13021,19 @@ static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
/* vec_any_gt */
static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
@@ -12860,8 +13191,18 @@ static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
}
+#endif
/* vec_any_le */
@@ -13020,8 +13361,18 @@ static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
}
+#endif
/* vec_any_lt */
@@ -13180,8 +13531,18 @@ static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
}
+#endif
/* vec_any_nan */
@@ -13354,9 +13715,19 @@ static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
/* vec_any_nge */
static int __attribute__((__always_inline__))
@@ -13411,11 +13782,14 @@ support). As a result, we need to be able to turn off support for those.
The remaining ones (currently controlled by -mcrypto for GCC) still
need to be provided on compliant hardware even if Vector.Crypto is not
provided.
-FIXME: the naming convention for the builtins will be adjusted due
-to the inconsistency (__builtin_crypto_ prefix on builtins that cannot be
-removed with -mno-crypto). This is under development.
*/
#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
static vector unsigned long long __attribute__((__always_inline__))
__builtin_crypto_vsbox(vector unsigned long long __a) {
return __builtin_altivec_crypto_vsbox(__a);
@@ -13447,6 +13821,11 @@ __builtin_crypto_vncipherlast(vector unsigned long long __a,
#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+ _Generic((X), vector unsigned int: __builtin_crypto_vshasigmaw, \
+ vector unsigned long long: __builtin_crypto_vshasigmad) \
+((X), (Y), (Z))
#endif
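
vec_shasigma_be uses C11 _Generic to pick the SHA-256 or SHA-512 sigma
builtin from the type of its first argument at compile time. A sketch (the
constant operands are illustrative; assumes a target with -mcrypto):

    #include <altivec.h>

    vector unsigned int sha_demo(vector unsigned int x) {
      return vec_shasigma_be(x, 1, 0xf);   /* dispatches to vshasigmaw */
    }
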
#ifdef __POWER8_VECTOR__
@@ -13494,8 +13873,9 @@ __builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
return __builtin_altivec_crypto_vpmsumw(__a, __b);
}
-static vector unsigned long long __ATTRS_o_ai __builtin_crypto_vpmsumb(
- vector unsigned long long __a, vector unsigned long long __b) {
+static vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a,
+ vector unsigned long long __b) {
return __builtin_altivec_crypto_vpmsumd(__a, __b);
}
@@ -13504,6 +13884,9 @@ static vector signed char __ATTRS_o_ai vec_vgbbd (vector signed char __a)
return __builtin_altivec_vgbbd((vector unsigned char) __a);
}
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
static vector unsigned char __ATTRS_o_ai vec_vgbbd (vector unsigned char __a)
{
return __builtin_altivec_vgbbd(__a);
@@ -13521,6 +13904,14 @@ vec_vbpermq (vector unsigned char __a, vector unsigned char __b)
{
return __builtin_altivec_vbpermq(__a, __b);
}
+
+#ifdef __powerpc64__
+static vector unsigned long long __attribute__((__always_inline__))
+vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
+ return __builtin_altivec_vbpermq((vector unsigned char) __a,
+ (vector unsigned char) __b);
+}
+#endif
#endif
#undef __ATTRS_o_ai
diff --git a/lib/Headers/ammintrin.h b/lib/Headers/ammintrin.h
index 4d0e770ff9e4..4880fd7ebad1 100644
--- a/lib/Headers/ammintrin.h
+++ b/lib/Headers/ammintrin.h
@@ -24,27 +24,23 @@
#ifndef __AMMINTRIN_H
#define __AMMINTRIN_H
-#ifndef __SSE4A__
-#error "SSE4A instruction set not enabled"
-#else
-
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
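
With the per-function __target__("sse4a") attribute replacing the #error
guard, the header can be included unconditionally; only code that actually
calls the intrinsics needs the feature enabled, via -msse4a or a matching
function attribute. A sketch:

    #include <ammintrin.h>

    __attribute__((__target__("sse4a")))
    void stream_demo(double *p, __m128d v) {
      _mm_stream_sd(p, v);   /* non-temporal 64-bit store */
    }
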
/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
/// integer vector operand at the index idx and of the length len.
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
-/// \endcode
+/// \endcode
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c EXTRQ instruction.
-/// \endcode
+/// \endcode
///
/// \param x
/// The value from which bits are extracted.
@@ -52,10 +48,10 @@
/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
/// are zero, the length is interpreted as 64.
/// \param idx
-/// Bits [5:0] specify the index of the least significant bit; the other
-/// bits are ignored. If the sum of the index and length is greater than
-/// 64, the result is undefined. If the length and index are both zero,
-/// bits [63:0] of parameter x are extracted. If the length is zero
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter x are extracted. If the length is zero
/// but the index is non-zero, the result is undefined.
/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
/// extracted from the source operand.
@@ -68,21 +64,21 @@
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c EXTRQ instruction.
-/// \endcode
+/// \endcode
///
/// \param __x
/// The value from which bits are extracted.
/// \param __y
-/// Specifies the index of the least significant bit at [13:8]
-/// and the length at [5:0]; all other bits are ignored.
+/// Specifies the index of the least significant bit at [13:8]
+/// and the length at [5:0]; all other bits are ignored.
/// If bits [5:0] are zero, the length is interpreted as 64.
-/// If the sum of the index and length is greater than 64, the result is
-/// undefined. If the length and index are both zero, bits [63:0] of
-/// parameter __x are extracted. If the length is zero but the index is
-/// non-zero, the result is undefined.
-/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
+/// parameter __x are extracted. If the length is zero but the index is
+/// non-zero, the result is undefined.
+/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
/// from the source operand.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_extract_si64(__m128i __x, __m128i __y)
@@ -90,40 +86,40 @@ _mm_extract_si64(__m128i __x, __m128i __y)
return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
}
-/// \brief Inserts bits of a specified length from the source integer vector
-/// y into the lower 64 bits of the destination integer vector x at the
+/// \brief Inserts bits of a specified length from the source integer vector
+/// y into the lower 64 bits of the destination integer vector x at the
/// index idx and of the length len.
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len,
/// const int idx);
-/// \endcode
+/// \endcode
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c INSERTQ instruction.
-/// \endcode
+/// \endcode
///
/// \param x
-/// The destination operand where bits will be inserted. The inserted bits
-/// are defined by the length len and by the index idx specifying the least
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length len and by the index idx specifying the least
/// significant bit.
/// \param y
-/// The source operand containing the bits to be extracted. The extracted
+/// The source operand containing the bits to be extracted. The extracted
/// bits are the least significant bits of operand y of length len.
/// \param len
/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
/// are zero, the length is interpreted as 64.
/// \param idx
-/// Bits [5:0] specify the index of the least significant bit; the other
-/// bits are ignored. If the sum of the index and length is greater than
-/// 64, the result is undefined. If the length and index are both zero,
-/// bits [63:0] of parameter y are inserted into parameter x. If the
+/// Bits [5:0] specify the index of the least significant bit; the other
+/// bits are ignored. If the sum of the index and length is greater than
+/// 64, the result is undefined. If the length and index are both zero,
+/// bits [63:0] of parameter y are inserted into parameter x. If the
/// length is zero but the index is non-zero, the result is undefined.
-/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// \returns A 128-bit integer vector containing the original lower 64-bits
/// of destination operand x with the specified bitfields replaced by the
-/// lower bits of source operand y. The upper 64 bits of the return value
+/// lower bits of source operand y. The upper 64 bits of the return value
/// are undefined.
#define _mm_inserti_si64(x, y, len, idx) \
@@ -131,33 +127,33 @@ _mm_extract_si64(__m128i __x, __m128i __y)
(__v2di)(__m128i)(y), \
(char)(len), (char)(idx)))
-/// \brief Inserts bits of a specified length from the source integer vector
-/// __y into the lower 64 bits of the destination integer vector __x at
+/// \brief Inserts bits of a specified length from the source integer vector
+/// __y into the lower 64 bits of the destination integer vector __x at
/// the index and of the length specified by __y.
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c INSERTQ instruction.
-/// \endcode
+/// \endcode
///
/// \param __x
-/// The destination operand where bits will be inserted. The inserted bits
-/// are defined by the length and by the index of the least significant bit
+/// The destination operand where bits will be inserted. The inserted bits
+/// are defined by the length and by the index of the least significant bit
/// specified by operand __y.
/// \param __y
-/// The source operand containing the bits to be extracted. The extracted
+/// The source operand containing the bits to be extracted. The extracted
/// bits are the least significant bits of operand __y with length specified
-/// by bits [69:64]. These are inserted into the destination at the index
+/// by bits [69:64]. These are inserted into the destination at the index
/// specified by bits [77:72]; all other bits are ignored.
/// If bits [69:64] are zero, the length is interpreted as 64.
-/// If the sum of the index and length is greater than 64, the result is
-/// undefined. If the length and index are both zero, bits [63:0] of
+/// If the sum of the index and length is greater than 64, the result is
+/// undefined. If the length and index are both zero, bits [63:0] of
/// parameter __y are inserted into parameter __x. If the length
-/// is zero but the index is non-zero, the result is undefined.
-/// \returns A 128-bit integer vector containing the original lower 64-bits
+/// is zero but the index is non-zero, the result is undefined.
+/// \returns A 128-bit integer vector containing the original lower 64-bits
/// of destination operand __x with the specified bitfields replaced by the
-/// lower bits of source operand __y. The upper 64 bits of the return value
+/// lower bits of source operand __y. The upper 64 bits of the return value
/// are undefined.
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -166,15 +162,15 @@ _mm_insert_si64(__m128i __x, __m128i __y)
return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
}
-/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
+/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c MOVNTSD instruction.
-/// \endcode
+/// \endcode
///
/// \param __p
/// The 64-bit memory location used to store the register value.
@@ -193,9 +189,9 @@ _mm_stream_sd(double *__p, __m128d __a)
///
/// \headerfile <x86intrin.h>
///
-/// \code
+/// \code
/// This intrinsic corresponds to the \c MOVNTSS instruction.
-/// \endcode
+/// \endcode
///
/// \param __p
/// The 32-bit memory location used to store the register value.
@@ -210,6 +206,4 @@ _mm_stream_ss(float *__p, __m128 __a)
#undef __DEFAULT_FN_ATTRS
-#endif /* __SSE4A__ */
-
#endif /* __AMMINTRIN_H */
diff --git a/lib/Headers/arm_acle.h b/lib/Headers/arm_acle.h
index 73a7e76ce3c4..4be1d097dc5e 100644
--- a/lib/Headers/arm_acle.h
+++ b/lib/Headers/arm_acle.h
@@ -175,14 +175,18 @@ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
return __ror(__rev(t), 16);
}
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
- __rev16l(unsigned long t) {
- return __rorl(__revl(t), sizeof(long) / 2);
-}
-
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t t) {
- return __rorll(__revll(t), 32);
+ return (((uint64_t)__rev16(t >> 32)) << 32) | __rev16(t);
+}
+
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+ __rev16l(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __rev16(t);
+#else
+ return __rev16ll(t);
+#endif
}
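
__rev16 swaps the bytes inside each 16-bit halfword (0x11223344 becomes
0x22114433); the fixed __rev16ll applies that to both 32-bit halves without
exchanging them, and __rev16l now picks the right width from the size of
long. A sketch (assumes an ARM or AArch64 target):

    #include <arm_acle.h>
    #include <stdint.h>

    uint64_t rev16_demo(void) {
      return __rev16ll(0x1122334455667788ULL);  /* 0x2211443366558877 */
    }
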
/* REVSH */
diff --git a/lib/Headers/avx2intrin.h b/lib/Headers/avx2intrin.h
index d8b6b0aa4d23..f786572dae7d 100644
--- a/lib/Headers/avx2intrin.h
+++ b/lib/Headers/avx2intrin.h
@@ -29,7 +29,7 @@
#define __AVX2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
@@ -124,10 +124,9 @@ _mm256_adds_epu16(__m256i __a, __m256i __b)
return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
}
-#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
- __m256i __a = (a); \
- __m256i __b = (b); \
- (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)); })
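
Casting the macro arguments in place, instead of binding them to
block-local __a/__b temporaries, avoids shadowing caller variables with
those reserved names while still type-checking the operands. Usage is
unchanged; a sketch (assumes AVX2 is enabled for the caller):

    #include <immintrin.h>

    __attribute__((__target__("avx2")))
    __m256i alignr_demo(__m256i hi, __m256i lo) {
      /* Concatenate and shift right by 4 bytes, per 128-bit lane. */
      return _mm256_alignr_epi8(hi, lo, 4);
    }
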
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_and_si256(__m256i __a, __m256i __b)
@@ -160,20 +159,19 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
(__v32qi)__M);
}
-#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
- __m256i __V1 = (V1); \
- __m256i __V2 = (V2); \
- (__m256i)__builtin_shufflevector((__v16hi)__V1, (__v16hi)__V2, \
- (((M) & 0x01) ? 16 : 0), \
- (((M) & 0x02) ? 17 : 1), \
- (((M) & 0x04) ? 18 : 2), \
- (((M) & 0x08) ? 19 : 3), \
- (((M) & 0x10) ? 20 : 4), \
- (((M) & 0x20) ? 21 : 5), \
- (((M) & 0x40) ? 22 : 6), \
- (((M) & 0x80) ? 23 : 7), \
- (((M) & 0x01) ? 24 : 8), \
- (((M) & 0x02) ? 25 : 9), \
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), \
+ (((M) & 0x01) ? 16 : 0), \
+ (((M) & 0x02) ? 17 : 1), \
+ (((M) & 0x04) ? 18 : 2), \
+ (((M) & 0x08) ? 19 : 3), \
+ (((M) & 0x10) ? 20 : 4), \
+ (((M) & 0x20) ? 21 : 5), \
+ (((M) & 0x40) ? 22 : 6), \
+ (((M) & 0x80) ? 23 : 7), \
+ (((M) & 0x01) ? 24 : 8), \
+ (((M) & 0x02) ? 25 : 9), \
(((M) & 0x04) ? 26 : 10), \
(((M) & 0x08) ? 27 : 11), \
(((M) & 0x10) ? 28 : 12), \
@@ -208,7 +206,9 @@ _mm256_cmpeq_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)((__v32qi)__a > (__v32qi)__b);
+ /* This function always performs a signed comparison, but __v32qi is a char
+ which may be signed or unsigned, so use __v32qs. */
+ return (__m256i)((__v32qs)__a > (__v32qs)__b);
}
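
Whether plain char is signed depends on the target and on -funsigned-char,
so the compare goes through __v32qs, an explicitly signed lane type. A
sketch (assumes AVX2 is enabled for the caller):

    #include <immintrin.h>

    __attribute__((__target__("avx2")))
    __m256i cmpgt_demo(__m256i a, __m256i b) {
      return _mm256_cmpgt_epi8(a, b);   /* lanewise: a[i] > b[i] ? -1 : 0 */
    }
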
static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -488,8 +488,8 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b)
}
#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
- __m256i __a = (a); \
- (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
+ (__v8si)_mm256_setzero_si256(), \
(imm) & 0x3, ((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
4 + (((imm) & 0x03) >> 0), \
@@ -498,8 +498,8 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b)
4 + (((imm) & 0xc0) >> 6)); })
#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
- __m256i __a = (a); \
- (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
0, 1, 2, 3, \
4 + (((imm) & 0x03) >> 0), \
4 + (((imm) & 0x0c) >> 2), \
@@ -512,8 +512,8 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b)
12 + (((imm) & 0xc0) >> 6)); })
#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
- __m256i __a = (a); \
- (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
+ (__v16hi)_mm256_setzero_si256(), \
(imm) & 0x3,((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
4, 5, 6, 7, \
@@ -542,8 +542,7 @@ _mm256_sign_epi32(__m256i __a, __m256i __b)
}
#define _mm256_slli_si256(a, count) __extension__ ({ \
- __m256i __a = (a); \
- (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
+ (__m256i)__builtin_ia32_pslldqi256((__m256i)(a), (count)*8); })
#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
@@ -608,8 +607,7 @@ _mm256_sra_epi32(__m256i __a, __m128i __count)
}
#define _mm256_srli_si256(a, count) __extension__ ({ \
- __m256i __a = (a); \
- (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
+ (__m256i)__builtin_ia32_psrldqi256((__m256i)(a), (count)*8); })
#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
@@ -752,15 +750,15 @@ _mm256_xor_si256(__m256i __a, __m256i __b)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_stream_load_si256(__m256i *__V)
+_mm256_stream_load_si256(__m256i const *__V)
{
- return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V);
+ return (__m256i)__builtin_ia32_movntdqa256((const __v4di *)__V);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_broadcastss_ps(__m128 __X)
{
- return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -772,13 +770,13 @@ _mm_broadcastsd_pd(__m128d __a)
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_broadcastss_ps(__m128 __X)
{
- return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_broadcastsd_pd(__m128d __X)
{
- return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -788,18 +786,16 @@ _mm256_broadcastsi128_si256(__m128i __X)
}
#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
- __m128i __V1 = (V1); \
- __m128i __V2 = (V2); \
- (__m128i)__builtin_shufflevector((__v4si)__V1, (__v4si)__V2, \
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), \
(((M) & 0x01) ? 4 : 0), \
(((M) & 0x02) ? 5 : 1), \
(((M) & 0x04) ? 6 : 2), \
(((M) & 0x08) ? 7 : 3)); })
#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
- __m256i __V1 = (V1); \
- __m256i __V2 = (V2); \
- (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), \
(((M) & 0x01) ? 8 : 0), \
(((M) & 0x02) ? 9 : 1), \
(((M) & 0x04) ? 10 : 2), \
@@ -812,50 +808,50 @@ _mm256_broadcastsi128_si256(__m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_broadcastb_epi8(__m128i __X)
{
- return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X);
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_broadcastw_epi16(__m128i __X)
{
- return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X);
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_broadcastd_epi32(__m128i __X)
{
- return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X);
+ return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_broadcastq_epi64(__m128i __X)
{
- return (__m256i)__builtin_ia32_pbroadcastq256(__X);
+ return (__m256i)__builtin_shufflevector(__X, __X, 0, 0, 0, 0);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_broadcastb_epi8(__m128i __X)
{
- return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X);
+ return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_broadcastw_epi16(__m128i __X)
{
- return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X);
+ return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_broadcastd_epi32(__m128i __X)
{
- return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X);
+ return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_broadcastq_epi64(__m128i __X)
{
- return (__m128i)__builtin_ia32_pbroadcastq128(__X);
+ return (__m128i)__builtin_shufflevector(__X, __X, 0, 0);
}
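
Expressing the broadcasts as generic __builtin_shufflevector calls with an
all-zero index mask, rather than opaque target builtins, lets the optimizer
reason about them; the backend still selects vpbroadcast*. A sketch
(assumes AVX2 is enabled for the caller):

    #include <immintrin.h>

    __attribute__((__target__("avx2")))
    __m256i bcast_demo(__m128i x) {
      return _mm256_broadcastd_epi32(x);   /* lane 0 copied to all 8 lanes */
    }
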
static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -865,43 +861,39 @@ _mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
}
#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
- __m256d __V = (V); \
- (__m256d)__builtin_shufflevector((__v4df)__V, (__v4df) _mm256_setzero_pd(), \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
+ (__v4df)_mm256_setzero_pd(), \
(M) & 0x3, ((M) & 0xc) >> 2, \
((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_permutevar8x32_ps(__m256 __a, __m256 __b)
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
{
- return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8sf)__b);
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
}
#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
- __m256i __V = (V); \
- (__m256i)__builtin_shufflevector((__v4di)__V, (__v4di) _mm256_setzero_si256(), \
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
(M) & 0x3, ((M) & 0xc) >> 2, \
((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
- __m256i __V1 = (V1); \
- __m256i __V2 = (V2); \
- (__m256i)__builtin_ia32_permti256(__V1, __V2, (M)); })
+ (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
#define _mm256_extracti128_si256(V, M) __extension__ ({ \
- (__m128i)__builtin_shufflevector( \
- (__v4di)(V), \
- (__v4di)(_mm256_setzero_si256()), \
- (((M) & 1) ? 2 : 0), \
- (((M) & 1) ? 3 : 1) );})
+ (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
+ (__v4di)_mm256_setzero_si256(), \
+ (((M) & 1) ? 2 : 0), \
+ (((M) & 1) ? 3 : 1) ); })
#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector( \
- (__v4di)(V1), \
- (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
- (((M) & 1) ? 0 : 4), \
- (((M) & 1) ? 1 : 5), \
- (((M) & 1) ? 4 : 2), \
- (((M) & 1) ? 5 : 3) );})
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+ (((M) & 1) ? 0 : 4), \
+ (((M) & 1) ? 1 : 5), \
+ (((M) & 1) ? 4 : 2), \
+ (((M) & 1) ? 5 : 3) ); })
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskload_epi32(int const *__X, __m256i __M)
@@ -1012,244 +1004,211 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
}
#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
- __m128d __a = (a); \
- double const *__m = (m); \
- __m128i __i = (i); \
- __m128d __mask = (mask); \
- (__m128d)__builtin_ia32_gatherd_pd((__v2df)__a, (const __v2df *)__m, \
- (__v4si)__i, (__v2df)__mask, (s)); })
+  (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
- __m256d __a = (a); \
- double const *__m = (m); \
- __m128i __i = (i); \
- __m256d __mask = (mask); \
- (__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \
- (__v4si)__i, (__v4df)__mask, (s)); })
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
- __m128d __a = (a); \
- double const *__m = (m); \
- __m128i __i = (i); \
- __m128d __mask = (mask); \
- (__m128d)__builtin_ia32_gatherq_pd((__v2df)__a, (const __v2df *)__m, \
- (__v2di)__i, (__v2df)__mask, (s)); })
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)); })
#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
- __m256d __a = (a); \
- double const *__m = (m); \
- __m256i __i = (i); \
- __m256d __mask = (mask); \
- (__m256d)__builtin_ia32_gatherq_pd256((__v4df)__a, (const __v4df *)__m, \
- (__v4di)__i, (__v4df)__mask, (s)); })
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)); })
#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
- __m128 __a = (a); \
- float const *__m = (m); \
- __m128i __i = (i); \
- __m128 __mask = (mask); \
- (__m128)__builtin_ia32_gatherd_ps((__v4sf)__a, (const __v4sf *)__m, \
- (__v4si)__i, (__v4sf)__mask, (s)); })
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
- __m256 __a = (a); \
- float const *__m = (m); \
- __m256i __i = (i); \
- __m256 __mask = (mask); \
- (__m256)__builtin_ia32_gatherd_ps256((__v8sf)__a, (const __v8sf *)__m, \
- (__v8si)__i, (__v8sf)__mask, (s)); })
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)); })
#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
- __m128 __a = (a); \
- float const *__m = (m); \
- __m128i __i = (i); \
- __m128 __mask = (mask); \
- (__m128)__builtin_ia32_gatherq_ps((__v4sf)__a, (const __v4sf *)__m, \
- (__v2di)__i, (__v4sf)__mask, (s)); })
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
- __m128 __a = (a); \
- float const *__m = (m); \
- __m256i __i = (i); \
- __m128 __mask = (mask); \
- (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \
- (__v4di)__i, (__v4sf)__mask, (s)); })
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)); })
#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
- __m128i __a = (a); \
- int const *__m = (m); \
- __m128i __i = (i); \
- __m128i __mask = (mask); \
- (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \
- (__v4si)__i, (__v4si)__mask, (s)); })
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
- __m256i __a = (a); \
- int const *__m = (m); \
- __m256i __i = (i); \
- __m256i __mask = (mask); \
- (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \
- (__v8si)__i, (__v8si)__mask, (s)); })
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)); })
#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
- __m128i __a = (a); \
- int const *__m = (m); \
- __m128i __i = (i); \
- __m128i __mask = (mask); \
- (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \
- (__v2di)__i, (__v4si)__mask, (s)); })
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
- __m128i __a = (a); \
- int const *__m = (m); \
- __m256i __i = (i); \
- __m128i __mask = (mask); \
- (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \
- (__v4di)__i, (__v4si)__mask, (s)); })
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)); })
#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
- __m128i __a = (a); \
- long long const *__m = (m); \
- __m128i __i = (i); \
- __m128i __mask = (mask); \
- (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
- (__v4si)__i, (__v2di)__mask, (s)); })
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
- __m256i __a = (a); \
- long long const *__m = (m); \
- __m128i __i = (i); \
- __m256i __mask = (mask); \
- (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
- (__v4si)__i, (__v4di)__mask, (s)); })
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
- __m128i __a = (a); \
- long long const *__m = (m); \
- __m128i __i = (i); \
- __m128i __mask = (mask); \
- (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
- (__v2di)__i, (__v2di)__mask, (s)); })
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)); })
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
- __m256i __a = (a); \
- long long const *__m = (m); \
- __m256i __i = (i); \
- __m256i __mask = (mask); \
- (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
- (__v4di)__i, (__v4di)__mask, (s)); })
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)); })
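The rewritten masked-gather macros type-check their arguments through paired casts such as (__v4si)(__m128i)(i) instead of statement-local temporaries; an argument of the wrong type still fails at the inner (__m128i) conversion, so the compile-time checking the temporaries provided is preserved. A minimal usage sketch (the caller and its name are mine, assuming <immintrin.h> is included and AVX2 is enabled):

    #include <immintrin.h>

    __m128 demo(__m128 src, float const *base, __m128i idx, __m128 mask) {
      /* the scale must be a compile-time constant of 1, 2, 4 or 8 */
      return _mm_mask_i32gather_ps(src, base, idx, mask, 4);
    }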
#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
- double const *__m = (m); \
- __m128i __i = (i); \
- (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_setzero_pd(), \
- (const __v2df *)__m, (__v4si)__i, \
- (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+ (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
- double const *__m = (m); \
- __m128i __i = (i); \
- (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_setzero_pd(), \
- (const __v4df *)__m, (__v4si)__i, \
- (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+ (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
- double const *__m = (m); \
- __m128i __i = (i); \
- (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_setzero_pd(), \
- (const __v2df *)__m, (__v2di)__i, \
- (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+ (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)); })
#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
- double const *__m = (m); \
- __m256i __i = (i); \
- (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_setzero_pd(), \
- (const __v4df *)__m, (__v4di)__i, \
- (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+ (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)); })
#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
- float const *__m = (m); \
- __m128i __i = (i); \
- (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_setzero_ps(), \
- (const __v4sf *)__m, (__v4si)__i, \
- (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+ (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
- float const *__m = (m); \
- __m256i __i = (i); \
- (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_setzero_ps(), \
- (const __v8sf *)__m, (__v8si)__i, \
- (__v8sf)_mm256_set1_ps((float)(int)-1), (s)); })
+ (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)); })
#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
- float const *__m = (m); \
- __m128i __i = (i); \
- (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_setzero_ps(), \
- (const __v4sf *)__m, (__v2di)__i, \
- (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+ (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
- float const *__m = (m); \
- __m256i __i = (i); \
- (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_setzero_ps(), \
- (const __v4sf *)__m, (__v4di)__i, \
- (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+ (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)); })
#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
- int const *__m = (m); \
- __m128i __i = (i); \
- (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_setzero_si128(), \
- (const __v4si *)__m, (__v4si)__i, \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
- int const *__m = (m); \
- __m256i __i = (i); \
- (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_setzero_si256(), \
- (const __v8si *)__m, (__v8si)__i, \
- (__v8si)_mm256_set1_epi32(-1), (s)); })
+ (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)); })
#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
- int const *__m = (m); \
- __m128i __i = (i); \
- (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_setzero_si128(), \
- (const __v4si *)__m, (__v2di)__i, \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
- int const *__m = (m); \
- __m256i __i = (i); \
- (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_setzero_si128(), \
- (const __v4si *)__m, (__v4di)__i, \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
- long long const *__m = (m); \
- __m128i __i = (i); \
- (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \
- (const __v2di *)__m, (__v4si)__i, \
- (__v2di)_mm_set1_epi64x(-1), (s)); })
+ (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
- long long const *__m = (m); \
- __m128i __i = (i); \
- (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \
- (const __v4di *)__m, (__v4si)__i, \
- (__v4di)_mm256_set1_epi64x(-1), (s)); })
+ (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
- long long const *__m = (m); \
- __m128i __i = (i); \
- (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \
- (const __v2di *)__m, (__v2di)__i, \
- (__v2di)_mm_set1_epi64x(-1), (s)); })
+ (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
- long long const *__m = (m); \
- __m256i __i = (i); \
- (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \
- (const __v4di *)__m, (__v4di)__i, \
- (__v4di)_mm256_set1_epi64x(-1), (s)); })
+ (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)); })
#undef __DEFAULT_FN_ATTRS
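In the non-masked gathers above, the pass-through source becomes _mm_undefined_*() and the all-ones mask is built from a self-comparison: every element is gathered when all mask sign bits are set, so the source value is never observed, and cmpeq(x, x) produces all-ones lanes without encoding a floating-point constant (the old _mm_set1_pd(-1.0) also set each sign bit, so behavior is unchanged). A hedged illustration (example caller is mine; <immintrin.h> and AVX2 assumed as before):

    double table[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    __m256d gather_even(void) {
      __m128i idx = _mm_set_epi32(6, 4, 2, 0);       /* lanes 0..3 <- table[0,2,4,6] */
      return _mm256_i32gather_pd(table, idx, sizeof(double));
    }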
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index 9e8297a9c9a5..f289ed71a332 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -34,10 +34,10 @@ typedef char __v64qi __attribute__ ((__vector_size__ (64)));
typedef short __v32hi __attribute__ ((__vector_size__ (64)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
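Adding __target__("avx512bw") to the shared attribute set lets these always_inline wrappers sit in a header that is parsed regardless of the command-line feature set; calls are then accepted only from functions that themselves have the feature enabled. A sketch of how a caller composes with this (function name is mine):

    __attribute__((__target__("avx512bw")))
    __m512i add_bytes(__m512i a, __m512i b) {
      return _mm512_add_epi8(a, b);   /* legal here because avx512bw is in scope */
    }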
static __inline __v64qi __DEFAULT_FN_ATTRS
-_mm512_setzero_qi (void) {
+_mm512_setzero_qi(void) {
return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -49,7 +49,7 @@ _mm512_setzero_qi (void) {
}
static __inline __v32hi __DEFAULT_FN_ATTRS
-_mm512_setzero_hi (void) {
+_mm512_setzero_hi(void) {
return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -363,8 +363,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi)
- _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -385,8 +384,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi)
- _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -407,8 +405,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -429,8 +426,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -451,8 +447,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -476,7 +471,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_abs_epi8 (__m512i __A)
{
return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -492,7 +487,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -500,7 +495,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_abs_epi16 (__m512i __A)
{
return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -516,7 +511,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -525,7 +520,7 @@ _mm512_packs_epi32 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A,
(__v16si) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -553,7 +548,7 @@ _mm512_packs_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -581,7 +576,7 @@ _mm512_packus_epi32 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A,
(__v16si) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -609,7 +604,7 @@ _mm512_packus_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -637,7 +632,7 @@ _mm512_adds_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -656,7 +651,7 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -665,7 +660,7 @@ _mm512_adds_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -684,7 +679,7 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -693,7 +688,7 @@ _mm512_adds_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -712,7 +707,7 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -721,7 +716,7 @@ _mm512_adds_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -740,7 +735,7 @@ _mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -749,7 +744,7 @@ _mm512_avg_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -777,7 +772,7 @@ _mm512_avg_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -805,7 +800,7 @@ _mm512_max_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -833,7 +828,7 @@ _mm512_max_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -861,7 +856,7 @@ _mm512_max_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -889,7 +884,7 @@ _mm512_max_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -917,7 +912,7 @@ _mm512_min_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -945,7 +940,7 @@ _mm512_min_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -973,7 +968,7 @@ _mm512_min_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -1001,7 +996,7 @@ _mm512_min_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -1029,7 +1024,7 @@ _mm512_shuffle_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -1048,7 +1043,7 @@ _mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -1057,7 +1052,7 @@ _mm512_subs_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -1076,7 +1071,7 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -1085,7 +1080,7 @@ _mm512_subs_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -1104,7 +1099,7 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -1113,7 +1108,7 @@ _mm512_subs_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) -1);
}
@@ -1132,7 +1127,7 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi (),
+ (__v64qi) _mm512_setzero_qi(),
(__mmask64) __U);
}
@@ -1141,7 +1136,7 @@ _mm512_subs_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) -1);
}
@@ -1160,7 +1155,7 @@ _mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi (),
+ (__v32hi) _mm512_setzero_hi(),
(__mmask32) __U);
}
@@ -1204,6 +1199,303 @@ _mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
(__mmask32) __U);
}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
+ __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) {
+ return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_madd_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v16si) _mm512_setzero_si512(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtsepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)_mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi)__O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtusepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_cvtepi16_epi8 (__m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) __O,
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
+ return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
+ (__v32qi) _mm256_setzero_si256(),
+ __M);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpackhi_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi8 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) __W,
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A,
+ (__v64qi) __B,
+ (__v64qi) _mm512_setzero_qi(),
+ (__mmask64) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_unpacklo_epi16 (__m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) -1);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+ __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) _mm512_setzero_hi(),
+ (__mmask32) __U);
+}
+
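Among the additions, _mm512_madd_epi16 is the classic pmaddwd shape widened to 512 bits. A usage sketch (function name is mine, assuming -mavx512bw):

    __m512i dot_pairs(__m512i a, __m512i b) {
      /* multiplies 32 pairs of int16 lanes and sums adjacent products
         into 16 int32 lanes */
      return _mm512_madd_epi16(a, b);
    }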
#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
(__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
(__v64qi)(__m512i)(b), \
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index c946de286742..afee4903ba77 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -29,7 +29,7 @@
#define __AVX512DQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
@@ -237,6 +237,542 @@ _mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
(__mmask16) __U);
}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R); })
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvtps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundps_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
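The _round variants take an explicit rounding mode in place of _MM_FROUND_CUR_DIRECTION; combining a rounding control with _MM_FROUND_NO_EXC gives the usual embedded-rounding form. A hedged example (caller is mine, assuming AVX512DQ):

    __m512i to_i64_nearest(__m512d x) {
      return _mm512_cvt_roundpd_epi64(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }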
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepi64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepi64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepi64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epi64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttpd_epu64 (__m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
+ return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundpd_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundpd_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundpd_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epi64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epi64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epi64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epi64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_cvttps_epu64 (__m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
+ return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
+ (__v8di) _mm512_setzero_si512(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundps_epu64(__A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(),(__mmask8) -1, __R);})
+
+#define _mm512_mask_cvtt_roundps_epu64(__W, __U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvtt_roundps_epu64(__U, __A, __R) __extension__ ({ \
+ (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, \
+ (__v8di) _mm512_setzero_si512(), (__mmask8) __U, __R);})
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_pd (__m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
+ return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_pd(__A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_pd(__W, __U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+
+#define _mm512_maskz_cvt_roundepu64_pd(__U, __A, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_cvtepu64_ps (__m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
+ return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
+ (__v8sf) _mm256_setzero_ps(),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_ps(__A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_cvt_roundepu64_ps(__W, __U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_cvt_roundepu64_ps(__U, __A, __R) __extension__ ({ \
+ (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U, __R);})
+
+#define _mm512_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C,\
+ (__v8df) __W, (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_pd(__A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_range_round_pd(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) __W, (__mmask8) __U, __R);})
+
+#define _mm512_maskz_range_round_pd(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, (__v8df) __B, __C, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, __C, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A,(__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, \
+ _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_range_round_ps(__A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_range_round_ps(__W, __U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_range_round_ps(__U, __A, __B, __C, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ __C, (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+
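_mm512_range_pd wraps VRANGEPD, whose immediate selects the operation; per my reading of the ISA (not spelled out in this header), imm8[1:0] chooses between min, max, absolute min, and absolute max, and imm8[3:2] controls the sign of the result. A tentative sketch under that assumption:

    __m512d sel_max(__m512d a, __m512d b) {
      return _mm512_range_pd(a, b, 0x01);   /* 0b01: select the larger value */
    }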
+#define _mm512_reduce_pd(__A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) __W,(__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_ps(__A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, _MM_FROUND_CUR_DIRECTION);})
+
+#define _mm512_reduce_round_pd(__A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R);})
+
+#define _mm512_mask_reduce_round_pd(__W, __U, __A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) __W,(__mmask8) __U, __R);})
+
+#define _mm512_maskz_reduce_round_pd(__U, __A, __B, __R) __extension__ ({\
+ (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_reduce_round_ps(__A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R);})
+
+#define _mm512_mask_reduce_round_ps(__W, __U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) __W, (__mmask16) __U, __R);})
+
+#define _mm512_maskz_reduce_round_ps(__U, __A, __B, __R) __extension__ ({\
+ (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) __U, __R);})
+
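_mm512_reduce_pd here is VREDUCEPD, an element-wise operation unrelated to horizontal reductions: per my paraphrase of the ISA text, it returns what remains of each element after rounding it to imm8[7:4] bits of binary fraction. Tentatively:

    __m512d frac_part(__m512d x) {
      return _mm512_reduce_pd(x, 0);   /* 0 fraction bits: x - round-to-nearest(x) */
    }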
#undef __DEFAULT_FN_ATTRS
#endif
diff --git a/lib/Headers/avx512erintrin.h b/lib/Headers/avx512erintrin.h
index 56edffc11ca8..40a912189e5d 100644
--- a/lib/Headers/avx512erintrin.h
+++ b/lib/Headers/avx512erintrin.h
@@ -1,4 +1,4 @@
-/*===---- avx512fintrin.h - AVX2 intrinsics -----------------------------------===
+/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -126,19 +126,19 @@
_mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)-1, (R)); })
#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(S), \
(__mmask8)(M), (R)); })
#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rsqrt28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(M), (R)); })
@@ -153,19 +153,19 @@
_mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, (R)); })
#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(S), \
(__mmask8)(M), (R)); })
#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(M), (R)); })
@@ -229,19 +229,19 @@
_mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)-1, (R)); })
#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(S), \
(__mmask8)(M), (R)); })
#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \
+ (__m128)__builtin_ia32_rcp28ss_round((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(M), (R)); })
@@ -256,19 +256,19 @@
_mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, (R)); })
#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(S), \
(__mmask8)(M), (R)); })
#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
- (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \
+ (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(M), (R)); })
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 4f7cba0b1507..8dcdc710d5c3 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -1,4 +1,4 @@
-/*===---- avx512fintrin.h - AVX2 intrinsics --------------------------------===
+/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -47,7 +47,7 @@ typedef unsigned short __mmask16;
#define _MM_FROUND_CUR_DIRECTION 0x04
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
/* Create vectors with repeated elements */
@@ -57,6 +57,30 @@ _mm512_setzero_si512(void)
return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
}
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_undefined_pd()
+{
+ return (__m512d)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined()
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_undefined_ps()
+{
+ return (__m512)__builtin_ia32_undef512();
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_undefined_epi32()
+{
+ return (__m512i)__builtin_ia32_undef512();
+}
+
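The _mm512_undefined_* intrinsics materialize a don't-care value: __builtin_ia32_undef512 lowers to no instruction, so they are cheaper than _mm512_setzero_* whenever every lane is about to be overwritten, which is exactly what the 128/256-bit gather changes earlier in this patch exploit. Minimal sketch (function name is mine):

    __m512 scratch(void) {
      return _mm512_undefined_ps();   /* contents unspecified; no code emitted */
    }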
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
{
@@ -543,6 +567,66 @@ _mm512_max_ps(__m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_max_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_max_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
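
Editor's note: for these masked scalar operations, bit 0 of the mask chooses between the computed result and the pass-through (__W, or zero in the maskz form) for element 0, while the upper elements come from __A. A small sketch of the intended behaviour, not normative:

#include <immintrin.h>

__attribute__((__target__("avx512f")))
static void max_ss_demo(void)
{
  __m128 a = _mm_set_ss(2.0f), b = _mm_set_ss(5.0f), w = _mm_set_ss(-1.0f);
  __m128 r1 = _mm_mask_max_ss(w, 1, a, b); /* r1[0] == 5.0f */
  __m128 r0 = _mm_mask_max_ss(w, 0, a, b); /* r0[0] == -1.0f, from w */
  __m128 rz = _mm_maskz_max_ss(0, a, b);   /* rz[0] == 0.0f */
  (void)r1; (void)r0; (void)rz;
}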
static __inline __m512i
__DEFAULT_FN_ATTRS
_mm512_max_epi32(__m512i __A, __m512i __B)
@@ -606,6 +690,66 @@ _mm512_min_ps(__m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_min_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_min_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
static __inline __m512i
__DEFAULT_FN_ATTRS
_mm512_min_epi32(__m512i __A, __m512i __B)
@@ -728,18 +872,18 @@ _mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_sqrt_pd(__m512d a)
+_mm512_sqrt_pd(__m512d __a)
{
- return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)a,
+ return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
(__v8df) _mm512_setzero_pd (),
(__mmask8) -1,
_MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_sqrt_ps(__m512 a)
+_mm512_sqrt_ps(__m512 __a)
{
- return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)a,
+ return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
(__v16sf) _mm512_setzero_ps (),
(__mmask16) -1,
_MM_FROUND_CUR_DIRECTION);
@@ -765,7 +909,7 @@ _mm512_rsqrt14_ps(__m512 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rsqrt14_ss(__m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
+ return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __A,
(__v4sf) __B,
(__v4sf)
_mm_setzero_ps (),
@@ -775,7 +919,7 @@ _mm_rsqrt14_ss(__m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_rsqrt14_sd(__m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
+ return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __A,
(__v2df) __B,
(__v2df)
_mm_setzero_pd (),
@@ -802,7 +946,7 @@ _mm512_rcp14_ps(__m512 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rcp14_ss(__m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
+ return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __A,
(__v4sf) __B,
(__v4sf)
_mm_setzero_ps (),
@@ -812,7 +956,7 @@ _mm_rcp14_ss(__m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_rcp14_sd(__m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
+ return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __A,
(__v2df) __B,
(__v2df)
_mm_setzero_pd (),
@@ -873,6 +1017,489 @@ _mm512_abs_epi32(__m512i __A)
(__mmask16) -1);
}
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_add_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_add_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_add_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_add_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_add_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_add_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R); })
+
+#define _mm512_add_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16) -1, __R); })
+
+#define _mm512_mask_add_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16)__U, __R); })
+
+#define _mm512_maskz_add_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps(), (__mmask16)__U, __R); })
+
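
Editor's note: the *_round_* macros expose the rounding/SAE immediate directly; passing _MM_FROUND_CUR_DIRECTION (0x04, defined near the top of this header) reproduces the behaviour of the non-round forms. Illustrative sketch:

#include <immintrin.h>

__attribute__((__target__("avx512f")))
static __m512d add_cur(__m512d x, __m512d y)
{
  return _mm512_add_round_pd(x, y, _MM_FROUND_CUR_DIRECTION);
}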
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_sub_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_sub_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_sub_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_sub_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_sub_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_sub_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_sub_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_sub_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_sub_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mul_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_mul_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_mul_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mul_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_mul_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_mul_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_mul_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_mul_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_mul_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) _mm_setzero_ps (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_ss(__A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_ss(__U, __A, __B, __R) __extension__ ({ \
+ (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U,__R); })
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) _mm_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_div_round_sd(__A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm_mask_div_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) __W, (__mmask8) __U,__R); })
+
+#define _mm_maskz_div_round_sd(__U, __A, __B, __R) __extension__ ({ \
+ (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U,__R); })
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __W,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df)
+ _mm512_setzero_pd (),
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __W,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf)
+ _mm512_setzero_ps (),
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_div_round_pd(__A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B,\
+ (__v8df) _mm512_setzero_pd(), (__mmask8) -1, __R); })
+
+#define _mm512_mask_div_round_pd(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) __W, (__mmask8) __U, __R); })
+
+#define _mm512_maskz_div_round_pd(__U, __A, __B, __R) __extension__ ({ \
+ (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A, (__v8df) __B, \
+ (__v8df) _mm512_setzero_pd(), (__mmask8) __U, __R);})
+
+#define _mm512_div_round_ps(__A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, __R);})
+
+#define _mm512_mask_div_round_ps(__W, __U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) __W, (__mmask16) __U, __R); })
+
+#define _mm512_maskz_div_round_ps(__U, __A, __B, __R) __extension__ ({ \
+ (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, (__v16sf) __B, \
+ (__v16sf) _mm512_setzero_ps (), (__mmask16) __U, __R); })
+
#define _mm512_roundscale_ps(A, B) __extension__ ({ \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \
-1, _MM_FROUND_CUR_DIRECTION); })
@@ -1706,17 +2333,15 @@ _mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
/* Vector Extract */
#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
- __m512d __A = (A); \
(__m256d) \
- __builtin_ia32_extractf64x4_mask((__v8df)__A, \
+ __builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), \
(I), \
(__v4df)_mm256_setzero_si256(), \
(__mmask8) -1); })
#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
- __m512 __A = (A); \
(__m128) \
- __builtin_ia32_extractf32x4_mask((__v16sf)__A, \
+ __builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), \
(I), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8) -1); })
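
Editor's note: dropping the statement-expression temporaries here is more than cleanup; the old expansion could shadow caller names. Casting in place, (__v8df)(__m512d)(A), still type-checks the argument but cannot collide with them; presumably that is the motivation here and for the compare macros below. A runnable illustration of the old hazard, not part of the patch:

#include <immintrin.h>

__attribute__((__target__("avx512f")))
static __m256d old_hazard(void)
{
  __m512d __A = _mm512_setzero_pd();
  /* Before this change the macro expanded to `__m512d __A = (__A);`,
     a self-initialized temporary; with the in-place cast it is fine. */
  return _mm512_extractf64x4_pd(__A, 1);
}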
@@ -1850,18 +2475,18 @@ _mm512_cvtph_ps(__m256i __A)
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_cvttps_epi32(__m512 a)
+_mm512_cvttps_epi32(__m512 __a)
{
return (__m512i)
- __builtin_ia32_cvttps2dq512_mask((__v16sf) a,
+ __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
(__v16si) _mm512_setzero_si512 (),
(__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
}
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm512_cvttpd_epi32(__m512d a)
+_mm512_cvttpd_epi32(__m512d __a)
{
- return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) a,
+ return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
(__v8si)_mm256_setzero_si256(),
(__mmask8) -1,
_MM_FROUND_CUR_DIRECTION);
@@ -2405,51 +3030,43 @@ _mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
}
#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
(__mmask16)-1); })
#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
(__mmask16)-1); })
#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
(__mmask8)-1); })
#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
(__mmask8)-1); })
#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \
+ (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
(__mmask16)(m)); })
#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \
+ (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (p), \
(__mmask16)(m)); })
#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \
+ (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
(__mmask8)(m)); })
#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
- __m512i __a = (a); \
- __m512i __b = (b); \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \
+ (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (p), \
(__mmask8)(m)); })
#undef __DEFAULT_FN_ATTRS
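
Editor's note: the rewritten compare macros keep the immediate predicate operand. A usage sketch, illustrative only; 0 is the "equal" predicate (_MM_CMPINT_EQ):

#include <immintrin.h>

__attribute__((__target__("avx512f")))
static __mmask16 eq_lanes(__m512i x, __m512i y)
{
  return _mm512_cmp_epi32_mask(x, y, 0 /* _MM_CMPINT_EQ */);
}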
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index 74ec17583096..b4542d69ab08 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -1,4 +1,4 @@
-/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ----------===
+/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -29,7 +29,7 @@
#define __AVX512VLBWINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
/* Integer compare */
@@ -1822,6 +1822,435 @@ _mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
(__mmask16) __U);
}
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
+ (__v16qi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
+ __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
+ (__v32qi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v4si) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v8si) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
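
Editor's note: pmaddubsw treats __X as unsigned bytes and __Y as signed bytes, multiplies them, and adds adjacent products into signed-saturated 16-bit lanes; pmaddwd below it is the signed 16x16->32 pairwise multiply-add. The masked forms merge unselected lanes from __W. A hedged sketch, names illustrative:

#include <immintrin.h>

__attribute__((__target__("avx512vl,avx512bw")))
static __m128i byte_dot(__m128i w, __mmask8 k, __m128i x, __m128i y)
{
  return _mm_mask_maddubs_epi16(w, k, x, y);
}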
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtsepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtsepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtusepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtusepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtepi16_epi8 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
+ return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtepi16_epi8 (__m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) __O,
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
+ return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
+ (__v16qi) _mm_setzero_si128(),
+ __M);
+}
+
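
Editor's note: the three cvt*epi16_epi8 families above narrow 16-bit lanes to 8 bits with signed saturation (vpmovswb), unsigned saturation (vpmovuswb), or plain truncation (vpmovwb). A sketch, illustrative only:

#include <immintrin.h>

__attribute__((__target__("avx512vl,avx512bw")))
static __m128i narrow_sat(__m256i v)
{
  return _mm256_cvtsepi16_epi8(v); /* 16 x int16 -> 16 x int8, saturating */
}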
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
+ (__v8hi) __Y,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
+ (__v16hi) __Y,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A,
+ (__v16qi) __B,
+ (__v16qi) _mm_setzero_si128(),
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) __W,
+ (__mmask32) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A,
+ (__v32qi) __B,
+ (__v32qi) _mm256_setzero_si256(),
+ (__mmask32) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A,
+ (__v8hi) __B,
+ (__v8hi) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) __W,
+ (__mmask16) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A,
+ (__v16hi) __B,
+ (__v16hi) _mm256_setzero_si256(),
+ (__mmask16) __U);
+}
+
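
Editor's note: the masked unpack variants interleave bytes or words from the low (or high) halves of __A and __B exactly like their SSE/AVX2 counterparts, then merge the result under the mask. Sketch, illustrative only:

#include <immintrin.h>

__attribute__((__target__("avx512vl,avx512bw")))
static __m128i interleave_lo(__m128i w, __mmask8 k, __m128i a, __m128i b)
{
  return _mm_mask_unpacklo_epi16(w, k, a, b);
}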
#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
(__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), \
diff --git a/lib/Headers/avx512vldqintrin.h b/lib/Headers/avx512vldqintrin.h
index 1edf29d128ee..dfd858e013da 100644
--- a/lib/Headers/avx512vldqintrin.h
+++ b/lib/Headers/avx512vldqintrin.h
@@ -1,4 +1,4 @@
-/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ---------------------------===
+/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -20,7 +20,7 @@
*
*===-----------------------------------------------------------------------===
*/
-
+
#ifndef __IMMINTRIN_H
#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
#endif
@@ -29,7 +29,7 @@
#define __AVX512VLDQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
@@ -348,6 +348,606 @@ _mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
(__mmask8) __U);
}
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepi64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepi64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepi64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epi64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epi64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu64 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu64 (__m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
+ return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epi64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epi64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu64 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
+ (__v2di) _mm_setzero_si128(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu64 (__m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
+ return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
+ (__v4di) _mm256_setzero_si256(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu64_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
+ (__v2df) _mm_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_pd (__m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
+ return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
+ (__v4df) _mm256_setzero_pd(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu64_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_cvtepu64_ps (__m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
+ return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
+ (__v4sf) _mm_setzero_ps(),
+ (__mmask8) __U);
+}
+
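+/* The range/reduce operations below are macros rather than inline functions
+   because their imm8 operand (__C for range, __B for reduce) is encoded into
+   the instruction and must be a compile-time constant.  A hedged sketch (the
+   constant 0 is illustrative, not a recommended mode; a and b are assumed
+   __m128d values already in scope):
+
+     __m128d lo2 = _mm_range_pd(a, b, 0);
+*/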
+#define _mm_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, (__v2df) __B, __C, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_range_pd(__A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_range_pd(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_pd(__U, __A, __B, __C) __extension__ ({ \
+ (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, (__v4df) __B, __C, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, (__v4sf) __B, __C, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_range_ps(__A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_range_ps(__W, __U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_range_ps(__U, __A, __B, __C) __extension__ ({ \
+ (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, (__v8sf) __B, __C, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
+#define _mm_reduce_pd(__A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, \
+ (__v2df) _mm_setzero_pd(), (__mmask8) __U); })
+
+#define _mm256_reduce_pd(__A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_pd(__W, __U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_pd(__U, __A, __B) __extension__ ({ \
+ (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_reduce_ps(__A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+#define _mm_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+#define _mm_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_reduce_ps(__A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_reduce_ps(__W, __U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+#define _mm256_maskz_reduce_ps(__U, __A, __B) __extension__ ({ \
+ (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
#undef __DEFAULT_FN_ATTRS
#endif
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index fc1b9d6e7a23..8f13536fbb0e 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -28,18 +28,18 @@
#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+#define __DEFAULT_FN_ATTRS_BOTH __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
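+/* Carrying the target features on each intrinsic lets this header be
+   included without a global -mavx512vl flag; a call is then legal only from
+   a function compiled with (or attributed for) the matching features.  A
+   hypothetical sketch (blend_low2 is illustrative, not part of the header):
+
+     __attribute__((__target__("avx512vl")))
+     __m128i blend_low2(__m128i __a, __m128i __w) {
+       return _mm_mask_blend_epi32(0x3, __a, __w);  // lanes 0-1 from __w
+     }
+*/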
/* Integer compare */
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_cmpeq_epi32_mask(__m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpeqd128_mask((__v4si)__a, (__v4si)__b,
__u);
@@ -57,13 +57,13 @@ _mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpeqd256_mask((__v8si)__a, (__v8si)__b,
__u);
@@ -81,13 +81,13 @@ _mm256_mask_cmpeq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b,
__u);
@@ -105,13 +105,13 @@ _mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpeqq256_mask((__v4di)__a, (__v4di)__b,
__u);
@@ -226,16 +226,13 @@ _mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
__u);
}
-
-
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b,
__u);
@@ -253,13 +250,13 @@ _mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b,
__u);
@@ -277,13 +274,13 @@ _mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b,
__u);
@@ -301,13 +298,13 @@ _mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) {
__u);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
(__mmask8)-1);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_BOTH
_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b,
__u);
@@ -1977,6 +1974,2633 @@ _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
(__mmask8) __U);
}
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
+ return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
+ return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
+ return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
+ return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
+ return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
+ return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
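+/* In the blend forms above, each bit of __U picks a lane source: a set bit
+   takes the lane from __W, a clear bit from __A.  Illustrative sketch:
+
+     __m128i a = _mm_set1_epi32(0);
+     __m128i w = _mm_set1_epi32(7);
+     __m128i r = _mm_mask_blend_epi32(0x5, a, w);   // r = {7, 0, 7, 0}
+*/
+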
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
+ __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
+ (__v2df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
+ __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
+ (__v4df) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
+ (__v2di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
+ (__v4di) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
+ __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
+ (__v4sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
+ __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
+ (__v8sf) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
+ (__v4si) __A,
+ (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
+ __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
+ (__v8si) __A,
+ (__mmask8) __U);
+}
+
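+/* compress packs the lanes selected by __U contiguously into the low lanes
+   of the result (merging or zeroing the rest), and compressstoreu writes
+   only the selected lanes, contiguously, to unaligned memory.  Sketch:
+
+     double buf[2] = {0.0, 0.0};
+     __m128d v = _mm_set_pd(2.0, 1.0);              // lanes {1.0, 2.0}
+     _mm_mask_compressstoreu_pd(buf, 0x2, v);       // buf[0] = 2.0
+*/
+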
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
+ return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvtpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
+ return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvtps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvtps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttpd_epu32 (__m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_cvttpd_epu32 (__m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
+ return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_cvttps_epu32 (__m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
+ return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_cvttps_epu32 (__m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
+ return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
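+/* The cvtt forms above truncate toward zero, while the plain cvt forms use
+   the current MXCSR rounding mode (round-to-nearest-even by default): -1.7f
+   becomes -2 under cvt but -1 under cvtt.  Sketch:
+
+     __m128i t = _mm_maskz_cvttps_epi32(0xF, _mm_set1_ps(-1.7f));  // all -1
+*/
+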
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_cvtepu32_pd (__m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_pd (__m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
+ return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_cvtepu32_ps (__m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_cvtepu32_ps (__m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
+ return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
+ return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
+ void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si) __W,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
+ return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
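+/* expand is the inverse of compress: consecutive low lanes of __A (or, for
+   the expandloadu forms, consecutive elements read from __P) are scattered
+   to the lanes selected by __U; the rest are merged from __W or zeroed.
+   Sketch:
+
+     __m128i src = _mm_set_epi32(0, 0, 0, 9);        // lane 0 = 9
+     __m128i r = _mm_maskz_expand_epi32(0x4, src);   // r = {0, 0, 9, 0}
+*/
+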
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_getexp_pd (__m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_getexp_pd (__m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_getexp_ps (__m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_getexp_ps (__m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
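+/* getexp extracts the unbiased exponent of each element as a floating-point
+   value, i.e. floor(log2(|x|)): 8.0 yields 3.0 and 0.75 yields -1.0.  Sketch:
+
+     __m128d e = _mm_getexp_pd(_mm_set1_pd(8.0));    // both lanes 3.0
+*/
+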
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
+ (__v4si)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_abs_epi64 (__m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
+ return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_abs_epi64 (__m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
+ return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_max_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_max_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
+ (__v4si) __B,
+ (__v4si) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
+ (__v8si) __B,
+ (__v8si) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_min_epu64 (__m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di) __W, __M);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
+ (__v2di) __B,
+ (__v2di)
+ _mm_setzero_si128 (),
+ __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_min_epu64 (__m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di) __W, __M);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
+ (__v4di) __B,
+ (__v4di)
+ _mm256_setzero_si256 (),
+ __M);
+}
+
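+/* Illustrative: the epu64 forms compare lanes as unsigned and the epi64
+   forms as signed, so with __A = {-1, 2} and __B = {1, 3}:
+     _mm_max_epi64(__A, __B) -> { 1, 3 }
+     _mm_max_epu64(__A, __B) -> { -1, 3 }   (-1 is 0xFFFF...F unsigned) */
+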
+#define _mm_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, \
+ __imm, (__v2df) _mm_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, __imm, \
+ (__v2df) _mm_setzero_pd (), (__mmask8) __U); })
+
+
+#define _mm256_roundscale_pd(__A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd (), (__mmask8) -1); })
+
+
+#define _mm256_mask_roundscale_pd(__W, __U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_pd(__U, __A, __imm) __extension__ ({ \
+ (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, __imm, \
+ (__v4df) _mm256_setzero_pd(), (__mmask8) __U); })
+
+#define _mm_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) -1); })
+
+
+#define _mm_mask_roundscale_ps(__W, __U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) __W, (__mmask8) __U); })
+
+
+#define _mm_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, __imm, \
+ (__v4sf) _mm_setzero_ps(), (__mmask8) __U); })
+
+#define _mm256_roundscale_ps(__A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A,__imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) -1); })
+
+#define _mm256_mask_roundscale_ps(__W, __U, __A,__imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) __W, (__mmask8) __U); })
+
+
+#define _mm256_maskz_roundscale_ps(__U, __A, __imm) __extension__ ({ \
+ (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, __imm, \
+ (__v8sf) _mm256_setzero_ps(), (__mmask8) __U); })
+
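+/* Sketch of the __imm encoding (stated here as an assumption from the
+   VRNDSCALE* documentation): bits [7:4] give M, bits [1:0] the rounding
+   mode, and the result is 2^-M * round(2^M * x). For example:
+     _mm_roundscale_pd(__A, 0x01)  ~ floor(x)        (M = 0, round down)
+     _mm_roundscale_pd(__A, 0x11)  ~ floor(2x)/2     (M = 1, round down) */
+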
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_scalef_pd (__m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_scalef_pd (__m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_scalef_ps (__m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_scalef_ps (__m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
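+/* Illustrative: scalef computes __A * 2^floor(__B) per lane, e.g.
+   _mm_scalef_pd(_mm_set1_pd(3.0), _mm_set1_pd(2.0)) -> { 12.0, 12.0 }. */
+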
+#define _mm_i64scatter_pd(__addr,__index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df(__addr, (__mmask8) 0xFF, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+#define _mm_mask_i64scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index, \
+ (__v2df) __v1, __scale); })
+
+
+#define _mm_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF, \
+ (__v2di) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index,\
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i64scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF,\
+ (__v4di) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_pd(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index,\
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i64scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi64(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index,\
+ (__v4di) __v1, __scale); })
+
+#define _mm_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index,\
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i64scatter_epi32(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF,\
+ (__v2di) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i64scatter_epi32(__addr, __mask, __index, __v1,\
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i64scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm256_i64scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF, \
+ (__v4di) __index, (__v4si) __v1, __scale); })
+
+#define _mm256_mask_i64scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scatterdiv8si(__addr, __mask, (__v4di) __index, \
+ (__v4si) __v1, __scale); })
+
+#define _mm_i32scatter_pd(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2df) __v1, __scale); })
+
+#define _mm_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index,\
+ (__v2df) __v1, __scale); })
+
+#define _mm_i32scatter_epi64(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v2di) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index, \
+ (__v2di) __v1, __scale); })
+
+#define _mm256_i32scatter_pd(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4df) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_pd(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index, \
+ (__v4df) __v1, __scale); })
+
+#define _mm256_i32scatter_epi64(__addr, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4di) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi64(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index, \
+ (__v4di) __v1, __scale); })
+
+#define _mm_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4sf) __v1, __scale); })
+
+#define _mm_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index, \
+ (__v4sf) __v1, __scale); })
+
+#define _mm_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF, \
+ (__v4si) __index, (__v4si) __v1, __scale); })
+
+#define _mm_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index,\
+ (__v4si) __v1, __scale); })
+
+#define _mm256_i32scatter_ps(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8sf) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_ps(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index,\
+ (__v8sf) __v1, __scale); })
+
+#define _mm256_i32scatter_epi32(__addr, __index, __v1, __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF, \
+ (__v8si) __index, (__v8si) __v1, __scale); })
+
+#define _mm256_mask_i32scatter_epi32(__addr, __mask, __index, __v1, \
+ __scale) __extension__ ({ \
+ __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index, \
+ (__v8si) __v1, __scale); })
+
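+/* A minimal usage sketch (buf, idx and v are hypothetical): lane k of
+   __v1 is stored to __addr + __index[k] * __scale bytes, so with
+   __scale = sizeof(float):
+     float buf[16];
+     __m128i idx = _mm_set_epi32(12, 8, 4, 0);
+     _mm_i32scatter_ps(buf, idx, v, 4);    // writes buf[0,4,8,12]
+   The unmasked forms simply pass an all-ones mask, (__mmask8) 0xFF. */
+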
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) {
+ return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) {
+ return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) {
+ return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) {
+ return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df)
+ _mm_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf)
+ _mm_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
+ (__v4si) __I
+ /* idx */ ,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
+ (__v8si) __I
+ /* idx */ ,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
+ (__v2di) __I
+ /* idx */ ,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
+ __mmask8 __U, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
+ (__v4di) __I
+ /* idx */ ,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
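+/* Note (an assumption from the vpermi2* semantics): the mask2 variants
+   take their merge source from the index operand, i.e. where a bit of
+   __U is clear the result lane keeps the corresponding lane of __I. */
+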
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4si) __A,
+ (__v4si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8si) __A,
+ (__v8si) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
+ __m128d __B) {
+ return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2df) __A,
+ (__v2df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
+ __m256d __B) {
+ return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4df) __A,
+ (__v4df) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
+ __m128 __B) {
+ return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
+ /* idx */ ,
+ (__v4sf) __A,
+ (__v4sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
+ __m256 __B) {
+ return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
+ /* idx */ ,
+ (__v8sf) __A,
+ (__v8sf) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
+ /* idx */ ,
+ (__v2di) __A,
+ (__v2di) __B,
+ (__mmask8)
+ __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) -1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
+ __m256i __I, __m256i __B) {
+ return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
+ /* idx */ ,
+ (__v4di) __A,
+ (__v4di) __B,
+ (__mmask8)
+ __U);
+}
+
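+/* Illustrative index semantics for these two-source permutes: in the
+   2-lane epi64 forms, bit 0 of each index selects the element and bit 1
+   selects the source (0 = __A, 1 = __B). With __I = {3, 0}:
+     _mm_permutex2var_epi64(__A, __I, __B) -> { __B[1], __A[0] } */
+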
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_BOTH
#endif /* __AVX512VLINTRIN_H */
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index c1bc85b39e82..6d1ca5473dcf 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -35,12 +35,16 @@ typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v32qs __attribute__((__vector_size__(32)));
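+/* Illustrative: casting through __v32qs forces a signed byte comparison
+   regardless of whether plain char is signed on the target, e.g.
+   (__m256i)((__v32qs)__a > (__v32qs)__b) for a 256-bit signed compare. */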
+
typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
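+
+/* A minimal sketch (sum256_demo is a hypothetical name, not part of this
+   header): with the per-function target attribute, AVX intrinsics can be
+   used without compiling the whole TU with -mavx. */
+/*
+static __inline __m256d __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+sum256_demo(__m256d __a, __m256d __b) { return _mm256_add_pd(__a, __b); }
+*/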
/* Arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
@@ -152,12 +156,10 @@ _mm256_rcp_ps(__m256 __a)
}
#define _mm256_round_pd(V, M) __extension__ ({ \
- __m256d __V = (V); \
- (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })
+ (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
#define _mm256_round_ps(V, M) __extension__ ({ \
- __m256 __V = (V); \
- (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })
+ (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
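+
+/* Note on the rewritten macros (an assumption about intent): casting (V)
+   inline keeps each macro a single expression, so (M) can still be
+   required to be an integer constant, and no local __V can shadow a
+   caller's variable of the same name. */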
#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
@@ -264,26 +266,26 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
}
#define _mm_permute_pd(A, C) __extension__ ({ \
- __m128d __A = (A); \
- (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
+ (__v2df)_mm_setzero_pd(), \
(C) & 0x1, ((C) & 0x2) >> 1); })
#define _mm256_permute_pd(A, C) __extension__ ({ \
- __m256d __A = (A); \
- (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
+ (__v4df)_mm256_setzero_pd(), \
(C) & 0x1, ((C) & 0x2) >> 1, \
2 + (((C) & 0x4) >> 2), \
2 + (((C) & 0x8) >> 3)); })
#define _mm_permute_ps(A, C) __extension__ ({ \
- __m128 __A = (A); \
- (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
+ (__v4sf)_mm_setzero_ps(), \
(C) & 0x3, ((C) & 0xc) >> 2, \
((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
#define _mm256_permute_ps(A, C) __extension__ ({ \
- __m256 __A = (A); \
- (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
(C) & 0x3, ((C) & 0xc) >> 2, \
((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
4 + (((C) & 0x03) >> 0), \
@@ -292,34 +294,29 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
4 + (((C) & 0xc0) >> 6)); })
#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
- __m256d __V1 = (V1); \
- __m256d __V2 = (V2); \
- (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })
+ (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (M)); })
#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
- __m256 __V1 = (V1); \
- __m256 __V2 = (V2); \
- (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
+ (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
- __m256i __V1 = (V1); \
- __m256i __V2 = (V2); \
- (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })
+ (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (M)); })
/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
- __m256d __V1 = (V1); \
- __m256d __V2 = (V2); \
- (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), \
(((M) & 0x01) ? 4 : 0), \
(((M) & 0x02) ? 5 : 1), \
(((M) & 0x04) ? 6 : 2), \
(((M) & 0x08) ? 7 : 3)); })
#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
- __m256 __V1 = (V1); \
- __m256 __V2 = (V2); \
- (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), \
(((M) & 0x01) ? 8 : 0), \
(((M) & 0x02) ? 9 : 1), \
(((M) & 0x04) ? 10 : 2), \
@@ -345,28 +342,29 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
- __m256 __V1 = (V1); \
- __m256 __V2 = (V2); \
- (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
+ (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)); })
/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
- __m256 __a = (a); \
- __m256 __b = (b); \
- (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
- (mask) & 0x3, ((mask) & 0xc) >> 2, \
- (((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
- ((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
- (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })
+ (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), \
+ (mask) & 0x3, \
+ ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 8, \
+ (((mask) & 0xc0) >> 6) + 8, \
+ ((mask) & 0x3) + 4, \
+ (((mask) & 0xc) >> 2) + 4, \
+ (((mask) & 0x30) >> 4) + 12, \
+ (((mask) & 0xc0) >> 6) + 12); })
#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
- __m256d __a = (a); \
- __m256d __b = (b); \
- (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
- (mask) & 0x1, \
- (((mask) & 0x2) >> 1) + 4, \
- (((mask) & 0x4) >> 2) + 2, \
- (((mask) & 0x8) >> 3) + 6); })
+ (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), \
+ (mask) & 0x1, \
+ (((mask) & 0x2) >> 1) + 4, \
+ (((mask) & 0x4) >> 2) + 2, \
+ (((mask) & 0x8) >> 3) + 6); })
/* Compare */
#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
@@ -403,34 +401,28 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
#define _mm_cmp_pd(a, b, c) __extension__ ({ \
- __m128d __a = (a); \
- __m128d __b = (b); \
- (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })
+ (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
#define _mm_cmp_ps(a, b, c) __extension__ ({ \
- __m128 __a = (a); \
- __m128 __b = (b); \
- (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })
+ (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
- __m256d __a = (a); \
- __m256d __b = (b); \
- (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })
+ (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (c)); })
#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
- __m256 __a = (a); \
- __m256 __b = (b); \
- (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })
+ (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (c)); })
#define _mm_cmp_sd(a, b, c) __extension__ ({ \
- __m128d __a = (a); \
- __m128d __b = (b); \
- (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })
+ (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)); })
#define _mm_cmp_ss(a, b, c) __extension__ ({ \
- __m128 __a = (a); \
- __m128 __b = (b); \
- (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
+ (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)); })
static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi32(__m256i __a, const int __imm)
@@ -831,53 +823,53 @@ _mm256_storeu_si256(__m256i *__p, __m256i __a)
/* Conditional load ops */
static __inline __m128d __DEFAULT_FN_ATTRS
-_mm_maskload_pd(double const *__p, __m128d __m)
+_mm_maskload_pd(double const *__p, __m128i __m)
{
- return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m);
+ return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
}
static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_maskload_pd(double const *__p, __m256d __m)
+_mm256_maskload_pd(double const *__p, __m256i __m)
{
return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
- (__v4df)__m);
+ (__v4di)__m);
}
static __inline __m128 __DEFAULT_FN_ATTRS
-_mm_maskload_ps(float const *__p, __m128 __m)
+_mm_maskload_ps(float const *__p, __m128i __m)
{
- return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m);
+ return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
}
static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_maskload_ps(float const *__p, __m256 __m)
+_mm256_maskload_ps(float const *__p, __m256i __m)
{
- return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m);
+ return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);
}
/* Conditional store ops */
static __inline void __DEFAULT_FN_ATTRS
-_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a)
+_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
{
- __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a);
+ __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
}
static __inline void __DEFAULT_FN_ATTRS
-_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a)
+_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
{
- __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a);
+ __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
}
static __inline void __DEFAULT_FN_ATTRS
-_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a)
+_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
{
- __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a);
+ __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
}
static __inline void __DEFAULT_FN_ATTRS
-_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a)
+_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
{
- __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a);
+ __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
}
/* Cacheability support ops */
@@ -900,6 +892,24 @@ _mm256_stream_ps(float *__p, __m256 __a)
}
/* Create vectors */
+static __inline__ __m256d __DEFAULT_FN_ATTRS
+_mm256_undefined_pd(void)
+{
+ return (__m256d)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS
+_mm256_undefined_ps(void)
+{
+ return (__m256)__builtin_ia32_undef256();
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_undefined_si256(void)
+{
+ return (__m256i)__builtin_ia32_undef256();
+}
+
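+/* Usage note: the _mm256_undefined_* intrinsics return a vector whose
+   contents are unspecified. They are meant for operands whose value does
+   not matter, e.g. the half that a subsequent insert will overwrite,
+   avoiding a needless zeroing of the register. */
+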
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
@@ -1140,14 +1150,14 @@ _mm256_castsi128_si256(__m128i __a)
return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
-/*
+/*
Vector insert.
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
(__m256)__builtin_shufflevector( \
- (__v8sf)(V1), \
+ (__v8sf)(__m256)(V1), \
(__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
(((M) & 1) ? 0 : 8), \
(((M) & 1) ? 1 : 9), \
@@ -1160,7 +1170,7 @@ _mm256_castsi128_si256(__m128i __a)
#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
(__m256d)__builtin_shufflevector( \
- (__v4df)(V1), \
+ (__v4df)(__m256d)(V1), \
(__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
(((M) & 1) ? 0 : 4), \
(((M) & 1) ? 1 : 5), \
@@ -1169,21 +1179,21 @@ _mm256_castsi128_si256(__m128i __a)
#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
(__m256i)__builtin_shufflevector( \
- (__v4di)(V1), \
+ (__v4di)(__m256i)(V1), \
(__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
(((M) & 1) ? 0 : 4), \
(((M) & 1) ? 1 : 5), \
(((M) & 1) ? 4 : 2), \
(((M) & 1) ? 5 : 3) );})
-/*
+/*
Vector extract.
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
#define _mm256_extractf128_ps(V, M) __extension__ ({ \
(__m128)__builtin_shufflevector( \
- (__v8sf)(V), \
+ (__v8sf)(__m256)(V), \
(__v8sf)(_mm256_setzero_ps()), \
(((M) & 1) ? 4 : 0), \
(((M) & 1) ? 5 : 1), \
@@ -1192,14 +1202,14 @@ _mm256_castsi128_si256(__m128i __a)
#define _mm256_extractf128_pd(V, M) __extension__ ({ \
(__m128d)__builtin_shufflevector( \
- (__v4df)(V), \
+ (__v4df)(__m256d)(V), \
(__v4df)(_mm256_setzero_pd()), \
(((M) & 1) ? 2 : 0), \
(((M) & 1) ? 3 : 1) );})
#define _mm256_extractf128_si256(V, M) __extension__ ({ \
(__m128i)__builtin_shufflevector( \
- (__v4di)(V), \
+ (__v4di)(__m256i)(V), \
(__v4di)(_mm256_setzero_si256()), \
(((M) & 1) ? 2 : 0), \
(((M) & 1) ? 3 : 1) );})
@@ -1222,7 +1232,7 @@ _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
struct __loadu_pd {
__m128d __v;
} __attribute__((__packed__, __may_alias__));
-
+
__m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}
diff --git a/lib/Headers/bmi2intrin.h b/lib/Headers/bmi2intrin.h
index c63397c96ebe..fdae82cf2ba7 100644
--- a/lib/Headers/bmi2intrin.h
+++ b/lib/Headers/bmi2intrin.h
@@ -25,15 +25,11 @@
#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
#endif
-#ifndef __BMI2__
-# error "BMI2 instruction set not enabled"
-#endif /* __BMI2__ */
-
#ifndef __BMI2INTRIN_H
#define __BMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bzhi_u32(unsigned int __X, unsigned int __Y)
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index 0e93d575cb8b..da98792d8307 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -25,10 +25,6 @@
#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
#endif
-#ifndef __BMI__
-# error "BMI instruction set not enabled"
-#endif /* __BMI__ */
-
#ifndef __BMIINTRIN_H
#define __BMIINTRIN_H
@@ -41,9 +37,14 @@
#define _tzcnt_u32(a) (__tzcnt_u32((a)))
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
+ instruction behaves as BSF on non-BMI targets, there is code that expects
+ to use it as a potentially faster version of BSF. */
+#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
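+
+/* Illustrative: with the definitions below, __tzcnt_u32(0x8) == 3 and
+   __tzcnt_u32(0) == 32, matching TZCNT semantics even on targets without
+   BMI. */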
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
+static __inline__ unsigned short __RELAXED_FN_ATTRS
__tzcnt_u16(unsigned short __X)
{
return __X ? __builtin_ctzs(__X) : 16;
@@ -87,7 +88,7 @@ __blsr_u32(unsigned int __X)
return __X & (__X - 1);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
+static __inline__ unsigned int __RELAXED_FN_ATTRS
__tzcnt_u32(unsigned int __X)
{
return __X ? __builtin_ctz(__X) : 32;
@@ -140,7 +141,7 @@ __blsr_u64(unsigned long long __X)
return __X & (__X - 1);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
__tzcnt_u64(unsigned long long __X)
{
return __X ? __builtin_ctzll(__X) : 64;
@@ -149,5 +150,6 @@ __tzcnt_u64(unsigned long long __X)
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
+#undef __RELAXED_FN_ATTRS
#endif /* __BMIINTRIN_H */
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 656bc19d3dea..cfc2c7161460 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -24,10 +24,6 @@
#ifndef __EMMINTRIN_H
#define __EMMINTRIN_H
-#ifndef __SSE2__
-#error "SSE2 instruction set not enabled"
-#else
-
#include <xmmintrin.h>
typedef double __m128d __attribute__((__vector_size__(16)));
@@ -39,8 +35,14 @@ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v16qs __attribute__((__vector_size__(16)));
+
+#include <f16cintrin.h>
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_add_sd(__m128d __a, __m128d __b)
@@ -527,6 +529,12 @@ _mm_loadl_pd(__m128d __a, double const *__dp)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_undefined_pd(void)
+{
+ return (__m128d)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set_sd(double __w)
{
return (__m128d){ __w, 0 };
@@ -639,7 +647,7 @@ _mm_add_epi32(__m128i __a, __m128i __b)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_add_si64(__m64 __a, __m64 __b)
{
- return __a + __b;
+ return (__m64)__builtin_ia32_paddq(__a, __b);
}
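+/* The builtin is presumably used so the 64-bit add is emitted as the MMX
+   PADDQ instruction now that these functions carry an explicit target
+   attribute; the result is the same wrap-around 64-bit addition. */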
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -771,7 +779,7 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sub_si64(__m64 __a, __m64 __b)
{
- return __a - __b;
+ return (__m64)__builtin_ia32_psubq(__a, __b);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -992,8 +1000,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cmpgt_epi8(__m128i __a, __m128i __b)
{
/* This function always performs a signed comparison, but __v16qi is a char
- which may be signed or unsigned. */
- typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ which may be signed or unsigned, so use __v16qs. */
return (__m128i)((__v16qs)__a > (__v16qs)__b);
}
@@ -1120,33 +1127,39 @@ _mm_loadl_epi64(__m128i const *__p)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long q1, long long q0)
+_mm_undefined_si128(void)
{
- return (__m128i){ q0, q1 };
+ return (__m128i)__builtin_ia32_undef128();
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 q1, __m64 q0)
+_mm_set_epi64x(long long __q1, long long __q0)
{
- return (__m128i){ (long long)q0, (long long)q1 };
+ return (__m128i){ __q0, __q1 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int i3, int i2, int i1, int i0)
+_mm_set_epi64(__m64 __q1, __m64 __q0)
{
- return (__m128i)(__v4si){ i0, i1, i2, i3};
+ return (__m128i){ (long long)__q0, (long long)__q1 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
+_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
{
- return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
{
- return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
+{
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1180,27 +1193,27 @@ _mm_set1_epi8(char __b)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 q0, __m64 q1)
+_mm_setr_epi64(__m64 __q0, __m64 __q1)
{
- return (__m128i){ (long long)q0, (long long)q1 };
+ return (__m128i){ (long long)__q0, (long long)__q1 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int i0, int i1, int i2, int i3)
+_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
{
- return (__m128i)(__v4si){ i0, i1, i2, i3};
+ return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
{
- return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+ return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
{
- return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+ return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1321,20 +1334,20 @@ _mm_movemask_epi8(__m128i __a)
#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
- (__v4si)_mm_set1_epi32(0), \
+ (__v4si)_mm_setzero_si128(), \
(imm) & 0x3, ((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
- (__v8hi)_mm_set1_epi16(0), \
+ (__v8hi)_mm_setzero_si128(), \
(imm) & 0x3, ((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
4, 5, 6, 7); })
#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
- (__v8hi)_mm_set1_epi16(0), \
+ (__v8hi)_mm_setzero_si128(), \
0, 1, 2, 3, \
4 + (((imm) & 0x03) >> 0), \
4 + (((imm) & 0x0c) >> 2), \
@@ -1426,8 +1439,8 @@ _mm_movemask_pd(__m128d __a)
}
#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
- __builtin_shufflevector((__m128d)(a), (__m128d)(b), \
- (i) & 1, (((i) & 2) >> 1) + 2); })
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (i) & 1, (((i) & 2) >> 1) + 2); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_castpd_ps(__m128d __a)
@@ -1468,13 +1481,11 @@ _mm_castsi128_pd(__m128i __a)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_pause(void)
{
- __asm__ volatile ("pause");
+ __builtin_ia32_pause();
}
#undef __DEFAULT_FN_ATTRS
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-#endif /* __SSE2__ */
-
#endif /* __EMMINTRIN_H */
diff --git a/lib/Headers/f16cintrin.h b/lib/Headers/f16cintrin.h
index 3730ae0d3eeb..c655d98ee9ab 100644
--- a/lib/Headers/f16cintrin.h
+++ b/lib/Headers/f16cintrin.h
@@ -21,30 +21,18 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <f16cintrin.h> directly; include <x86intrin.h> instead."
+#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <emmintrin.h> instead."
#endif
-#ifndef __F16C__
-# error "F16C instruction is not enabled"
-#endif /* __F16C__ */
-
#ifndef __F16CINTRIN_H
#define __F16CINTRIN_H
-typedef float __v8sf __attribute__ ((__vector_size__ (32)));
-typedef float __m256 __attribute__ ((__vector_size__ (32)));
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
#define _mm_cvtps_ph(a, imm) __extension__ ({ \
- __m128 __a = (a); \
- (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)__a, (imm)); })
-
-#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
- __m256 __a = (a); \
- (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)__a, (imm)); })
+ (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_cvtph_ps(__m128i __a)
@@ -52,12 +40,6 @@ _mm_cvtph_ps(__m128i __a)
return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
}
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_cvtph_ps(__m128i __a)
-{
- return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
-}
-
#undef __DEFAULT_FN_ATTRS
#endif /* __F16CINTRIN_H */
diff --git a/lib/Headers/fma4intrin.h b/lib/Headers/fma4intrin.h
index d6405cf02922..f1178877b252 100644
--- a/lib/Headers/fma4intrin.h
+++ b/lib/Headers/fma4intrin.h
@@ -28,14 +28,10 @@
#ifndef __FMA4INTRIN_H
#define __FMA4INTRIN_H
-#ifndef __FMA4__
-# error "FMA4 instruction set is not enabled"
-#else
-
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
@@ -231,6 +227,4 @@ _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS
-#endif /* __FMA4__ */
-
#endif /* __FMA4INTRIN_H */
diff --git a/lib/Headers/fmaintrin.h b/lib/Headers/fmaintrin.h
index ad693fed0bfd..114a14380ea0 100644
--- a/lib/Headers/fmaintrin.h
+++ b/lib/Headers/fmaintrin.h
@@ -28,12 +28,8 @@
#ifndef __FMAINTRIN_H
#define __FMAINTRIN_H
-#ifndef __FMA__
-# error "FMA instruction set is not enabled"
-#else
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
@@ -229,6 +225,4 @@ _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
#undef __DEFAULT_FN_ATTRS
-#endif /* __FMA__ */
-
#endif /* __FMAINTRIN_H */
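Because each FMA intrinsic now carries __target__("fma"), the header can be included unconditionally and FMA use can be confined to a single target-attributed function; only that function requires FMA-capable hardware. A sketch (the runtime check that would normally guard the call is reduced to a comment):

#include <immintrin.h>
#include <stdio.h>

__attribute__((target("fma")))
static __m128 fused(__m128 a, __m128 b, __m128 c) {
  return _mm_fmadd_ps(a, b, c);             /* a*b + c with a single rounding */
}

int main(void) {
  /* Assumes an FMA-capable CPU; __builtin_cpu_supports("fma") would
     normally gate this call. */
  __m128 r = fused(_mm_set1_ps(2.0f), _mm_set1_ps(3.0f), _mm_set1_ps(1.0f));
  printf("%f\n", _mm_cvtss_f32(r));         /* 7.000000 */
  return 0;
}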
diff --git a/lib/Headers/fxsrintrin.h b/lib/Headers/fxsrintrin.h
index 2b3549c057a1..ac6026aa5ba2 100644
--- a/lib/Headers/fxsrintrin.h
+++ b/lib/Headers/fxsrintrin.h
@@ -28,7 +28,7 @@
#ifndef __FXSRINTRIN_H
#define __FXSRINTRIN_H
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
static __inline__ void __DEFAULT_FN_ATTRS
_fxsave(void *__p) {
diff --git a/lib/Headers/htmxlintrin.h b/lib/Headers/htmxlintrin.h
index 30f524d5df49..c7571ecd0661 100644
--- a/lib/Headers/htmxlintrin.h
+++ b/lib/Headers/htmxlintrin.h
@@ -46,7 +46,7 @@ extern "C" {
typedef char TM_buff_type[16];
-/* This macro can be used to determine whether a transaction was successfully
+/* This macro can be used to determine whether a transaction was successfully
started from the __TM_begin() and __TM_simple_begin() intrinsic functions
below. */
#define _HTM_TBEGIN_STARTED 1
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 21ad3281f850..f3c6d1914d61 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -24,178 +24,147 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
-#ifdef __MMX__
#include <mmintrin.h>
-#endif
-#ifdef __SSE__
#include <xmmintrin.h>
-#endif
-#ifdef __SSE2__
#include <emmintrin.h>
-#endif
-#ifdef __SSE3__
#include <pmmintrin.h>
-#endif
-#ifdef __SSSE3__
#include <tmmintrin.h>
-#endif
-#if defined (__SSE4_2__) || defined (__SSE4_1__)
#include <smmintrin.h>
-#endif
-#if defined (__AES__) || defined (__PCLMUL__)
#include <wmmintrin.h>
-#endif
-#ifdef __AVX__
#include <avxintrin.h>
-#endif
-#ifdef __AVX2__
#include <avx2intrin.h>
-#endif
-#ifdef __BMI__
+/* The 256-bit versions of functions in f16cintrin.h.
+ Intel documents these as being in immintrin.h, and
+ they depend on typedefs from avxintrin.h. */
+
+#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+_mm256_cvtph_ps(__m128i __a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
+
#include <bmiintrin.h>
-#endif
-#ifdef __BMI2__
#include <bmi2intrin.h>
-#endif
-#ifdef __LZCNT__
#include <lzcntintrin.h>
-#endif
-#ifdef __FMA__
#include <fmaintrin.h>
-#endif
-#ifdef __AVX512F__
#include <avx512fintrin.h>
-#endif
-#ifdef __AVX512VL__
#include <avx512vlintrin.h>
-#endif
-#ifdef __AVX512BW__
#include <avx512bwintrin.h>
-#endif
-#ifdef __AVX512CD__
#include <avx512cdintrin.h>
-#endif
-#ifdef __AVX512DQ__
#include <avx512dqintrin.h>
-#endif
-#if defined (__AVX512VL__) && defined (__AVX512BW__)
#include <avx512vlbwintrin.h>
-#endif
-#if defined (__AVX512VL__) && defined (__AVX512DQ__)
#include <avx512vldqintrin.h>
-#endif
-#ifdef __AVX512ER__
#include <avx512erintrin.h>
-#endif
-#ifdef __RDRND__
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
return __builtin_ia32_rdrand16_step(__p);
}
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand32_step(unsigned int *__p)
{
return __builtin_ia32_rdrand32_step(__p);
}
#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
return __builtin_ia32_rdrand64_step(__p);
}
#endif
-#endif /* __RDRND__ */
-#ifdef __FSGSBASE__
#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
{
return __builtin_ia32_rdfsbase32();
}
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u64(void)
{
return __builtin_ia32_rdfsbase64();
}
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u32(void)
{
return __builtin_ia32_rdgsbase32();
}
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u64(void)
{
return __builtin_ia32_rdgsbase64();
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u32(unsigned int __V)
{
return __builtin_ia32_wrfsbase32(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u64(unsigned long long __V)
{
return __builtin_ia32_wrfsbase64(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u32(unsigned int __V)
{
return __builtin_ia32_wrgsbase32(__V);
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u64(unsigned long long __V)
{
return __builtin_ia32_wrgsbase64(__V);
}
#endif
-#endif /* __FSGSBASE__ */
-#ifdef __RTM__
#include <rtmintrin.h>
-#endif
-#ifdef __RTM__
#include <xtestintrin.h>
-#endif
-#ifdef __SHA__
#include <shaintrin.h>
-#endif
#include <fxsrintrin.h>
+#include <xsaveintrin.h>
+
+#include <xsaveoptintrin.h>
+
+#include <xsavecintrin.h>
+
+#include <xsavesintrin.h>
+
/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
* whereas others are also available at all times. */
#include <adxintrin.h>
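The same pattern applies to the RDRND wrappers that immintrin.h now defines unconditionally: the declarations are always visible, and a caller opts in with a target attribute. A retry-loop sketch, assuming RDRAND hardware:

#include <immintrin.h>
#include <stdio.h>

__attribute__((target("rdrnd")))
static int random_u32(unsigned int *out) {
  for (int i = 0; i < 10; ++i)   /* the step can transiently fail (returns 0) */
    if (_rdrand32_step(out))
      return 1;                  /* success: *out holds a random value */
  return 0;
}

int main(void) {
  unsigned int v;
  if (random_u32(&v))            /* assumes RDRAND-capable hardware */
    printf("%u\n", v);
  return 0;
}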
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 8ee29975c2eb..4c00e42ac3a9 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -25,15 +25,11 @@
#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
#endif
-#ifndef __LZCNT__
-# error "LZCNT instruction is not enabled"
-#endif /* __LZCNT__ */
-
#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
static __inline__ unsigned short __DEFAULT_FN_ATTRS
__lzcnt16(unsigned short __X)
diff --git a/lib/Headers/mm3dnow.h b/lib/Headers/mm3dnow.h
index ac8e0f4af1bf..cb93faf2b6a4 100644
--- a/lib/Headers/mm3dnow.h
+++ b/lib/Headers/mm3dnow.h
@@ -30,7 +30,7 @@
typedef float __v2sf __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
static __inline__ void __DEFAULT_FN_ATTRS
_m_femms() {
@@ -132,6 +132,10 @@ _m_pmulhrw(__m64 __m1, __m64 __m2) {
return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
}
+/* Handle the 3dnowa instructions here. */
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa")))
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
_m_pf2iw(__m64 __m) {
return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index 0be5f32c7d02..162cb1aa1711 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -24,10 +24,6 @@
#ifndef __MMINTRIN_H
#define __MMINTRIN_H
-#ifndef __MMX__
-#error "MMX instruction set not enabled"
-#else
-
typedef long long __m64 __attribute__((__vector_size__(8)));
typedef int __v2si __attribute__((__vector_size__(8)));
@@ -35,7 +31,7 @@ typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
static __inline__ void __DEFAULT_FN_ATTRS
_mm_empty(void)
@@ -140,7 +136,7 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_adds_pi8(__m64 __m1, __m64 __m2)
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
@@ -148,17 +144,17 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_adds_pi16(__m64 __m1, __m64 __m2)
{
- return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_adds_pu8(__m64 __m1, __m64 __m2)
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
-
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_adds_pu16(__m64 __m1, __m64 __m2)
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
}
@@ -168,13 +164,13 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
}
-
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
}
-
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
@@ -198,7 +194,7 @@ _mm_subs_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
}
-
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_subs_pu16(__m64 __m1, __m64 __m2)
{
@@ -216,9 +212,9 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
}
-
+
static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
}
@@ -232,7 +228,7 @@ _mm_sll_pi16(__m64 __m, __m64 __count)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_slli_pi16(__m64 __m, int __count)
{
- return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -256,13 +252,13 @@ _mm_sll_si64(__m64 __m, __m64 __count)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_slli_si64(__m64 __m, int __count)
{
- return (__m64)__builtin_ia32_psllqi(__m, __count);
+ return (__m64)__builtin_ia32_psllqi(__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sra_pi16(__m64 __m, __m64 __count)
{
- return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -274,7 +270,7 @@ _mm_srai_pi16(__m64 __m, int __count)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sra_pi32(__m64 __m, __m64 __count)
{
- return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -286,19 +282,19 @@ _mm_srai_pi32(__m64 __m, int __count)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_srl_pi16(__m64 __m, __m64 __count)
{
- return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_srli_pi16(__m64 __m, int __count)
{
- return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_srl_pi32(__m64 __m, __m64 __count)
{
- return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -310,13 +306,13 @@ _mm_srli_pi32(__m64 __m, int __count)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_srl_si64(__m64 __m, __m64 __count)
{
- return (__m64)__builtin_ia32_psrlq(__m, __count);
+ return (__m64)__builtin_ia32_psrlq(__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_srli_si64(__m64 __m, int __count)
{
- return (__m64)__builtin_ia32_psrlqi(__m, __count);
+ return (__m64)__builtin_ia32_psrlqi(__m, __count);
}
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -447,7 +443,9 @@ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
/* Aliases for compatibility. */
#define _m_empty _mm_empty
#define _m_from_int _mm_cvtsi32_si64
+#define _m_from_int64 _mm_cvtsi64_m64
#define _m_to_int _mm_cvtsi64_si32
+#define _m_to_int64 _mm_cvtm64_si64
#define _m_packsswb _mm_packs_pi16
#define _m_packssdw _mm_packs_pi32
#define _m_packuswb _mm_packs_pu16
@@ -501,7 +499,5 @@ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtd _mm_cmpgt_pi32
-#endif /* __MMX__ */
-
#endif /* __MMINTRIN_H */
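The two new aliases round-trip a 64-bit integer through an __m64, completing the _m_to_int/_m_from_int naming scheme. A sketch for x86-64 (note the _mm_empty() before returning to x87 code):

#include <mmintrin.h>
#include <stdio.h>

int main(void) {
  long long x = 0x0123456789abcdefLL;
  __m64 m = _m_from_int64(x);   /* alias for _mm_cvtsi64_m64 */
  long long y = _m_to_int64(m); /* alias for _mm_cvtm64_si64 */
  _mm_empty();                  /* clear MMX state before any x87 use */
  printf("%llx\n", y);          /* 123456789abcdef */
  return 0;
}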
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index b861fdd8c2a1..b147e891dceb 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -32,142 +32,117 @@ module _Builtin_intrinsics [system] [extern_c] {
}
explicit module cpuid {
- requires x86
header "cpuid.h"
}
explicit module mmx {
- requires mmx
header "mmintrin.h"
}
explicit module f16c {
- requires f16c
header "f16cintrin.h"
}
explicit module sse {
- requires sse
export mmx
export sse2 // note: for hackish <emmintrin.h> dependency
header "xmmintrin.h"
}
explicit module sse2 {
- requires sse2
export sse
header "emmintrin.h"
}
explicit module sse3 {
- requires sse3
export sse2
header "pmmintrin.h"
}
explicit module ssse3 {
- requires ssse3
export sse3
header "tmmintrin.h"
}
explicit module sse4_1 {
- requires sse41
export ssse3
header "smmintrin.h"
}
explicit module sse4_2 {
- requires sse42
export sse4_1
header "nmmintrin.h"
}
explicit module sse4a {
- requires sse4a
export sse3
header "ammintrin.h"
}
explicit module avx {
- requires avx
export sse4_2
header "avxintrin.h"
}
explicit module avx2 {
- requires avx2
export avx
header "avx2intrin.h"
}
explicit module avx512f {
- requires avx512f
export avx2
header "avx512fintrin.h"
}
explicit module avx512er {
- requires avx512er
header "avx512erintrin.h"
}
explicit module bmi {
- requires bmi
header "bmiintrin.h"
}
explicit module bmi2 {
- requires bmi2
header "bmi2intrin.h"
}
explicit module fma {
- requires fma
header "fmaintrin.h"
}
explicit module fma4 {
- requires fma4
export sse3
header "fma4intrin.h"
}
explicit module lzcnt {
- requires lzcnt
header "lzcntintrin.h"
}
explicit module popcnt {
- requires popcnt
header "popcntintrin.h"
}
explicit module mm3dnow {
- requires mm3dnow
header "mm3dnow.h"
}
explicit module xop {
- requires xop
export fma4
header "xopintrin.h"
}
explicit module aes_pclmul {
- requires aes, pclmul
header "wmmintrin.h"
export aes
export pclmul
}
explicit module aes {
- requires aes
header "__wmmintrin_aes.h"
}
explicit module pclmul {
- requires pclmul
header "__wmmintrin_pclmul.h"
}
}
diff --git a/lib/Headers/nmmintrin.h b/lib/Headers/nmmintrin.h
index f12622d7be68..57fec15963d1 100644
--- a/lib/Headers/nmmintrin.h
+++ b/lib/Headers/nmmintrin.h
@@ -24,12 +24,7 @@
#ifndef _NMMINTRIN_H
#define _NMMINTRIN_H
-#ifndef __SSE4_2__
-#error "SSE4.2 instruction set not enabled"
-#else
-
/* To match the expectations of gcc we put the sse4.2 definitions into
   smmintrin.h, so we simply include it here. */
#include <smmintrin.h>
-#endif /* __SSE4_2__ */
#endif /* _NMMINTRIN_H */
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index e1b8d9b603d1..0ff940912483 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -20,18 +20,14 @@
*
*===-----------------------------------------------------------------------===
*/
-
+
#ifndef __PMMINTRIN_H
#define __PMMINTRIN_H
-#ifndef __SSE3__
-#error "SSE3 instruction set not enabled"
-#else
-
#include <emmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_lddqu_si128(__m128i const *__p)
@@ -117,6 +113,4 @@ _mm_mwait(unsigned __extensions, unsigned __hints)
#undef __DEFAULT_FN_ATTRS
-#endif /* __SSE3__ */
-
#endif /* __PMMINTRIN_H */
diff --git a/lib/Headers/popcntintrin.h b/lib/Headers/popcntintrin.h
index 1a4e9000aeb6..6fcda65c7807 100644
--- a/lib/Headers/popcntintrin.h
+++ b/lib/Headers/popcntintrin.h
@@ -21,15 +21,11 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __POPCNT__
-#error "POPCNT instruction set not enabled"
-#endif
-
#ifndef _POPCNTINTRIN_H
#define _POPCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
static __inline__ int __DEFAULT_FN_ATTRS
_mm_popcnt_u32(unsigned int __A)
@@ -37,12 +33,24 @@ _mm_popcnt_u32(unsigned int __A)
return __builtin_popcount(__A);
}
+static __inline__ int __DEFAULT_FN_ATTRS
+_popcnt32(int __A)
+{
+ return __builtin_popcount(__A);
+}
+
#ifdef __x86_64__
static __inline__ long long __DEFAULT_FN_ATTRS
_mm_popcnt_u64(unsigned long long __A)
{
return __builtin_popcountll(__A);
}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_popcnt64(long long __A)
+{
+ return __builtin_popcountll(__A);
+}
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
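The new _popcnt32/_popcnt64 spellings take signed arguments, matching the names gcc provides; they lower to the same popcount builtins as the _mm_popcnt_* forms. A sketch (assumes POPCNT support, e.g. compile with -mpopcnt):

#include <x86intrin.h>
#include <stdio.h>

int main(void) {
  printf("%d\n", _popcnt32(0xF0F0));          /* 8 set bits */
  printf("%lld\n", _popcnt64(0xFFFFFFFFLL));  /* 32 set bits */
  return 0;
}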
diff --git a/lib/Headers/prfchwintrin.h b/lib/Headers/prfchwintrin.h
index 9825bd8c9700..ba0285751823 100644
--- a/lib/Headers/prfchwintrin.h
+++ b/lib/Headers/prfchwintrin.h
@@ -30,6 +30,12 @@
#if defined(__PRFCHW__) || defined(__3dNOW__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_prefetch(void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_m_prefetchw(void *__P)
{
__builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
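The added _m_prefetch hints an upcoming read (rw hint 0) where the existing _m_prefetchw hints an upcoming write (rw hint 1); both request maximal temporal locality. A sketch, assuming the header's guard is satisfied (compile with -mprfchw or -m3dnow):

#include <x86intrin.h>

void warm(int *dst, const int *src) {
  _m_prefetch((void *)src);  /* expect to read *src soon */
  _m_prefetchw(dst);         /* expect to write *dst soon */
}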
diff --git a/lib/Headers/rdseedintrin.h b/lib/Headers/rdseedintrin.h
index fdf7e18afa95..421f4ea48702 100644
--- a/lib/Headers/rdseedintrin.h
+++ b/lib/Headers/rdseedintrin.h
@@ -28,10 +28,8 @@
#ifndef __RDSEEDINTRIN_H
#define __RDSEEDINTRIN_H
-#ifdef __RDSEED__
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
@@ -55,5 +53,4 @@ _rdseed64_step(unsigned long long *__p)
#undef __DEFAULT_FN_ATTRS
-#endif /* __RDSEED__ */
#endif /* __RDSEEDINTRIN_H */
diff --git a/lib/Headers/rtmintrin.h b/lib/Headers/rtmintrin.h
index 17256815fb8d..e6a58d743bc9 100644
--- a/lib/Headers/rtmintrin.h
+++ b/lib/Headers/rtmintrin.h
@@ -38,7 +38,7 @@
#define _XABORT_CODE(x) (((x) >> 24) & 0xFF)
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm")))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_xbegin(void)
diff --git a/lib/Headers/shaintrin.h b/lib/Headers/shaintrin.h
index 960cced7a55c..9b5d21800819 100644
--- a/lib/Headers/shaintrin.h
+++ b/lib/Headers/shaintrin.h
@@ -28,15 +28,11 @@
#ifndef __SHAINTRIN_H
#define __SHAINTRIN_H
-#if !defined (__SHA__)
-# error "SHA instructions not enabled"
-#endif
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
- __builtin_ia32_sha1rnds4((V1), (V2), (M)); })
+ __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); })
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
diff --git a/lib/Headers/smmintrin.h b/lib/Headers/smmintrin.h
index 04bd0722b11f..69ad07f42ad6 100644
--- a/lib/Headers/smmintrin.h
+++ b/lib/Headers/smmintrin.h
@@ -24,14 +24,10 @@
#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H
-#ifndef __SSE4_1__
-#error "SSE4.1 instruction set not enabled"
-#else
-
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
@@ -61,35 +57,28 @@
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
#define _mm_round_ps(X, M) __extension__ ({ \
- __m128 __X = (X); \
- (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+ (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
#define _mm_round_ss(X, Y, M) __extension__ ({ \
- __m128 __X = (X); \
- __m128 __Y = (Y); \
- (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+ (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
#define _mm_round_pd(X, M) __extension__ ({ \
- __m128d __X = (X); \
- (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+ (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
#define _mm_round_sd(X, Y, M) __extension__ ({ \
- __m128d __X = (X); \
- __m128d __Y = (Y); \
- (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
+ (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
- __m128d __V1 = (V1); \
- __m128d __V2 = (V2); \
- (__m128d)__builtin_shufflevector((__v2df)__V1, (__v2df)__V2, \
+ (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), \
(((M) & 0x01) ? 2 : 0), \
(((M) & 0x02) ? 3 : 1)); })
#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
- __m128 __V1 = (V1); \
- __m128 __V2 = (V2); \
- (__m128)__builtin_shufflevector((__v4sf)__V1, (__v4sf)__V2, \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
(((M) & 0x01) ? 4 : 0), \
(((M) & 0x02) ? 5 : 1), \
(((M) & 0x04) ? 6 : 2), \
@@ -117,9 +106,8 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
}
#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
- __m128i __V1 = (V1); \
- __m128i __V2 = (V2); \
- (__m128i)__builtin_shufflevector((__v8hi)__V1, (__v8hi)__V2, \
+ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), \
(((M) & 0x01) ? 8 : 0), \
(((M) & 0x02) ? 9 : 1), \
(((M) & 0x04) ? 10 : 2), \
@@ -144,20 +132,18 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
- __m128 __X = (X); \
- __m128 __Y = (Y); \
- (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+ (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (M)); })
#define _mm_dp_pd(X, Y, M) __extension__ ({\
- __m128d __X = (X); \
- __m128d __Y = (Y); \
- (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
+ (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (M)); })
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i *__V)
+_mm_stream_load_si128 (__m128i const *__V)
{
- return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
+ return (__m128i) __builtin_ia32_movntdqa ((const __v2di *) __V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
@@ -213,7 +199,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__ \
({ union { int __i; float __f; } __t; \
- __v4sf __a = (__v4sf)(X); \
+ __v4sf __a = (__v4sf)(__m128)(X); \
__t.__f = __a[(N) & 3]; \
__t.__i;}))
@@ -221,39 +207,44 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
(D) = __a[N]; }))
-
+
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
-
+
/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
_MM_MK_INSERTPS_NDX((N), 0, 0x0e))
-
+
/* Insert int into packed integer array at index. */
-#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- __a[(N) & 15] = (I); \
- __a;}))
-#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[(N) & 3] = (I); \
- __a;}))
+#define _mm_insert_epi8(X, I, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ __a[(N) & 15] = (I); \
+ __a;}))
+#define _mm_insert_epi32(X, I, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ __a[(N) & 3] = (I); \
+ __a;}))
#ifdef __x86_64__
-#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[(N) & 1] = (I); \
- __a;}))
+#define _mm_insert_epi64(X, I, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ __a[(N) & 1] = (I); \
+ __a;}))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
* as a zero-extended value, so it is unsigned.
*/
-#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- (int)(unsigned char) \
- __a[(N) & 15];}))
-#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[(N) & 3];}))
+#define _mm_extract_epi8(X, N) (__extension__ \
+ ({ __v16qi __a = (__v16qi)(__m128i)(X); \
+ (int)(unsigned char) __a[(N) & 15];}))
+#define _mm_extract_epi32(X, N) (__extension__ \
+ ({ __v4si __a = (__v4si)(__m128i)(X); \
+ (int)__a[(N) & 3];}))
#ifdef __x86_64__
-#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[(N) & 1];}))
+#define _mm_extract_epi64(X, N) (__extension__ \
+ ({ __v2di __a = (__v2di)(__m128i)(X); \
+ (long long)__a[(N) & 1];}))
#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
@@ -290,37 +281,44 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi8_epi16(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
+ /* This function always performs a signed extension, but __v16qi is a char,
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi8_epi32(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
+ /* This function always performs a signed extension, but __v16qi is a char,
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi8_epi64(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
+ /* This function always performs a signed extension, but __v16qi is a char,
+ which may be signed or unsigned, so use __v16qs. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi16_epi32(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi16_epi64(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi32_epi64(__m128i __V)
{
- return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
+ return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
/* SSE4 Packed Integer Zero-Extension. */
@@ -369,9 +367,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
- __m128i __X = (X); \
- __m128i __Y = (Y); \
- (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)); })
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_minpos_epu16(__m128i __V)
@@ -379,9 +376,13 @@ _mm_minpos_epu16(__m128i __V)
return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}
+/* Handle the sse4.2 definitions here. */
+
/* These definitions are normally in nmmintrin.h, but gcc puts them here,
so we do the same. */
-#ifdef __SSE4_2__
+
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
@@ -410,36 +411,59 @@ _mm_minpos_epu16(__m128i __V)
#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
-#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
-#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))
+#define _mm_cmpistrm(A, B, M) \
+ (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistri(A, B, M) \
+ (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpestrm(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
+ (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
#define _mm_cmpestri(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
-
+ (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
+
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
- __builtin_ia32_pcmpistria128((A), (B), (M))
+ (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpistrc(A, B, M) \
- __builtin_ia32_pcmpistric128((A), (B), (M))
+ (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpistro(A, B, M) \
- __builtin_ia32_pcmpistrio128((A), (B), (M))
+ (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpistrs(A, B, M) \
- __builtin_ia32_pcmpistris128((A), (B), (M))
+ (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpistrz(A, B, M) \
- __builtin_ia32_pcmpistriz128((A), (B), (M))
+ (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M))
#define _mm_cmpestra(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
+ (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
+ (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
#define _mm_cmpestro(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
+ (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
+ (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
- __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
+ (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M))
/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -481,7 +505,4 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
#include <popcntintrin.h>
#endif
-#endif /* __SSE4_2__ */
-#endif /* __SSE4_1__ */
-
#endif /* _SMMINTRIN_H */
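Since the insert/extract macros now cast their vector argument through __m128i themselves, arbitrary vector-valued expressions can be passed, not just variables of exactly that type. A sketch (SSE4.1, e.g. clang -msse4.1):

#include <smmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi32(40, 30, 20, 10);             /* lanes {10,20,30,40} */
  int lane2 = _mm_extract_epi32(_mm_add_epi32(v, v), 2); /* 30 + 30 = 60 */
  __m128i w = _mm_insert_epi32(v, 99, 0);                /* lane 0 := 99 */
  printf("%d %d\n", lane2, _mm_extract_epi32(w, 0));     /* 60 99 */
  return 0;
}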
diff --git a/lib/Headers/stdint.h b/lib/Headers/stdint.h
index 0303db90be1f..3f2fcbc57023 100644
--- a/lib/Headers/stdint.h
+++ b/lib/Headers/stdint.h
@@ -77,14 +77,14 @@
* C99 7.18.1.2 Minimum-width integer types.
* C99 7.18.1.3 Fastest minimum-width integer types.
*
- * The standard requires that exact-width type be defined for 8-, 16-, 32-, and
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
* 64-bit types if they are implemented. Other exact-width types are optional.
* This implementation defines an exact-width type for every integer width
* that is represented in the standard integer types.
*
* The standard also requires minimum-width types be defined for 8-, 16-, 32-,
* and 64-bit widths regardless of whether there are corresponding exact-width
- * types.
+ * types.
*
* To accommodate targets that are missing types that are exactly 8, 16, 32, or
* 64 bits wide, this implementation takes an approach of cascading
@@ -97,7 +97,7 @@
* suboptimal.
*
* In violation of the standard, some targets do not implement a type that is
- * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
* To accommodate these targets, a required minimum-width type is only
* defined if there exists an exact-width type of equal or greater width.
*/
@@ -247,7 +247,7 @@ typedef __uint_least8_t uint_fast8_t;
#endif /* __int_least8_t */
/* prevent glibc sys/types.h from defining conflicting types */
-#ifndef __int8_t_defined
+#ifndef __int8_t_defined
# define __int8_t_defined
#endif /* __int8_t_defined */
@@ -280,9 +280,9 @@ typedef __UINTMAX_TYPE__ uintmax_t;
*
* The standard requires that integer constant macros be defined for all the
* minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
- * types are required, the corresponding integer constant macros are defined
+ * types are required, the corresponding integer constant macros are defined
* here. This implementation also defines minimum-width types for every other
- * integer width that the target implements, so corresponding macros are
+ * integer width that the target implements, so corresponding macros are
* defined below, too.
*
* These macros are defined using the same successive-shrinking approach as
@@ -452,7 +452,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#endif /* __int_least8_t */
-/* C99 7.18.2.1 Limits of exact-width integer types.
+/* C99 7.18.2.1 Limits of exact-width integer types.
* C99 7.18.2.2 Limits of minimum-width integer types.
* C99 7.18.2.3 Limits of fastest minimum-width integer types.
*
diff --git a/lib/Headers/tbmintrin.h b/lib/Headers/tbmintrin.h
index 48c0b07f423f..785961c6ab86 100644
--- a/lib/Headers/tbmintrin.h
+++ b/lib/Headers/tbmintrin.h
@@ -21,10 +21,6 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __TBM__
-#error "TBM instruction set is not enabled"
-#endif
-
#ifndef __X86INTRIN_H
#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
#endif
@@ -33,9 +29,11 @@
#define __TBMINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm")))
-#define __bextri_u32(a, b) (__builtin_ia32_bextri_u32((a), (b)))
+#define __bextri_u32(a, b) \
+ ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \
+ (unsigned int)(b)))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__blcfill_u32(unsigned int a)
@@ -92,7 +90,9 @@ __tzmsk_u32(unsigned int a)
}
#ifdef __x86_64__
-#define __bextri_u64(a, b) (__builtin_ia32_bextri_u64((a), (int)(b)))
+#define __bextri_u64(a, b) \
+ ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \
+ (unsigned long long)(b)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__blcfill_u64(unsigned long long a)
diff --git a/lib/Headers/tgmath.h b/lib/Headers/tgmath.h
index a48e267e60d0..318e1185feee 100644
--- a/lib/Headers/tgmath.h
+++ b/lib/Headers/tgmath.h
@@ -490,7 +490,7 @@ static double _Complex
static long double _Complex
_TG_ATTRS
- __tg_pow(long double _Complex __x, long double _Complex __y)
+ __tg_pow(long double _Complex __x, long double _Complex __y)
{return cpowl(__x, __y);}
#undef pow
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 2ecc730e90e9..0002890c1393 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -20,18 +20,14 @@
*
*===-----------------------------------------------------------------------===
*/
-
+
#ifndef __TMMINTRIN_H
#define __TMMINTRIN_H
-#ifndef __SSSE3__
-#error "SSSE3 instruction set not enabled"
-#else
-
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_abs_pi8(__m64 __a)
@@ -70,14 +66,11 @@ _mm_abs_epi32(__m128i __a)
}
#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
- __m128i __a = (a); \
- __m128i __b = (b); \
- (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
+ (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (n)); })
#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
- __m64 __a = (a); \
- __m64 __b = (b); \
- (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
+ (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_hadd_epi16(__m128i __a, __m128i __b)
@@ -225,6 +218,4 @@ _mm_sign_pi32(__m64 __a, __m64 __b)
#undef __DEFAULT_FN_ATTRS
-#endif /* __SSSE3__ */
-
#endif /* __TMMINTRIN_H */
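_mm_alignr_epi8 likewise now casts both operands itself. It concatenates the two vectors (first argument in the high half) and extracts 16 bytes starting n bytes up. A sketch (SSSE3):

#include <tmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(0x11);
  __m128i b = _mm_set1_epi8(0x22);
  /* (a:b) >> 4 bytes: low 12 bytes from the top of b, high 4 bytes from a. */
  __m128i r = _mm_alignr_epi8(a, b, 4);
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%02x %02x\n", out[0], out[15]);  /* 22 11 */
  return 0;
}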
diff --git a/lib/Headers/wmmintrin.h b/lib/Headers/wmmintrin.h
index 369e3c208e53..a2d931010aea 100644
--- a/lib/Headers/wmmintrin.h
+++ b/lib/Headers/wmmintrin.h
@@ -26,17 +26,8 @@
#include <emmintrin.h>
-#if !defined (__AES__) && !defined (__PCLMUL__)
-# error "AES/PCLMUL instructions not enabled"
-#else
-
-#ifdef __AES__
#include <__wmmintrin_aes.h>
-#endif /* __AES__ */
-#ifdef __PCLMUL__
#include <__wmmintrin_pclmul.h>
-#endif /* __PCLMUL__ */
-#endif /* __AES__ || __PCLMUL__ */
#endif /* _WMMINTRIN_H */
diff --git a/lib/Headers/x86intrin.h b/lib/Headers/x86intrin.h
index 21a43daf3c2d..4d8077e38291 100644
--- a/lib/Headers/x86intrin.h
+++ b/lib/Headers/x86intrin.h
@@ -28,53 +28,29 @@
#include <immintrin.h>
-#ifdef __3dNOW__
#include <mm3dnow.h>
-#endif
-#ifdef __BMI__
#include <bmiintrin.h>
-#endif
-#ifdef __BMI2__
#include <bmi2intrin.h>
-#endif
-#ifdef __LZCNT__
#include <lzcntintrin.h>
-#endif
-#ifdef __POPCNT__
#include <popcntintrin.h>
-#endif
-#ifdef __RDSEED__
#include <rdseedintrin.h>
-#endif
-#ifdef __PRFCHW__
#include <prfchwintrin.h>
-#endif
-#ifdef __SSE4A__
#include <ammintrin.h>
-#endif
-#ifdef __FMA4__
#include <fma4intrin.h>
-#endif
-#ifdef __XOP__
#include <xopintrin.h>
-#endif
-#ifdef __TBM__
#include <tbmintrin.h>
-#endif
-#ifdef __F16C__
#include <f16cintrin.h>
-#endif
/* FIXME: LWP */
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 0d58c753029f..ae0b2cd1b26e 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -20,13 +20,9 @@
*
*===-----------------------------------------------------------------------===
*/
-
+
#ifndef __XMMINTRIN_H
#define __XMMINTRIN_H
-
-#ifndef __SSE__
-#error "SSE instruction set not enabled"
-#else
#include <mmintrin.h>
@@ -41,7 +37,7 @@ typedef float __m128 __attribute__((__vector_size__(16)));
#endif
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_add_ss(__m128 __a, __m128 __b)
@@ -581,6 +577,12 @@ _mm_loadr_ps(const float *__p)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_undefined_ps(void)
+{
+ return (__m128)__builtin_ia32_undef128();
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_set_ss(float __w)
{
return (__m128){ __w, 0, 0, 0 };
@@ -752,8 +754,7 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
}
#define _mm_shuffle_pi16(a, n) __extension__ ({ \
- __m64 __a = (a); \
- (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })
+ (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
static __inline__ void __DEFAULT_FN_ATTRS
_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
@@ -792,9 +793,7 @@ _mm_setcsr(unsigned int __i)
}
#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
- __m128 __a = (a); \
- __m128 __b = (b); \
- (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
+ (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
(mask) & 0x3, ((mask) & 0xc) >> 2, \
(((mask) & 0x30) >> 4) + 4, \
(((mask) & 0xc0) >> 6) + 4); })
@@ -868,7 +867,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtpi8_ps(__m64 __a)
{
__m64 __b;
-
+
__b = _mm_setzero_si64();
__b = _mm_cmpgt_pi8(__b, __a);
__b = _mm_unpacklo_pi8(__a, __b);
@@ -880,7 +879,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtpu8_ps(__m64 __a)
{
__m64 __b;
-
+
__b = _mm_setzero_si64();
__b = _mm_unpacklo_pi8(__a, __b);
@@ -891,7 +890,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
{
__m128 __c;
-
+
__c = _mm_setzero_ps();
__c = _mm_cvtpi32_ps(__c, __b);
__c = _mm_movelh_ps(__c, __c);
@@ -903,11 +902,11 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_cvtps_pi16(__m128 __a)
{
__m64 __b, __c;
-
+
__b = _mm_cvtps_pi32(__a);
__a = _mm_movehl_ps(__a, __a);
__c = _mm_cvtps_pi32(__a);
-
+
return _mm_packs_pi32(__b, __c);
}
@@ -915,10 +914,10 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_cvtps_pi8(__m128 __a)
{
__m64 __b, __c;
-
+
__b = _mm_cvtps_pi16(__a);
__c = _mm_setzero_si64();
-
+
return _mm_packs_pi16(__b, __c);
}
@@ -928,6 +927,11 @@ _mm_movemask_ps(__m128 __a)
return __builtin_ia32_movmskps(__a);
}
+
+#ifdef _MSC_VER
+#define _MM_ALIGN16 __declspec(align(16))
+#endif
+
#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#define _MM_EXCEPT_INVALID (0x0001)
@@ -1003,6 +1007,4 @@ do { \
#include <emmintrin.h>
#endif
-#endif /* __SSE__ */
-
#endif /* __XMMINTRIN_H */
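_mm_shuffle_ps takes its selector as a compile-time immediate: the low two fields pick lanes from the first operand, the high two from the second. A sketch using the _MM_SHUFFLE helper (SSE, on by default for x86-64):

#include <xmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(3.f, 2.f, 1.f, 0.f);  /* a[i] == i     */
  __m128 b = _mm_set_ps(7.f, 6.f, 5.f, 4.f);  /* b[i] == i + 4 */
  /* result = { a[1], a[0], b[3], b[2] } */
  __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1));
  float out[4];
  _mm_storeu_ps(out, r);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 1 0 7 6 */
  return 0;
}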
diff --git a/lib/Headers/xopintrin.h b/lib/Headers/xopintrin.h
index 2eb35c4be844..f07f51c27515 100644
--- a/lib/Headers/xopintrin.h
+++ b/lib/Headers/xopintrin.h
@@ -28,14 +28,10 @@
#ifndef __XOPINTRIN_H
#define __XOPINTRIN_H
-#ifndef __XOP__
-# error "XOP instruction set is not enabled"
-#else
-
#include <fma4intrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
@@ -242,20 +238,16 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
}
#define _mm_roti_epi8(A, N) __extension__ ({ \
- __m128i __A = (A); \
- (__m128i)__builtin_ia32_vprotbi((__v16qi)__A, (N)); })
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
#define _mm_roti_epi16(A, N) __extension__ ({ \
- __m128i __A = (A); \
- (__m128i)__builtin_ia32_vprotwi((__v8hi)__A, (N)); })
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
#define _mm_roti_epi32(A, N) __extension__ ({ \
- __m128i __A = (A); \
- (__m128i)__builtin_ia32_vprotdi((__v4si)__A, (N)); })
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
#define _mm_roti_epi64(A, N) __extension__ ({ \
- __m128i __A = (A); \
- (__m128i)__builtin_ia32_vprotqi((__v2di)__A, (N)); })
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi8(__m128i __A, __m128i __B)
@@ -306,44 +298,36 @@ _mm_sha_epi64(__m128i __A, __m128i __B)
}
#define _mm_com_epu8(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomub((__v16qi)__A, (__v16qi)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
#define _mm_com_epu16(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomuw((__v8hi)__A, (__v8hi)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
#define _mm_com_epu32(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomud((__v4si)__A, (__v4si)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
#define _mm_com_epu64(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomuq((__v2di)__A, (__v2di)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
#define _mm_com_epi8(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomb((__v16qi)__A, (__v16qi)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)); })
#define _mm_com_epi16(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomw((__v8hi)__A, (__v8hi)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)); })
#define _mm_com_epi32(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomd((__v4si)__A, (__v4si)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)); })
#define _mm_com_epi64(A, B, N) __extension__ ({ \
- __m128i __A = (A); \
- __m128i __B = (B); \
- (__m128i)__builtin_ia32_vpcomq((__v2di)__A, (__v2di)__B, (N)); })
+ (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)); })
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
@@ -739,32 +723,23 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B)
}
#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
- __m128d __X = (X); \
- __m128d __Y = (Y); \
- __m128i __C = (C); \
- (__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, \
- (__v2di)__C, (I)); })
+ (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__v2di)(__m128i)(C), (I)); })
#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
- __m256d __X = (X); \
- __m256d __Y = (Y); \
- __m256i __C = (C); \
- (__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, \
- (__v4di)__C, (I)); })
+ (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+ (__v4df)(__m256d)(Y), \
+ (__v4di)(__m256i)(C), (I)); })
#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
- __m128 __X = (X); \
- __m128 __Y = (Y); \
- __m128i __C = (C); \
- (__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, \
- (__v4si)__C, (I)); })
+ (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (__v4si)(__m128i)(C), (I)); })
#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
- __m256 __X = (X); \
- __m256 __Y = (Y); \
- __m256i __C = (C); \
- (__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, \
- (__v8si)__C, (I)); })
+ (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+ (__v8sf)(__m256)(Y), \
+ (__v8si)(__m256i)(C), (I)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ss(__m128 __A)
@@ -804,6 +779,4 @@ _mm256_frcz_pd(__m256d __A)
#undef __DEFAULT_FN_ATTRS
-#endif /* __XOP__ */
-
#endif /* __XOPINTRIN_H */
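The rotate macros rotate each lane left by a constant. A sketch for _mm_roti_epi32 (XOP is AMD-only; assumes -mxop and XOP hardware):

#include <x86intrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set1_epi32((int)0x80000001);
  __m128i r = _mm_roti_epi32(v, 1);  /* rotate left by 1 -> 0x00000003 */
  printf("%08x\n", (unsigned)_mm_extract_epi32(r, 0));
  return 0;
}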
diff --git a/lib/Headers/xsavecintrin.h b/lib/Headers/xsavecintrin.h
new file mode 100644
index 000000000000..598470a682e2
--- /dev/null
+++ b/lib/Headers/xsavecintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic ------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/lib/Headers/xsaveintrin.h b/lib/Headers/xsaveintrin.h
new file mode 100644
index 000000000000..a2e6b2e742ff
--- /dev/null
+++ b/lib/Headers/xsaveintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
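The XSAVE wrappers take a 64-byte-aligned save area and a mask of state components. This sketch saves and restores x87+SSE state; the buffer size is an illustrative upper bound, and OS-enabled XSAVE (plus compiling with -mxsave) is assumed rather than checked:

#include <immintrin.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  void *area = aligned_alloc(64, 4096);  /* real size comes from CPUID leaf 0xD */
  if (!area) return 1;
  memset(area, 0, 4096);                 /* the XSAVE header must start zeroed */
  unsigned long long mask = 0x3;         /* bit 0: x87, bit 1: SSE */
  _xsave(area, mask);
  _xrstor(area, mask);
  free(area);
  return 0;
}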
diff --git a/lib/Headers/xsaveoptintrin.h b/lib/Headers/xsaveoptintrin.h
new file mode 100644
index 000000000000..d3faae78be4f
--- /dev/null
+++ b/lib/Headers/xsaveoptintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/lib/Headers/xsavesintrin.h b/lib/Headers/xsavesintrin.h
new file mode 100644
index 000000000000..c5e540a86edb
--- /dev/null
+++ b/lib/Headers/xsavesintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic ------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+ __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
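All three headers follow the same shape: a __target__ attribute gates each wrapper on the matching CPU feature, and the 64-bit variants are exposed only under __x86_64__. A minimal user-mode usage sketch for _xsave/_xrstor (buffer size and mask are illustrative; _xsaves/_xrstors additionally require CPL 0, so they are usable only from kernel code):

    /* Compile with -mxsave. */
    #include <immintrin.h>
    #include <stdint.h>

    /* XSAVE needs a 64-byte-aligned save area large enough for every
       state component enabled in the mask; 4096 bytes is generous. */
    static uint8_t save_area[4096] __attribute__((aligned(64)));

    void checkpoint_fp_state(void) {
      unsigned long long mask = 0x3; /* bit 0: x87, bit 1: SSE */
      _xsave(save_area, mask);       /* snapshot the selected components */
      /* ... code that clobbers FP/SSE state ... */
      _xrstor(save_area, mask);      /* restore the snapshot */
    }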
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index ef6aeefa6526..15f1696cbe91 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -481,7 +481,6 @@ void CommentASTToHTMLConverter::visitFullComment(const FullComment *C) {
Result << "</div>";
}
- Result.flush();
}
void CommentASTToHTMLConverter::visitNonStandaloneParagraphComment(
@@ -895,7 +894,7 @@ void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
FileID FID = LocInfo.first;
unsigned FileOffset = LocInfo.second;
- if (!FID.isInvalid()) {
+ if (FID.isValid()) {
if (const FileEntry *FE = SM.getFileEntryForID(FID)) {
Result << " file=\"";
appendToResultWithXMLEscaping(FE->getName());
@@ -1078,8 +1077,6 @@ void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
}
Result << RootEndTag;
-
- Result.flush();
}
void CommentASTToXMLConverter::appendToResultWithXMLEscaping(StringRef S) {
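The deleted Result.flush() calls (like the Out.flush() calls removed from USRGeneration.cpp below) are dead code now that llvm::raw_svector_ostream writes straight through to its SmallVector without buffering; a small sketch of why no flush is needed:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    void demo() {
      llvm::SmallString<64> Buf;
      llvm::raw_svector_ostream OS(Buf);
      OS << "</div>";
      // Buf already contains "</div>"; Buf.size() is accurate with no
      // intervening OS.flush() call.
    }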
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index b88421498959..2c26e4d82e08 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -38,18 +38,17 @@ public:
: DiagOpts(new DiagnosticOptions()),
Diagnostics(new DiagnosticsEngine(new DiagnosticIDs,
DiagOpts.get())),
- Files((FileSystemOptions())),
+ InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Files(FileSystemOptions(), InMemoryFileSystem),
Sources(*Diagnostics, Files),
Rewrite(Sources, Options) {
Diagnostics->setClient(new IgnoringDiagConsumer, true);
}
FileID createInMemoryFile(StringRef Name, StringRef Content) {
- std::unique_ptr<llvm::MemoryBuffer> Source =
- llvm::MemoryBuffer::getMemBuffer(Content);
- const FileEntry *Entry =
- Files.getVirtualFile(Name, Source->getBufferSize(), 0);
- Sources.overrideFileContents(Entry, std::move(Source));
+ InMemoryFileSystem->addFile(Name, 0,
+ llvm::MemoryBuffer::getMemBuffer(Content));
+ const FileEntry *Entry = Files.getFile(Name);
assert(Entry != nullptr);
return Sources.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
}
@@ -64,6 +63,7 @@ public:
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem;
FileManager Files;
SourceManager Sources;
Rewriter Rewrite;
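The same pattern in isolation: the file is registered with an in-memory VFS up front and then resolved through the FileManager like an ordinary file, rather than creating a virtual FileEntry and overriding its contents afterwards. A sketch (path and contents illustrative; vfs lives in the clang namespace in this tree):

    #include "clang/Basic/FileManager.h"
    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <cassert>

    void demo() {
      llvm::IntrusiveRefCntPtr<clang::vfs::InMemoryFileSystem> FS(
          new clang::vfs::InMemoryFileSystem);
      FS->addFile("/virtual/input.h", /*ModificationTime=*/0,
                  llvm::MemoryBuffer::getMemBuffer("int x;\n"));
      clang::FileManager Files(clang::FileSystemOptions(), FS);
      const clang::FileEntry *Entry = Files.getFile("/virtual/input.h");
      assert(Entry && "lookup is served entirely by the in-memory VFS");
      (void)Entry;
    }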
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 8cdd283ba5c5..c57694fc10a8 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -156,10 +156,8 @@ public:
//===----------------------------------------------------------------------===//
bool USRGenerator::EmitDeclName(const NamedDecl *D) {
- Out.flush();
const unsigned startSize = Buf.size();
D->printName(Out);
- Out.flush();
const unsigned endSize = Buf.size();
return startSize == endSize;
}
@@ -462,7 +460,6 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
}
Out << '@';
- Out.flush();
assert(Buf.size() > 0);
const unsigned off = Buf.size() - 1;
@@ -613,8 +610,18 @@ void USRGenerator::VisitType(QualType T) {
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
+ case BuiltinType::OCLImage2dDepth:
+ case BuiltinType::OCLImage2dArrayDepth:
+ case BuiltinType::OCLImage2dMSAA:
+ case BuiltinType::OCLImage2dArrayMSAA:
+ case BuiltinType::OCLImage2dMSAADepth:
+ case BuiltinType::OCLImage2dArrayMSAADepth:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLNDRange:
+ case BuiltinType::OCLReserveID:
case BuiltinType::OCLSampler:
IgnoreResults = true;
return;
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index 983dc18b57af..8a686a7f3d74 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -127,11 +127,12 @@ std::string HeaderSearch::getModuleFileName(Module *Module) {
std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
StringRef ModuleMapPath) {
- // If we don't have a module cache path, we can't do anything.
- if (ModuleCachePath.empty())
+ // If we don't have a module cache path or aren't supposed to use one, we
+ // can't do anything.
+ if (getModuleCachePath().empty())
return std::string();
- SmallString<256> Result(ModuleCachePath);
+ SmallString<256> Result(getModuleCachePath());
llvm::sys::fs::make_absolute(Result);
if (HSOpts->DisableModuleHash) {
@@ -153,7 +154,7 @@ std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
llvm::hash_code Hash =
llvm::hash_combine(DirName.lower(), FileName.lower(),
- HSOpts->ModuleFormat);
+ HSOpts->ModuleFormat, HSOpts->UseDebugInfo);
SmallString<128> HashStr;
llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
@@ -249,31 +250,22 @@ const char *DirectoryLookup::getName() const {
return getHeaderMap()->getFileName();
}
-static const FileEntry *
-getFileAndSuggestModule(HeaderSearch &HS, StringRef FileName,
- const DirectoryEntry *Dir, bool IsSystemHeaderDir,
- ModuleMap::KnownHeader *SuggestedModule) {
+const FileEntry *HeaderSearch::getFileAndSuggestModule(
+ StringRef FileName, const DirectoryEntry *Dir, bool IsSystemHeaderDir,
+ Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule) {
// If we have a module map that might map this header, load it and
// check whether we'll have a suggestion for a module.
- HS.hasModuleMap(FileName, Dir, IsSystemHeaderDir);
- if (SuggestedModule) {
- const FileEntry *File = HS.getFileMgr().getFile(FileName,
- /*OpenFile=*/false);
- if (File) {
- // If there is a module that corresponds to this header, suggest it.
- *SuggestedModule = HS.findModuleForHeader(File);
-
- // FIXME: This appears to be a no-op. We loaded the module map for this
- // directory at the start of this function.
- if (!SuggestedModule->getModule() &&
- HS.hasModuleMap(FileName, Dir, IsSystemHeaderDir))
- *SuggestedModule = HS.findModuleForHeader(File);
- }
+ const FileEntry *File = getFileMgr().getFile(FileName, /*OpenFile=*/true);
+ if (!File)
+ return nullptr;
- return File;
- }
+ // If there is a module that corresponds to this header, suggest it.
+ if (!findUsableModuleForHeader(File, Dir ? Dir : File->getDir(),
+ RequestingModule, SuggestedModule,
+ IsSystemHeaderDir))
+ return nullptr;
- return HS.getFileMgr().getFile(FileName, /*openFile=*/true);
+ return File;
}
/// LookupFile - Lookup the specified file in this search path, returning it
@@ -283,6 +275,7 @@ const FileEntry *DirectoryLookup::LookupFile(
HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
+ Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
bool &InUserSpecifiedSystemFramework,
bool &HasBeenMapped,
@@ -305,14 +298,15 @@ const FileEntry *DirectoryLookup::LookupFile(
RelativePath->append(Filename.begin(), Filename.end());
}
- return getFileAndSuggestModule(HS, TmpDir, getDir(),
- isSystemHeaderDirectory(),
- SuggestedModule);
+ return HS.getFileAndSuggestModule(TmpDir, getDir(),
+ isSystemHeaderDirectory(),
+ RequestingModule, SuggestedModule);
}
if (isFramework())
return DoFrameworkLookup(Filename, HS, SearchPath, RelativePath,
- SuggestedModule, InUserSpecifiedSystemFramework);
+ RequestingModule, SuggestedModule,
+ InUserSpecifiedSystemFramework);
assert(isHeaderMap() && "Unknown directory lookup");
const HeaderMap *HM = getHeaderMap();
@@ -404,13 +398,10 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
/// DoFrameworkLookup - Do a lookup of the specified file in the current
/// DirectoryLookup, which is a framework directory.
const FileEntry *DirectoryLookup::DoFrameworkLookup(
- StringRef Filename,
- HeaderSearch &HS,
- SmallVectorImpl<char> *SearchPath,
- SmallVectorImpl<char> *RelativePath,
+ StringRef Filename, HeaderSearch &HS, SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
- bool &InUserSpecifiedSystemFramework) const
-{
+ bool &InUserSpecifiedSystemFramework) const {
FileManager &FileMgr = HS.getFileMgr();
// Framework names must have a '/' in the filename.
@@ -522,27 +513,15 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
break;
} while (true);
+ bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
if (FoundFramework) {
- // Find the top-level framework based on this framework.
- SmallVector<std::string, 4> SubmodulePath;
- const DirectoryEntry *TopFrameworkDir
- = ::getTopFrameworkDir(FileMgr, FrameworkPath, SubmodulePath);
-
- // Determine the name of the top-level framework.
- StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
-
- // Load this framework module. If that succeeds, find the suggested module
- // for this header, if any.
- bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
- HS.loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystem);
-
- // FIXME: This can find a module not part of ModuleName, which is
- // important so that we're consistent about whether this header
- // corresponds to a module. Possibly we should lock down framework modules
- // so that this is not possible.
- *SuggestedModule = HS.findModuleForHeader(FE);
+ if (!HS.findUsableModuleForFrameworkHeader(
+ FE, FrameworkPath, RequestingModule, SuggestedModule, IsSystem))
+ return nullptr;
} else {
- *SuggestedModule = HS.findModuleForHeader(FE);
+ if (!HS.findUsableModuleForHeader(FE, getDir(), RequestingModule,
+ SuggestedModule, IsSystem))
+ return nullptr;
}
}
return FE;
@@ -588,7 +567,8 @@ const FileEntry *HeaderSearch::LookupFile(
const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
- ModuleMap::KnownHeader *SuggestedModule, bool SkipCache) {
+ Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
+ bool SkipCache) {
if (SuggestedModule)
*SuggestedModule = ModuleMap::KnownHeader();
@@ -606,13 +586,9 @@ const FileEntry *HeaderSearch::LookupFile(
RelativePath->append(Filename.begin(), Filename.end());
}
// Otherwise, just return the file.
- const FileEntry *File = FileMgr.getFile(Filename, /*openFile=*/true);
- if (File && SuggestedModule) {
- // If there is a module that corresponds to this header, suggest it.
- hasModuleMap(Filename, File->getDir(), /*SystemHeaderDir*/false);
- *SuggestedModule = findModuleForHeader(File);
- }
- return File;
+ return getFileAndSuggestModule(Filename, nullptr,
+ /*IsSystemHeaderDir*/false,
+ RequestingModule, SuggestedModule);
}
// This is the header that MSVC's header search would have found.
@@ -646,8 +622,8 @@ const FileEntry *HeaderSearch::LookupFile(
bool IncluderIsSystemHeader =
Includer && getFileInfo(Includer).DirInfo != SrcMgr::C_User;
if (const FileEntry *FE = getFileAndSuggestModule(
- *this, TmpDir, IncluderAndDir.second,
- IncluderIsSystemHeader, SuggestedModule)) {
+ TmpDir, IncluderAndDir.second, IncluderIsSystemHeader,
+ RequestingModule, SuggestedModule)) {
if (!Includer) {
assert(First && "only first includer can have no file");
return FE;
@@ -736,10 +712,10 @@ const FileEntry *HeaderSearch::LookupFile(
for (; i != SearchDirs.size(); ++i) {
bool InUserSpecifiedSystemFramework = false;
bool HasBeenMapped = false;
- const FileEntry *FE =
- SearchDirs[i].LookupFile(Filename, *this, SearchPath, RelativePath,
- SuggestedModule, InUserSpecifiedSystemFramework,
- HasBeenMapped, MappedName);
+ const FileEntry *FE = SearchDirs[i].LookupFile(
+ Filename, *this, SearchPath, RelativePath, RequestingModule,
+ SuggestedModule, InUserSpecifiedSystemFramework, HasBeenMapped,
+ MappedName);
if (HasBeenMapped) {
CacheLookup.MappedName =
copyString(Filename, LookupFileCache.getAllocator());
@@ -803,9 +779,10 @@ const FileEntry *HeaderSearch::LookupFile(
ScratchFilename += '/';
ScratchFilename += Filename;
- const FileEntry *FE = LookupFile(
- ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, CurDir,
- Includers.front(), SearchPath, RelativePath, SuggestedModule);
+ const FileEntry *FE =
+ LookupFile(ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir,
+ CurDir, Includers.front(), SearchPath, RelativePath,
+ RequestingModule, SuggestedModule);
if (checkMSVCHeaderSearch(Diags, MSFE, FE, IncludeLoc)) {
if (SuggestedModule)
@@ -841,6 +818,7 @@ LookupSubframeworkHeader(StringRef Filename,
const FileEntry *ContextFileEnt,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
+ Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule) {
assert(ContextFileEnt && "No context file?");
@@ -932,24 +910,10 @@ LookupSubframeworkHeader(StringRef Filename,
unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
getFileInfo(FE).DirInfo = DirInfo;
- // If we're supposed to suggest a module, look for one now.
- if (SuggestedModule) {
- // Find the top-level framework based on this framework.
- FrameworkName.pop_back(); // remove the trailing '/'
- SmallVector<std::string, 4> SubmodulePath;
- const DirectoryEntry *TopFrameworkDir
- = ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
-
- // Determine the name of the top-level framework.
- StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
-
- // Load this framework module. If that succeeds, find the suggested module
- // for this header, if any.
- bool IsSystem = false;
- if (loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystem)) {
- *SuggestedModule = findModuleForHeader(FE);
- }
- }
+ FrameworkName.pop_back(); // remove the trailing '/'
+ if (!findUsableModuleForFrameworkHeader(FE, FrameworkName, RequestingModule,
+ SuggestedModule, /*IsSystem*/ false))
+ return nullptr;
return FE;
}
@@ -962,77 +926,112 @@ LookupSubframeworkHeader(StringRef Filename,
/// header file info (\p HFI)
static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
const HeaderFileInfo &OtherHFI) {
+ assert(OtherHFI.External && "expected to merge external HFI");
+
HFI.isImport |= OtherHFI.isImport;
HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
HFI.isModuleHeader |= OtherHFI.isModuleHeader;
HFI.NumIncludes += OtherHFI.NumIncludes;
-
+
if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
HFI.ControllingMacro = OtherHFI.ControllingMacro;
HFI.ControllingMacroID = OtherHFI.ControllingMacroID;
}
-
- if (OtherHFI.External) {
- HFI.DirInfo = OtherHFI.DirInfo;
- HFI.External = OtherHFI.External;
- HFI.IndexHeaderMapHeader = OtherHFI.IndexHeaderMapHeader;
- }
+
+ HFI.DirInfo = OtherHFI.DirInfo;
+ HFI.External = (!HFI.IsValid || HFI.External);
+ HFI.IsValid = true;
+ HFI.IndexHeaderMapHeader = OtherHFI.IndexHeaderMapHeader;
if (HFI.Framework.empty())
HFI.Framework = OtherHFI.Framework;
-
- HFI.Resolved = true;
}
/// getFileInfo - Return the HeaderFileInfo structure for the specified
/// FileEntry.
HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
if (FE->getUID() >= FileInfo.size())
- FileInfo.resize(FE->getUID()+1);
-
- HeaderFileInfo &HFI = FileInfo[FE->getUID()];
- if (ExternalSource && !HFI.Resolved)
- mergeHeaderFileInfo(HFI, ExternalSource->GetHeaderFileInfo(FE));
- HFI.IsValid = 1;
- return HFI;
+ FileInfo.resize(FE->getUID() + 1);
+
+ HeaderFileInfo *HFI = &FileInfo[FE->getUID()];
+ // FIXME: Use a generation count to check whether this is really up to date.
+ if (ExternalSource && !HFI->Resolved) {
+ HFI->Resolved = true;
+ auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
+
+ HFI = &FileInfo[FE->getUID()];
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
+
+ HFI->IsValid = true;
+ // We have local information about this header file, so it's no longer
+ // strictly external.
+ HFI->External = false;
+ return *HFI;
}
-bool HeaderSearch::tryGetFileInfo(const FileEntry *FE,
- HeaderFileInfo &Result) const {
- if (FE->getUID() >= FileInfo.size())
- return false;
- const HeaderFileInfo &HFI = FileInfo[FE->getUID()];
- if (HFI.IsValid) {
- Result = HFI;
- return true;
+const HeaderFileInfo *
+HeaderSearch::getExistingFileInfo(const FileEntry *FE,
+ bool WantExternal) const {
+ // If we have an external source, ensure we have the latest information.
+ // FIXME: Use a generation count to check whether this is really up to date.
+ HeaderFileInfo *HFI;
+ if (ExternalSource) {
+ if (FE->getUID() >= FileInfo.size()) {
+ if (!WantExternal)
+ return nullptr;
+ FileInfo.resize(FE->getUID() + 1);
+ }
+
+ HFI = &FileInfo[FE->getUID()];
+ if (!WantExternal && (!HFI->IsValid || HFI->External))
+ return nullptr;
+ if (!HFI->Resolved) {
+ HFI->Resolved = true;
+ auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
+
+ HFI = &FileInfo[FE->getUID()];
+ if (ExternalHFI.External)
+ mergeHeaderFileInfo(*HFI, ExternalHFI);
+ }
+ } else if (FE->getUID() >= FileInfo.size()) {
+ return nullptr;
+ } else {
+ HFI = &FileInfo[FE->getUID()];
}
- return false;
+
+ if (!HFI->IsValid || (HFI->External && !WantExternal))
+ return nullptr;
+
+ return HFI;
}
bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
// Check if we've ever seen this file as a header.
- if (File->getUID() >= FileInfo.size())
- return false;
-
- // Resolve header file info from the external source, if needed.
- HeaderFileInfo &HFI = FileInfo[File->getUID()];
- if (ExternalSource && !HFI.Resolved)
- mergeHeaderFileInfo(HFI, ExternalSource->GetHeaderFileInfo(File));
-
- return HFI.isPragmaOnce || HFI.isImport ||
- HFI.ControllingMacro || HFI.ControllingMacroID;
+ if (auto *HFI = getExistingFileInfo(File))
+ return HFI->isPragmaOnce || HFI->isImport || HFI->ControllingMacro ||
+ HFI->ControllingMacroID;
+ return false;
}
void HeaderSearch::MarkFileModuleHeader(const FileEntry *FE,
ModuleMap::ModuleHeaderRole Role,
bool isCompilingModuleHeader) {
- if (FE->getUID() >= FileInfo.size())
- FileInfo.resize(FE->getUID()+1);
+ bool isModularHeader = !(Role & ModuleMap::TextualHeader);
+
+ // Don't mark the file info as non-external if there's nothing to change.
+ if (!isCompilingModuleHeader) {
+ if (!isModularHeader)
+ return;
+ auto *HFI = getExistingFileInfo(FE);
+ if (HFI && HFI->isModuleHeader)
+ return;
+ }
- HeaderFileInfo &HFI = FileInfo[FE->getUID()];
- HFI.isModuleHeader = true;
+ auto &HFI = getFileInfo(FE);
+ HFI.isModuleHeader |= isModularHeader;
HFI.isCompilingModuleHeader |= isCompilingModuleHeader;
- HFI.setHeaderRole(Role);
}
bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
@@ -1142,11 +1141,48 @@ HeaderSearch::findModuleForHeader(const FileEntry *File) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
// which includes whether the file is part of a module.
- (void)getFileInfo(File);
+ (void)getExistingFileInfo(File);
}
return ModMap.findModuleForHeader(File);
}
+bool HeaderSearch::findUsableModuleForHeader(
+ const FileEntry *File, const DirectoryEntry *Root, Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule, bool IsSystemHeaderDir) {
+ if (File && SuggestedModule) {
+ // If there is a module that corresponds to this header, suggest it.
+ hasModuleMap(File->getName(), Root, IsSystemHeaderDir);
+ *SuggestedModule = findModuleForHeader(File);
+ }
+ return true;
+}
+
+bool HeaderSearch::findUsableModuleForFrameworkHeader(
+ const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework) {
+ // If we're supposed to suggest a module, look for one now.
+ if (SuggestedModule) {
+ // Find the top-level framework based on this framework.
+ SmallVector<std::string, 4> SubmodulePath;
+ const DirectoryEntry *TopFrameworkDir
+ = ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+
+ // Determine the name of the top-level framework.
+ StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
+
+ // Load this framework module. If that succeeds, find the suggested module
+ // for this header, if any.
+ loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystemFramework);
+
+ // FIXME: This can find a module not part of ModuleName, which is
+ // important so that we're consistent about whether this header
+ // corresponds to a module. Possibly we should lock down framework modules
+ // so that this is not possible.
+ *SuggestedModule = findModuleForHeader(File);
+ }
+ return true;
+}
+
static const FileEntry *getPrivateModuleMap(const FileEntry *File,
FileManager &FileMgr) {
StringRef Filename = llvm::sys::path::filename(File->getName());
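The getModuleFileName change near the top of this file derives the cache file name from a hash that now also covers HSOpts->UseDebugInfo, so PCMs built with and without module debug info get distinct names instead of clobbering each other. A condensed sketch of the naming scheme (function name and inputs illustrative):

    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/Hashing.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include <string>

    std::string cacheLeaf(llvm::StringRef Dir, llvm::StringRef File,
                          llvm::StringRef Format, bool UseDebugInfo) {
      llvm::hash_code Hash = llvm::hash_combine(Dir.lower(), File.lower(),
                                                Format, UseDebugInfo);
      llvm::SmallString<128> HashStr;
      llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix=*/36);
      // e.g. "MyModule-1a2b3c.pcm"; flipping UseDebugInfo changes the hash.
      return (llvm::Twine("MyModule-") + HashStr + ".pcm").str();
    }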
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 4007914b6c08..27b0feb48270 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -235,7 +235,7 @@ static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
size_t Length = 0;
const char *BufEnd = BufPtr + Tok.getLength();
- if (Tok.is(tok::string_literal)) {
+ if (tok::isStringLiteral(Tok.getKind())) {
// Munch the encoding-prefix and opening double-quote.
while (BufPtr < BufEnd) {
unsigned Size;
@@ -1354,7 +1354,9 @@ void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
}
static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
- if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ if (LangOpts.AsmPreprocessor) {
+ return false;
+ } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
C11AllowedIDCharRanges);
return C11AllowedIDChars.contains(C);
@@ -1371,7 +1373,9 @@ static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
assert(isAllowedIDChar(C, LangOpts));
- if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ if (LangOpts.AsmPreprocessor) {
+ return false;
+ } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
C11DisallowedInitialIDCharRanges);
return !C11DisallowedInitialIDChars.contains(C);
@@ -1732,7 +1736,7 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
- Diag(BufferPtr, diag::ext_unterminated_string);
+ Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return true;
}
@@ -1756,7 +1760,7 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
// If a nul character existed in the string, warn about it.
if (NulCharacter && !isLexingRawMode())
- Diag(NulCharacter, diag::null_in_string);
+ Diag(NulCharacter, diag::null_in_char_or_string) << 1;
// Update the location of the token as well as the BufferPtr instance var.
const char *TokStart = BufferPtr;
@@ -1872,7 +1876,7 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
// If a nul character existed in the string, warn about it.
if (NulCharacter && !isLexingRawMode())
- Diag(NulCharacter, diag::null_in_string);
+ Diag(NulCharacter, diag::null_in_char_or_string) << 1;
// Update the location of token as well as BufferPtr.
const char *TokStart = BufferPtr;
@@ -1914,7 +1918,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
- Diag(BufferPtr, diag::ext_unterminated_char);
+ Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return true;
}
@@ -1938,7 +1942,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
// If a nul character existed in the character, warn about it.
if (NulCharacter && !isLexingRawMode())
- Diag(NulCharacter, diag::null_in_char);
+ Diag(NulCharacter, diag::null_in_char_or_string) << 0;
// Update the location of token as well as BufferPtr.
const char *TokStart = BufferPtr;
@@ -2956,8 +2960,11 @@ LexNextToken:
case 26: // DOS & CP/M EOF: "^Z".
// If we're in Microsoft extensions mode, treat this as end of file.
- if (LangOpts.MicrosoftExt)
+ if (LangOpts.MicrosoftExt) {
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
return LexEndOfFile(Result, CurPtr-1);
+ }
// If Microsoft extensions are disabled, this is just random garbage.
Kind = tok::unknown;
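Here and in LiteralSupport.cpp below, pairs of near-identical diagnostics are folded into one message built on %select, with the streamed integer choosing the alternative. A sketch of the idiom (the .td wording is illustrative, not the exact entry):

    // DiagnosticLexKinds.td, roughly:
    //   def null_in_char_or_string : ExtWarn<
    //     "null character(s) preserved in %select{char|string}0 literal">;
    //
    // Inside the Lexer, the trailing index picks the variant:
    //   Diag(NulCharacter, diag::null_in_char_or_string) << 0; // char
    //   Diag(NulCharacter, diag::null_in_char_or_string) << 1; // string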
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index aed91648799b..1e7858af8948 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -159,7 +159,7 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
// Check for overflow.
if (Overflow && Diags) // Too many digits to fit in
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- diag::err_hex_escape_too_large);
+ diag::err_escape_too_large) << 0;
break;
}
case '0': case '1': case '2': case '3':
@@ -182,7 +182,7 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- diag::err_octal_escape_too_large);
+ diag::err_escape_too_large) << 1;
ResultChar &= ~0U >> (32-CharWidth);
}
break;
@@ -538,7 +538,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// Done.
} else if (isHexDigit(*s) && !(*s == 'e' || *s == 'E')) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_invalid_decimal_digit) << StringRef(s, 1);
+ diag::err_invalid_digit) << StringRef(s, 1) << 0;
hadError = true;
return;
} else if (*s == '.') {
@@ -613,7 +613,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
break;
if (!isFPConstant) {
- // Allow i8, i16, i32, i64, and i128.
+ // Allow i8, i16, i32, and i64.
switch (s[1]) {
case '8':
s += 2; // i8 suffix
@@ -623,9 +623,6 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (s[2] == '6') {
s += 3; // i16 suffix
MicrosoftInteger = 16;
- } else if (s[2] == '2' && s[3] == '8') {
- s += 4; // i128 suffix
- MicrosoftInteger = 128;
}
break;
case '3':
@@ -683,9 +680,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// Report an error if there are any.
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin - ThisTokBegin),
- isFPConstant ? diag::err_invalid_suffix_float_constant :
- diag::err_invalid_suffix_integer_constant)
- << StringRef(SuffixBegin, ThisTokEnd-SuffixBegin);
+ diag::err_invalid_suffix_constant)
+ << StringRef(SuffixBegin, ThisTokEnd-SuffixBegin) << isFPConstant;
hadError = true;
return;
}
@@ -770,7 +766,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
if (noSignificand) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_hexconstant_requires_digits);
+ diag::err_hexconstant_requires) << 1;
hadError = true;
return;
}
@@ -797,7 +793,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
PP.Diag(TokLoc, diag::ext_hexconstant_invalid);
} else if (saw_period) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_hexconstant_requires_exponent);
+ diag::err_hexconstant_requires) << 0;
hadError = true;
}
return;
@@ -821,7 +817,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Done.
} else if (isHexDigit(*s)) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_binary_digit) << StringRef(s, 1);
+ diag::err_invalid_digit) << StringRef(s, 1) << 2;
hadError = true;
}
// Other suffixes will be diagnosed by the caller.
@@ -851,7 +847,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// the code is using an incorrect base.
if (isHexDigit(*s) && *s != 'e' && *s != 'E') {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_octal_digit) << StringRef(s, 1);
+ diag::err_invalid_digit) << StringRef(s, 1) << 1;
hadError = true;
return;
}
@@ -1420,10 +1416,23 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
ThisTokEnd -= ThisTokBuf - Prefix;
assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
- // Copy the string over
- if (CopyStringFragment(StringToks[i], ThisTokBegin,
- StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf)))
- hadError = true;
+ // C++14 [lex.string]p4: A source-file new-line in a raw string literal
+ // results in a new-line in the resulting execution string-literal.
+ StringRef RemainingTokenSpan(ThisTokBuf, ThisTokEnd - ThisTokBuf);
+ while (!RemainingTokenSpan.empty()) {
+ // Split the string literal on \r\n boundaries.
+ size_t CRLFPos = RemainingTokenSpan.find("\r\n");
+ StringRef BeforeCRLF = RemainingTokenSpan.substr(0, CRLFPos);
+ StringRef AfterCRLF = RemainingTokenSpan.substr(CRLFPos);
+
+ // Copy everything before the \r\n sequence into the string literal.
+ if (CopyStringFragment(StringToks[i], ThisTokBegin, BeforeCRLF))
+ hadError = true;
+
+ // Point into the \n inside the \r\n sequence and operate on the
+ // remaining portion of the literal.
+ RemainingTokenSpan = AfterCRLF.substr(1);
+ }
} else {
if (ThisTokBuf[0] != '"') {
// The file may have come from PCH and then changed after loading the
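The loop implements C++14 [lex.string]p4: a physical CR+LF inside a raw string literal contributes a single '\n' to the value. Assuming a source file saved with CRLF line endings, the assertion below holds:

    #include <cassert>
    #include <cstring>

    void crlf_demo() {
      // The raw literal spans a \r\n in the source file, but only the \n
      // survives into the execution string. (The continuation line is
      // intentionally unindented: raw literals keep leading spaces.)
      const char *Raw = R"(line1
line2)";
      assert(std::strcmp(Raw, "line1\nline2") == 0);
    }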
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 109b6c12b89b..0b4292fbeae5 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -154,16 +154,20 @@ void MacroInfo::dump() const {
Out << ")";
}
+ bool First = true;
for (const Token &Tok : ReplacementTokens) {
- Out << " ";
+ // Leading space is semantically meaningful in a macro definition,
+ // so preserve it in the dump output.
+ if (First || Tok.hasLeadingSpace())
+ Out << " ";
+ First = false;
+
if (const char *Punc = tok::getPunctuatorSpelling(Tok.getKind()))
Out << Punc;
- else if (const char *Kwd = tok::getKeywordSpelling(Tok.getKind()))
- Out << Kwd;
- else if (Tok.is(tok::identifier))
- Out << Tok.getIdentifierInfo()->getName();
else if (Tok.isLiteral() && Tok.getLiteralData())
Out << StringRef(Tok.getLiteralData(), Tok.getLength());
+ else if (auto *II = Tok.getIdentifierInfo())
+ Out << II->getName();
else
Out << Tok.getName();
}
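After this change, dump() reproduces the body's original inter-token spacing instead of forcing a space before every token, so the definition round-trips; e.g. (illustrative):

    #define SQUISHED(x) x+1   // body now dumped as "x+1", not "x + 1"
    #define SPACED(x)   x + 1 // body dumped with its spaces: "x + 1"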
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index 96d3e4b8fe65..a7524028a229 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -231,11 +231,9 @@ static bool violatesPrivateInclude(Module *RequestingModule,
assert((!IsPrivateRole || IsPrivate) && "inconsistent headers and roles");
}
#endif
- return IsPrivateRole &&
- // FIXME: Should we map RequestingModule to its top-level module here
- // too? This check is redundant with the isSubModuleOf check in
- // diagnoseHeaderInclusion.
- RequestedModule->getTopLevelModule() != RequestingModule;
+ return IsPrivateRole && (!RequestingModule ||
+ RequestedModule->getTopLevelModule() !=
+ RequestingModule->getTopLevelModule());
}
static Module *getTopLevelOrNull(Module *M) {
@@ -261,11 +259,6 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
HeadersMap::iterator Known = findKnownHeader(File);
if (Known != Headers.end()) {
for (const KnownHeader &Header : Known->second) {
- // If 'File' is part of 'RequestingModule' we can definitely include it.
- if (Header.getModule() &&
- Header.getModule()->isSubModuleOf(RequestingModule))
- return;
-
// Remember private headers for later printing of a diagnostic.
if (violatesPrivateInclude(RequestingModule, File, Header.getRole(),
Header.getModule())) {
@@ -320,6 +313,10 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
const ModuleMap::KnownHeader &Old) {
+ // Prefer available modules.
+ if (New.getModule()->isAvailable() && !Old.getModule()->isAvailable())
+ return true;
+
// Prefer a public header over a private header.
if ((New.getRole() & ModuleMap::PrivateHeader) !=
(Old.getRole() & ModuleMap::PrivateHeader))
@@ -349,15 +346,19 @@ ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File) {
// Prefer a header from the current module over all others.
if (H.getModule()->getTopLevelModule() == CompilingModule)
return MakeResult(H);
- // Cannot use a module if it is unavailable.
- if (!H.getModule()->isAvailable())
- continue;
if (!Result || isBetterKnownHeader(H, Result))
Result = H;
}
return MakeResult(Result);
}
+ return MakeResult(findOrCreateModuleForHeaderInUmbrellaDir(File));
+}
+
+ModuleMap::KnownHeader
+ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
+ assert(!Headers.count(File) && "already have a module for this header");
+
SmallVector<const DirectoryEntry *, 2> SkippedDirs;
KnownHeader H = findHeaderInUmbrellaDirs(File, SkippedDirs);
if (H) {
@@ -418,19 +419,22 @@ ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File) {
UmbrellaDirs[SkippedDirs[I]] = Result;
}
- Headers[File].push_back(KnownHeader(Result, NormalHeader));
-
- // If a header corresponds to an unavailable module, don't report
- // that it maps to anything.
- if (!Result->isAvailable())
- return KnownHeader();
-
- return MakeResult(Headers[File].back());
+ KnownHeader Header(Result, NormalHeader);
+ Headers[File].push_back(Header);
+ return Header;
}
return KnownHeader();
}
+ArrayRef<ModuleMap::KnownHeader>
+ModuleMap::findAllModulesForHeader(const FileEntry *File) const {
+ auto It = Headers.find(File);
+ if (It == Headers.end())
+ return None;
+ return It->second;
+}
+
bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) const {
return isHeaderUnavailableInModule(Header, nullptr);
}
@@ -577,9 +581,18 @@ static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
SmallString<128> LibName;
LibName += FrameworkDir->getName();
llvm::sys::path::append(LibName, Mod->Name);
- if (FileMgr.getFile(LibName)) {
- Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
- /*IsFramework=*/true));
+
+ // The library name of a framework has more than one possible extension since
+ // the introduction of the text-based dynamic library format. We need to check
+ // for both before we give up.
+ static const char *frameworkExtensions[] = {"", ".tbd"};
+ for (const auto *extension : frameworkExtensions) {
+ llvm::sys::path::replace_extension(LibName, extension);
+ if (FileMgr.getFile(LibName)) {
+ Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
+ /*IsFramework=*/true));
+ return;
+ }
}
}
@@ -785,15 +798,27 @@ static Module::HeaderKind headerRoleToKind(ModuleMap::ModuleHeaderRole Role) {
}
void ModuleMap::addHeader(Module *Mod, Module::Header Header,
- ModuleHeaderRole Role) {
- if (!(Role & TextualHeader)) {
- bool isCompilingModuleHeader = Mod->getTopLevelModule() == CompilingModule;
+ ModuleHeaderRole Role, bool Imported) {
+ KnownHeader KH(Mod, Role);
+
+ // Only add each header to the headers list once.
+ // FIXME: Should we diagnose if a header is listed twice in the
+ // same module definition?
+ auto &HeaderList = Headers[Header.Entry];
+ for (auto H : HeaderList)
+ if (H == KH)
+ return;
+
+ HeaderList.push_back(KH);
+ Mod->Headers[headerRoleToKind(Role)].push_back(std::move(Header));
+
+ bool isCompilingModuleHeader = Mod->getTopLevelModule() == CompilingModule;
+ if (!Imported || isCompilingModuleHeader) {
+ // When we import HeaderFileInfo, the external source is expected to
+ // set the isModuleHeader flag itself.
HeaderInfo.MarkFileModuleHeader(Header.Entry, Role,
isCompilingModuleHeader);
}
- Headers[Header.Entry].push_back(KnownHeader(Mod, Role));
-
- Mod->Headers[headerRoleToKind(Role)].push_back(std::move(Header));
}
void ModuleMap::excludeHeader(Module *Mod, Module::Header Header) {
@@ -1015,7 +1040,17 @@ namespace clang {
/// \brief The active module.
Module *ActiveModule;
-
+
+ /// \brief Whether a module uses the 'requires excluded' hack to mark its
+ /// contents as 'textual'.
+ ///
+ /// On older Darwin SDK versions, 'requires excluded' is used to mark the
+ /// contents of the Darwin.C.excluded (assert.h) and Tcl.Private modules as
+ /// non-modular headers. For backwards compatibility, we continue to
+ /// support this idiom for just these modules, and map the headers to
+ /// 'textual' to match the original intent.
+ llvm::SmallPtrSet<Module *, 2> UsesRequiresExcludedHack;
+
/// \brief Consume the current token and return its location.
SourceLocation consumeToken();
@@ -1570,6 +1605,38 @@ void ModuleMapParser::parseExternModuleDecl() {
: File->getDir(), ExternLoc);
}
+/// Whether to add the requirement \p Feature to the module \p M.
+///
+/// This preserves backwards compatibility for two hacks in the Darwin system
+/// module map files:
+///
+/// 1. The use of 'requires excluded' to make headers non-modular, which
+/// should really be mapped to 'textual' now that we have this feature. We
+/// drop the 'excluded' requirement, and set \p IsRequiresExcludedHack to
+/// true. Later, this bit will be used to map all the headers inside this
+/// module to 'textual'.
+///
+/// This affects Darwin.C.excluded (for assert.h) and Tcl.Private.
+///
+/// 2. Removes a bogus cplusplus requirement from IOKit.avc. This requirement
+/// was never correct and causes issues now that we check it, so drop it.
+static bool shouldAddRequirement(Module *M, StringRef Feature,
+ bool &IsRequiresExcludedHack) {
+ static const StringRef DarwinCExcluded[] = {"Darwin", "C", "excluded"};
+ static const StringRef TclPrivate[] = {"Tcl", "Private"};
+ static const StringRef IOKitAVC[] = {"IOKit", "avc"};
+
+ if (Feature == "excluded" && (M->fullModuleNameIs(DarwinCExcluded) ||
+ M->fullModuleNameIs(TclPrivate))) {
+ IsRequiresExcludedHack = true;
+ return false;
+ } else if (Feature == "cplusplus" && M->fullModuleNameIs(IOKitAVC)) {
+ return false;
+ }
+
+ return true;
+}
+
/// \brief Parse a requires declaration.
///
/// requires-declaration:
@@ -1605,9 +1672,18 @@ void ModuleMapParser::parseRequiresDecl() {
std::string Feature = Tok.getString();
consumeToken();
- // Add this feature.
- ActiveModule->addRequirement(Feature, RequiredState,
- Map.LangOpts, *Map.Target);
+ bool IsRequiresExcludedHack = false;
+ bool ShouldAddRequirement =
+ shouldAddRequirement(ActiveModule, Feature, IsRequiresExcludedHack);
+
+ if (IsRequiresExcludedHack)
+ UsesRequiresExcludedHack.insert(ActiveModule);
+
+ if (ShouldAddRequirement) {
+ // Add this feature.
+ ActiveModule->addRequirement(Feature, RequiredState, Map.LangOpts,
+ *Map.Target);
+ }
if (!Tok.is(MMToken::Comma))
break;
@@ -1657,9 +1733,16 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
consumeToken();
}
}
+
if (LeadingToken == MMToken::TextualKeyword)
Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
+ if (UsesRequiresExcludedHack.count(ActiveModule)) {
+ // Mark this header 'textual' (see doc comment for
+ // ModuleMapParser::UsesRequiresExcludedHack).
+ Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
+ }
+
if (LeadingToken != MMToken::HeaderKeyword) {
if (!Tok.is(MMToken::HeaderKeyword)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
@@ -1797,6 +1880,11 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
}
}
+static int compareModuleHeaders(const Module::Header *A,
+ const Module::Header *B) {
+ return A->NameAsWritten.compare(B->NameAsWritten);
+}
+
/// \brief Parse an umbrella directory declaration.
///
/// umbrella-dir-declaration:
@@ -1838,14 +1926,38 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
HadError = true;
return;
}
-
+
+ if (UsesRequiresExcludedHack.count(ActiveModule)) {
+ // Mark this header 'textual' (see doc comment for
+ // ModuleMapParser::UsesRequiresExcludedHack). Although iterating over the
+ // directory is relatively expensive, in practice this only applies to the
+ // uncommonly used Tcl module on Darwin platforms.
+ std::error_code EC;
+ SmallVector<Module::Header, 6> Headers;
+ for (llvm::sys::fs::recursive_directory_iterator I(Dir->getName(), EC), E;
+ I != E && !EC; I.increment(EC)) {
+ if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
+
+ Module::Header Header = {I->path(), FE};
+ Headers.push_back(std::move(Header));
+ }
+ }
+
+ // Sort header paths so that the pcm doesn't depend on iteration order.
+ llvm::array_pod_sort(Headers.begin(), Headers.end(), compareModuleHeaders);
+
+ for (auto &Header : Headers)
+ Map.addHeader(ActiveModule, std::move(Header), ModuleMap::TextualHeader);
+ return;
+ }
+
if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
<< OwningModule->getFullModuleName();
HadError = true;
return;
- }
-
+ }
+
// Record this umbrella directory.
Map.setUmbrellaDir(ActiveModule, Dir, DirName);
}
@@ -2335,9 +2447,14 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
// Parse this module map file.
Lexer L(ID, SourceMgr.getBuffer(ID), SourceMgr, MMapLangOpts);
+ SourceLocation Start = L.getSourceLocation();
ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
BuiltinIncludeDir, IsSystem);
bool Result = Parser.parseModuleMapFile();
ParsedModuleMap[File] = Result;
+
+ // Notify callbacks that we parsed it.
+ for (const auto &Cb : Callbacks)
+ Cb->moduleMapFileRead(Start, *File, IsSystem);
return Result;
}
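The .tbd probing in inferFrameworkLink leans on llvm::sys::path::replace_extension: an empty extension strips any existing one, so the Mach-O binary is probed first and the text-based stub second. A sketch of the path manipulation (paths illustrative):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    void probe() {
      llvm::SmallString<128> LibName(
          "/System/Library/Frameworks/Foo.framework/Foo");
      llvm::sys::path::replace_extension(LibName, "");
      // unchanged: the filename "Foo" has no extension to strip
      llvm::sys::path::replace_extension(LibName, ".tbd");
      // now ".../Foo.framework/Foo.tbd", the text-based stub
    }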
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index ce64538de41b..c02a0cb8d302 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -611,6 +611,8 @@ const FileEntry *Preprocessor::LookupFile(
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule,
bool SkipCache) {
+ Module *RequestingModule = getModuleForLocation(FilenameLoc);
+
// If the header lookup mechanism may be relative to the current inclusion
// stack, record the parent #includes.
SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
@@ -648,8 +650,7 @@ const FileEntry *Preprocessor::LookupFile(
for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
IncludeStackInfo &ISEntry = IncludeMacroStack[e - i - 1];
if (IsFileLexer(ISEntry))
- if ((FileEnt = SourceMgr.getFileEntryForID(
- ISEntry.ThePPLexer->getFileID())))
+ if ((FileEnt = ISEntry.ThePPLexer->getFileEntry()))
Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
}
}
@@ -664,8 +665,8 @@ const FileEntry *Preprocessor::LookupFile(
const DirectoryLookup *TmpFromDir = nullptr;
while (const FileEntry *FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
- Includers, SearchPath, RelativePath, SuggestedModule,
- SkipCache)) {
+ Includers, SearchPath, RelativePath, RequestingModule,
+ SuggestedModule, SkipCache)) {
// Keep looking as if this file did a #include_next.
TmpFromDir = TmpCurDir;
++TmpFromDir;
@@ -681,11 +682,11 @@ const FileEntry *Preprocessor::LookupFile(
// Do a standard file entry lookup.
const FileEntry *FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
- RelativePath, SuggestedModule, SkipCache);
+ RelativePath, RequestingModule, SuggestedModule, SkipCache);
if (FE) {
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- getModuleForLocation(FilenameLoc), FilenameLoc, Filename, FE);
+ RequestingModule, FilenameLoc, Filename, FE);
return FE;
}
@@ -694,13 +695,14 @@ const FileEntry *Preprocessor::LookupFile(
// to one of the headers on the #include stack. Walk the list of the current
// headers on the #include stack and pass them to HeaderInfo.
if (IsFileLexer()) {
- if ((CurFileEnt = SourceMgr.getFileEntryForID(CurPPLexer->getFileID()))) {
+ if ((CurFileEnt = CurPPLexer->getFileEntry())) {
if ((FE = HeaderInfo.LookupSubframeworkHeader(Filename, CurFileEnt,
SearchPath, RelativePath,
+ RequestingModule,
SuggestedModule))) {
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- getModuleForLocation(FilenameLoc), FilenameLoc, Filename, FE);
+ RequestingModule, FilenameLoc, Filename, FE);
return FE;
}
}
@@ -709,14 +711,13 @@ const FileEntry *Preprocessor::LookupFile(
for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
if (IsFileLexer(ISEntry)) {
- if ((CurFileEnt =
- SourceMgr.getFileEntryForID(ISEntry.ThePPLexer->getFileID()))) {
+ if ((CurFileEnt = ISEntry.ThePPLexer->getFileEntry())) {
if ((FE = HeaderInfo.LookupSubframeworkHeader(
Filename, CurFileEnt, SearchPath, RelativePath,
- SuggestedModule))) {
+ RequestingModule, SuggestedModule))) {
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- getModuleForLocation(FilenameLoc), FilenameLoc, Filename, FE);
+ RequestingModule, FilenameLoc, Filename, FE);
return FE;
}
}
@@ -1674,6 +1675,29 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
getLangOpts().CurrentModule &&
SuggestedModule.getModule()->getTopLevelModuleName() !=
getLangOpts().ImplementationOfModule) {
+
+ // If this include corresponds to a module but that module is
+ // unavailable, diagnose the situation and bail out.
+ if (!SuggestedModule.getModule()->isAvailable()) {
+ clang::Module::Requirement Requirement;
+ clang::Module::UnresolvedHeaderDirective MissingHeader;
+ Module *M = SuggestedModule.getModule();
+ // Identify the cause.
+ (void)M->isAvailable(getLangOpts(), getTargetInfo(), Requirement,
+ MissingHeader);
+ if (MissingHeader.FileNameLoc.isValid()) {
+ Diag(MissingHeader.FileNameLoc, diag::err_module_header_missing)
+ << MissingHeader.IsUmbrella << MissingHeader.FileName;
+ } else {
+ Diag(M->DefinitionLoc, diag::err_module_unavailable)
+ << M->getFullModuleName() << Requirement.second << Requirement.first;
+ }
+ Diag(FilenameTok.getLocation(),
+ diag::note_implicit_top_level_module_import_here)
+ << M->getTopLevelModuleName();
+ return;
+ }
+
// Compute the module access path corresponding to this module.
// FIXME: Should we have a second loadModule() overload to avoid this
// extra lookup step?
@@ -1776,7 +1800,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (IncludePos.isMacroID())
IncludePos = SourceMgr.getExpansionRange(IncludePos).second;
FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
- assert(!FID.isInvalid() && "Expected valid file ID");
+ assert(FID.isValid() && "Expected valid file ID");
// If all is good, enter the new file!
if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
@@ -1925,7 +1949,7 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
// Add the __VA_ARGS__ identifier as an argument.
Arguments.push_back(Ident__VA_ARGS__);
MI->setIsC99Varargs();
- MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ MI->setArgumentList(Arguments, BP);
return false;
case tok::eod: // #define X(
Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
@@ -1959,7 +1983,7 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
Diag(Tok, diag::err_pp_expected_comma_in_arg_list);
return true;
case tok::r_paren: // #define X(A)
- MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ MI->setArgumentList(Arguments, BP);
return false;
case tok::comma: // #define X(A,
break;
@@ -1975,7 +1999,7 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
}
MI->setIsGNUVarargs();
- MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ MI->setArgumentList(Arguments, BP);
return false;
}
}
@@ -2019,13 +2043,9 @@ static bool isConfigurationPattern(Token &MacroName, MacroInfo *MI,
}
// #define inline
- if (MacroName.isOneOf(tok::kw_extern, tok::kw_inline, tok::kw_static,
- tok::kw_const) &&
- MI->getNumTokens() == 0) {
- return true;
- }
-
- return false;
+ return MacroName.isOneOf(tok::kw_extern, tok::kw_inline, tok::kw_static,
+ tok::kw_const) &&
+ MI->getNumTokens() == 0;
}
/// HandleDefineDirective - Implements \#define. This consumes the entire macro
@@ -2240,6 +2260,30 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
// Finally, if this identifier already had a macro defined for it, verify that
// the macro bodies are identical, and issue diagnostics if they are not.
if (const MacroInfo *OtherMI=getMacroInfo(MacroNameTok.getIdentifierInfo())) {
+ // In Objective-C, ignore attempts to directly redefine the builtin
+ // definitions of the ownership qualifiers. It's still possible to
+ // #undef them.
+ auto isObjCProtectedMacro = [](const IdentifierInfo *II) -> bool {
+ return II->isStr("__strong") ||
+ II->isStr("__weak") ||
+ II->isStr("__unsafe_unretained") ||
+ II->isStr("__autoreleasing");
+ };
+ if (getLangOpts().ObjC1 &&
+ SourceMgr.getFileID(OtherMI->getDefinitionLoc())
+ == getPredefinesFileID() &&
+ isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
+ // Warn if it changes the tokens.
+ if ((!getDiagnostics().getSuppressSystemWarnings() ||
+ !SourceMgr.isInSystemHeader(DefineTok.getLocation())) &&
+ !MI->isIdenticalTo(*OtherMI, *this,
+ /*Syntactic=*/LangOpts.MicrosoftExt)) {
+ Diag(MI->getDefinitionLoc(), diag::warn_pp_objc_macro_redef_ignored);
+ }
+ assert(!OtherMI->isWarnIfUnused());
+ return;
+ }
+
// It is very common for system headers to have tons of macro redefinitions
// and for warnings to be disabled in system headers. If this is the case,
// then don't bother calling MacroInfo::isIdenticalTo.
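Under ARC the ownership qualifiers arrive as predefined macros, so a stray user redefinition used to silently change their meaning; it is now dropped (with a warning when the replacement body differs), while #undef keeps working. Illustrative, assuming -fobjc-arc:

    // The predefines map __weak and friends onto objc_ownership
    // attributes. This redefinition is now ignored with a warning:
    #define __weak
    // An explicit #undef is still honored:
    #undef __weak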
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 44513023395d..c40598c06756 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -42,7 +42,7 @@ public:
unsigned getBitWidth() const { return Val.getBitWidth(); }
bool isUnsigned() const { return Val.isUnsigned(); }
- const SourceRange &getRange() const { return Range; }
+ SourceRange getRange() const { return Range; }
void setRange(SourceLocation L) { Range.setBegin(L); Range.setEnd(L); }
void setRange(SourceLocation B, SourceLocation E) {
@@ -549,12 +549,12 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
// value was negative, warn about it.
if (ValueLive && Res.isUnsigned()) {
if (!LHS.isUnsigned() && LHS.Val.isNegative())
- PP.Diag(OpLoc, diag::warn_pp_convert_lhs_to_positive)
+ PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 0
<< LHS.Val.toString(10, true) + " to " +
LHS.Val.toString(10, false)
<< LHS.getRange() << RHS.getRange();
if (!RHS.isUnsigned() && RHS.Val.isNegative())
- PP.Diag(OpLoc, diag::warn_pp_convert_rhs_to_positive)
+ PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 1
<< RHS.Val.toString(10, true) + " to " +
RHS.Val.toString(10, false)
<< LHS.getRange() << RHS.getRange();
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index c231e18eecc3..2f09841c5b5d 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -18,6 +18,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PTHManager.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -120,7 +121,7 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
CurSubmodule = nullptr;
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_Lexer;
-
+
// Notify the client, if desired, that we are in a new source file.
if (Callbacks && !CurLexer->Is_PragmaLexer) {
SrcMgr::CharacteristicKind FileType =
@@ -300,8 +301,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (const IdentifierInfo *ControllingMacro =
CurPPLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
// Okay, this has a controlling macro, remember in HeaderFileInfo.
- if (const FileEntry *FE =
- SourceMgr.getFileEntryForID(CurPPLexer->getFileID())) {
+ if (const FileEntry *FE = CurPPLexer->getFileEntry()) {
HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
if (MacroInfo *MI =
getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro))) {
@@ -561,7 +561,6 @@ void Preprocessor::RemoveTopOfLexerStack() {
void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
assert(CurTokenLexer && !CurPPLexer &&
"Pasted comment can only be formed from macro");
-
// We handle this by scanning for the closest real lexer, switching it to
// raw mode and preprocessor mode. This will cause it to return \n as an
// explicit EOD token.
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 64ce8c918258..18348df0a39e 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -145,8 +145,12 @@ void Preprocessor::updateModuleMacroInfo(const IdentifierInfo *II,
NumHiddenOverrides[O] = -1;
// Collect all macros that are not overridden by a visible macro.
- llvm::SmallVector<ModuleMacro *, 16> Worklist(Leaf->second.begin(),
- Leaf->second.end());
+ llvm::SmallVector<ModuleMacro *, 16> Worklist;
+ for (auto *LeafMM : Leaf->second) {
+ assert(LeafMM->getNumOverridingMacros() == 0 && "leaf macro overridden");
+ if (NumHiddenOverrides.lookup(LeafMM) == 0)
+ Worklist.push_back(LeafMM);
+ }
while (!Worklist.empty()) {
auto *MM = Worklist.pop_back_val();
if (CurSubmoduleState->VisibleModules.isVisible(MM->getOwningModule())) {
@@ -593,9 +597,7 @@ static bool CheckMatchedBrackets(const SmallVectorImpl<Token> &Tokens) {
Brackets.pop_back();
}
}
- if (!Brackets.empty())
- return false;
- return true;
+ return Brackets.empty();
}
/// GenerateNewArgTokens - Returns true if OldTokens can be converted to a new
@@ -867,7 +869,7 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
DiagnosticBuilder DB =
Diag(MacroName,
diag::note_init_list_at_beginning_of_macro_argument);
- for (const SourceRange &Range : InitLists)
+ for (SourceRange Range : InitLists)
DB << Range;
}
return nullptr;
@@ -876,7 +878,7 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
return nullptr;
DiagnosticBuilder DB = Diag(MacroName, diag::note_suggest_parens_for_macro);
- for (const SourceRange &ParenLocation : ParenHints) {
+ for (SourceRange ParenLocation : ParenHints) {
DB << FixItHint::CreateInsertion(ParenLocation.getBegin(), "(");
DB << FixItHint::CreateInsertion(ParenLocation.getEnd(), ")");
}
@@ -1057,6 +1059,9 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("attribute_availability", true)
.Case("attribute_availability_with_message", true)
.Case("attribute_availability_app_extension", true)
+ .Case("attribute_availability_with_version_underscores", true)
+ .Case("attribute_availability_tvos", true)
+ .Case("attribute_availability_watchos", true)
.Case("attribute_cf_returns_not_retained", true)
.Case("attribute_cf_returns_retained", true)
.Case("attribute_cf_returns_on_parameters", true)
@@ -1075,7 +1080,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("blocks", LangOpts.Blocks)
.Case("c_thread_safety_attributes", true)
.Case("cxx_exceptions", LangOpts.CXXExceptions)
- .Case("cxx_rtti", LangOpts.RTTI)
+ .Case("cxx_rtti", LangOpts.RTTI && LangOpts.RTTIData)
.Case("enumerator_attributes", true)
.Case("nullability", true)
.Case("memory_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Memory))
@@ -1084,7 +1089,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
// Objective-C features
.Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
- .Case("objc_arc_weak", LangOpts.ObjCARCWeak)
+ .Case("objc_arc_weak", LangOpts.ObjCWeak)
.Case("objc_default_synthesize_properties", LangOpts.ObjC2)
.Case("objc_fixed_enum", LangOpts.ObjC2)
.Case("objc_instancetype", LangOpts.ObjC2)
@@ -1629,16 +1634,23 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Value = FeatureII->getTokenID() == tok::identifier;
else if (II == Ident__has_builtin) {
// Check for a builtin is trivial.
- Value = FeatureII->getBuiltinID() != 0;
+ if (FeatureII->getBuiltinID() != 0) {
+ Value = true;
+ } else {
+ StringRef Feature = FeatureII->getName();
+ Value = llvm::StringSwitch<bool>(Feature)
+ .Case("__make_integer_seq", getLangOpts().CPlusPlus)
+ .Default(false);
+ }
} else if (II == Ident__has_attribute)
Value = hasAttribute(AttrSyntax::GNU, nullptr, FeatureII,
- getTargetInfo().getTriple(), getLangOpts());
+ getTargetInfo(), getLangOpts());
else if (II == Ident__has_cpp_attribute)
Value = hasAttribute(AttrSyntax::CXX, ScopeII, FeatureII,
- getTargetInfo().getTriple(), getLangOpts());
+ getTargetInfo(), getLangOpts());
else if (II == Ident__has_declspec)
Value = hasAttribute(AttrSyntax::Declspec, nullptr, FeatureII,
- getTargetInfo().getTriple(), getLangOpts());
+ getTargetInfo(), getLangOpts());
else if (II == Ident__has_extension)
Value = HasExtension(*this, FeatureII);
else {
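__make_integer_seq is a builtin template rather than a builtin function, so it has no builtin ID and needs this StringSwitch fallback for __has_builtin to report it. What user code can now detect (integer_seq is a stand-in for a std::integer_sequence-like template):

    #if __has_builtin(__make_integer_seq)
    template <class T, T... Ns> struct integer_seq {};
    // __make_integer_seq<S, T, N> expands to S<T, 0, 1, ..., N-1>:
    using Seq = __make_integer_seq<integer_seq, int, 3>; // integer_seq<int, 0, 1, 2>
    #endif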
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 5eb665549e86..3134790ccb90 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -38,7 +38,7 @@ PragmaHandler::~PragmaHandler() {
// EmptyPragmaHandler Implementation.
//===----------------------------------------------------------------------===//
-EmptyPragmaHandler::EmptyPragmaHandler() {}
+EmptyPragmaHandler::EmptyPragmaHandler(StringRef Name) : PragmaHandler(Name) {}
void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
@@ -191,9 +191,13 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
Lex(Tok);
if (!tok::isStringLiteral(Tok.getKind())) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- // Skip this token, and the ')', if present.
+ // Skip bad tokens, and the ')', if present.
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::eof))
Lex(Tok);
+ while (Tok.isNot(tok::r_paren) &&
+ !Tok.isAtStartOfLine() &&
+ Tok.isNot(tok::eof))
+ Lex(Tok);
if (Tok.is(tok::r_paren))
Lex(Tok);
return _PragmaLexing.failed();
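The added loop tightens recovery when _Pragma's operand is not a string literal: rather than skipping a single token, the parser now discards tokens until it reaches ')', the start of the next line, or EOF. Roughly:

    _Pragma(pack(1))   // malformed: the operand must be a string literal;
                       // the bad tokens are now skipped up to the ')'
    _Pragma("pack(1)") // well-formed, unaffected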
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index a423041a2d95..32e6de69f0db 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -66,7 +66,7 @@ PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
SourceManager &SM) {
- assert(!FID.isInvalid());
+ assert(FID.isValid());
if (!PPE)
return false;
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index e2db638a33d0..142d9ce09049 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -38,6 +38,7 @@
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorOptions.h"
@@ -62,20 +63,19 @@ Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
IdentifierInfoLookup *IILookup, bool OwnsHeaders,
TranslationUnitKind TUKind)
: PPOpts(PPOpts), Diags(&diags), LangOpts(opts), Target(nullptr),
- FileMgr(Headers.getFileMgr()), SourceMgr(SM),
- ScratchBuf(new ScratchBuffer(SourceMgr)),HeaderInfo(Headers),
+ AuxTarget(nullptr), FileMgr(Headers.getFileMgr()), SourceMgr(SM),
+ ScratchBuf(new ScratchBuffer(SourceMgr)), HeaderInfo(Headers),
TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
Identifiers(opts, IILookup),
PragmaHandlers(new PragmaNamespace(StringRef())),
- IncrementalProcessing(false), TUKind(TUKind),
- CodeComplete(nullptr), CodeCompletionFile(nullptr),
- CodeCompletionOffset(0), LastTokenWasAt(false),
- ModuleImportExpectsIdentifier(false), CodeCompletionReached(0),
- MainFileDir(nullptr), SkipMainFilePreamble(0, true), CurPPLexer(nullptr),
- CurDirLookup(nullptr), CurLexerKind(CLK_Lexer), CurSubmodule(nullptr),
- Callbacks(nullptr), CurSubmoduleState(&NullSubmoduleState),
- MacroArgCache(nullptr), Record(nullptr),
- MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
+ IncrementalProcessing(false), TUKind(TUKind), CodeComplete(nullptr),
+ CodeCompletionFile(nullptr), CodeCompletionOffset(0),
+ LastTokenWasAt(false), ModuleImportExpectsIdentifier(false),
+ CodeCompletionReached(0), MainFileDir(nullptr),
+ SkipMainFilePreamble(0, true), CurPPLexer(nullptr), CurDirLookup(nullptr),
+ CurLexerKind(CLK_Lexer), CurSubmodule(nullptr), Callbacks(nullptr),
+ CurSubmoduleState(&NullSubmoduleState), MacroArgCache(nullptr),
+ Record(nullptr), MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
OwnsHeaderSearch = OwnsHeaders;
CounterValue = 0; // __COUNTER__ starts at 0.
@@ -170,13 +170,18 @@ Preprocessor::~Preprocessor() {
delete &HeaderInfo;
}
-void Preprocessor::Initialize(const TargetInfo &Target) {
+void Preprocessor::Initialize(const TargetInfo &Target,
+ const TargetInfo *AuxTarget) {
assert((!this->Target || this->Target == &Target) &&
"Invalid override of target information");
this->Target = &Target;
-
+
+ assert((!this->AuxTarget || this->AuxTarget == AuxTarget) &&
+ "Invalid override of aux target information.");
+ this->AuxTarget = AuxTarget;
+
// Initialize information about built-ins.
- BuiltinInfo.InitializeTarget(Target);
+ BuiltinInfo.InitializeTarget(Target, AuxTarget);
HeaderInfo.setTarget(Target);
}
@@ -515,7 +520,7 @@ void Preprocessor::EnterMainSourceFile() {
llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
assert(SB && "Cannot create predefined source buffer");
FileID FID = SourceMgr.createFileID(std::move(SB));
- assert(!FID.isInvalid() && "Could not create FileID for predefines?");
+ assert(FID.isValid() && "Could not create FileID for predefines?");
setPredefinesFileID(FID);
// Start parsing the predefines.
@@ -712,7 +717,7 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
}
void Preprocessor::Lex(Token &Result) {
- // We loop here until a lex function retuns a token; this avoids recursion.
+ // We loop here until a lex function returns a token; this avoids recursion.
bool ReturnedToken;
do {
switch (CurLexerKind) {
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index e7512fa2831a..c42966928e52 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -624,21 +624,22 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// error. This occurs with "x ## +" and other stuff. Return with Tok
// unmodified and with RHS as the next token to lex.
if (isInvalid) {
+ // Explicitly convert the token location to have proper expansion
+ // information so that the user knows where it came from.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation Loc =
+ SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2);
+
// Test for the Microsoft extension of /##/ turning into // here on the
// error path.
if (PP.getLangOpts().MicrosoftExt && Tok.is(tok::slash) &&
RHS.is(tok::slash)) {
- HandleMicrosoftCommentPaste(Tok);
+ HandleMicrosoftCommentPaste(Tok, Loc);
return true;
}
// Do not emit the error when preprocessing assembler code.
if (!PP.getLangOpts().AsmPreprocessor) {
- // Explicitly convert the token location to have proper expansion
- // information so that the user knows where it came from.
- SourceManager &SM = PP.getSourceManager();
- SourceLocation Loc =
- SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2);
// If we're in microsoft extensions mode, downgrade this from a hard
// error to an extension that defaults to an error. This allows
// disabling it.
@@ -719,7 +720,9 @@ bool TokenLexer::isParsingPreprocessorDirective() const {
/// macro, other active macros, and anything left on the current physical
/// source line of the expanded buffer. Handle this by returning the
/// first token on the next line.
-void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok) {
+void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok, SourceLocation OpLoc) {
+ PP.Diag(OpLoc, diag::ext_comment_paste_microsoft);
+
// We 'comment out' the rest of this macro by just ignoring the rest of the
// tokens that have not been lexed yet, if any.
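Hoisting the expansion-location computation lets HandleMicrosoftCommentPaste report the new ext_comment_paste_microsoft diagnostic at a location the user can trace back to the macro. The Microsoft extension in question turns a '/' '/' paste into a line comment:

    // Accepted under -fms-extensions:
    #define COMMENT /##/
    COMMENT the rest of this line is swallowed as a comment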
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index 6727afc1dd6c..ccf947984945 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -30,6 +30,21 @@ using namespace clang;
namespace {
+/// Resets LLVM's pretty stack state so that stack traces are printed correctly
+/// when there are nested CrashRecoveryContexts and the inner one recovers from
+/// a crash.
+class ResetStackCleanup
+ : public llvm::CrashRecoveryContextCleanupBase<ResetStackCleanup,
+ const void> {
+public:
+ ResetStackCleanup(llvm::CrashRecoveryContext *Context, const void *Top)
+ : llvm::CrashRecoveryContextCleanupBase<ResetStackCleanup, const void>(
+ Context, Top) {}
+ void recoverResources() override {
+ llvm::RestorePrettyStackState(resource);
+ }
+};
+
/// If a crash happens while the parser is active, an entry is printed for it.
class PrettyStackTraceParserEntry : public llvm::PrettyStackTraceEntry {
const Parser &P;
@@ -113,6 +128,8 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
new Parser(S.getPreprocessor(), S, SkipFunctionBodies));
Parser &P = *ParseOP.get();
+ llvm::CrashRecoveryContextCleanupRegistrar<const void, ResetStackCleanup>
+ CleanupPrettyStack(llvm::SavePrettyStackState());
PrettyStackTraceParserEntry CrashInfo(P);
// Recover resources if we crash before exiting this method.
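A rough sketch of the scenario being fixed, assuming the usual llvm::CrashRecoveryContext API: when a nested context recovers from a crash, the pretty-stack state saved on entry must be restored, or the outer context would print stale parser entries.

    llvm::CrashRecoveryContext CRC;
    CRC.RunSafely([] {
      // The parser runs here. If it crashes, registered cleanups fire and
      // ResetStackCleanup restores the previously saved pretty-stack state.
    });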
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index ab1f97d31a69..e536644d5bf6 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -67,8 +67,9 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
SourceLocation KWEndLoc = Tok.getEndLoc().getLocWithOffset(-1);
if (TryConsumeToken(tok::kw_delete, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_deleted_function
- : diag::ext_deleted_function);
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 1 /* deleted */;
Actions.SetDeclDeleted(FnD, KWLoc);
Delete = true;
if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) {
@@ -76,8 +77,9 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
}
} else if (TryConsumeToken(tok::kw_default, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_defaulted_function
- : diag::ext_defaulted_function);
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 0 /* defaulted */;
Actions.SetDeclDefaulted(FnD, KWLoc);
if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) {
DeclAsFunction->setRangeEnd(KWEndLoc);
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 45878b9b1508..e69bb2745c43 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -1,4 +1,4 @@
-//===--- ParseDecl.cpp - Declaration Parsing ------------------------------===//
+//===--- ParseDecl.cpp - Declaration Parsing --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,9 +24,11 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -67,7 +69,6 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
-
/// isAttributeLateParsed - Return true if the attribute has arguments that
/// require late parsing.
static bool isAttributeLateParsed(const IdentifierInfo &II) {
@@ -387,7 +388,7 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
// If the attribute isn't known, we will not attempt to parse any
// arguments.
if (!hasAttribute(AttrSyntax::Declspec, nullptr, AttrName,
- getTargetInfo().getTriple(), getLangOpts())) {
+ getTargetInfo(), getLangOpts())) {
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
SkipUntil(tok::r_paren);
@@ -532,9 +533,7 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
/// extended-decl-modifier extended-decl-modifier-seq
void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End) {
- assert((getLangOpts().MicrosoftExt || getLangOpts().Borland ||
- getLangOpts().CUDA) &&
- "Incorrect language options for parsing __declspec");
+ assert(getLangOpts().DeclSpecKeyword && "__declspec keyword is not enabled");
assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
while (Tok.is(tok::kw___declspec)) {
@@ -1155,7 +1154,6 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
Class.TagOrTemplate);
}
-
/// \brief Parse all attributes in LAs, and attach them to Decl D.
void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition) {
@@ -1170,7 +1168,6 @@ void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
LAs.clear();
}
-
/// \brief Finish parsing an attribute for which parsing was delayed.
/// This will be called at the end of parsing a class declaration
/// for each LateParsedAttribute. We consume the saved tokens and
@@ -1468,15 +1465,13 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
ProhibitAttributes(attrs);
SourceLocation InlineLoc = ConsumeToken();
- SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
- break;
+ return ParseNamespace(Context, DeclEnd, InlineLoc);
}
return ParseSimpleDeclaration(Context, DeclEnd, attrs,
true);
case tok::kw_namespace:
ProhibitAttributes(attrs);
- SingleDecl = ParseNamespace(Context, DeclEnd);
- break;
+ return ParseNamespace(Context, DeclEnd);
case tok::kw_using:
SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
DeclEnd, attrs, &OwnedType);
@@ -1979,8 +1974,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// Recover as if it were an explicit specialization.
TemplateParameterLists FakedParamLists;
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, nullptr,
- 0, LAngleLoc));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
ThisDecl =
Actions.ActOnTemplateDeclarator(getCurScope(), FakedParamLists, D);
@@ -2161,7 +2156,7 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
DS.ClearStorageClassSpecs();
}
- // Issue diagnostic and remove function specfier if present.
+ // Issue diagnostic and remove function specifier if present.
if (Specs & DeclSpec::PQ_FunctionSpecifier) {
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_typename_invalid_functionspec);
@@ -2203,7 +2198,6 @@ static bool isValidAfterIdentifierInDeclarator(const Token &T) {
tok::colon);
}
-
/// ParseImplicitInt - This method is called when we have an non-typename
/// identifier in a declspec (which normally terminates the decl spec) when
/// the declspec has no type specifier. In this case, the declspec is either
@@ -2621,8 +2615,6 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
/// [OpenCL] '__kernel'
/// 'friend': [C++ dcl.friend]
/// 'constexpr': [C++0x dcl.constexpr]
-
-///
void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS,
@@ -2647,6 +2639,18 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>.
+ //
+ // A typedef declaration containing _Atomic<...> is among the places where
+ // the class is used. If we are currently parsing such a declaration, treat
+ // the token as an identifier.
+ if (getLangOpts().MSVCCompat && Tok.is(tok::kw__Atomic) &&
+ DS.getStorageClassSpec() == clang::DeclSpec::SCS_typedef &&
+ !DS.hasTypeSpecifier() && GetLookAheadToken(1).is(tok::less))
+ Tok.setKind(tok::identifier);
+
SourceLocation Loc = Tok.getLocation();
switch (Tok.getKind()) {
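The shape of VS2013 <atomic> code this hack accepts looks roughly like the following (the typedef name is illustrative):

    // Only under -fms-compatibility, in a typedef with no type specifier
    // seen yet and with '<' as the next token:
    typedef _Atomic<int> _My_atomic_int; // '_Atomic' treated as an identifier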
@@ -2665,7 +2669,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If this is not a declaration specifier token, we're done reading decl
// specifiers. First verify that DeclSpec's are consistent.
- DS.Finish(Diags, PP, Policy);
+ DS.Finish(Actions, Policy);
return;
case tok::l_square:
@@ -2780,8 +2784,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// arguments. Complain, then parse it as a type as the user
// intended.
Diag(TemplateId->TemplateNameLoc,
- diag::err_out_of_line_template_id_names_constructor)
- << TemplateId->Name;
+ diag::err_out_of_line_template_id_type_names_constructor)
+ << TemplateId->Name << 0 /* template name */;
}
DS.getTypeSpecScope() = SS;
@@ -2826,8 +2830,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// looked at the declarator, and the user probably meant this
// to be a type. Complain that it isn't supposed to be treated
// as a type, then proceed to parse it as a type.
- Diag(Next.getLocation(), diag::err_out_of_line_type_names_constructor)
- << Next.getIdentifierInfo();
+ Diag(Next.getLocation(),
+ diag::err_out_of_line_template_id_type_names_constructor)
+ << Next.getIdentifierInfo() << 1 /* type */;
}
ParsedType TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(),
@@ -3133,6 +3138,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
PrevSpec, DiagID, Policy);
isStorageClass = true;
break;
+ case tok::kw___auto_type:
+ Diag(Tok, diag::ext_auto_type);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto_type, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
case tok::kw_register:
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_register, Loc,
PrevSpec, DiagID, Policy);
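__auto_type is the GNU extension for deduced variable types; the new TST_auto_type case accepts it as a declaration specifier (with an extension warning) and routes it through the usual deduction machinery. For example:

    __auto_type x = 42L;  // deduced as long
    __auto_type p = &x;   // deduced as long *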
@@ -3577,7 +3587,8 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SmallVector<Decl *, 32> FieldDecls;
// While we still have something to read, read the declarations in the struct.
- while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
// Each iteration of this loop reads one struct-declaration.
// Check for extraneous top-level semicolon.
@@ -4434,6 +4445,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw___private_extern__:
case tok::kw_static:
case tok::kw_auto:
+ case tok::kw___auto_type:
case tok::kw_register:
case tok::kw___thread:
case tok::kw_thread_local:
@@ -4807,7 +4819,7 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, unsigned AttrReqs,
DoneWithTypeQuals:
// If this is not a type-qualifier token, we're done reading type
// qualifiers. First verify that DeclSpec's are consistent.
- DS.Finish(Diags, PP, Actions.getASTContext().getPrintingPolicy());
+ DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
if (EndLoc.isValid())
DS.SetRangeEnd(EndLoc);
return;
@@ -4822,7 +4834,6 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, unsigned AttrReqs,
}
}
-
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
///
void Parser::ParseDeclarator(Declarator &D) {
@@ -5187,6 +5198,15 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
goto PastIdentifier;
}
+
+ if (D.getCXXScopeSpec().isNotEmpty()) {
+ // We have a scope specifier but no following unqualified-id.
+ Diag(PP.getLocForEndOfToken(D.getCXXScopeSpec().getEndLoc()),
+ diag::err_expected_unqualified_id)
+ << /*C++*/1;
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ goto PastIdentifier;
+ }
} else if (Tok.is(tok::identifier) && D.mayHaveIdentifier()) {
assert(!getLangOpts().CPlusPlus &&
"There's a C++-specific check for tok::identifier above");
@@ -5470,7 +5490,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
- CachedTokens *ExceptionSpecTokens = 0;
+ CachedTokens *ExceptionSpecTokens = nullptr;
ParsedAttributes FnAttrs(AttrFactory);
TypeResult TrailingReturnType;
@@ -5608,7 +5628,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
VolatileQualifierLoc,
RestrictQualifierLoc,
/*MutableLoc=*/SourceLocation(),
- ESpecType, ESpecRange.getBegin(),
+ ESpecType, ESpecRange,
DynamicExceptions.data(),
DynamicExceptionRanges.data(),
DynamicExceptions.size(),
@@ -6227,7 +6247,6 @@ void Parser::ParseAtomicSpecifier(DeclSpec &DS) {
Diag(StartLoc, DiagID) << PrevSpec;
}
-
/// TryAltiVecVectorTokenOutOfLine - Out of line body that should only be called
/// from TryAltiVecVectorToken.
bool Parser::TryAltiVecVectorTokenOutOfLine() {
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index e347d4e27e26..a4de9751f9a0 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -1,4 +1,4 @@
-//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -----------------------===//
+//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -26,6 +26,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
+
using namespace clang;
/// ParseNamespace - We know that the current token is a namespace keyword. This
@@ -54,9 +55,9 @@ using namespace clang;
/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
/// 'namespace' identifier '=' qualified-namespace-specifier ';'
///
-Decl *Parser::ParseNamespace(unsigned Context,
- SourceLocation &DeclEnd,
- SourceLocation InlineLoc) {
+Parser::DeclGroupPtrTy Parser::ParseNamespace(unsigned Context,
+ SourceLocation &DeclEnd,
+ SourceLocation InlineLoc) {
assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
ObjCDeclContextSwitch ObjCDC(*this);
@@ -64,7 +65,7 @@ Decl *Parser::ParseNamespace(unsigned Context,
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteNamespaceDecl(getCurScope());
cutOffParsing();
- return nullptr;
+ return DeclGroupPtrTy();
}
SourceLocation IdentLoc;
@@ -108,16 +109,16 @@ Decl *Parser::ParseNamespace(unsigned Context,
Diag(Tok, diag::err_expected) << tok::identifier;
// Skip to end of the definition and eat the ';'.
SkipUntil(tok::semi);
- return nullptr;
+ return DeclGroupPtrTy();
}
if (attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
Diag(InlineLoc, diag::err_inline_namespace_alias)
<< FixItHint::CreateRemoval(InlineLoc);
- return ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
- }
-
+ Decl *NSAlias = ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
+ return Actions.ConvertDeclToDeclGroup(NSAlias);
+  }
BalancedDelimiterTracker T(*this, tok::l_brace);
if (T.consumeOpen()) {
@@ -125,7 +126,7 @@ Decl *Parser::ParseNamespace(unsigned Context,
Diag(Tok, diag::err_expected) << tok::l_brace;
else
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
- return nullptr;
+ return DeclGroupPtrTy();
}
if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() ||
@@ -133,7 +134,7 @@ Decl *Parser::ParseNamespace(unsigned Context,
getCurScope()->getFnParent()) {
Diag(T.getOpenLocation(), diag::err_namespace_nonnamespace_scope);
SkipUntil(tok::r_brace);
- return nullptr;
+ return DeclGroupPtrTy();
}
if (ExtraIdent.empty()) {
@@ -180,10 +181,11 @@ Decl *Parser::ParseNamespace(unsigned Context,
// Enter a scope for the namespace.
ParseScope NamespaceScope(this, Scope::DeclScope);
+ UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
Decl *NamespcDecl =
Actions.ActOnStartNamespaceDef(getCurScope(), InlineLoc, NamespaceLoc,
IdentLoc, Ident, T.getOpenLocation(),
- attrs.getList());
+ attrs.getList(), ImplicitUsingDirectiveDecl);
PrettyDeclStackTraceEntry CrashInfo(Actions, NamespcDecl, NamespaceLoc,
"parsing namespace");
@@ -198,8 +200,9 @@ Decl *Parser::ParseNamespace(unsigned Context,
DeclEnd = T.getCloseLocation();
Actions.ActOnFinishNamespaceDef(NamespcDecl, DeclEnd);
-
- return NamespcDecl;
+
+ return Actions.ConvertDeclToDeclGroup(NamespcDecl,
+ ImplicitUsingDirectiveDecl);
}
/// ParseInnerNamespace - Parse the contents of a namespace.
@@ -210,7 +213,8 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker) {
if (index == Ident.size()) {
- while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
MaybeParseMicrosoftAttributes(attrs);
@@ -228,17 +232,19 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
// FIXME: Preserve the source information through to the AST rather than
// desugaring it here.
ParseScope NamespaceScope(this, Scope::DeclScope);
+ UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
Decl *NamespcDecl =
Actions.ActOnStartNamespaceDef(getCurScope(), SourceLocation(),
NamespaceLoc[index], IdentLoc[index],
Ident[index], Tracker.getOpenLocation(),
- attrs.getList());
+ attrs.getList(), ImplicitUsingDirectiveDecl);
+ assert(!ImplicitUsingDirectiveDecl &&
+ "nested namespace definition cannot define anonymous namespace");
ParseInnerNamespace(IdentLoc, Ident, NamespaceLoc, ++index, InlineLoc,
attrs, Tracker);
NamespaceScope.Exit();
-
Actions.ActOnFinishNamespaceDef(NamespcDecl, Tracker.getCloseLocation());
}
@@ -279,8 +285,8 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name))
SkipUntil(tok::semi);
- return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc, Alias,
- SS, IdentLoc, Ident);
+ return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc,
+ Alias, SS, IdentLoc, Ident);
}
/// ParseLinkage - We know that the current token is a string_literal
@@ -393,8 +399,8 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
// Template parameters are always an error here.
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
- Diag(UsingLoc, diag::err_templated_using_directive)
- << R << FixItHint::CreateRemoval(R);
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
}
return ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
@@ -546,7 +552,8 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
} else if (ParseUnqualifiedId(
SS, /*EnteringContext=*/false,
/*AllowDestructorName=*/true,
- /*AllowConstructorName=*/NextToken().isNot(tok::equal),
+ /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
+ NextToken().is(tok::equal)),
ParsedType(), TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
return nullptr;
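Allowing a constructor name is now keyed on the 'identifier =' shape so that alias-declarations still win; both forms side by side:

    struct B { B(int); };
    struct D : B {
      using B::B;     // constructor name permitted: next token is ';', not '='
    };
    using Alias = B;  // 'Alias' followed by '=' parses as an alias-declaration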
@@ -643,8 +650,8 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
// template <...> using id = type;
if (TemplateInfo.Kind && !IsAliasDecl) {
SourceRange R = TemplateInfo.getSourceRange();
- Diag(UsingLoc, diag::err_templated_using_declaration)
- << R << FixItHint::CreateRemoval(R);
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
// Unfortunately, we have to bail out instead of recovering by
// ignoring the parameters, just in case the nested name specifier
@@ -1307,6 +1314,35 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// allow libstdc++ 4.2 and libc++ to work properly.
TryKeywordIdentFallback(true);
+ struct PreserveAtomicIdentifierInfoRAII {
+ PreserveAtomicIdentifierInfoRAII(Token &Tok, bool Enabled)
+ : AtomicII(nullptr) {
+ if (!Enabled)
+ return;
+ assert(Tok.is(tok::kw__Atomic));
+ AtomicII = Tok.getIdentifierInfo();
+ AtomicII->revertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ }
+ ~PreserveAtomicIdentifierInfoRAII() {
+ if (!AtomicII)
+ return;
+ AtomicII->revertIdentifierToTokenID(tok::kw__Atomic);
+ }
+ IdentifierInfo *AtomicII;
+ };
+
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>. When we are parsing 'struct _Atomic', don't consider
+ // '_Atomic' to be a keyword. We are careful to undo this so that clang can
+ // use '_Atomic' in its own header files.
+ bool ShouldChangeAtomicToIdentifier = getLangOpts().MSVCCompat &&
+ Tok.is(tok::kw__Atomic) &&
+ TagType == DeclSpec::TST_struct;
+ PreserveAtomicIdentifierInfoRAII AtomicTokenGuard(
+ Tok, ShouldChangeAtomicToIdentifier);
+
// Parse the (optional) nested-name-specifier.
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLangOpts().CPlusPlus) {
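The RAII guard reverts '_Atomic' to an ordinary identifier only while parsing 'struct _Atomic', so a declaration modeled on VS2013's <atomic> (name reused deliberately) parses, while clang's own _Atomic keyword keeps working elsewhere:

    template <class T> struct _Atomic;  // accepted under -fms-compatibility
    // Once this declaration is parsed, the guard's destructor restores
    // '_Atomic' to its keyword token kind.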
@@ -1626,8 +1662,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// "template<>", so that we treat this construct as a class
// template specialization.
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, nullptr,
- 0, LAngleLoc));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
TemplateParams = &FakedParamLists;
}
}
@@ -1808,7 +1844,7 @@ void Parser::ParseBaseClause(Decl *ClassDecl) {
}
// Attach the base specifiers
- Actions.ActOnBaseSpecifiers(ClassDecl, BaseInfo.data(), BaseInfo.size());
+ Actions.ActOnBaseSpecifiers(ClassDecl, BaseInfo);
}
/// ParseBaseSpecifier - Parse a C++ base-specifier. A base-specifier is
@@ -1858,6 +1894,15 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
CheckMisplacedCXX11Attribute(Attributes, StartLoc);
// Parse the class-name.
+
+ // HACK: MSVC doesn't consider _Atomic to be a keyword and its STL
+ // implementation for VS2013 uses _Atomic as an identifier for one of the
+ // classes in <atomic>. Treat '_Atomic' to be an identifier when we are
+ // parsing the class-name for a base specifier.
+ if (getLangOpts().MSVCCompat && Tok.is(tok::kw__Atomic) &&
+ NextToken().is(tok::less))
+ Tok.setKind(tok::identifier);
+
SourceLocation EndLocation;
SourceLocation BaseLoc;
TypeResult BaseType = ParseBaseTypeSpecifier(BaseLoc, EndLocation);
@@ -1928,7 +1973,7 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
// Stash the exception-specification tokens in the late-parsed method.
LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
- FTI.ExceptionSpecTokens = 0;
+ FTI.ExceptionSpecTokens = nullptr;
// Push tokens for each parameter. Those that do not have
// defaults will be NULL.
@@ -2195,8 +2240,9 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
/// constant-initializer:
/// '=' constant-expression
///
-void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
- AttributeList *AccessAttrs,
+Parser::DeclGroupPtrTy
+Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
+ AttributeList *AccessAttrs,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
if (Tok.is(tok::at)) {
@@ -2207,7 +2253,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ConsumeToken();
SkipUntil(tok::r_brace, StopAtSemi);
- return;
+ return DeclGroupPtrTy();
}
// Turn on colon protection early, while parsing declspec, although there is
@@ -2241,7 +2287,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (SS.isInvalid()) {
SkipUntil(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
// Try to parse an unqualified-id.
@@ -2250,24 +2296,21 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (ParseUnqualifiedId(SS, false, true, true, ParsedType(),
TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
// TODO: recover from mistakenly-qualified operator declarations.
if (ExpectAndConsume(tok::semi, diag::err_expected_after,
"access declaration")) {
SkipUntil(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
- Actions.ActOnUsingDeclaration(getCurScope(), AS,
- /* HasUsingKeyword */ false,
- SourceLocation(),
- SS, Name,
- /* AttrList */ nullptr,
- /* HasTypenameKeyword */ false,
- SourceLocation());
- return;
+ return DeclGroupPtrTy::make(DeclGroupRef(Actions.ActOnUsingDeclaration(
+ getCurScope(), AS,
+ /* HasUsingKeyword */ false, SourceLocation(), SS, Name,
+ /* AttrList */ nullptr,
+ /* HasTypenameKeyword */ false, SourceLocation())));
}
}
@@ -2276,17 +2319,17 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (!TemplateInfo.Kind &&
Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
SourceLocation DeclEnd;
- ParseStaticAssertDeclaration(DeclEnd);
- return;
+ return DeclGroupPtrTy::make(
+ DeclGroupRef(ParseStaticAssertDeclaration(DeclEnd)));
}
if (Tok.is(tok::kw_template)) {
assert(!TemplateInfo.TemplateParams &&
"Nested template improperly parsed?");
SourceLocation DeclEnd;
- ParseDeclarationStartingWithTemplate(Declarator::MemberContext, DeclEnd,
- AS, AccessAttrs);
- return;
+ return DeclGroupPtrTy::make(
+ DeclGroupRef(ParseDeclarationStartingWithTemplate(
+ Declarator::MemberContext, DeclEnd, AS, AccessAttrs)));
}
// Handle: member-declaration ::= '__extension__' member-declaration
@@ -2318,13 +2361,12 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (Tok.is(tok::kw_namespace)) {
Diag(UsingLoc, diag::err_using_namespace_in_class);
SkipUntil(tok::semi, StopBeforeMatch);
- } else {
- SourceLocation DeclEnd;
- // Otherwise, it must be a using-declaration or an alias-declaration.
- ParseUsingDeclaration(Declarator::MemberContext, TemplateInfo,
- UsingLoc, DeclEnd, AS);
+ return DeclGroupPtrTy();
}
- return;
+ SourceLocation DeclEnd;
+ // Otherwise, it must be a using-declaration or an alias-declaration.
+ return DeclGroupPtrTy::make(DeclGroupRef(ParseUsingDeclaration(
+ Declarator::MemberContext, TemplateInfo, UsingLoc, DeclEnd, AS)));
}
// Hold late-parsed attributes so we can attach a Decl to them later.
@@ -2349,7 +2391,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate &&
DiagnoseMissingSemiAfterTagDefinition(DS, AS, DSC_class,
&CommonLateParsedAttrs))
- return;
+ return DeclGroupPtrTy();
MultiTemplateParamsArg TemplateParams(
TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data()
@@ -2363,7 +2405,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
Decl *TheDecl =
Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS, TemplateParams);
DS.complete(TheDecl);
- return;
+ return DeclGroupPtrTy::make(DeclGroupRef(TheDecl));
}
ParsingDeclarator DeclaratorInfo(*this, DS, Declarator::MemberContext);
@@ -2404,7 +2446,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (ParseCXXMemberDeclaratorBeforeInitializer(
DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs)) {
TryConsumeToken(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
// Check for a member function definition.
@@ -2453,7 +2495,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Consume the optional ';'
TryConsumeToken(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
@@ -2482,7 +2524,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (Tok.is(tok::semi))
ConsumeExtraSemi(AfterMemberFunctionDefinition);
- return;
+ return DeclGroupPtrTy::make(DeclGroupRef(FunDecl));
}
}
@@ -2656,10 +2698,10 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
// If we stopped at a ';', eat it.
TryConsumeToken(tok::semi);
- return;
+ return DeclGroupPtrTy();
}
- Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
}
/// ParseCXXMemberInitializer - Parse the brace-or-equal-initializer.
@@ -2777,6 +2819,100 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
MaybeParseGNUAttributes(Attrs);
}
+Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
+ AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
+ DeclSpec::TST TagType, Decl *TagDecl) {
+ if (getLangOpts().MicrosoftExt &&
+ Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
+ ParseMicrosoftIfExistsClassDeclaration(TagType, AS);
+ return DeclGroupPtrTy();
+ }
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeExtraSemi(InsideStruct, TagType);
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_vis)) {
+ HandlePragmaVisibility();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_pack)) {
+ HandlePragmaPack();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_align)) {
+ HandlePragmaAlign();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_pointers_to_members)) {
+ HandlePragmaMSPointersToMembers();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_pragma)) {
+ HandlePragmaMSPragma();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_ms_vtordisp)) {
+ HandlePragmaMSVtorDisp();
+ return DeclGroupPtrTy();
+ }
+
+ // If we see a namespace here, a close brace was missing somewhere.
+ if (Tok.is(tok::kw_namespace)) {
+ DiagnoseUnexpectedNamespace(cast<NamedDecl>(TagDecl));
+ return DeclGroupPtrTy();
+ }
+
+ AccessSpecifier NewAS = getAccessSpecifierIfPresent();
+ if (NewAS != AS_none) {
+ // Current token is a C++ access specifier.
+ AS = NewAS;
+ SourceLocation ASLoc = Tok.getLocation();
+ unsigned TokLength = Tok.getLength();
+ ConsumeToken();
+ AccessAttrs.clear();
+ MaybeParseGNUAttributes(AccessAttrs);
+
+ SourceLocation EndLoc;
+ if (TryConsumeToken(tok::colon, EndLoc)) {
+ } else if (TryConsumeToken(tok::semi, EndLoc)) {
+ Diag(EndLoc, diag::err_expected)
+ << tok::colon << FixItHint::CreateReplacement(EndLoc, ":");
+ } else {
+ EndLoc = ASLoc.getLocWithOffset(TokLength);
+ Diag(EndLoc, diag::err_expected)
+ << tok::colon << FixItHint::CreateInsertion(EndLoc, ":");
+ }
+
+ // The Microsoft extension __interface does not permit non-public
+ // access specifiers.
+ if (TagType == DeclSpec::TST_interface && AS != AS_public) {
+ Diag(ASLoc, diag::err_access_specifier_interface) << (AS == AS_protected);
+ }
+
+ if (Actions.ActOnAccessSpecifier(NewAS, ASLoc, EndLoc,
+ AccessAttrs.getList())) {
+ // found attributes other than annotations
+ AccessAttrs.clear();
+ }
+
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::annot_pragma_openmp))
+ return ParseOpenMPDeclarativeDirective();
+
+ // Parse all the comma separated declarators.
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs.getList());
+}
+
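Factoring the pragma and access-specifier handling into this helper keeps the member loop below to a single call per iteration. The kinds of member-position tokens it dispatches on:

    struct S {
    #pragma pack(push, 4)  // annot_pragma_pack, handled between members
      int a;
    #pragma pack(pop)
    protected:             // access specifier: AS is updated in place
      int b;
    };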
/// ParseCXXMemberSpecification - Parse the class definition.
///
/// member-specification:
@@ -2934,102 +3070,16 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
CurAS = AS_private;
else
CurAS = AS_public;
- ParsedAttributes AccessAttrs(AttrFactory);
+ ParsedAttributesWithRange AccessAttrs(AttrFactory);
if (TagDecl) {
// While we still have something to read, read the member-declarations.
- while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
// Each iteration of this loop reads one member-declaration.
-
- if (getLangOpts().MicrosoftExt && Tok.isOneOf(tok::kw___if_exists,
- tok::kw___if_not_exists)) {
- ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
- continue;
- }
-
- // Check for extraneous top-level semicolon.
- if (Tok.is(tok::semi)) {
- ConsumeExtraSemi(InsideStruct, TagType);
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_vis)) {
- HandlePragmaVisibility();
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_pack)) {
- HandlePragmaPack();
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_align)) {
- HandlePragmaAlign();
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_openmp)) {
- ParseOpenMPDeclarativeDirective();
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_ms_pointers_to_members)) {
- HandlePragmaMSPointersToMembers();
- continue;
- }
-
- if (Tok.is(tok::annot_pragma_ms_pragma)) {
- HandlePragmaMSPragma();
- continue;
- }
-
- // If we see a namespace here, a close brace was missing somewhere.
- if (Tok.is(tok::kw_namespace)) {
- DiagnoseUnexpectedNamespace(cast<NamedDecl>(TagDecl));
- break;
- }
-
- AccessSpecifier AS = getAccessSpecifierIfPresent();
- if (AS != AS_none) {
- // Current token is a C++ access specifier.
- CurAS = AS;
- SourceLocation ASLoc = Tok.getLocation();
- unsigned TokLength = Tok.getLength();
- ConsumeToken();
- AccessAttrs.clear();
- MaybeParseGNUAttributes(AccessAttrs);
-
- SourceLocation EndLoc;
- if (TryConsumeToken(tok::colon, EndLoc)) {
- } else if (TryConsumeToken(tok::semi, EndLoc)) {
- Diag(EndLoc, diag::err_expected)
- << tok::colon << FixItHint::CreateReplacement(EndLoc, ":");
- } else {
- EndLoc = ASLoc.getLocWithOffset(TokLength);
- Diag(EndLoc, diag::err_expected)
- << tok::colon << FixItHint::CreateInsertion(EndLoc, ":");
- }
-
- // The Microsoft extension __interface does not permit non-public
- // access specifiers.
- if (TagType == DeclSpec::TST_interface && CurAS != AS_public) {
- Diag(ASLoc, diag::err_access_specifier_interface)
- << (CurAS == AS_protected);
- }
-
- if (Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc,
- AccessAttrs.getList())) {
- // found another attribute than only annotations
- AccessAttrs.clear();
- }
-
- continue;
- }
-
- // Parse all the comma separated declarators.
- ParseCXXClassMemberDeclaration(CurAS, AccessAttrs.getList());
+ ParseCXXClassMemberDeclarationWithPragmas(
+ CurAS, AccessAttrs, static_cast<DeclSpec::TST>(TagType), TagDecl);
}
-
T.consumeClose();
} else {
SkipUntil(tok::r_brace);
@@ -3068,7 +3118,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// We've finished parsing everything, including default argument
// initializers.
- Actions.ActOnFinishCXXMemberDefaultArgs(TagDecl);
+ Actions.ActOnFinishCXXNonNestedClass(TagDecl);
}
if (TagDecl)
@@ -3279,7 +3329,7 @@ Parser::tryParseExceptionSpecification(bool Delayed,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens) {
ExceptionSpecificationType Result = EST_None;
- ExceptionSpecTokens = 0;
+ ExceptionSpecTokens = nullptr;
// Handle delayed parsing of exception-specifications.
if (Delayed) {
@@ -3296,7 +3346,7 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// If this is a bare 'noexcept', we're done.
if (IsNoexcept) {
Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
- NoexceptExpr = 0;
+ NoexceptExpr = nullptr;
return EST_BasicNoexcept;
}
@@ -3379,7 +3429,7 @@ Parser::tryParseExceptionSpecification(bool Delayed,
}
static void diagnoseDynamicExceptionSpecification(
- Parser &P, const SourceRange &Range, bool IsNoexcept) {
+ Parser &P, SourceRange Range, bool IsNoexcept) {
if (P.getLangOpts().CPlusPlus11) {
const char *Replacement = IsNoexcept ? "noexcept" : "noexcept(false)";
P.Diag(Range.getBegin(), diag::warn_exception_spec_deprecated) << Range;
@@ -3577,9 +3627,8 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
case AttributeList::AT_CarriesDependency:
case AttributeList::AT_Deprecated:
case AttributeList::AT_FallThrough:
- case AttributeList::AT_CXX11NoReturn: {
+ case AttributeList::AT_CXX11NoReturn:
return true;
- }
default:
return false;
@@ -3612,7 +3661,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
// If the attribute isn't known, we will not attempt to parse any
// arguments.
if (!hasAttribute(AttrSyntax::CXX, ScopeName, AttrName,
- getTargetInfo().getTriple(), getLangOpts())) {
+ getTargetInfo(), getLangOpts())) {
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
SkipUntil(tok::r_paren);
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index b866798a1c61..490bd5ada62d 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -163,6 +163,8 @@ ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::kw_throw))
return ParseThrowExpression();
+ if (Tok.is(tok::kw_co_yield))
+ return ParseCoyieldExpression();
ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false,
/*isAddressOfOperand=*/false,
@@ -522,6 +524,7 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// postfix-expression
/// '++' unary-expression
/// '--' unary-expression
+/// [Coro] 'co_await' cast-expression
/// unary-operator cast-expression
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
@@ -1041,6 +1044,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return Res;
}
+ case tok::kw_co_await: { // unary-expression: 'co_await' cast-expression
+ SourceLocation CoawaitLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnCoawaitExpr(getCurScope(), CoawaitLoc, Res.get());
+ return Res;
+ }
+
case tok::kw___extension__:{//unary-expression:'__extension__' cast-expr [GNU]
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
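This case makes 'co_await expr' parse as a unary-expression. A minimal sketch, assuming a coroutine task type declared elsewhere:

    task<int> add_one(task<int> inner) {
      int v = co_await inner;  // 'co_await' cast-expression
      co_return v + 1;
    }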
@@ -1396,21 +1407,42 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
- ExprResult Idx;
+ ExprResult Idx, Length;
+ SourceLocation ColonLoc;
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
Idx = ParseBraceInitializer();
+ } else if (getLangOpts().OpenMP) {
+ ColonProtectionRAIIObject RAII(*this);
+ // Parse [: or [ expr or [ expr :
+ if (!Tok.is(tok::colon)) {
+ // [ expr
+ Idx = ParseExpression();
+ }
+ if (Tok.is(tok::colon)) {
+ // Consume ':'
+ ColonLoc = ConsumeToken();
+ if (Tok.isNot(tok::r_square))
+ Length = ParseExpression();
+ }
} else
Idx = ParseExpression();
SourceLocation RLoc = Tok.getLocation();
- if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) {
- LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
- Idx.get(), RLoc);
+ if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
+ Tok.is(tok::r_square)) {
+ if (ColonLoc.isValid()) {
+ LHS = Actions.ActOnOMPArraySectionExpr(LHS.get(), Loc, Idx.get(),
+ ColonLoc, Length.get(), RLoc);
+ } else {
+ LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
+ Idx.get(), RLoc);
+ }
} else {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
(void)Actions.CorrectDelayedTyposInExpr(Idx);
+ (void)Actions.CorrectDelayedTyposInExpr(Length);
LHS = ExprError();
Idx = ExprError();
}
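In OpenMP mode a subscript may now be an array section, '[' lower ':' length ']' with either part optional; the presence of the colon routes it to ActOnOMPArraySectionExpr. Typical use:

    void scale(int *a, int n) {
    #pragma omp target map(tofrom: a[0:n])  // section: n elements from a[0]
      for (int i = 0; i < n; ++i)
        a[i] *= 2;
    }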
@@ -1965,7 +1997,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
} else {
PT.consumeClose();
Res = Actions.ActOnBuiltinOffsetOf(getCurScope(), StartLoc, TypeLoc,
- Ty.get(), &Comps[0], Comps.size(),
+ Ty.get(), Comps,
PT.getCloseLocation());
}
break;
@@ -2747,7 +2779,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc,
EST_None,
- /*ESpecLoc=*/NoLoc,
+ /*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index c1dafe9b49b1..f8938ba3495b 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -841,6 +841,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// Parse capture.
LambdaCaptureKind Kind = LCK_ByCopy;
+ LambdaCaptureInitKind InitKind = LambdaCaptureInitKind::NoInit;
SourceLocation Loc;
IdentifierInfo *Id = nullptr;
SourceLocation EllipsisLoc;
@@ -878,6 +879,8 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
+ InitKind = LambdaCaptureInitKind::DirectInit;
+
ExprVector Exprs;
CommaLocsTy Commas;
if (SkippedInits) {
@@ -898,14 +901,13 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// to save the necessary state, and restore it later.
EnterExpressionEvaluationContext EC(Actions,
Sema::PotentiallyEvaluated);
- bool HadEquals = TryConsumeToken(tok::equal);
+
+ if (TryConsumeToken(tok::equal))
+ InitKind = LambdaCaptureInitKind::CopyInit;
+ else
+ InitKind = LambdaCaptureInitKind::ListInit;
if (!SkippedInits) {
- // Warn on constructs that will change meaning when we implement N3922
- if (!HadEquals && Tok.is(tok::l_brace)) {
- Diag(Tok, diag::warn_init_capture_direct_list_init)
- << FixItHint::CreateInsertion(Tok.getLocation(), "=");
- }
Init = ParseInitializer();
} else if (Tok.is(tok::l_brace)) {
BalancedDelimiterTracker Braces(*this, tok::l_brace);
@@ -993,19 +995,19 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// If x was not const, the second use would require 'L' to capture, and
// that would be an error.
- ParsedType InitCaptureParsedType;
+ ParsedType InitCaptureType;
if (Init.isUsable()) {
// Get the pointer and store it in an lvalue, so we can use it as an
// out argument.
Expr *InitExpr = Init.get();
// This performs any lvalue-to-rvalue conversions if necessary, which
// can affect what gets captured in the containing decl-context.
- QualType InitCaptureType = Actions.performLambdaInitCaptureInitialization(
- Loc, Kind == LCK_ByRef, Id, InitExpr);
+ InitCaptureType = Actions.actOnLambdaInitCaptureInitialization(
+ Loc, Kind == LCK_ByRef, Id, InitKind, InitExpr);
Init = InitExpr;
- InitCaptureParsedType.set(InitCaptureType);
}
- Intro.addCapture(Kind, Loc, Id, EllipsisLoc, Init, InitCaptureParsedType);
+ Intro.addCapture(Kind, Loc, Id, EllipsisLoc, InitKind, Init,
+ InitCaptureType);
}
T.consumeClose();
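LambdaCaptureInitKind records which initializer form an init-capture used; braced captures are now implemented per N3922 rather than warned about. The three forms:

    int n = 0;
    auto c = [a = n] { return a; };  // CopyInit
    auto d = [b(n)] { return b; };   // DirectInit
    auto l = [e{n}] { return e; };   // ListInit (N3922)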
@@ -1149,7 +1151,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*VolatileQualifierLoc=*/NoLoc,
/*RestrictQualifierLoc=*/NoLoc,
MutableLoc,
- ESpecType, ESpecRange.getBegin(),
+ ESpecType, ESpecRange,
DynamicExceptions.data(),
DynamicExceptionRanges.data(),
DynamicExceptions.size(),
@@ -1217,7 +1219,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*RestrictQualifierLoc=*/NoLoc,
MutableLoc,
EST_None,
- /*ESpecLoc=*/NoLoc,
+ /*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
@@ -1558,6 +1560,21 @@ ExprResult Parser::ParseThrowExpression() {
}
}
+/// \brief Parse the C++ Coroutines co_yield expression.
+///
+/// co_yield-expression:
+/// 'co_yield' assignment-expression or braced-init-list
+ExprResult Parser::ParseCoyieldExpression() {
+ assert(Tok.is(tok::kw_co_yield) && "Not co_yield!");
+
+ SourceLocation Loc = ConsumeToken();
+ ExprResult Expr = Tok.is(tok::l_brace) ? ParseBraceInitializer()
+ : ParseAssignmentExpression();
+ if (!Expr.isInvalid())
+ Expr = Actions.ActOnCoyieldExpr(getCurScope(), Loc, Expr.get());
+ return Expr;
+}
+
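ParseCoyieldExpression mirrors the co_await handling earlier in this commit: 'co_yield' takes an assignment-expression or a braced-init-list. A sketch, with generator as a placeholder coroutine type:

    generator<int> iota(int n) {
      for (int i = 0; i < n; ++i)
        co_yield i;  // assignment-expression operand
    }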
/// ParseCXXThis - This handles the C++ 'this' pointer.
///
/// C++ 9.3.2: In the body of a non-static member function, the keyword this is
@@ -1805,7 +1822,7 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeToken();
- DS.Finish(Diags, PP, Policy);
+ DS.Finish(Actions, Policy);
return;
}
@@ -1861,12 +1878,12 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::annot_decltype:
case tok::kw_decltype:
DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
- return DS.Finish(Diags, PP, Policy);
+ return DS.Finish(Actions, Policy);
// GNU typeof support.
case tok::kw_typeof:
ParseTypeofSpecifier(DS);
- DS.Finish(Diags, PP, Policy);
+ DS.Finish(Actions, Policy);
return;
}
if (Tok.is(tok::annot_typename))
@@ -1874,7 +1891,7 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
else
DS.SetRangeEnd(Tok.getLocation());
ConsumeToken();
- DS.Finish(Diags, PP, Policy);
+ DS.Finish(Actions, Policy);
}
/// ParseCXXTypeSpecifierSeq - Parse a C++ type-specifier-seq (C++
@@ -1890,7 +1907,7 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
///
bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
- DS.Finish(Diags, PP, Actions.getASTContext().getPrintingPolicy());
+ DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
return false;
}
@@ -2289,7 +2306,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// This isn't a valid literal-operator-id, but we think we know
// what the user meant. Tell them what they should have written.
SmallString<32> Str;
- Str += "\"\" ";
+ Str += "\"\"";
Str += II->getName();
Diag(DiagLoc, DiagId) << FixItHint::CreateReplacement(
SourceRange(TokLocs.front(), TokLocs.back()), Str);
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index ed6090453daa..e72a1f62f942 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -94,6 +94,28 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
return Actions.ConvertDeclToDeclGroup(SingleDecl);
}
+/// Class to handle popping type parameters when leaving the scope.
+class Parser::ObjCTypeParamListScope {
+ Sema &Actions;
+ Scope *S;
+ ObjCTypeParamList *Params;
+public:
+ ObjCTypeParamListScope(Sema &Actions, Scope *S)
+ : Actions(Actions), S(S), Params(nullptr) {}
+ ~ObjCTypeParamListScope() {
+ leave();
+ }
+ void enter(ObjCTypeParamList *P) {
+ assert(!Params);
+ Params = P;
+ }
+ void leave() {
+ if (Params)
+ Actions.popObjCTypeParamList(S, Params);
+ Params = nullptr;
+ }
+};
+
///
/// objc-class-declaration:
/// '@' 'class' objc-class-forward-decl (',' objc-class-forward-decl)* ';'
@@ -121,11 +143,8 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
// Parse the optional objc-type-parameter-list.
ObjCTypeParamList *TypeParams = nullptr;
- if (Tok.is(tok::less)) {
+ if (Tok.is(tok::less))
TypeParams = parseObjCTypeParamList();
- if (TypeParams)
- Actions.popObjCTypeParamList(getCurScope(), TypeParams);
- }
ClassTypeParams.push_back(TypeParams);
if (!TryConsumeToken(tok::comma))
break;
@@ -221,11 +240,10 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
SourceLocation LAngleLoc, EndProtoLoc;
SmallVector<IdentifierLocPair, 8> ProtocolIdents;
ObjCTypeParamList *typeParameterList = nullptr;
- if (Tok.is(tok::less)) {
- typeParameterList = parseObjCTypeParamListOrProtocolRefs(LAngleLoc,
- ProtocolIdents,
- EndProtoLoc);
- }
+ ObjCTypeParamListScope typeParamScope(Actions, getCurScope());
+ if (Tok.is(tok::less))
+ typeParameterList = parseObjCTypeParamListOrProtocolRefs(
+ typeParamScope, LAngleLoc, ProtocolIdents, EndProtoLoc);
if (Tok.is(tok::l_paren) &&
!isKnownToBeTypeSpecifier(GetLookAheadToken(1))) { // we have a category.
@@ -286,9 +304,6 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParseObjCInterfaceDeclList(tok::objc_not_keyword, CategoryType);
- if (typeParameterList)
- Actions.popObjCTypeParamList(getCurScope(), typeParameterList);
-
return CategoryType;
}
// Parse a class interface.
@@ -342,8 +357,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
}
Actions.FindProtocolDeclaration(/*WarnOnDeclarations=*/true,
/*ForObjCContainer=*/true,
- &ProtocolIdents[0], ProtocolIdents.size(),
- protocols);
+ ProtocolIdents, protocols);
}
} else if (protocols.empty() && Tok.is(tok::less) &&
ParseObjCProtocolReferences(protocols, protocolLocs, true, true,
@@ -371,9 +385,6 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParseObjCInterfaceDeclList(tok::objc_interface, ClsType);
- if (typeParameterList)
- Actions.popObjCTypeParamList(getCurScope(), typeParameterList);
-
return ClsType;
}
@@ -433,10 +444,9 @@ static void addContextSensitiveTypeNullability(Parser &P,
///
/// \param rAngleLoc The location of the ending '>'.
ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
- SourceLocation &lAngleLoc,
- SmallVectorImpl<IdentifierLocPair> &protocolIdents,
- SourceLocation &rAngleLoc,
- bool mayBeProtocolList) {
+ ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
+ SmallVectorImpl<IdentifierLocPair> &protocolIdents,
+ SourceLocation &rAngleLoc, bool mayBeProtocolList) {
assert(Tok.is(tok::less) && "Not at the beginning of a type parameter list");
// Within the type parameter list, don't treat '>' as an operator.
@@ -493,8 +503,7 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
if (Tok.is(tok::code_completion)) {
// FIXME: If these aren't protocol references, we'll need different
// completions.
- Actions.CodeCompleteObjCProtocolReferences(protocolIdents.data(),
- protocolIdents.size());
+ Actions.CodeCompleteObjCProtocolReferences(protocolIdents);
cutOffParsing();
// FIXME: Better recovery here?
@@ -581,18 +590,19 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
makeProtocolIdentsIntoTypeParameters();
}
- // Form the type parameter list.
+ // Form the type parameter list and enter its scope.
ObjCTypeParamList *list = Actions.actOnObjCTypeParamList(
getCurScope(),
lAngleLoc,
typeParams,
rAngleLoc);
+ Scope.enter(list);
// Clear out the angle locations; they're used by the caller to indicate
// whether there are any protocol references.
lAngleLoc = SourceLocation();
rAngleLoc = SourceLocation();
- return list;
+ return invalid ? nullptr : list;
}
/// Parse an objc-type-parameter-list.
@@ -600,8 +610,10 @@ ObjCTypeParamList *Parser::parseObjCTypeParamList() {
SourceLocation lAngleLoc;
SmallVector<IdentifierLocPair, 1> protocolIdents;
SourceLocation rAngleLoc;
- return parseObjCTypeParamListOrProtocolRefs(lAngleLoc, protocolIdents,
- rAngleLoc,
+
+ ObjCTypeParamListScope Scope(Actions, getCurScope());
+ return parseObjCTypeParamListOrProtocolRefs(Scope, lAngleLoc, protocolIdents,
+ rAngleLoc,
/*mayBeProtocolList=*/false);
}
@@ -620,7 +632,6 @@ ObjCTypeParamList *Parser::parseObjCTypeParamList() {
void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl) {
SmallVector<Decl *, 32> allMethods;
- SmallVector<Decl *, 16> allProperties;
SmallVector<DeclGroupPtrTy, 8> allTUVariables;
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword;
@@ -776,12 +787,9 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
SetterSel = SelectorTable::constructSetterSelector(
PP.getIdentifierTable(), PP.getSelectorTable(),
FD.D.getIdentifier());
- bool isOverridingProperty = false;
Decl *Property = Actions.ActOnProperty(
getCurScope(), AtLoc, LParenLoc, FD, OCDS, GetterSel, SetterSel,
- &isOverridingProperty, MethodImplKind);
- if (!isOverridingProperty)
- allProperties.push_back(Property);
+ MethodImplKind);
FD.complete(Property);
};
@@ -903,7 +911,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
// getter/setter require extra treatment.
unsigned DiagID = IsSetter ? diag::err_objc_expected_equal_for_setter :
- diag::err_objc_expected_equal_for_getter;
+ diag::err_objc_expected_equal_for_getter;
if (ExpectAndConsume(tok::equal, DiagID)) {
SkipUntil(tok::r_paren, StopAtSemi);
@@ -918,7 +926,6 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
return cutOffParsing();
}
-
SourceLocation SelLoc;
IdentifierInfo *SelIdent = ParseObjCSelectorPiece(SelLoc);
@@ -1114,6 +1121,7 @@ IdentifierInfo *Parser::ParseObjCSelectorPiece(SourceLocation &SelectorLoc) {
case tok::kw__Bool:
case tok::kw__Complex:
case tok::kw___alignof:
+ case tok::kw___auto_type:
IdentifierInfo *II = Tok.getIdentifierInfo();
SelectorLoc = ConsumeToken();
return II;
@@ -1557,8 +1565,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
while (1) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents.data(),
- ProtocolIdents.size());
+ Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
cutOffParsing();
return true;
}
@@ -1584,8 +1591,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
// Convert the list of protocols identifiers into a list of protocol decls.
Actions.FindProtocolDeclaration(WarnOnDeclarations, ForObjCContainer,
- &ProtocolIdents[0], ProtocolIdents.size(),
- Protocols);
+ ProtocolIdents, Protocols);
return false;
}
@@ -1662,8 +1668,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
if (!BaseT.isNull() && BaseT->acceptsObjCTypeParams()) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
} else {
- Actions.CodeCompleteObjCProtocolReferences(identifierLocPairs.data(),
- identifierLocPairs.size());
+ Actions.CodeCompleteObjCProtocolReferences(identifierLocPairs);
}
cutOffParsing();
return;
@@ -2015,7 +2020,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (TryConsumeToken(tok::semi)) { // forward declaration of one protocol.
IdentifierLocPair ProtoInfo(protocolName, nameLoc);
- return Actions.ActOnForwardProtocolDeclaration(AtLoc, &ProtoInfo, 1,
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo,
attrs.getList());
}
@@ -2044,9 +2049,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@protocol"))
return DeclGroupPtrTy();
- return Actions.ActOnForwardProtocolDeclaration(AtLoc,
- &ProtocolRefs[0],
- ProtocolRefs.size(),
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs,
attrs.getList());
}
@@ -2114,8 +2117,9 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
SourceLocation lAngleLoc, rAngleLoc;
SmallVector<IdentifierLocPair, 8> protocolIdents;
SourceLocation diagLoc = Tok.getLocation();
- if (parseObjCTypeParamListOrProtocolRefs(lAngleLoc, protocolIdents,
- rAngleLoc)) {
+ ObjCTypeParamListScope typeParamScope(Actions, getCurScope());
+ if (parseObjCTypeParamListOrProtocolRefs(typeParamScope, lAngleLoc,
+ protocolIdents, rAngleLoc)) {
Diag(diagLoc, diag::err_objc_parameterized_implementation)
<< SourceRange(diagLoc, PrevTokLocation);
} else if (lAngleLoc.isValid()) {
@@ -2614,6 +2618,7 @@ void Parser::StashAwayMethodOrFunctionBodyTokens(Decl *MDecl) {
}
else if (Tok.is(tok::colon)) {
ConsumeToken();
+ // FIXME: This is wrong, due to C++11 braced initialization.
while (Tok.isNot(tok::l_brace)) {
ConsumeAndStoreUntil(tok::l_paren, Toks, /*StopAtSemi=*/false);
ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
@@ -3279,8 +3284,7 @@ ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
AtStrings.push_back(Lit.get());
}
- return Actions.ParseObjCStringLiteral(&AtLocs[0], AtStrings.data(),
- AtStrings.size());
+ return Actions.ParseObjCStringLiteral(AtLocs.data(), AtStrings);
}
/// ParseObjCBooleanLiteral -
@@ -3431,7 +3435,7 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
// Create the ObjCDictionaryLiteral.
return Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
- Elements.data(), Elements.size());
+ Elements);
}
/// objc-encode-expression:
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index 0113a3157c25..078f4c388705 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -19,6 +19,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -32,15 +33,18 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
const OpenMPDirectiveKind F[][3] = {
{OMPD_unknown /*cancellation*/, OMPD_unknown /*point*/,
OMPD_cancellation_point},
+ {OMPD_target, OMPD_unknown /*data*/, OMPD_target_data},
{OMPD_for, OMPD_simd, OMPD_for_simd},
{OMPD_parallel, OMPD_for, OMPD_parallel_for},
{OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
- {OMPD_parallel, OMPD_sections, OMPD_parallel_sections}};
+ {OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
+ {OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd}};
auto Tok = P.getCurToken();
auto DKind =
Tok.isAnnotation()
? OMPD_unknown
: getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
+
bool TokenMatched = false;
for (unsigned i = 0; i < llvm::array_lengthof(F); ++i) {
if (!Tok.isAnnotation() && DKind == OMPD_unknown) {
@@ -50,18 +54,24 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
} else {
TokenMatched = DKind == F[i][0] && DKind != OMPD_unknown;
}
+
if (TokenMatched) {
Tok = P.getPreprocessor().LookAhead(0);
+ auto TokenIsAnnotation = Tok.isAnnotation();
auto SDKind =
- Tok.isAnnotation()
+ TokenIsAnnotation
? OMPD_unknown
: getOpenMPDirectiveKind(P.getPreprocessor().getSpelling(Tok));
- if (!Tok.isAnnotation() && DKind == OMPD_unknown) {
+
+ if (!TokenIsAnnotation && SDKind == OMPD_unknown) {
TokenMatched =
- (i == 0) && !P.getPreprocessor().getSpelling(Tok).compare("point");
+ ((i == 0) &&
+ !P.getPreprocessor().getSpelling(Tok).compare("point")) ||
+ ((i == 1) && !P.getPreprocessor().getSpelling(Tok).compare("data"));
} else {
TokenMatched = SDKind == F[i][1] && SDKind != OMPD_unknown;
}
+
if (TokenMatched) {
P.ConsumeToken();
DKind = F[i][2];
@@ -127,6 +137,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirective() {
case OMPD_teams:
case OMPD_cancellation_point:
case OMPD_cancel:
+ case OMPD_target_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_distribute:
Diag(Tok, diag::err_omp_unexpected_directive)
<< getOpenMPDirectiveName(DKind);
break;
@@ -146,8 +160,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirective() {
/// 'section' | 'single' | 'master' | 'critical' [ '(' <name> ')' ] |
/// 'parallel for' | 'parallel sections' | 'task' | 'taskyield' |
/// 'barrier' | 'taskwait' | 'flush' | 'ordered' | 'atomic' |
-/// 'for simd' | 'parallel for simd' | 'target' | 'teams' | 'taskgroup'
-/// {clause}
+/// 'for simd' | 'parallel for simd' | 'target' | 'target data' |
+/// 'taskgroup' | 'teams' | 'taskloop' | 'taskloop simd' |
+/// 'distribute' {clause}
/// annot_pragma_openmp_end
///
StmtResult
@@ -200,7 +215,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
case OMPD_cancel:
if (!StandAloneAllowed) {
Diag(Tok, diag::err_omp_immediate_directive)
- << getOpenMPDirectiveName(DKind);
+ << getOpenMPDirectiveName(DKind) << 0;
}
HasAssociatedStatement = false;
// Fall through for further analysis.
@@ -221,7 +236,11 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
case OMPD_atomic:
case OMPD_target:
case OMPD_teams:
- case OMPD_taskgroup: {
+ case OMPD_taskgroup:
+ case OMPD_target_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_distribute: {
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
@@ -276,8 +295,19 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
// Consume final annot_pragma_openmp_end.
ConsumeToken();
+ // OpenMP [2.13.8, ordered Construct, Syntax]
+ // If the depend clause is specified, the ordered construct is a stand-alone
+ // directive.
+ if (DKind == OMPD_ordered && FirstClauses[OMPC_depend].getInt()) {
+ if (!StandAloneAllowed) {
+ Diag(Loc, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 1
+ << getOpenMPClauseName(OMPC_depend);
+ }
+ HasAssociatedStatement = false;
+ }
+
StmtResult AssociatedStmt;
- bool CreateDirective = true;
if (HasAssociatedStatement) {
// The body is a block scope like in Lambdas and Blocks.
Sema::CompoundScopeRAII CompoundScope(Actions);
@@ -287,12 +317,10 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) {
AssociatedStmt = ParseStatement();
Actions.ActOnFinishOfCompoundStmt();
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
- CreateDirective = AssociatedStmt.isUsable();
}
- if (CreateDirective)
- Directive = Actions.ActOnOpenMPExecutableDirective(
- DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc,
- EndLoc);
+ Directive = Actions.ActOnOpenMPExecutableDirective(
+ DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc,
+ EndLoc);
// Exit scope.
Actions.EndOpenMPDSABlock(Directive.get());
@@ -385,7 +413,10 @@ bool Parser::ParseOpenMPSimpleVarList(OpenMPDirectiveKind Kind,
/// lastprivate-clause | reduction-clause | proc_bind-clause |
/// schedule-clause | copyin-clause | copyprivate-clause | untied-clause |
/// mergeable-clause | flush-clause | read-clause | write-clause |
-/// update-clause | capture-clause | seq_cst-clause
+/// update-clause | capture-clause | seq_cst-clause | device-clause |
+/// simdlen-clause | threads-clause | simd-clause | num_teams-clause |
+/// thread_limit-clause | priority-clause | grainsize-clause |
+/// nogroup-clause | num_tasks-clause | hint-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
@@ -399,27 +430,49 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
}
switch (CKind) {
- case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
+ case OMPC_ordered:
+ case OMPC_device:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
+ case OMPC_hint:
// OpenMP [2.5, Restrictions]
- // At most one if clause can appear on the directive.
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
// Only one safelen clause can appear on a simd directive.
+ // Only one simdlen clause can appear on a simd directive.
// Only one collapse clause can appear on a simd directive.
+ // OpenMP [2.9.1, target data construct, Restrictions]
+ // At most one device clause can appear on the directive.
// OpenMP [2.11.1, task Construct, Restrictions]
// At most one if clause can appear on the directive.
// At most one final clause can appear on the directive.
+ // OpenMP [teams Construct, Restrictions]
+ // At most one num_teams clause can appear on the directive.
+ // At most one thread_limit clause can appear on the directive.
+ // OpenMP [2.9.1, task Construct, Restrictions]
+ // At most one priority clause can appear on the directive.
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // At most one grainsize clause can appear on the directive.
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // At most one num_tasks clause can appear on the directive.
if (!FirstClause) {
- Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(CKind);
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
- Clause = ParseOpenMPSingleExprClause(CKind);
+ if (CKind == OMPC_ordered && PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
+ Clause = ParseOpenMPClause(CKind);
+ else
+ Clause = ParseOpenMPSingleExprClause(CKind);
break;
case OMPC_default:
case OMPC_proc_bind:
@@ -429,8 +482,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP [2.5, parallel Construct, Restrictions]
// At most one proc_bind clause can appear on the directive.
if (!FirstClause) {
- Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(CKind);
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
@@ -440,14 +493,14 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP [2.7.1, Restrictions, p. 3]
// Only one schedule clause can appear on a loop directive.
if (!FirstClause) {
- Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(CKind);
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
+ case OMPC_if:
Clause = ParseOpenMPSingleExprWithArgClause(CKind);
break;
- case OMPC_ordered:
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
@@ -456,13 +509,16 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_nogroup:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
// Only one nowait clause can appear on a for directive.
if (!FirstClause) {
- Diag(Tok, diag::err_omp_more_one_clause) << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(CKind);
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
@@ -479,7 +535,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_copyprivate:
case OMPC_flush:
case OMPC_depend:
- Clause = ParseOpenMPVarListClause(CKind);
+ case OMPC_map:
+ Clause = ParseOpenMPVarListClause(DKind, CKind);
break;
case OMPC_unknown:
Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
@@ -495,12 +552,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
return ErrorFound ? nullptr : Clause;
}
-/// \brief Parsing of OpenMP clauses with single expressions like 'if',
-/// 'final', 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams' or
-/// 'thread_limit'.
-///
-/// if-clause:
-/// 'if' '(' expression ')'
+/// \brief Parsing of OpenMP clauses with single expressions like 'final',
+/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams',
+/// 'thread_limit', 'priority', 'grainsize', 'num_tasks' or 'hint'.
///
/// final-clause:
/// 'final' '(' expression ')'
@@ -511,9 +565,24 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
/// safelen-clause:
/// 'safelen' '(' expression ')'
///
+/// simdlen-clause:
+/// 'simdlen' '(' expression ')'
+///
/// collapse-clause:
/// 'collapse' '(' expression ')'
///
+/// priority-clause:
+/// 'priority' '(' expression ')'
+///
+/// grainsize-clause:
+/// 'grainsize' '(' expression ')'
+///
+/// num_tasks-clause:
+/// 'num_tasks' '(' expression ')'
+///
+/// hint-clause:
+/// 'hint' '(' expression ')'
+///
OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
SourceLocation Loc = ConsumeToken();
@@ -522,8 +591,10 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
getOpenMPClauseName(Kind)))
return nullptr;
+ SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
ExprResult Val(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
// Parse ')'.
T.consumeClose();
@@ -583,6 +654,15 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
/// read-clause:
/// 'read'
///
+/// threads-clause:
+/// 'threads'
+///
+/// simd-clause:
+/// 'simd'
+///
+/// nogroup-clause:
+/// 'nogroup'
+///
OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
SourceLocation Loc = Tok.getLocation();
ConsumeAnyToken();
@@ -595,11 +675,15 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
/// argument like 'schedule' or 'dist_schedule'.
///
/// schedule-clause:
-/// 'schedule' '(' kind [',' expression ] ')'
+/// 'schedule' '(' [ modifier [ ',' modifier ] ':' ] kind [',' expression ]
+/// ')'
+///
+/// if-clause:
+/// 'if' '(' [ directive-name-modifier ':' ] expression ')'
///
OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
SourceLocation Loc = ConsumeToken();
- SourceLocation CommaLoc;
+ SourceLocation DelimLoc;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
@@ -607,29 +691,86 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
return nullptr;
ExprResult Val;
- unsigned Type = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
- SourceLocation KLoc = Tok.getLocation();
- if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
- Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
+ SmallVector<unsigned, 4> Arg;
+ SmallVector<SourceLocation, 4> KLoc;
+ if (Kind == OMPC_schedule) {
+ enum { Modifier1, Modifier2, ScheduleKind, NumberOfElements };
+ Arg.resize(NumberOfElements);
+ KLoc.resize(NumberOfElements);
+ Arg[Modifier1] = OMPC_SCHEDULE_MODIFIER_unknown;
+ Arg[Modifier2] = OMPC_SCHEDULE_MODIFIER_unknown;
+ Arg[ScheduleKind] = OMPC_SCHEDULE_unknown;
+ auto KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ if (KindModifier > OMPC_SCHEDULE_unknown) {
+ // Parse 'modifier'
+ Arg[Modifier1] = KindModifier;
+ KLoc[Modifier1] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if (Tok.is(tok::comma)) {
+ // Parse ',' 'modifier'
+ ConsumeAnyToken();
+ KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ Arg[Modifier2] = KindModifier > OMPC_SCHEDULE_unknown
+ ? KindModifier
+ : (unsigned)OMPC_SCHEDULE_unknown;
+ KLoc[Modifier2] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ }
+ // Parse ':'
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon) << "schedule modifier";
+ KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
+ }
+ Arg[ScheduleKind] = KindModifier;
+ KLoc[ScheduleKind] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if ((Arg[ScheduleKind] == OMPC_SCHEDULE_static ||
+ Arg[ScheduleKind] == OMPC_SCHEDULE_dynamic ||
+ Arg[ScheduleKind] == OMPC_SCHEDULE_guided) &&
+ Tok.is(tok::comma))
+ DelimLoc = ConsumeAnyToken();
+ } else {
+ assert(Kind == OMPC_if);
+ KLoc.push_back(Tok.getLocation());
+ Arg.push_back(ParseOpenMPDirectiveKind(*this));
+ if (Arg.back() != OMPD_unknown) {
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ DelimLoc = ConsumeToken();
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon)
+ << "directive name modifier";
+ }
+ }
- if (Kind == OMPC_schedule &&
- (Type == OMPC_SCHEDULE_static || Type == OMPC_SCHEDULE_dynamic ||
- Type == OMPC_SCHEDULE_guided) &&
- Tok.is(tok::comma)) {
- CommaLoc = ConsumeAnyToken();
+ bool NeedAnExpression =
+ (Kind == OMPC_schedule && DelimLoc.isValid()) || Kind == OMPC_if;
+ if (NeedAnExpression) {
+ SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
Val = ParseRHSOfBinaryExpression(LHS, prec::Conditional);
- if (Val.isInvalid())
- return nullptr;
+ Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
}
// Parse ')'.
T.consumeClose();
+ if (NeedAnExpression && Val.isInvalid())
+ return nullptr;
+
return Actions.ActOnOpenMPSingleExprWithArgClause(
- Kind, Type, Val.get(), Loc, T.getOpenLocation(), KLoc, CommaLoc,
+ Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc,
T.getCloseLocation());
}
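At the source level, the rewritten parser above accepts a schedule kind prefixed by up to two modifiers and an 'if' clause carrying a directive-name modifier. An illustrative C++ fragment (modifier spellings assumed from OpenMP 4.5; not taken from the patch's tests):

    void scale(float *a, int n, bool heavy) {
      // schedule modifier before ':'; directive-name modifier before ':' in 'if'
      #pragma omp parallel for schedule(monotonic: dynamic, 4) if(parallel: heavy)
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }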
@@ -691,7 +832,7 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
/// shared-clause:
/// 'shared' '(' list ')'
/// linear-clause:
-/// 'linear' '(' list [ ':' linear-step ] ')'
+/// 'linear' '(' linear-list [ ':' linear-step ] ')'
/// aligned-clause:
/// 'aligned' '(' list [ ':' alignment ] ')'
/// reduction-clause:
@@ -701,9 +842,17 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
/// flush-clause:
/// 'flush' '(' list ')'
/// depend-clause:
-/// 'depend' '(' in | out | inout : list ')'
+/// 'depend' '(' in | out | inout : list | source ')'
+/// map-clause:
+/// 'map' '(' [ [ always , ]
+/// to | from | tofrom | alloc | release | delete ':' ] list ')';
///
-OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
+/// For the 'linear' clause, linear-list may have the following forms:
+/// list
+/// modifier(list)
+/// where modifier is 'val' (C) or 'ref', 'val' or 'uval' (C++).
+OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind Kind) {
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
SourceLocation ColonLoc = SourceLocation();
@@ -712,7 +861,14 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
UnqualifiedId ReductionId;
bool InvalidReductionId = false;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
- SourceLocation DepLoc;
+ // OpenMP 4.1 [2.15.3.7, linear Clause]
+ // If no modifier is specified it is assumed to be val.
+ OpenMPLinearClauseKind LinearModifier = OMPC_LINEAR_val;
+ OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
+ OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
+ bool MapTypeModifierSpecified = false;
+ bool UnexpectedId = false;
+ SourceLocation DepLinMapLoc;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
@@ -720,6 +876,9 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
getOpenMPClauseName(Kind)))
return nullptr;
+ bool NeedRParenForLinear = false;
+ BalancedDelimiterTracker LinearT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
// Handle reduction-identifier for reduction clause.
if (Kind == OMPC_reduction) {
ColonProtectionRAIIObject ColonRAII(*this);
@@ -742,25 +901,109 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
ColonProtectionRAIIObject ColonRAII(*this);
DepKind = static_cast<OpenMPDependClauseKind>(getOpenMPSimpleClauseType(
Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
- DepLoc = Tok.getLocation();
+ DepLinMapLoc = Tok.getLocation();
if (DepKind == OMPC_DEPEND_unknown) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
} else {
ConsumeToken();
+ // Special processing for depend(source) clause.
+ if (DKind == OMPD_ordered && DepKind == OMPC_DEPEND_source) {
+ // Parse ')'.
+ T.consumeClose();
+ return Actions.ActOnOpenMPVarListClause(
+ Kind, llvm::None, /*TailExpr=*/nullptr, Loc, LOpen,
+ /*ColonLoc=*/SourceLocation(), Tok.getLocation(),
+ ReductionIdScopeSpec, DeclarationNameInfo(), DepKind,
+ LinearModifier, MapTypeModifier, MapType, DepLinMapLoc);
+ }
}
if (Tok.is(tok::colon)) {
ColonLoc = ConsumeToken();
} else {
- Diag(Tok, diag::warn_pragma_expected_colon) << "dependency type";
+ Diag(Tok, DKind == OMPD_ordered ? diag::warn_pragma_expected_colon_r_paren
+ : diag::warn_pragma_expected_colon)
+ << "dependency type";
+ }
+ } else if (Kind == OMPC_linear) {
+ // Try to parse modifier if any.
+ if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
+ LinearModifier = static_cast<OpenMPLinearClauseKind>(
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)));
+ DepLinMapLoc = ConsumeToken();
+ LinearT.consumeOpen();
+ NeedRParenForLinear = true;
+ }
+ } else if (Kind == OMPC_map) {
+ // Handle map type for map clause.
+ ColonProtectionRAIIObject ColonRAII(*this);
+
+ // The first identifier may be a list item, a map-type, or
+ // a map-type-modifier.
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ DepLinMapLoc = Tok.getLocation();
+ bool ColonExpected = false;
+
+ if (Tok.is(tok::identifier)) {
+ if (PP.LookAhead(0).is(tok::colon)) {
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapType == OMPC_MAP_unknown) {
+ Diag(Tok, diag::err_omp_unknown_map_type);
+ } else if (MapType == OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_map_type_missing);
+ }
+ ConsumeToken();
+ } else if (PP.LookAhead(0).is(tok::comma)) {
+ if (PP.LookAhead(1).is(tok::identifier) &&
+ PP.LookAhead(2).is(tok::colon)) {
+ MapTypeModifier =
+ static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapTypeModifier != OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_unknown_map_type_modifier);
+ MapTypeModifier = OMPC_MAP_unknown;
+ } else {
+ MapTypeModifierSpecified = true;
+ }
+
+ ConsumeToken();
+ ConsumeToken();
+
+ MapType = static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : ""));
+ if (MapType == OMPC_MAP_unknown || MapType == OMPC_MAP_always) {
+ Diag(Tok, diag::err_omp_unknown_map_type);
+ }
+ ConsumeToken();
+ } else {
+ MapType = OMPC_MAP_tofrom;
+ }
+ } else {
+ MapType = OMPC_MAP_tofrom;
+ }
+ } else {
+ UnexpectedId = true;
+ }
+
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+ } else if (ColonExpected) {
+ Diag(Tok, diag::warn_pragma_expected_colon) << "map type";
}
}
SmallVector<Expr *, 5> Vars;
- bool IsComma = ((Kind != OMPC_reduction) && (Kind != OMPC_depend)) ||
- ((Kind == OMPC_reduction) && !InvalidReductionId) ||
- ((Kind == OMPC_depend) && DepKind != OMPC_DEPEND_unknown);
+ bool IsComma =
+ ((Kind != OMPC_reduction) && (Kind != OMPC_depend) &&
+ (Kind != OMPC_map)) ||
+ ((Kind == OMPC_reduction) && !InvalidReductionId) ||
+ ((Kind == OMPC_map) && (UnexpectedId || MapType != OMPC_MAP_unknown) &&
+ (!MapTypeModifierSpecified ||
+ (MapTypeModifierSpecified && MapTypeModifier == OMPC_MAP_always))) ||
+ ((Kind == OMPC_depend) && DepKind != OMPC_DEPEND_unknown);
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
Tok.isNot(tok::annot_pragma_openmp_end))) {
@@ -787,14 +1030,18 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
<< (Kind == OMPC_flush);
}
+ // Parse ')' for linear clause with modifier.
+ if (NeedRParenForLinear)
+ LinearT.consumeClose();
+
// Parse ':' linear-step (or ':' alignment).
Expr *TailExpr = nullptr;
const bool MustHaveTail = MayHaveTail && Tok.is(tok::colon);
if (MustHaveTail) {
ColonLoc = Tok.getLocation();
- ConsumeToken();
- ExprResult Tail =
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ SourceLocation ELoc = ConsumeToken();
+ ExprResult Tail = ParseAssignmentExpression();
+ Tail = Actions.ActOnFinishFullExpr(Tail.get(), ELoc);
if (Tail.isUsable())
TailExpr = Tail.get();
else
@@ -806,14 +1053,16 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPClauseKind Kind) {
T.consumeClose();
if ((Kind == OMPC_depend && DepKind != OMPC_DEPEND_unknown && Vars.empty()) ||
(Kind != OMPC_depend && Vars.empty()) || (MustHaveTail && !TailExpr) ||
- InvalidReductionId)
+ (Kind == OMPC_map && MapType == OMPC_MAP_unknown) ||
+ InvalidReductionId) {
return nullptr;
+ }
return Actions.ActOnOpenMPVarListClause(
Kind, Vars, TailExpr, Loc, LOpen, ColonLoc, Tok.getLocation(),
ReductionIdScopeSpec,
ReductionId.isValid() ? Actions.GetNameFromUnqualifiedId(ReductionId)
: DeclarationNameInfo(),
- DepKind, DepLoc);
+ DepKind, LinearModifier, MapTypeModifier, MapType, DepLinMapLoc);
}
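For orientation, a hypothetical C++ fragment using the clause forms this var-list parser now handles: a map type with the 'always' modifier, a linear modifier in the modifier(list) form, and the stand-alone depend(source) on 'ordered' (OpenMP 4.5-style spellings assumed):

    void update(int *a, int n) {
      #pragma omp target data map(always, tofrom: a[0:n])
      {
        int j = 0;
        #pragma omp simd linear(val(j): 2)
        for (int i = 0; i < n; ++i) {
          a[j] += 1;
          j += 2;
        }
      }
    }
    void pipeline(int n) {
      #pragma omp for ordered(1)
      for (int i = 0; i < n; ++i) {
        #pragma omp ordered depend(source) // ')' directly after 'source'
      }
    }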
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 892d3c6a52ce..4430eb8d03da 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -156,6 +156,10 @@ struct PragmaUnrollHintHandler : public PragmaHandler {
Token &FirstToken) override;
};
+struct PragmaMSRuntimeChecksHandler : public EmptyPragmaHandler {
+ PragmaMSRuntimeChecksHandler() : EmptyPragmaHandler("runtime_checks") {}
+};
+
} // end namespace
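A one-line illustration of what the empty handler above changes: in MSVC-compatibility mode the pragma below is now consumed silently instead of drawing an unknown-pragma warning (spelling per MSVC's documented form; illustrative only):

    #pragma runtime_checks("", off)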
void Parser::initializePragmaHandlers() {
@@ -222,6 +226,8 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSCodeSeg.get());
MSSection.reset(new PragmaMSPragma("section"));
PP.AddPragmaHandler(MSSection.get());
+ MSRuntimeChecks.reset(new PragmaMSRuntimeChecksHandler());
+ PP.AddPragmaHandler(MSRuntimeChecks.get());
}
OptimizeHandler.reset(new PragmaOptimizeHandler(Actions));
@@ -288,6 +294,8 @@ void Parser::resetPragmaHandlers() {
MSCodeSeg.reset();
PP.RemovePragmaHandler(MSSection.get());
MSSection.reset();
+ PP.RemovePragmaHandler(MSRuntimeChecks.get());
+ MSRuntimeChecks.reset();
}
PP.RemovePragmaHandler("STDC", FPContractHandler.get());
@@ -326,6 +334,7 @@ void Parser::HandlePragmaVisibility() {
Actions.ActOnPragmaVisibility(VisType, VisLoc);
}
+namespace {
struct PragmaPackInfo {
Sema::PragmaPackKind Kind;
IdentifierInfo *Name;
@@ -333,6 +342,7 @@ struct PragmaPackInfo {
SourceLocation LParenLoc;
SourceLocation RParenLoc;
};
+} // end anonymous namespace
void Parser::HandlePragmaPack() {
assert(Tok.is(tok::annot_pragma_pack));
@@ -742,13 +752,13 @@ bool Parser::HandlePragmaMSInitSeg(StringRef PragmaName,
return true;
}
+namespace {
struct PragmaLoopHintInfo {
Token PragmaName;
Token Option;
- Token *Toks;
- size_t TokSize;
- PragmaLoopHintInfo() : Toks(nullptr), TokSize(0) {}
+ ArrayRef<Token> Toks;
};
+} // end anonymous namespace
static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
std::string PragmaString;
@@ -780,8 +790,8 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
Hint.OptionLoc = IdentifierLoc::create(
Actions.Context, Info->Option.getLocation(), OptionInfo);
- Token *Toks = Info->Toks;
- size_t TokSize = Info->TokSize;
+ const Token *Toks = Info->Toks.data();
+ size_t TokSize = Info->Toks.size();
// Return a valid hint if pragma unroll or nounroll were specified
// without an argument.
@@ -824,10 +834,9 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
SourceLocation StateLoc = Toks[0].getLocation();
IdentifierInfo *StateInfo = Toks[0].getIdentifierInfo();
if (!StateInfo ||
- ((OptionUnroll ? !StateInfo->isStr("full")
- : !StateInfo->isStr("enable") &&
- !StateInfo->isStr("assume_safety")) &&
- !StateInfo->isStr("disable"))) {
+ (!StateInfo->isStr("enable") && !StateInfo->isStr("disable") &&
+ ((OptionUnroll && !StateInfo->isStr("full")) ||
+ (!OptionUnroll && !StateInfo->isStr("assume_safety"))))) {
Diag(Toks[0].getLocation(), diag::err_pragma_invalid_keyword)
<< /*FullKeyword=*/OptionUnroll;
return false;
@@ -1928,11 +1937,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
EOFTok.setLocation(Tok.getLocation());
ValueList.push_back(EOFTok); // Terminates expression for parsing.
- Token *TokenArray = (Token *)PP.getPreprocessorAllocator().Allocate(
- ValueList.size() * sizeof(Token), llvm::alignOf<Token>());
- std::copy(ValueList.begin(), ValueList.end(), TokenArray);
- Info.Toks = TokenArray;
- Info.TokSize = ValueList.size();
+ Info.Toks = llvm::makeArrayRef(ValueList).copy(PP.getPreprocessorAllocator());
Info.PragmaName = PragmaName;
Info.Option = Option;
@@ -1959,8 +1964,9 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
/// 'assume_safety'
///
/// unroll-hint-keyword:
-/// 'full'
+/// 'enable'
/// 'disable'
+/// 'full'
///
/// loop-hint-value:
/// constant-expression
@@ -1976,10 +1982,13 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
/// only works on inner loops.
///
/// The unroll and unroll_count directives control the concatenation
-/// unroller. Specifying unroll(full) instructs llvm to try to
-/// unroll the loop completely, and unroll(disable) disables unrolling
-/// for the loop. Specifying unroll_count(_value_) instructs llvm to
-/// try to unroll the loop the number of times indicated by the value.
+/// unroller. Specifying unroll(enable) instructs llvm to unroll the loop
+/// completely if the trip count is known at compile time and unroll partially
+/// if the trip count is not known. Specifying unroll(full) is similar to
+/// unroll(enable) but will unroll the loop only if the trip count is known at
+/// compile time. Specifying unroll(disable) disables unrolling for the
+/// loop. Specifying unroll_count(_value_) instructs llvm to try to unroll the
+/// loop the number of times indicated by the value.
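An illustrative use of the three unroll spellings documented above (a hedged sketch, not from the patch's tests):

    void bump(int *a, int n) {
      #pragma clang loop unroll(enable)   // full if trip count known, else partial
      for (int i = 0; i < n; ++i) a[i] += 1;
      #pragma clang loop unroll(full)     // unroll only if trip count is known
      for (int i = 0; i < 8; ++i) a[i] += 2;
      #pragma clang loop unroll_count(4)  // request 4x unrolling
      for (int i = 0; i < n; ++i) a[i] += 3;
    }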
void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
Token &Tok) {
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index b658cef234ec..717bcff0c168 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -259,6 +259,10 @@ Retry:
Res = ParseReturnStatement();
SemiError = "return";
break;
+ case tok::kw_co_return: // C++ Coroutines: co_return statement
+ Res = ParseReturnStatement();
+ SemiError = "co_return";
+ break;
case tok::kw_asm: {
ProhibitAttributes(Attrs);
@@ -354,6 +358,11 @@ Retry:
HandlePragmaMSPragma();
return StmtEmpty();
+ case tok::annot_pragma_ms_vtordisp:
+ ProhibitAttributes(Attrs);
+ HandlePragmaMSVtorDisp();
+ return StmtEmpty();
+
case tok::annot_pragma_loop_hint:
ProhibitAttributes(Attrs);
return ParsePragmaLoopHint(Stmts, OnlyStatement, TrailingElseLoc, Attrs);
@@ -881,6 +890,9 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
case tok::annot_pragma_ms_pragma:
HandlePragmaMSPragma();
break;
+ case tok::annot_pragma_ms_vtordisp:
+ HandlePragmaMSVtorDisp();
+ break;
default:
checkForPragmas = false;
break;
@@ -944,7 +956,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Stmts.push_back(R.get());
}
- while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
if (Tok.is(tok::annot_pragma_unused)) {
HandlePragmaUnused();
continue;
@@ -1442,7 +1455,10 @@ bool Parser::isForRangeIdentifier() {
/// 'for' '(' declaration expr[opt] ';' expr[opt] ')' statement
/// [C++] 'for' '(' for-init-statement condition[opt] ';' expression[opt] ')'
/// [C++] statement
-/// [C++0x] 'for' '(' for-range-declaration : for-range-initializer ) statement
+/// [C++0x] 'for'
+/// 'co_await'[opt] [Coroutines]
+/// '(' for-range-declaration ':' for-range-initializer ')'
+/// statement
/// [OBJC2] 'for' '(' declaration 'in' expr ')' statement
/// [OBJC2] 'for' '(' expr 'in' expr ')' statement
///
@@ -1459,6 +1475,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
assert(Tok.is(tok::kw_for) && "Not a for stmt!");
SourceLocation ForLoc = ConsumeToken(); // eat the 'for'.
+ SourceLocation CoawaitLoc;
+ if (Tok.is(tok::kw_co_await))
+ CoawaitLoc = ConsumeToken();
+
if (Tok.isNot(tok::l_paren)) {
Diag(Tok, diag::err_expected_lparen_after) << "for";
SkipUntil(tok::semi);
@@ -1665,6 +1685,13 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// Match the ')'.
T.consumeClose();
+ // C++ Coroutines [stmt.iter]:
+ // 'co_await' can only be used for a range-based for statement.
+ if (CoawaitLoc.isValid() && !ForRange) {
+ Diag(CoawaitLoc, diag::err_for_co_await_not_range_for);
+ CoawaitLoc = SourceLocation();
+ }
+
// We need to perform most of the semantic analysis for a C++0x for-range
// statement before parsing the body, in order to be able to deduce the type
// of an auto-typed loop variable.
@@ -1672,12 +1699,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
StmtResult ForEachStmt;
if (ForRange) {
- ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, FirstPart.get(),
- ForRangeInit.ColonLoc,
- ForRangeInit.RangeExpr.get(),
- T.getCloseLocation(),
- Sema::BFRK_Build);
-
+ ForRangeStmt = Actions.ActOnCXXForRangeStmt(
+ getCurScope(), ForLoc, CoawaitLoc, FirstPart.get(),
+ ForRangeInit.ColonLoc, ForRangeInit.RangeExpr.get(),
+ T.getCloseLocation(), Sema::BFRK_Build);
// Similarly, we need to do the semantic analysis for a for-range
// statement immediately in order to close over temporaries correctly.
@@ -1799,13 +1824,19 @@ StmtResult Parser::ParseBreakStatement() {
/// ParseReturnStatement
/// jump-statement:
/// 'return' expression[opt] ';'
+/// 'return' braced-init-list ';'
+/// 'co_return' expression[opt] ';'
+/// 'co_return' braced-init-list ';'
StmtResult Parser::ParseReturnStatement() {
- assert(Tok.is(tok::kw_return) && "Not a return stmt!");
+ assert((Tok.is(tok::kw_return) || Tok.is(tok::kw_co_return)) &&
+ "Not a return stmt!");
+ bool IsCoreturn = Tok.is(tok::kw_co_return);
SourceLocation ReturnLoc = ConsumeToken(); // eat the 'return'.
ExprResult R;
if (Tok.isNot(tok::semi)) {
- if (Tok.is(tok::code_completion)) {
+ // FIXME: Code completion for co_return.
+ if (Tok.is(tok::code_completion) && !IsCoreturn) {
Actions.CodeCompleteReturn(getCurScope());
cutOffParsing();
return StmtError();
@@ -1825,6 +1856,8 @@ StmtResult Parser::ParseReturnStatement() {
return StmtError();
}
}
+ if (IsCoreturn)
+ return Actions.ActOnCoreturnStmt(ReturnLoc, R.get());
return Actions.ActOnReturnStmt(ReturnLoc, R.get(), getCurScope());
}
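A sketch of the syntax this change routes to ActOnCoreturnStmt, written with the later C++20 spelling of the coroutine machinery for self-containedness (the patch itself targets the Coroutines TS, whose library pieces differ):

    #include <coroutine>
    struct Task {
      struct promise_type {
        Task get_return_object() { return {}; }
        std::suspend_never initial_suspend() { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_value(int) {} // makes 'co_return <expr>' well-formed
        void unhandled_exception() {}
      };
    };
    Task answer() {
      co_return 42; // parsed by ParseReturnStatement with IsCoreturn set
    }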
@@ -1870,6 +1903,11 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
"parsing function body");
+ // Save and reset current vtordisp stack if we have entered a C++ method body.
+ bool IsCXXMethod =
+ getLangOpts().CPlusPlus && Decl && isa<CXXMethodDecl>(Decl);
+ Sema::VtorDispStackRAII SavedVtorDispStack(Actions, IsCXXMethod);
+
// Do not enter a scope for the brace, as the arguments are in the same scope
// (the function body) as the body itself. Instead, just read the statement
// list and put it into a CompoundStmt for safe keeping.
@@ -1909,6 +1947,11 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
return Actions.ActOnSkippedFunctionBody(Decl);
}
+ // Save and reset current vtordisp stack if we have entered a C++ method body.
+ bool IsCXXMethod =
+ getLangOpts().CPlusPlus && Decl && isa<CXXMethodDecl>(Decl);
+ Sema::VtorDispStackRAII SavedVtorDispStack(Actions, IsCXXMethod);
+
SourceLocation LBraceLoc = Tok.getLocation();
StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc, /*FnTry*/true));
// If we failed to parse the try-catch, we just give the function an empty
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index 8cdae6a74b20..f469a064f896 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -215,12 +215,36 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
// Require an identifier here.
SourceLocation TemplateKWLoc;
UnqualifiedId Id;
- bool Invalid =
- ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/false,
- /*AllowConstructorName=*/false,
- /*ObjectType=*/ParsedType(), TemplateKWLoc, Id);
+ bool Invalid = true;
+ ExprResult Result;
+ if (Tok.is(tok::kw_this)) {
+ Result = ParseCXXThis();
+ Invalid = false;
+ } else {
+ Invalid =
+ ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*ObjectType=*/ParsedType(), TemplateKWLoc, Id);
+ // Perform the lookup.
+ Result = Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id, Info,
+ IsUnevaluatedContext);
+ }
+ // While the next two tokens are 'period' 'identifier', repeatedly parse them
+ // as a field access. We have to avoid consuming assembler directives that look
+ // like '.' 'else'.
+ while (Result.isUsable() && Tok.is(tok::period)) {
+ Token IdTok = PP.LookAhead(0);
+ if (IdTok.isNot(tok::identifier))
+ break;
+ ConsumeToken(); // Consume the period.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ ConsumeToken(); // Consume the identifier.
+ unsigned OffsetUnused;
+ Result = Actions.LookupInlineAsmVarDeclField(
+ Result.get(), Id->getName(), OffsetUnused, Info, Tok.getLocation());
+ }
// Figure out how many tokens we are into LineToks.
unsigned LineIndex = 0;
@@ -254,9 +278,7 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
LineToks.pop_back();
LineToks.pop_back();
- // Perform the lookup.
- return Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id, Info,
- IsUnevaluatedContext);
+ return Result;
}
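Illustrative MS inline asm the rewritten lookup above can now resolve: 'this' via ParseCXXThis and repeated '.' field accesses via LookupInlineAsmVarDeclField (x86 with -fasm-blocks assumed; the exact program is hypothetical):

    struct Pair { int lo, hi; };
    int sum(Pair p) {
      int r;
      __asm {
        mov eax, p.lo   // 'identifier' '.' 'identifier' -> field lookup
        add eax, p.hi
        mov r, eax
      }
      return r;
    }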
/// Turn a sequence of our tokens back into a string that we can hand
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 3a964dd20528..a4dcdb1e2a09 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -126,8 +126,7 @@ Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
Actions.ActOnTemplateParameterList(CurTemplateDepthTracker.getDepth(),
ExportLoc,
TemplateLoc, LAngleLoc,
- TemplateParams.data(),
- TemplateParams.size(), RAngleLoc));
+ TemplateParams, RAngleLoc));
if (!TemplateParams.empty()) {
isSpecialization = false;
@@ -280,8 +279,8 @@ Parser::ParseSingleDeclarationAfterTemplate(
// Recover as if it were an explicit specialization.
TemplateParameterLists FakedParamLists;
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, nullptr,
- 0, LAngleLoc));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
+ LAngleLoc));
return ParseFunctionDefinition(
DeclaratorInfo, ParsedTemplateInfo(&FakedParamLists,
@@ -631,8 +630,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
TemplateParameterList *ParamList =
Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
TemplateLoc, LAngleLoc,
- TemplateParams.data(),
- TemplateParams.size(),
+ TemplateParams,
RAngleLoc);
// Grab a default argument (if available).
@@ -695,7 +693,8 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// end of the template-parameter-list rather than a greater-than
// operator.
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ EnterExpressionEvaluationContext ConstantEvaluated(Actions,
+ Sema::ConstantEvaluated);
DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
if (DefaultArg.isInvalid())
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 9d2a2b931e88..6fbcfd9bd217 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -1089,6 +1089,7 @@ public:
/// [GNU] typeof-specifier
/// [GNU] '_Complex'
/// [C++11] 'auto'
+/// [GNU] '__auto_type'
/// [C++11] 'decltype' ( expression )
/// [C++1y] 'decltype' ( 'auto' )
///
@@ -1262,6 +1263,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_restrict:
case tok::kw__Complex:
case tok::kw___attribute:
+ case tok::kw___auto_type:
return TPResult::True;
// Microsoft
@@ -1515,6 +1517,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_double:
case tok::kw_void:
case tok::kw___unknown_anytype:
+ case tok::kw___auto_type:
return true;
case tok::kw_auto:
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 0574a636337f..b3eeb9d58ff4 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -282,6 +282,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
// Ran out of tokens.
return false;
+ case tok::annot_pragma_openmp:
case tok::annot_pragma_openmp_end:
// Stop before an OpenMP pragma boundary.
case tok::annot_module_begin:
@@ -1067,10 +1068,17 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// Tell the actions module that we have entered a function definition with the
// specified Declarator for the function.
- Decl *Res = TemplateInfo.TemplateParams?
- Actions.ActOnStartOfFunctionTemplateDef(getCurScope(),
- *TemplateInfo.TemplateParams, D)
- : Actions.ActOnStartOfFunctionDef(getCurScope(), D);
+ Sema::SkipBodyInfo SkipBody;
+ Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
+ TemplateInfo.TemplateParams
+ ? *TemplateInfo.TemplateParams
+ : MultiTemplateParamsArg(),
+ &SkipBody);
+
+ if (SkipBody.ShouldSkip) {
+ SkipFunctionBody();
+ return Res;
+ }
// Break out of the ParsingDeclarator context before we parse the body.
D.complete(Res);
@@ -1086,14 +1094,16 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
SourceLocation KWLoc;
if (TryConsumeToken(tok::kw_delete, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_deleted_function
- : diag::ext_deleted_function);
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 1 /* deleted */;
Actions.SetDeclDeleted(Res, KWLoc);
Delete = true;
} else if (TryConsumeToken(tok::kw_default, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_defaulted_function
- : diag::ext_defaulted_function);
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 0 /* defaulted */;
Actions.SetDeclDefaulted(Res, KWLoc);
} else {
llvm_unreachable("function definition after = not 'delete' or 'default'");
@@ -1137,6 +1147,28 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
return ParseFunctionStatementBody(Res, BodyScope);
}
+void Parser::SkipFunctionBody() {
+ if (Tok.is(tok::equal)) {
+ SkipUntil(tok::semi);
+ return;
+ }
+
+ bool IsFunctionTryBlock = Tok.is(tok::kw_try);
+ if (IsFunctionTryBlock)
+ ConsumeToken();
+
+ CachedTokens Skipped;
+ if (ConsumeAndStoreFunctionPrologue(Skipped))
+ SkipMalformedDecl();
+ else {
+ SkipUntil(tok::r_brace);
+ while (IsFunctionTryBlock && Tok.is(tok::kw_catch)) {
+ SkipUntil(tok::l_brace);
+ SkipUntil(tok::r_brace);
+ }
+ }
+}
+
/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
/// types for a function with a K&R-style identifier list for arguments.
void Parser::ParseKNRParamDeclarations(Declarator &D) {
@@ -1493,7 +1525,7 @@ bool Parser::TryKeywordIdentFallback(bool DisableKeyword) {
<< PP.getSpelling(Tok)
<< DisableKeyword;
if (DisableKeyword)
- Tok.getIdentifierInfo()->RevertTokenIDToIdentifier();
+ Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
Tok.setKind(tok::identifier);
return true;
}
@@ -1989,6 +2021,37 @@ Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
return Actions.ConvertDeclToDeclGroup(Import.get());
}
+/// \brief Try to recover the parser when a module annotation appears where it
+/// must not be found.
+/// \returns false if recovery was successful and parsing may continue, or
+/// true if the parser must bail out to the top level and handle the token there.
+bool Parser::parseMisplacedModuleImport() {
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::annot_module_end:
+ // Inform the caller that recovery failed; the error must be handled at an
+ // upper level.
+ return true;
+ case tok::annot_module_begin:
+ Actions.diagnoseMisplacedModuleImport(reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()), Tok.getLocation());
+ return true;
+ case tok::annot_module_include:
+ // Module import found where it should not be, for instance, inside a
+ // namespace. Recover by importing the module.
+ Actions.ActOnModuleInclude(Tok.getLocation(),
+ reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ // If there is another module import, process it.
+ continue;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
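A hypothetical translation unit showing the recovery path above: with modules enabled, the #include below reaches the parser as an annot_module_include token, which is misplaced inside the namespace; ActOnModuleInclude performs the import and parsing continues.

    namespace wrap {
    #include "SomeModularHeader.h"   // hypothetical modular header
    } // namespace wrap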
bool BalancedDelimiterTracker::diagnoseOverflow() {
P.Diag(P.Tok, diag::err_bracket_depth_exceeded)
<< P.getLangOpts().BracketDepth;
@@ -2016,7 +2079,10 @@ bool BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
bool BalancedDelimiterTracker::diagnoseMissingClose() {
assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
- P.Diag(P.Tok, diag::err_expected) << Close;
+ if (P.Tok.is(tok::annot_module_end))
+ P.Diag(P.Tok, diag::err_missing_before_module_end) << Close;
+ else
+ P.Diag(P.Tok, diag::err_expected) << Close;
P.Diag(LOpen, diag::note_matching) << Kind;
// If we're not already at some kind of closing bracket, skip to our closing
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
index be09a363a61f..ae41decc64a3 100644
--- a/lib/Rewrite/Rewriter.cpp
+++ b/lib/Rewrite/Rewriter.cpp
@@ -15,11 +15,9 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Config/llvm-config.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -35,7 +33,7 @@ raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
/// \brief Return true if this character is non-new-line whitespace:
/// ' ', '\\t', '\\f', '\\v', '\\r'.
-static inline bool isWhitespace(unsigned char c) {
+static inline bool isWhitespaceExceptNL(unsigned char c) {
switch (c) {
case ' ':
case '\t':
@@ -80,7 +78,7 @@ void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size,
unsigned lineSize = 0;
posI = curLineStart;
- while (posI != end() && isWhitespace(*posI)) {
+ while (posI != end() && isWhitespaceExceptNL(*posI)) {
++posI;
++lineSize;
}
@@ -256,7 +254,7 @@ bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
StringRef indentSpace;
{
unsigned i = lineOffs;
- while (isWhitespace(MB[i]))
+ while (isWhitespaceExceptNL(MB[i]))
++i;
indentSpace = MB.substr(lineOffs, i-lineOffs);
}
@@ -363,12 +361,12 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
StringRef parentSpace, startSpace;
{
unsigned i = parentLineOffs;
- while (isWhitespace(MB[i]))
+ while (isWhitespaceExceptNL(MB[i]))
++i;
parentSpace = MB.substr(parentLineOffs, i-parentLineOffs);
i = startLineOffs;
- while (isWhitespace(MB[i]))
+ while (isWhitespaceExceptNL(MB[i]))
++i;
startSpace = MB.substr(startLineOffs, i-startLineOffs);
}
@@ -384,7 +382,7 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) {
unsigned offs = Content->SourceLineCache[lineNo];
unsigned i = offs;
- while (isWhitespace(MB[i]))
+ while (isWhitespaceExceptNL(MB[i]))
++i;
StringRef origIndent = MB.substr(offs, i-offs);
if (origIndent.startswith(startSpace))
@@ -409,7 +407,7 @@ public:
TempFilename = Filename;
TempFilename += "-%%%%%%%%";
int FD;
- if (llvm::sys::fs::createUniqueFile(TempFilename.str(), FD, TempFilename)) {
+ if (llvm::sys::fs::createUniqueFile(TempFilename, FD, TempFilename)) {
AllWritten = false;
Diagnostics.Report(clang::diag::err_unable_to_make_temp)
<< TempFilename;
@@ -421,19 +419,15 @@ public:
~AtomicallyMovedFile() {
if (!ok()) return;
- FileStream->flush();
-#ifdef LLVM_ON_WIN32
- // Win32 does not allow rename/removing opened files.
- FileStream.reset();
-#endif
- if (std::error_code ec =
- llvm::sys::fs::rename(TempFilename.str(), Filename)) {
+ // Close (which will also flush) the FileStream.
+ FileStream->close();
+ if (std::error_code ec = llvm::sys::fs::rename(TempFilename, Filename)) {
AllWritten = false;
Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
<< TempFilename << Filename << ec.message();
// If the remove fails, there's not a lot we can do - this is already an
// error.
- llvm::sys::fs::remove(TempFilename.str());
+ llvm::sys::fs::remove(TempFilename);
}
}
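The destructor above relies on close-then-rename because Win32 cannot rename an open file. A minimal portable sketch of the same idiom, using only the standard library (error handling elided; illustrative, not the class above):

    #include <cstdio>
    #include <fstream>
    #include <string>
    bool writeAtomically(const std::string &Path, const std::string &Data) {
      std::string Tmp = Path + ".tmp";
      { std::ofstream Out(Tmp); Out << Data; } // closed (and flushed) here
      return std::rename(Tmp.c_str(), Path.c_str()) == 0; // atomic on POSIX
    }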
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index f2ff48ad69f4..5f74343fbd95 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -34,7 +34,6 @@
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
@@ -99,7 +98,7 @@ namespace {
}
}
};
-}
+} // anonymous namespace
/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
@@ -157,12 +156,45 @@ public:
<< DiagRange << isAlwaysTrue;
}
};
-} // namespace
+} // anonymous namespace
//===----------------------------------------------------------------------===//
// Check for infinite self-recursion in functions
//===----------------------------------------------------------------------===//
+// Returns true if the function is called anywhere within the CFGBlock.
+// For member functions, the additional condition that the call is made
+// through the 'this' pointer is required.
+static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
+ // Process all the Stmt's in this block to find any calls to FD.
+ for (const auto &B : Block) {
+ if (B.getKind() != CFGElement::Statement)
+ continue;
+
+ const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
+ if (!CE || !CE->getCalleeDecl() ||
+ CE->getCalleeDecl()->getCanonicalDecl() != FD)
+ continue;
+
+ // Skip function calls which are qualified with a templated class.
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(CE->getCallee()->IgnoreParenImpCasts())) {
+ if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
+ if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
+ isa<TemplateSpecializationType>(NNS->getAsType())) {
+ continue;
+ }
+ }
+ }
+
+ const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE);
+ if (!MCE || isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
+ !MCE->getMethodDecl()->isVirtual())
+ return true;
+ }
+ return false;
+}
+
// All blocks are in one of three states. States are ordered so that blocks
// can only move to higher states.
enum RecursiveState {
@@ -171,68 +203,56 @@ enum RecursiveState {
FoundPathWithNoRecursiveCall
};
-static void checkForFunctionCall(Sema &S, const FunctionDecl *FD,
- CFGBlock &Block, unsigned ExitID,
- llvm::SmallVectorImpl<RecursiveState> &States,
- RecursiveState State) {
- unsigned ID = Block.getBlockID();
+// Returns true if there exists a path to the exit block and every path
+// to the exit block passes through a call to FD.
+static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
- // A block's state can only move to a higher state.
- if (States[ID] >= State)
- return;
+ const unsigned ExitID = cfg->getExit().getBlockID();
- States[ID] = State;
+ // Mark all nodes as FoundNoPath, then set the status of the entry block.
+ SmallVector<RecursiveState, 16> States(cfg->getNumBlockIDs(), FoundNoPath);
+ States[cfg->getEntry().getBlockID()] = FoundPathWithNoRecursiveCall;
- // Found a path to the exit node without a recursive call.
- if (ID == ExitID && State == FoundPathWithNoRecursiveCall)
- return;
+ // Make the processing stack and seed it with the entry block.
+ SmallVector<CFGBlock *, 16> Stack;
+ Stack.push_back(&cfg->getEntry());
- if (State == FoundPathWithNoRecursiveCall) {
- // If the current state is FoundPathWithNoRecursiveCall, the successors
- // will be either FoundPathWithNoRecursiveCall or FoundPath. To determine
- // which, process all the Stmt's in this block to find any recursive calls.
- for (const auto &B : Block) {
- if (B.getKind() != CFGElement::Statement)
- continue;
+ while (!Stack.empty()) {
+ CFGBlock *CurBlock = Stack.back();
+ Stack.pop_back();
- const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt());
- if (CE && CE->getCalleeDecl() &&
- CE->getCalleeDecl()->getCanonicalDecl() == FD) {
-
- // Skip function calls which are qualified with a templated class.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(
- CE->getCallee()->IgnoreParenImpCasts())) {
- if (NestedNameSpecifier *NNS = DRE->getQualifier()) {
- if (NNS->getKind() == NestedNameSpecifier::TypeSpec &&
- isa<TemplateSpecializationType>(NNS->getAsType())) {
- continue;
- }
- }
- }
+ unsigned ID = CurBlock->getBlockID();
+ RecursiveState CurState = States[ID];
- if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE)) {
- if (isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) ||
- !MCE->getMethodDecl()->isVirtual()) {
- State = FoundPath;
- break;
- }
- } else {
- State = FoundPath;
- break;
+ if (CurState == FoundPathWithNoRecursiveCall) {
+ // Found a path to the exit node without a recursive call.
+ if (ExitID == ID)
+ return false;
+
+ // Only change state if the block has a recursive call.
+ if (hasRecursiveCallInPath(FD, *CurBlock))
+ CurState = FoundPath;
+ }
+
+ // Loop over successor blocks and add them to the Stack if their state
+ // changes.
+ for (auto I = CurBlock->succ_begin(), E = CurBlock->succ_end(); I != E; ++I)
+ if (*I) {
+ unsigned next_ID = (*I)->getBlockID();
+ if (States[next_ID] < CurState) {
+ States[next_ID] = CurState;
+ Stack.push_back(*I);
}
}
- }
}
- for (CFGBlock::succ_iterator I = Block.succ_begin(), E = Block.succ_end();
- I != E; ++I)
- if (*I)
- checkForFunctionCall(S, FD, **I, ExitID, States, State);
+ // Return true if the exit node is reachable, and only reachable through
+ // a recursive call.
+ return States[ExitID] == FoundPath;
}
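// The same monotone worklist scheme, compilable on a toy digraph: states only
// ever increase, so each node is pushed at most twice and the loop must
// terminate. Node 0 is the entry; the highest-numbered node is the exit.
// (Succs and BlockCalls are invented stand-ins for the CFG.)
#include <vector>

enum ToyState { NoPath, PathWithCall, CallFreePath };

static bool everyExitPathCalls(const std::vector<std::vector<int>> &Succs,
                               const std::vector<bool> &BlockCalls) {
  const int Exit = static_cast<int>(Succs.size()) - 1;
  std::vector<ToyState> States(Succs.size(), NoPath);
  States[0] = CallFreePath;
  std::vector<int> Stack{0};
  while (!Stack.empty()) {
    int Cur = Stack.back();
    Stack.pop_back();
    ToyState CurState = States[Cur];
    if (CurState == CallFreePath) {
      if (Cur == Exit)
        return false; // a call-free path reached the exit
      if (BlockCalls[Cur])
        CurState = PathWithCall;
    }
    for (int Next : Succs[Cur])
      if (States[Next] < CurState) { // states move upward only
        States[Next] = CurState;
        Stack.push_back(Next);
      }
  }
  return States[Exit] == PathWithCall; // reachable, and only via the call
}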
static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
- const Stmt *Body,
- AnalysisDeclContext &AC) {
+ const Stmt *Body, AnalysisDeclContext &AC) {
FD = FD->getCanonicalDecl();
// Only run on non-templated functions and non-templated members of
@@ -248,15 +268,8 @@ static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
if (cfg->getExit().pred_empty())
return;
- // Mark all nodes as FoundNoPath, then begin processing the entry block.
- llvm::SmallVector<RecursiveState, 16> states(cfg->getNumBlockIDs(),
- FoundNoPath);
- checkForFunctionCall(S, FD, cfg->getEntry(), cfg->getExit().getBlockID(),
- states, FoundPathWithNoRecursiveCall);
-
- // Check that the exit block is reachable. This prevents triggering the
- // warning on functions that do not terminate.
- if (states[cfg->getExit().getBlockID()] == FoundPath)
+ // Emit diagnostic if a recursive function call is detected for all paths.
+ if (checkForRecursiveFunctionCall(FD, cfg))
S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
}
@@ -492,7 +505,7 @@ struct CheckFallThroughDiagnostics {
}
};
-}
+} // anonymous namespace
/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
@@ -600,7 +613,7 @@ public:
bool doesContainReference() const { return FoundReference; }
};
-}
+} // anonymous namespace
static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
QualType VariableTy = VD->getType().getCanonicalType();
@@ -643,8 +656,7 @@ static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
CharSourceRange::getCharRange(If->getLocStart(),
Then->getLocStart()));
if (Else) {
- SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken(
- Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts());
+ SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getLocEnd());
Fixit2 = FixItHint::CreateRemoval(
SourceRange(ElseKwLoc, Else->getLocEnd()));
}
@@ -836,7 +848,6 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
const UninitUse &Use,
bool alwaysReportSelfInit = false) {
-
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
// Inspect the initializer of the variable declaration which is
// being referenced prior to its initialization. We emit
@@ -1058,7 +1069,7 @@ namespace {
Sema &S;
llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
-}
+} // anonymous namespace
static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
bool PerFunction) {
@@ -1090,8 +1101,7 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
FM.fillReachableBlocks(Cfg);
- for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
- const CFGBlock *B = *I;
+ for (const CFGBlock *B : llvm::reverse(*Cfg)) {
const Stmt *Label = B->getLabel();
if (!Label || !isa<SwitchCase>(Label))
@@ -1170,7 +1180,6 @@ static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
return false;
}
-
static void diagnoseRepeatedUseOfWeak(Sema &S,
const sema::FunctionScopeInfo *CurFn,
const Decl *D,
@@ -1330,20 +1339,16 @@ class UninitValsDiagReporter : public UninitVariablesHandler {
// the same as insertion order. This is needed to obtain a deterministic
// order of diagnostics when calling flushDiagnostics().
typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
- UsesMap *uses;
+ UsesMap uses;
public:
- UninitValsDiagReporter(Sema &S) : S(S), uses(nullptr) {}
+ UninitValsDiagReporter(Sema &S) : S(S) {}
~UninitValsDiagReporter() override { flushDiagnostics(); }
MappedType &getUses(const VarDecl *vd) {
- if (!uses)
- uses = new UsesMap();
-
- MappedType &V = (*uses)[vd];
+ MappedType &V = uses[vd];
if (!V.getPointer())
V.setPointer(new UsesVec());
-
return V;
}
@@ -1357,10 +1362,7 @@ public:
}
void flushDiagnostics() {
- if (!uses)
- return;
-
- for (const auto &P : *uses) {
+ for (const auto &P : uses) {
const VarDecl *vd = P.first;
const MappedType &V = P.second;
@@ -1401,7 +1403,8 @@ public:
// Release the uses vector.
delete vec;
}
- delete uses;
+
+ uses.clear();
}
private:
@@ -1413,7 +1416,7 @@ private:
});
}
};
-}
+} // anonymous namespace
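// Why the reporter uses llvm::MapVector: iteration order must match
// insertion order so flushDiagnostics() emits deterministically. A minimal
// sketch of such an insertion-order map (MapVector does this, but faster;
// InsertionOrderMap is an invented name for illustration):
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

template <typename K, typename V> class InsertionOrderMap {
  std::map<K, std::size_t> Index;       // key -> slot in Entries
  std::vector<std::pair<K, V>> Entries; // preserves insertion order
public:
  V &operator[](const K &Key) {
    auto It = Index.find(Key);
    if (It == Index.end()) {
      Index.emplace(Key, Entries.size());
      Entries.emplace_back(Key, V());
      return Entries.back().second;
    }
    return Entries[It->second].second;
  }
  auto begin() { return Entries.begin(); } // walks in insertion order
  auto end() { return Entries.end(); }
};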
namespace clang {
namespace {
@@ -1431,7 +1434,8 @@ struct SortDiagBySourceLocation {
return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
}
};
-}}
+} // anonymous namespace
+} // namespace clang
//===----------------------------------------------------------------------===//
// -Wthread-safety
@@ -1670,7 +1674,6 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes());
}
-
void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
@@ -1696,10 +1699,10 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
}
void leaveFunction(const FunctionDecl* FD) override {
- CurrentFunction = 0;
+ CurrentFunction = nullptr;
}
};
-} // namespace
+} // anonymous namespace
} // namespace threadSafety
} // namespace clang
@@ -1792,7 +1795,9 @@ public:
Warnings.emplace_back(std::move(Warning), OptionalNotes());
}
};
-}}}
+} // anonymous namespace
+} // namespace consumed
+} // namespace clang
//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
@@ -1958,7 +1963,6 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
flushDiagnostics(S, fscope);
}
-
// Warning: check missing 'return'
if (P.enableCheckFallThrough) {
const CheckFallThroughDiagnostics &CD =
@@ -2038,7 +2042,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
}
- if (S.getLangOpts().ObjCARCWeak &&
+ if (S.getLangOpts().ObjCWeak &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getLocStart()))
diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp
index 34af6cf63c87..3c61c95ad8ec 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/AttributeList.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -108,6 +109,19 @@ void AttributePool::takePool(AttributeList *pool) {
#include "clang/Sema/AttrParsedAttrKinds.inc"
+static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
+ AttributeList::Syntax SyntaxUsed) {
+ // Normalize the attribute name, __foo__ becomes foo. This is only allowable
+ // for GNU attributes.
+ bool IsGNU = SyntaxUsed == AttributeList::AS_GNU ||
+ (SyntaxUsed == AttributeList::AS_CXX11 && ScopeName == "gnu");
+ if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
+ AttrName.endswith("__"))
+ AttrName = AttrName.slice(2, AttrName.size() - 2);
+
+ return AttrName;
+}
+
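// The normalization rule in isolation: a GNU-style spelling __foo__ maps to
// foo, everything else is left alone. A standalone sketch with plain
// std::string (the real code uses StringRef and also gates on syntax/scope):
#include <string>

static std::string normalizeGNUAttrName(std::string Name) {
  if (Name.size() >= 4 && Name.compare(0, 2, "__") == 0 &&
      Name.compare(Name.size() - 2, 2, "__") == 0)
    return Name.substr(2, Name.size() - 4);
  return Name;
}

// Usage: both spellings collapse to one canonical lookup key, e.g.
//   normalizeGNUAttrName("__aligned__") == "aligned"
//   normalizeGNUAttrName("aligned")     == "aligned"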
AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
const IdentifierInfo *ScopeName,
Syntax SyntaxUsed) {
@@ -117,13 +131,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
if (ScopeName)
FullName += ScopeName->getName();
- // Normalize the attribute name, __foo__ becomes foo. This is only allowable
- // for GNU attributes.
- bool IsGNU = SyntaxUsed == AS_GNU || (SyntaxUsed == AS_CXX11 &&
- FullName == "gnu");
- if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
- AttrName.endswith("__"))
- AttrName = AttrName.slice(2, AttrName.size() - 2);
+ AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
// Ensure that in the case of C++11 attributes, we look for '::foo' if it is
// unscoped.
@@ -137,8 +145,9 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
unsigned AttributeList::getAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
- StringRef Name = AttrName->getName();
StringRef Scope = ScopeName ? ScopeName->getName() : "";
+ StringRef Name = normalizeAttrName(AttrName->getName(), Scope,
+ (AttributeList::Syntax)SyntaxUsed);
#include "clang/Sema/AttrSpellingListIndex.inc"
@@ -155,7 +164,7 @@ struct ParsedAttrInfo {
bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
const Decl *);
bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
- bool (*ExistsInTarget)(const llvm::Triple &T);
+ bool (*ExistsInTarget)(const TargetInfo &Target);
unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
};
@@ -195,8 +204,8 @@ bool AttributeList::isTypeAttr() const {
return getInfo(*this).IsType;
}
-bool AttributeList::existsInTarget(const llvm::Triple &T) const {
- return getInfo(*this).ExistsInTarget(T);
+bool AttributeList::existsInTarget(const TargetInfo &Target) const {
+ return getInfo(*this).ExistsInTarget(Target);
}
bool AttributeList::isKnownToGCC() const {
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 4a772d8972a9..8aa005102fe4 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -21,6 +21,7 @@ add_clang_library(clangSema
SemaChecking.cpp
SemaCodeComplete.cpp
SemaConsumer.cpp
+ SemaCoroutine.cpp
SemaCUDA.cpp
SemaDecl.cpp
SemaDeclAttr.cpp
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index 4adbb2b6af2a..d664d8704003 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -1,4 +1,4 @@
-//===--- SemaDeclSpec.cpp - Declaration Specifier Semantic Analysis -------===//
+//===--- DeclSpec.cpp - Declaration Specifier Semantic Analysis -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,29 +15,19 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Parse/ParseDiagnostic.h" // FIXME: remove this back-dependency!
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/ErrorHandling.h"
#include <cstring>
using namespace clang;
-static DiagnosticBuilder Diag(DiagnosticsEngine &D, SourceLocation Loc,
- unsigned DiagID) {
- return D.Report(Loc, DiagID);
-}
-
-
void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
Kind = IK_TemplateId;
@@ -177,7 +167,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
SourceLocation MutableLoc,
ExceptionSpecificationType
ESpecType,
- SourceLocation ESpecLoc,
+ SourceRange ESpecRange,
ParsedType *Exceptions,
SourceRange *ExceptionRanges,
unsigned NumExceptions,
@@ -212,7 +202,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.RestrictQualifierLoc = RestrictQualifierLoc.getRawEncoding();
I.Fun.MutableLoc = MutableLoc.getRawEncoding();
I.Fun.ExceptionSpecType = ESpecType;
- I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding();
+ I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin().getRawEncoding();
+ I.Fun.ExceptionSpecLocEnd = ESpecRange.getEnd().getRawEncoding();
I.Fun.NumExceptions = 0;
I.Fun.Exceptions = nullptr;
I.Fun.NoexceptExpr = nullptr;
@@ -287,6 +278,7 @@ bool Declarator::isDeclarationOfFunction() const {
switch (DS.getTypeSpecType()) {
case TST_atomic:
case TST_auto:
+ case TST_auto_type:
case TST_bool:
case TST_char:
case TST_char16:
@@ -350,6 +342,11 @@ bool Declarator::isStaticMember() {
getName().OperatorFunctionId.Operator));
}
+bool Declarator::isCtorOrDtor() {
+ return (getName().getKind() == UnqualifiedId::IK_ConstructorName) ||
+ (getName().getKind() == UnqualifiedId::IK_DestructorName);
+}
+
bool DeclSpec::hasTagDefinition() const {
if (!TypeSpecOwned)
return false;
@@ -470,6 +467,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_typeofType:
case DeclSpec::TST_typeofExpr: return "typeof";
case DeclSpec::TST_auto: return "auto";
+ case DeclSpec::TST_auto_type: return "__auto_type";
case DeclSpec::TST_decltype: return "(decltype)";
case DeclSpec::TST_decltype_auto: return "decltype(auto)";
case DeclSpec::TST_underlyingType: return "__underlying_type";
@@ -508,12 +506,12 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
case SCS_extern:
case SCS_private_extern:
case SCS_static:
- if (S.getLangOpts().OpenCLVersion < 120) {
- DiagID = diag::err_opencl_unknown_type_specifier;
- PrevSpec = getSpecifierName(SC);
- return true;
- }
- break;
+ if (S.getLangOpts().OpenCLVersion < 120) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
+ PrevSpec = getSpecifierName(SC);
+ return true;
+ }
+ break;
case SCS_auto:
case SCS_register:
DiagID = diag::err_opencl_unknown_type_specifier;
@@ -925,7 +923,7 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
/// "_Imaginary" (lacking an FP type). This returns a diagnostic to issue or
/// diag::NUM_DIAGNOSTICS if there is no error. After calling this method,
/// DeclSpec is guaranteed self-consistent, even if an error occurred.
-void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPolicy &Policy) {
+void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Before possibly changing their values, save specs as written.
SaveWrittenBuiltinSpecs();
@@ -946,10 +944,10 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
FixItHint Hints[NumLocs];
SourceLocation FirstLoc;
for (unsigned I = 0; I != NumLocs; ++I) {
- if (!ExtraLocs[I].isInvalid()) {
+ if (ExtraLocs[I].isValid()) {
if (FirstLoc.isInvalid() ||
- PP.getSourceManager().isBeforeInTranslationUnit(ExtraLocs[I],
- FirstLoc))
+ S.getSourceManager().isBeforeInTranslationUnit(ExtraLocs[I],
+ FirstLoc))
FirstLoc = ExtraLocs[I];
Hints[I] = FixItHint::CreateRemoval(ExtraLocs[I]);
}
@@ -959,7 +957,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
TypeSpecSign = TSS_unspecified;
TypeAltiVecVector = TypeAltiVecPixel = TypeAltiVecBool = false;
TypeQualifiers = 0;
- Diag(D, TSTLoc, diag::err_decltype_auto_cannot_be_combined)
+ S.Diag(TSTLoc, diag::err_decltype_auto_cannot_be_combined)
<< Hints[0] << Hints[1] << Hints[2] << Hints[3]
<< Hints[4] << Hints[5] << Hints[6] << Hints[7];
}
@@ -969,14 +967,14 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
if (TypeAltiVecBool) {
// Sign specifiers are not allowed with vector bool. (PIM 2.1)
if (TypeSpecSign != TSS_unspecified) {
- Diag(D, TSSLoc, diag::err_invalid_vector_bool_decl_spec)
+ S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSS)TypeSpecSign);
}
// Only char/int are valid with vector bool. (PIM 2.1)
if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
(TypeSpecType != TST_int)) || TypeAltiVecPixel) {
- Diag(D, TSTLoc, diag::err_invalid_vector_bool_decl_spec)
+ S.Diag(TSTLoc, diag::err_invalid_vector_bool_decl_spec)
<< (TypeAltiVecPixel ? "__pixel" :
getSpecifierName((TST)TypeSpecType, Policy));
}
@@ -984,15 +982,15 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
// Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
(TypeSpecWidth != TSW_longlong))
- Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec)
+ S.Diag(TSWLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSW)TypeSpecWidth);
// vector bool long long requires VSX support or ZVector.
if ((TypeSpecWidth == TSW_longlong) &&
- (!PP.getTargetInfo().hasFeature("vsx")) &&
- (!PP.getTargetInfo().hasFeature("power8-vector")) &&
- !PP.getLangOpts().ZVector)
- Diag(D, TSTLoc, diag::err_invalid_vector_long_long_decl_spec);
+ (!S.Context.getTargetInfo().hasFeature("vsx")) &&
+ (!S.Context.getTargetInfo().hasFeature("power8-vector")) &&
+ !S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_long_long_decl_spec);
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
@@ -1002,20 +1000,20 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
// vector long double and vector long long double are never allowed.
// vector double is OK for Power7 and later, and ZVector.
if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
- Diag(D, TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
- else if (!PP.getTargetInfo().hasFeature("vsx") &&
- !PP.getLangOpts().ZVector)
- Diag(D, TSTLoc, diag::err_invalid_vector_double_decl_spec);
+ S.Diag(TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
+ else if (!S.Context.getTargetInfo().hasFeature("vsx") &&
+ !S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_double_decl_spec);
} else if (TypeSpecType == TST_float) {
// vector float is unsupported for ZVector.
- if (PP.getLangOpts().ZVector)
- Diag(D, TSTLoc, diag::err_invalid_vector_float_decl_spec);
+ if (S.getLangOpts().ZVector)
+ S.Diag(TSTLoc, diag::err_invalid_vector_float_decl_spec);
} else if (TypeSpecWidth == TSW_long) {
// vector long is unsupported for ZVector and deprecated for AltiVec.
- if (PP.getLangOpts().ZVector)
- Diag(D, TSWLoc, diag::err_invalid_vector_long_decl_spec);
+ if (S.getLangOpts().ZVector)
+ S.Diag(TSWLoc, diag::err_invalid_vector_long_decl_spec);
else
- Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination)
+ S.Diag(TSWLoc, diag::warn_vector_long_decl_spec_combination)
<< getSpecifierName((TST)TypeSpecType, Policy);
}
@@ -1034,7 +1032,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
- Diag(D, TSSLoc, diag::err_invalid_sign_spec)
+ S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
TypeSpecSign = TSS_unspecified;
@@ -1049,9 +1047,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // short -> short int, long long -> long long int.
else if (TypeSpecType != TST_int) {
- Diag(D, TSWLoc,
- TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec
- : diag::err_invalid_longlong_spec)
+ S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
TypeSpecOwned = false;
@@ -1061,7 +1057,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // long -> long int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
- Diag(D, TSWLoc, diag::err_invalid_long_spec)
+ S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
TypeSpecOwned = false;
@@ -1073,17 +1069,17 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
// disallow their use. Need information about the backend.
if (TypeSpecComplex != TSC_unspecified) {
if (TypeSpecType == TST_unspecified) {
- Diag(D, TSCLoc, diag::ext_plain_complex)
+ S.Diag(TSCLoc, diag::ext_plain_complex)
<< FixItHint::CreateInsertion(
- PP.getLocForEndOfToken(getTypeSpecComplexLoc()),
+ S.getLocForEndOfToken(getTypeSpecComplexLoc()),
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
// Note that this intentionally doesn't include _Complex _Bool.
- if (!PP.getLangOpts().CPlusPlus)
- Diag(D, TSTLoc, diag::ext_integer_complex);
+ if (!S.getLangOpts().CPlusPlus)
+ S.Diag(TSTLoc, diag::ext_integer_complex);
} else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
- Diag(D, TSCLoc, diag::err_invalid_complex_spec)
+ S.Diag(TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecComplex = TSC_unspecified;
}
@@ -1100,14 +1096,14 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
case SCS_static:
break;
default:
- if (PP.getSourceManager().isBeforeInTranslationUnit(
+ if (S.getSourceManager().isBeforeInTranslationUnit(
getThreadStorageClassSpecLoc(), getStorageClassSpecLoc()))
- Diag(D, getStorageClassSpecLoc(),
+ S.Diag(getStorageClassSpecLoc(),
diag::err_invalid_decl_spec_combination)
<< DeclSpec::getSpecifierName(getThreadStorageClassSpec())
<< SourceRange(getThreadStorageClassSpecLoc());
else
- Diag(D, getThreadStorageClassSpecLoc(),
+ S.Diag(getThreadStorageClassSpecLoc(),
diag::err_invalid_decl_spec_combination)
<< DeclSpec::getSpecifierName(getStorageClassSpec())
<< SourceRange(getStorageClassSpecLoc());
@@ -1121,7 +1117,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
// the type specifier is not optional, but we got 'auto' as a storage
// class specifier, then assume this is an attempt to use C++0x's 'auto'
// type specifier.
- if (PP.getLangOpts().CPlusPlus &&
+ if (S.getLangOpts().CPlusPlus &&
TypeSpecType == TST_unspecified && StorageClassSpec == SCS_auto) {
TypeSpecType = TST_auto;
StorageClassSpec = SCS_unspecified;
@@ -1130,17 +1126,17 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
}
// Diagnose if we've recovered from an ill-formed 'auto' storage class
// specifier in a pre-C++11 dialect of C++.
- if (!PP.getLangOpts().CPlusPlus11 && TypeSpecType == TST_auto)
- Diag(D, TSTLoc, diag::ext_auto_type_specifier);
- if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().CPlusPlus11 &&
+ if (!S.getLangOpts().CPlusPlus11 && TypeSpecType == TST_auto)
+ S.Diag(TSTLoc, diag::ext_auto_type_specifier);
+ if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11 &&
StorageClassSpec == SCS_auto)
- Diag(D, StorageClassSpecLoc, diag::warn_auto_storage_class)
+ S.Diag(StorageClassSpecLoc, diag::warn_auto_storage_class)
<< FixItHint::CreateRemoval(StorageClassSpecLoc);
if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
- Diag(D, TSTLoc, diag::warn_cxx98_compat_unicode_type)
+ S.Diag(TSTLoc, diag::warn_cxx98_compat_unicode_type)
<< (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
if (Constexpr_specified)
- Diag(D, ConstexprLoc, diag::warn_cxx98_compat_constexpr);
+ S.Diag(ConstexprLoc, diag::warn_cxx98_compat_constexpr);
// C++ [class.friend]p6:
// No storage-class-specifier shall appear in the decl-specifier-seq
@@ -1164,7 +1160,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
ThreadHint = FixItHint::CreateRemoval(SCLoc);
}
- Diag(D, SCLoc, diag::err_friend_decl_spec)
+ S.Diag(SCLoc, diag::err_friend_decl_spec)
<< SpecName << StorageHint << ThreadHint;
ClearStorageClassSpecs();
@@ -1190,7 +1186,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli
}
FixItHint Hint = FixItHint::CreateRemoval(SCLoc);
- Diag(D, SCLoc, diag::err_friend_decl_spec)
+ S.Diag(SCLoc, diag::err_friend_decl_spec)
<< Keyword << Hint;
FS_virtual_specified = FS_explicit_specified = false;
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 775fe85740d4..c394d24d5fdc 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -147,9 +147,12 @@ static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
if (VD->hasLocalStorage()) {
switch (VD->getType().isDestructedType()) {
case QualType::DK_objc_strong_lifetime:
+ return ScopePair(diag::note_protected_by_objc_strong_init,
+ diag::note_exits_objc_strong);
+
case QualType::DK_objc_weak_lifetime:
- return ScopePair(diag::note_protected_by_objc_ownership,
- diag::note_exits_objc_ownership);
+ return ScopePair(diag::note_protected_by_objc_weak_init,
+ diag::note_exits_objc_weak);
case QualType::DK_cxx_destructor:
OutDiag = diag::note_exits_dtor;
diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp
index 9ecb5a7fefbc..0f93421ac21b 100644
--- a/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -107,15 +107,11 @@ void MultiplexExternalSemaSource::completeVisibleDeclsMap(const DeclContext *DC)
Sources[i]->completeVisibleDeclsMap(DC);
}
-ExternalLoadResult MultiplexExternalSemaSource::
-FindExternalLexicalDecls(const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
- SmallVectorImpl<Decl*> &Result) {
+void MultiplexExternalSemaSource::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {
for(size_t i = 0; i < Sources.size(); ++i)
- // FIXME: The semantics of the return result is unclear to me...
- Sources[i]->FindExternalLexicalDecls(DC, isKindWeWant, Result);
-
- return ELR_Success;
+ Sources[i]->FindExternalLexicalDecls(DC, IsKindWeWant, Result);
}
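// The switch to llvm::function_ref lets callers pass capturing lambdas with
// no allocation. A minimal sketch of the non-owning type-erasure trick behind
// it (FnRef is an invented stand-in; like function_ref, the referenced
// callable must outlive the call):
#include <type_traits>
#include <vector>

template <typename Fn> class FnRef;
template <typename Ret, typename... Args> class FnRef<Ret(Args...)> {
  void *Callable;                 // address of the caller's callable
  Ret (*Invoke)(void *, Args...); // captureless trampoline restoring its type
public:
  template <typename C>
  FnRef(C &&Obj)
      : Callable((void *)&Obj), Invoke([](void *P, Args... As) -> Ret {
          return (*static_cast<std::remove_reference_t<C> *>(P))(As...);
        }) {}
  Ret operator()(Args... As) const { return Invoke(Callable, As...); }
};

// Usage: filter without heap-allocating a std::function, e.g.
//   keepIf(In, Out, [&](int V) { return V > Threshold; });
static void keepIf(const std::vector<int> &In, std::vector<int> &Out,
                   FnRef<bool(int)> Pred) {
  for (int V : In)
    if (Pred(V))
      Out.push_back(V);
}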
void MultiplexExternalSemaSource::FindFileRegionDecls(FileID File,
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index f80eadf18d56..cbd7ef7abb41 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -33,11 +33,14 @@ void FunctionScopeInfo::Clear() {
ObjCWarnForNoDesignatedInitChain = false;
ObjCIsSecondaryInit = false;
ObjCWarnForNoInitDelegation = false;
+ FirstReturnLoc = SourceLocation();
FirstCXXTryLoc = SourceLocation();
FirstSEHTryLoc = SourceLocation();
SwitchStack.clear();
Returns.clear();
+ CoroutinePromise = nullptr;
+ CoroutineStmts.clear();
ErrorTrap.reset();
PossiblyUnreachableDiags.clear();
WeakObjectUses.clear();
@@ -234,5 +237,4 @@ void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD,
FunctionScopeInfo::~FunctionScopeInfo() { }
BlockScopeInfo::~BlockScopeInfo() { }
-LambdaScopeInfo::~LambdaScopeInfo() { }
CapturedRegionScopeInfo::~CapturedRegionScopeInfo() { }
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 18d7e9dcf548..39b8cc9f0c63 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -40,7 +40,6 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/Support/CrashRecoveryContext.h"
using namespace clang;
using namespace sema;
@@ -121,8 +120,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
FieldCollector.reset(new CXXFieldCollector());
// Tell diagnostics how to render things from the AST library.
- PP.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
- &Context);
+ Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
ExprEvalContexts.emplace_back(PotentiallyEvaluated, 0, false, nullptr, false);
@@ -139,10 +137,6 @@ void Sema::addImplicitTypedef(StringRef Name, QualType T) {
}
void Sema::Initialize() {
- // Tell the AST consumer about this Sema object.
- Consumer.Initialize(Context);
-
- // FIXME: Isn't this redundant with the initialization above?
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
SC->InitializeSema(*this);
@@ -155,6 +149,9 @@ void Sema::Initialize() {
// will not be able to merge any duplicate __va_list_tag decls correctly.
VAListTagName = PP.getIdentifierInfo("__va_list_tag");
+ if (!TUScope)
+ return;
+
// Initialize predefined 128-bit integer types, if needed.
if (Context.getTargetInfo().hasInt128Type()) {
// If either of the 128-bit integer types are unavailable to name lookup,
@@ -170,7 +167,7 @@ void Sema::Initialize() {
// Initialize predefined Objective-C types:
- if (PP.getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC1) {
// If 'SEL' does not yet refer to any declarations, make it refer to the
// predefined 'SEL'.
DeclarationName SEL = &Context.Idents.get("SEL");
@@ -195,8 +192,8 @@ void Sema::Initialize() {
}
// Initialize Microsoft "predefined C++ types".
- if (PP.getLangOpts().MSVCCompat) {
- if (PP.getLangOpts().CPlusPlus &&
+ if (getLangOpts().MSVCCompat) {
+ if (getLangOpts().CPlusPlus &&
IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
TUScope);
@@ -205,7 +202,7 @@ void Sema::Initialize() {
}
// Initialize predefined OpenCL types.
- if (PP.getLangOpts().OpenCL) {
+ if (getLangOpts().OpenCL) {
addImplicitTypedef("image1d_t", Context.OCLImage1dTy);
addImplicitTypedef("image1d_array_t", Context.OCLImage1dArrayTy);
addImplicitTypedef("image1d_buffer_t", Context.OCLImage1dBufferTy);
@@ -215,6 +212,18 @@ void Sema::Initialize() {
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
if (getLangOpts().OpenCLVersion >= 200) {
+ addImplicitTypedef("image2d_depth_t", Context.OCLImage2dDepthTy);
+ addImplicitTypedef("image2d_array_depth_t",
+ Context.OCLImage2dArrayDepthTy);
+ addImplicitTypedef("image2d_msaa_t", Context.OCLImage2dMSAATy);
+ addImplicitTypedef("image2d_array_msaa_t", Context.OCLImage2dArrayMSAATy);
+ addImplicitTypedef("image2d_msaa_depth_t", Context.OCLImage2dMSAADepthTy);
+ addImplicitTypedef("image2d_array_msaa_depth_t",
+ Context.OCLImage2dArrayMSAADepthTy);
+ addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
+ addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ addImplicitTypedef("ndrange_t", Context.OCLNDRangeTy);
+ addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
addImplicitTypedef("atomic_uint",
Context.getAtomicType(Context.UnsignedIntTy));
@@ -239,6 +248,12 @@ void Sema::Initialize() {
}
}
+ if (Context.getTargetInfo().hasBuiltinMSVaList()) {
+ DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
+ if (IdResolver.begin(MSVaList) == IdResolver.end())
+ PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
+ }
+
DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
@@ -280,7 +295,7 @@ Sema::~Sema() {
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
- StringRef msg) {
+ UnavailableAttr::ImplicitReason reason) {
// If we're not in a function, it's an error.
FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
if (!fn) return false;
@@ -296,7 +311,7 @@ bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
// If the function is already unavailable, it's not an error.
if (fn->hasAttr<UnavailableAttr>()) return true;
- fn->addAttr(UnavailableAttr::CreateImplicit(Context, msg, loc));
+ fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
return true;
}
@@ -334,6 +349,20 @@ void Sema::PrintStats() const {
AnalysisWarnings.PrintStats();
}
+void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
+ QualType SrcType,
+ SourceLocation Loc) {
+ Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
+ if (!ExprNullability || *ExprNullability != NullabilityKind::Nullable)
+ return;
+
+ Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
+ if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
+ return;
+
+ Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
+}
+
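// The extracted check in miniature: warn only when a value whose type is
// known-nullable flows into a type annotated non-null; an unannotated side
// means we do not know enough to complain. A sketch with a toy three-valued
// nullability instead of Clang's Optional<NullabilityKind>:
#include <cstdio>

enum class Nullability { Unspecified, Nullable, NonNull };

static void diagnoseNullableToNonnull(Nullability Src, Nullability Dst) {
  // Both sides must be explicitly annotated for the warning to fire.
  if (Src != Nullability::Nullable || Dst != Nullability::NonNull)
    return;
  std::puts("warning: implicit conversion from nullable to non-null");
}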
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
@@ -357,18 +386,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
#endif
- // Check whether we're implicitly casting from a nullable type to a nonnull
- // type.
- if (auto exprNullability = E->getType()->getNullability(Context)) {
- if (*exprNullability == NullabilityKind::Nullable) {
- if (auto typeNullability = Ty->getNullability(Context)) {
- if (*typeNullability == NullabilityKind::NonNull) {
- Diag(E->getLocStart(), diag::warn_nullability_lost)
- << E->getType() << Ty;
- }
- }
- }
- }
+ diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getLocStart());
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
@@ -689,6 +707,9 @@ void Sema::ActOnEndOfTranslationUnit() {
assert(DelayedDefaultedMemberExceptionSpecs.empty());
assert(DelayedExceptionSpecChecks.empty());
+ // All dllexport classes should have been processed already.
+ assert(DelayedDllExportClasses.empty());
+
// Remove file scoped decls that turned out to be used.
UnusedFileScopedDecls.erase(
std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
@@ -708,8 +729,15 @@ void Sema::ActOnEndOfTranslationUnit() {
if (WeakID.second.getUsed())
continue;
- Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
- << WeakID.first;
+ Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
+ LookupOrdinaryName);
+ if (PrevDecl != nullptr &&
+ !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
+ Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'weak'" << ExpectedVariableOrFunction;
+ else
+ Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
+ << WeakID.first;
}
if (LangOpts.CPlusPlus11 &&
@@ -1443,7 +1471,7 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
// arguments and that it returns something of a reasonable type,
// so we can emit a fixit and carry on pretending that E was
// actually a CallExpr.
- SourceLocation ParenInsertionLoc = PP.getLocForEndOfToken(Range.getEnd());
+ SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
Diag(Loc, PD)
<< /*zero-arg*/ 1 << Range
<< (IsCallableWithAppend(E.get())
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index 0e973cc5ebaa..e9772bc52049 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -182,15 +182,20 @@ struct AccessTarget : public AccessedEntity {
class SavedInstanceContext {
public:
+ SavedInstanceContext(SavedInstanceContext &&S)
+ : Target(S.Target), Has(S.Has) {
+ S.Target = nullptr;
+ }
~SavedInstanceContext() {
- Target.HasInstanceContext = Has;
+ if (Target)
+ Target->HasInstanceContext = Has;
}
private:
friend struct AccessTarget;
explicit SavedInstanceContext(AccessTarget &Target)
- : Target(Target), Has(Target.HasInstanceContext) {}
- AccessTarget &Target;
+ : Target(&Target), Has(Target.HasInstanceContext) {}
+ AccessTarget *Target;
bool Has;
};
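// The pattern above in isolation: an RAII saver that restores state on
// destruction, made movable by switching from a reference to a pointer and
// nulling the source, so only the surviving copy restores. A generic sketch
// (ScopedRestore is an invented name):
#include <utility>

template <typename T> class ScopedRestore {
  T *Slot; // null after being moved from
  T Saved;
public:
  explicit ScopedRestore(T &S) : Slot(&S), Saved(S) {}
  ScopedRestore(ScopedRestore &&O) : Slot(O.Slot), Saved(std::move(O.Saved)) {
    O.Slot = nullptr; // the moved-from object must not restore
  }
  ScopedRestore(const ScopedRestore &) = delete;
  ~ScopedRestore() {
    if (Slot)
      *Slot = Saved;
  }
};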
@@ -1766,7 +1771,7 @@ Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
case AR_inaccessible: return Sema::AR_inaccessible;
case AR_dependent: return Sema::AR_dependent;
}
- llvm_unreachable("falling off end");
+ llvm_unreachable("invalid access result");
}
Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index 5973500826e3..61dfdd3f7206 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -60,8 +60,101 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
return CFT_Host;
}
+// * CUDA Call preference table
+//
+// F - from,
+// T - to
+// Ph - preference in host mode
+// Pd - preference in device mode
+// H - handled in (x)
+// Preferences: b-best, f-fallback, l-last resort, n-never.
+//
+// | F | T | Ph | Pd | H |
+// |----+----+----+----+-----+
+// | d | d | b | b | (b) |
+// | d | g | n | n | (a) |
+// | d | h | l | l | (e) |
+// | d | hd | f | f | (c) |
+// | g | d | b | b | (b) |
+// | g | g | n | n | (a) |
+// | g | h | l | l | (e) |
+// | g | hd | f | f | (c) |
+// | h | d | l | l | (e) |
+// | h | g | b | b | (b) |
+// | h | h | b | b | (b) |
+// | h | hd | f | f | (c) |
+// | hd | d | l | f | (d) |
+// | hd | g | f | n |(d/a)|
+// | hd | h | f | l | (d) |
+// | hd | hd | b | b | (b) |
+
+Sema::CUDAFunctionPreference
+Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
+ const FunctionDecl *Callee) {
+ assert(getLangOpts().CUDATargetOverloads &&
+ "Should not be called w/o enabled target overloads.");
+
+ assert(Callee && "Callee must be valid.");
+ CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
+ CUDAFunctionTarget CallerTarget =
+ (Caller != nullptr) ? IdentifyCUDATarget(Caller) : Sema::CFT_Host;
+
+ // If one of the targets is invalid, the check always fails, no matter what
+ // the other target is.
+ if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
+ return CFP_Never;
+
+ // (a) Can't call global from some contexts until we support CUDA's
+ // dynamic parallelism.
+ if (CalleeTarget == CFT_Global &&
+ (CallerTarget == CFT_Global || CallerTarget == CFT_Device ||
+ (CallerTarget == CFT_HostDevice && getLangOpts().CUDAIsDevice)))
+ return CFP_Never;
+
+ // (b) Best case scenarios
+ if (CalleeTarget == CallerTarget ||
+ (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
+ (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
+ return CFP_Best;
+
+ // (c) Calling HostDevice is OK as a fallback that works for everyone.
+ if (CalleeTarget == CFT_HostDevice)
+ return CFP_Fallback;
+
+  // Figure out what should be returned in 'last resort' cases. Normally
+ // those would not be allowed, but we'll consider them if
+ // CUDADisableTargetCallChecks is true.
+ CUDAFunctionPreference QuestionableResult =
+ getLangOpts().CUDADisableTargetCallChecks ? CFP_LastResort : CFP_Never;
+
+ // (d) HostDevice behavior depends on compilation mode.
+ if (CallerTarget == CFT_HostDevice) {
+ // Calling a function that matches compilation mode is OK.
+ // Calling a function from the other side is frowned upon.
+ if (getLangOpts().CUDAIsDevice)
+ return CalleeTarget == CFT_Device ? CFP_Fallback : QuestionableResult;
+ else
+ return (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)
+ ? CFP_Fallback
+ : QuestionableResult;
+ }
+
+ // (e) Calling across device/host boundary is not something you should do.
+ if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
+ (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
+ (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
+ return QuestionableResult;
+
+ llvm_unreachable("All cases should've been handled by now.");
+}
+
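// The comment table restated as checkable data for host mode (the Ph
// column); a sketch that could back a unit test of IdentifyCUDAPreference.
// Enum names are shortened stand-ins, and the LastResort entries apply only
// when CUDADisableTargetCallChecks permits them.
#include <cassert>

enum ToyTarget { D, G, H, HD }; // device, global, host, host-device
enum ToyPref { Never, LastResort, Fallback, Best };

static const ToyPref HostPref[4][4] = {
    // to:         D           G         H           HD
    /* from D  */ {Best,       Never,    LastResort, Fallback},
    /* from G  */ {Best,       Never,    LastResort, Fallback},
    /* from H  */ {LastResort, Best,     Best,       Fallback},
    /* from HD */ {LastResort, Fallback, Fallback,   Best},
};

static void checkHostModeTable() {
  assert(HostPref[H][G] == Best);        // host launching a kernel
  assert(HostPref[HD][D] == LastResort); // wrong-side call from host-device
  assert(HostPref[D][G] == Never);       // no dynamic parallelism yet
}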
bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
+ // With target overloads enabled, we only disallow calling
+ // combinations with CFP_Never.
+ if (getLangOpts().CUDATargetOverloads)
+    return IdentifyCUDAPreference(Caller, Callee) == CFP_Never;
+
// The CUDADisableTargetCallChecks short-circuits this check: we assume all
// cross-target calls are valid.
if (getLangOpts().CUDADisableTargetCallChecks)
@@ -117,6 +210,57 @@ bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
return false;
}
+template <typename T, typename FetchDeclFn>
+static void EraseUnwantedCUDAMatchesImpl(Sema &S, const FunctionDecl *Caller,
+ llvm::SmallVectorImpl<T> &Matches,
+ FetchDeclFn FetchDecl) {
+ assert(S.getLangOpts().CUDATargetOverloads &&
+ "Should not be called w/o enabled target overloads.");
+ if (Matches.size() <= 1)
+ return;
+
+ // Find the best call preference among the functions in Matches.
+ Sema::CUDAFunctionPreference P, BestCFP = Sema::CFP_Never;
+ for (auto const &Match : Matches) {
+ P = S.IdentifyCUDAPreference(Caller, FetchDecl(Match));
+ if (P > BestCFP)
+ BestCFP = P;
+ }
+
+ // Erase all functions with lower priority.
+ for (unsigned I = 0, N = Matches.size(); I != N;)
+ if (S.IdentifyCUDAPreference(Caller, FetchDecl(Matches[I])) < BestCFP) {
+ Matches[I] = Matches[--N];
+ Matches.resize(N);
+ } else {
+ ++I;
+ }
+}
+
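// The erase loop above uses the unordered "replace with last element" idiom:
// items below the best rank are dropped in O(n) without preserving order.
// The same idiom on a plain vector of ranks:
#include <algorithm>
#include <vector>

static void keepOnlyBest(std::vector<int> &Ranks) {
  if (Ranks.size() <= 1)
    return;
  int Best = *std::max_element(Ranks.begin(), Ranks.end());
  for (std::size_t I = 0, N = Ranks.size(); I != N;)
    if (Ranks[I] < Best) {
      Ranks[I] = Ranks[--N]; // overwrite with the last live element
      Ranks.resize(N);       // shrink; do not advance I, recheck slot I
    } else {
      ++I;
    }
}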
+void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
+ SmallVectorImpl<FunctionDecl *> &Matches){
+ EraseUnwantedCUDAMatchesImpl<FunctionDecl *>(
+ *this, Caller, Matches, [](const FunctionDecl *item) { return item; });
+}
+
+void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
+ SmallVectorImpl<DeclAccessPair> &Matches) {
+ EraseUnwantedCUDAMatchesImpl<DeclAccessPair>(
+ *this, Caller, Matches, [](const DeclAccessPair &item) {
+ return dyn_cast<FunctionDecl>(item.getDecl());
+ });
+}
+
+void Sema::EraseUnwantedCUDAMatches(
+ const FunctionDecl *Caller,
+ SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches){
+ EraseUnwantedCUDAMatchesImpl<std::pair<DeclAccessPair, FunctionDecl *>>(
+ *this, Caller, Matches,
+ [](const std::pair<DeclAccessPair, FunctionDecl *> &item) {
+ return dyn_cast<FunctionDecl>(item.second);
+ });
+}
+
/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member __host__ and another's is
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 9e146ed3a642..f7aace625a92 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -291,8 +291,10 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
if (!SD)
return false;
+ SD = SD->getUnderlyingDecl();
+
// Namespace and namespace aliases are fine.
- if (isa<NamespaceDecl>(SD) || isa<NamespaceAliasDecl>(SD))
+ if (isa<NamespaceDecl>(SD))
return true;
if (!isa<TypeDecl>(SD))
@@ -396,10 +398,7 @@ bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
}
Found.suppressDiagnostics();
- if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
- return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
-
- return false;
+ return Found.getAsSingle<NamespaceDecl>();
}
namespace {
@@ -533,6 +532,9 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
LookupName(Found, S);
}
+ if (Found.isAmbiguous())
+ return true;
+
// If we performed lookup into a dependent context and did not find anything,
// that's fine: just build a dependent nested-name-specifier.
if (Found.empty() && isDependent &&
@@ -551,8 +553,6 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
return false;
}
- // FIXME: Deal with ambiguities cleanly.
-
if (Found.empty() && !ErrorRecoveryLookup) {
// If identifier is not found as class-name-or-namespace-name, but is found
// as other entity, don't look for typos.
@@ -562,6 +562,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
else if (S && !isDependent)
LookupName(R, S);
if (!R.empty()) {
+ // Don't diagnose problems with this speculative lookup.
+ R.suppressDiagnostics();
// The identifier is found in ordinary lookup. If correction to colon is
// allowed, suggest replacement to ':'.
if (IsCorrectedToColon) {
@@ -604,7 +606,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest)
<< Name);
- if (NamedDecl *ND = Corrected.getCorrectionDecl())
+ if (NamedDecl *ND = Corrected.getFoundDecl())
Found.addDecl(ND);
Found.setLookupName(Corrected.getCorrection());
} else {
@@ -612,7 +614,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
}
}
- NamedDecl *SD = Found.getAsSingle<NamedDecl>();
+ NamedDecl *SD =
+ Found.isSingleResult() ? Found.getRepresentativeDecl() : nullptr;
bool IsExtension = false;
bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension);
if (!AcceptSpec && IsExtension) {
@@ -684,7 +687,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
return false;
}
- QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
+ QualType T =
+ Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
TypeLocBuilder TLB;
if (isa<InjectedClassNameType>(T)) {
InjectedClassNameTypeLoc InjectedTL
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index c0754ba7902a..07b058911c2d 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -160,19 +160,19 @@ static TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
unsigned &msg);
static TryCastResult TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr,
QualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg,
CastKind &Kind,
CXXCastPath &BasePath);
static TryCastResult TryStaticPointerDowncast(Sema &Self, QualType SrcType,
QualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg,
CastKind &Kind,
CXXCastPath &BasePath);
static TryCastResult TryStaticDowncast(Sema &Self, CanQualType SrcType,
CanQualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
QualType OrigSrcType,
QualType OrigDestType, unsigned &msg,
CastKind &Kind,
@@ -180,7 +180,7 @@ static TryCastResult TryStaticDowncast(Sema &Self, CanQualType SrcType,
static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
QualType SrcType,
QualType DestType,bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg,
CastKind &Kind,
CXXCastPath &BasePath);
@@ -188,13 +188,13 @@ static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExp
static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg, CastKind &Kind,
bool ListInitialization);
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg, CastKind &Kind,
CXXCastPath &BasePath,
bool ListInitialization);
@@ -203,7 +203,7 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
unsigned &msg);
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg,
CastKind &Kind);
@@ -489,9 +489,9 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
QualType *TheOffendingDestType = nullptr,
Qualifiers *CastAwayQualifiers = nullptr) {
// If the only checking we care about is for Objective-C lifetime qualifiers,
- // and we're not in ARC mode, there's nothing to check.
+ // and we're not in ObjC mode, there's nothing to check.
if (!CheckCVR && CheckObjCLifetime &&
- !Self.Context.getLangOpts().ObjCAutoRefCount)
+ !Self.Context.getLangOpts().ObjC1)
return false;
// Casting away constness is defined in C++ 5.2.11p8 with reference to
@@ -683,7 +683,8 @@ void CastOperation::CheckDynamicCast() {
// C++ 5.2.7p5
// Upcasts are resolved statically.
- if (DestRecord && Self.IsDerivedFrom(SrcPointee, DestPointee)) {
+ if (DestRecord &&
+ Self.IsDerivedFrom(OpRange.getBegin(), SrcPointee, DestPointee)) {
if (Self.CheckDerivedToBaseConversion(SrcPointee, DestPointee,
OpRange.getBegin(), OpRange,
&BasePath)) {
@@ -943,7 +944,7 @@ void CastOperation::CheckStaticCast() {
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
- const SourceRange &OpRange, unsigned &msg,
+ SourceRange OpRange, unsigned &msg,
CastKind &Kind, CXXCastPath &BasePath,
bool ListInitialization) {
// Determine whether we have the semantics of a C-style cast.
@@ -1171,7 +1172,8 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
Kind = CK_DerivedToBase;
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
- if (!Self.IsDerivedFrom(SrcExpr->getType(), R->getPointeeType(), Paths))
+ if (!Self.IsDerivedFrom(SrcExpr->getLocStart(), SrcExpr->getType(),
+ R->getPointeeType(), Paths))
return TC_NotApplicable;
Self.BuildBasePathArray(Paths, BasePath);
@@ -1184,7 +1186,7 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
/// Tests whether a conversion according to C++ 5.2.9p5 is valid.
TryCastResult
TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
- bool CStyle, const SourceRange &OpRange,
+ bool CStyle, SourceRange OpRange,
unsigned &msg, CastKind &Kind,
CXXCastPath &BasePath) {
// C++ 5.2.9p5: An lvalue of type "cv1 B", where B is a class type, can be
@@ -1222,7 +1224,7 @@ TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
/// Tests whether a conversion according to C++ 5.2.9p8 is valid.
TryCastResult
TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
- bool CStyle, const SourceRange &OpRange,
+ bool CStyle, SourceRange OpRange,
unsigned &msg, CastKind &Kind,
CXXCastPath &BasePath) {
// C++ 5.2.9p8: An rvalue of type "pointer to cv1 B", where B is a class
@@ -1256,12 +1258,12 @@ TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
/// DestType is possible and allowed.
TryCastResult
TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
- bool CStyle, const SourceRange &OpRange, QualType OrigSrcType,
+ bool CStyle, SourceRange OpRange, QualType OrigSrcType,
QualType OrigDestType, unsigned &msg,
CastKind &Kind, CXXCastPath &BasePath) {
// We can only work with complete types. But don't complain if it doesn't work
- if (Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0) ||
- Self.RequireCompleteType(OpRange.getBegin(), DestType, 0))
+ if (!Self.isCompleteType(OpRange.getBegin(), SrcType) ||
+ !Self.isCompleteType(OpRange.getBegin(), DestType))
return TC_NotApplicable;
// Downcast can only happen in class hierarchies, so we need classes.
@@ -1271,7 +1273,7 @@ TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
- if (!Self.IsDerivedFrom(DestType, SrcType, Paths)) {
+ if (!Self.IsDerivedFrom(OpRange.getBegin(), DestType, SrcType, Paths)) {
return TC_NotApplicable;
}
@@ -1307,7 +1309,7 @@ TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
if (!Paths.isRecordingPaths()) {
Paths.clear();
Paths.setRecordingPaths(true);
- Self.IsDerivedFrom(DestType, SrcType, Paths);
+ Self.IsDerivedFrom(OpRange.getBegin(), DestType, SrcType, Paths);
}
std::string PathDisplayStr;
std::set<unsigned> DisplayedPaths;
@@ -1372,7 +1374,7 @@ TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
TryCastResult
TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
QualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg, CastKind &Kind,
CXXCastPath &BasePath) {
const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>();
@@ -1398,6 +1400,11 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
return TC_NotApplicable;
}
+ // Lock down the inheritance model right now in MS ABI, whether or not the
+ // pointee types are the same.
+ if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)Self.isCompleteType(OpRange.getBegin(), SrcType);
+
// T == T, modulo cv
if (!Self.Context.hasSameUnqualifiedType(SrcMemPtr->getPointeeType(),
DestMemPtr->getPointeeType()))
@@ -1408,16 +1415,15 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
QualType DestClass(DestMemPtr->getClass(), 0);
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
- if (Self.RequireCompleteType(OpRange.getBegin(), SrcClass, 0) ||
- !Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
+ if (!Self.IsDerivedFrom(OpRange.getBegin(), SrcClass, DestClass, Paths))
return TC_NotApplicable;
- }
// B is a base of D. But is it an allowed base? If not, it's a hard error.
if (Paths.isAmbiguous(Self.Context.getCanonicalType(DestClass))) {
Paths.clear();
Paths.setRecordingPaths(true);
- bool StillOkay = Self.IsDerivedFrom(SrcClass, DestClass, Paths);
+ bool StillOkay =
+ Self.IsDerivedFrom(OpRange.getBegin(), SrcClass, DestClass, Paths);
assert(StillOkay);
(void)StillOkay;
std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
@@ -1484,7 +1490,7 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
TryCastResult
TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
Sema::CheckedConversionKind CCK,
- const SourceRange &OpRange, unsigned &msg,
+ SourceRange OpRange, unsigned &msg,
CastKind &Kind, bool ListInitialization) {
if (DestType->isRecordType()) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
@@ -1494,10 +1500,6 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
msg = 0;
return TC_Failed;
}
- } else if (DestType->isMemberPointerType()) {
- if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
- }
}
InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
@@ -1750,7 +1752,7 @@ static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- const SourceRange &OpRange,
+ SourceRange OpRange,
unsigned &msg,
CastKind &Kind) {
bool IsLValueCast = false;
@@ -1845,8 +1847,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// We need to determine the inheritance model that the class will use if
// we haven't yet.
- Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0);
- Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
+ (void)Self.isCompleteType(OpRange.getBegin(), SrcType);
+ (void)Self.isCompleteType(OpRange.getBegin(), DestType);
}
// Don't allow casting between member pointers of different sizes.
@@ -1877,28 +1879,29 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Success;
}
+ // Allow reinterpret_casts between vectors of the same size and
+ // between vectors and integers of the same size.
bool destIsVector = DestType->isVectorType();
bool srcIsVector = SrcType->isVectorType();
if (srcIsVector || destIsVector) {
- // FIXME: Should this also apply to floating point types?
- bool srcIsScalar = SrcType->isIntegralType(Self.Context);
- bool destIsScalar = DestType->isIntegralType(Self.Context);
-
- // Check if this is a cast between a vector and something else.
- if (!(srcIsScalar && destIsVector) && !(srcIsVector && destIsScalar) &&
- !(srcIsVector && destIsVector))
+ // The non-vector type, if any, must have integral type. This is
+ // the same rule that C vector casts use; note, however, that enum
+ // types are not integral in C++.
+ if ((!destIsVector && !DestType->isIntegralType(Self.Context)) ||
+ (!srcIsVector && !SrcType->isIntegralType(Self.Context)))
return TC_NotApplicable;
- // If both types have the same size, we can successfully cast.
- if (Self.Context.getTypeSize(SrcType)
- == Self.Context.getTypeSize(DestType)) {
+ // The size we want to consider is eltCount * eltSize.
+ // That's exactly what the lax-conversion rules will check.
+ if (Self.areLaxCompatibleVectorTypes(SrcType, DestType)) {
Kind = CK_BitCast;
return TC_Success;
}
-
- if (destIsScalar)
+
+ // Otherwise, pick a reasonable diagnostic.
+ if (!destIsVector)
msg = diag::err_bad_cxx_cast_vector_to_scalar_different_size;
- else if (srcIsScalar)
+ else if (!srcIsVector)
msg = diag::err_bad_cxx_cast_scalar_to_vector_different_size;
else
msg = diag::err_bad_cxx_cast_vector_to_vector_different_size;
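For illustration (a sketch, not part of the patch): under the new rule the cast succeeds whenever the lax-conversion size check passes, and any scalar operand must be integral. The typedef names are hypothetical.

    typedef int   v4i __attribute__((vector_size(16)));
    typedef float v4f __attribute__((vector_size(16)));
    v4f as_floats(v4i v)      { return reinterpret_cast<v4f>(v); } // OK: 16 == 16 bytes
    v4i from_i128(__int128 n) { return reinterpret_cast<v4i>(n); } // OK: integral scalar, same size
    // reinterpret_cast<v4i>(3.0) is rejected: double is not an integral type.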
@@ -2237,6 +2240,16 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Overloads are allowed with C extensions, so we need to support them.
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ DeclAccessPair DAP;
+ if (FunctionDecl *FD = Self.ResolveAddressOfOverloadedFunction(
+ SrcExpr.get(), DestType, /*Complain=*/true, DAP))
+ SrcExpr = Self.FixOverloadedFunctionReference(SrcExpr.get(), DAP, FD);
+ else
+ return;
+ assert(SrcExpr.isUsable());
+ }
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
if (SrcExpr.isInvalid())
return;
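A minimal sketch of the C-extension case this now handles (the overloadable attribute is Clang's C overloading extension; the function names are hypothetical):

    void inc(int) __attribute__((overloadable));
    void inc(float) __attribute__((overloadable));
    void (*p)(int) = (void (*)(int))inc; // the cast resolves the overload to inc(int)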
@@ -2480,8 +2493,11 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
return ExprError();
-
- if (CXXConstructExpr *ConstructExpr = dyn_cast<CXXConstructExpr>(Op.SrcExpr.get()))
+
+ auto *SubExpr = Op.SrcExpr.get();
+ if (auto *BindExpr = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
+ SubExpr = BindExpr->getSubExpr();
+ if (auto *ConstructExpr = dyn_cast<CXXConstructExpr>(SubExpr))
ConstructExpr->setParenOrBraceRange(SourceRange(LPLoc, RPLoc));
return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 1a8ab6e209e5..59d51f7e84ca 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/Analyses/FormatString.h"
@@ -111,6 +112,39 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
+static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 3))
+ return true;
+
+ // First two arguments should be integers.
+ for (unsigned I = 0; I < 2; ++I) {
+ Expr *Arg = TheCall->getArg(I);
+ QualType Ty = Arg->getType();
+ if (!Ty->isIntegerType()) {
+ S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int)
+ << Ty << Arg->getSourceRange();
+ return true;
+ }
+ }
+
+ // Third argument should be a pointer to a non-const integer.
+ // IRGen correctly handles volatile, restrict, and address spaces, and
+ // the other qualifiers aren't possible.
+ {
+ Expr *Arg = TheCall->getArg(2);
+ QualType Ty = Arg->getType();
+ const auto *PtrTy = Ty->getAs<PointerType>();
+ if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
+ !PtrTy->getPointeeType().isConstQualified())) {
+ S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int)
+ << Ty << Arg->getSourceRange();
+ return true;
+ }
+ }
+
+ return false;
+}
+
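A usage sketch of the shape these checks admit (names hypothetical):

    bool checked_add(int a, int b, int *out) {
      return __builtin_add_overflow(a, b, out); // true if the result wrapped
    }
    // A 'const int *' third argument is rejected by the non-const check above.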
static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
CallExpr *TheCall, unsigned SizeIdx,
unsigned DstSizeIdx) {
@@ -440,6 +474,9 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return SemaBuiltinAtomicOverloaded(TheCallResult);
+ case Builtin::BI__builtin_nontemporal_load:
+ case Builtin::BI__builtin_nontemporal_store:
+ return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
@@ -453,6 +490,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow:
+ if (SemaBuiltinOverflow(*this, TheCall))
+ return ExprError();
+ break;
case Builtin::BI__builtin_operator_new:
case Builtin::BI__builtin_operator_delete:
if (!getLangOpts().CPlusPlus) {
@@ -525,7 +568,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// Since the target specific builtins for each arch overlap, only check those
// of the arch we are compiling for.
- if (BuiltinID >= Builtin::FirstTSBuiltin) {
+ if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
switch (Context.getTargetInfo().getTriple().getArch()) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -1027,12 +1070,34 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
+/// This checks that the target supports __builtin_cpu_supports and
+/// that the string argument is constant and valid.
+static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(0);
+
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+ return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+
+ // Check the contents of the string.
+ StringRef Feature =
+ cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+ if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
+ return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
+ << Arg->getSourceRange();
+ return false;
+}
+
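Usage sketch (the feature string is illustrative):

    int have_sse42(void) {
      return __builtin_cpu_supports("sse4.2"); // must be a literal the target knows
    }
    // A non-literal argument, or an unknown feature name, is diagnosed here.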
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
switch (BuiltinID) {
default: return false;
case X86::BI__builtin_cpu_supports:
- return SemaBuiltinCpuSupports(TheCall);
+ return SemaBuiltinCpuSupports(*this, TheCall);
+ case X86::BI__builtin_ms_va_start:
+ return SemaBuiltinMSVAStart(TheCall);
case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break;
case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
case X86::BI__builtin_ia32_vpermil2pd:
@@ -1115,8 +1180,7 @@ bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
/// Checks if the given expression evaluates to null.
///
/// \brief Returns true if the value evaluates to null.
-static bool CheckNonNullExpr(Sema &S,
- const Expr *Expr) {
+static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// If the expression has non-null type, it doesn't evaluate to null.
if (auto nullability
= Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
@@ -1145,7 +1209,8 @@ static void CheckNonNullArgument(Sema &S,
const Expr *ArgExpr,
SourceLocation CallSiteLoc) {
if (CheckNonNullExpr(S, ArgExpr))
- S.Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+ S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
+ S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
}
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
@@ -1638,6 +1703,12 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
return ExprError();
}
ValType = AtomTy->getAs<AtomicType>()->getValueType();
+ } else if (Form != Load && Op != AtomicExpr::AO__atomic_load) {
+ if (ValType.isConstQualified()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
}
// For an arithmetic operation, the implied arithmetic must be well-formed.
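A sketch of the new check's effect (declarations hypothetical):

    _Atomic(int) counter;
    const _Atomic(int) *ro = &counter;
    // __c11_atomic_fetch_add(ro, 1, __ATOMIC_SEQ_CST); // error: pointer to const
    // Loads remain exempt, matching the 'Form != Load' guard above.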
@@ -1675,9 +1746,6 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
return ExprError();
}
- // FIXME: For any builtin other than a load, the ValType must not be
- // const-qualified.
-
switch (ValType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -1710,6 +1778,10 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
if (!IsC11 && !IsN)
ByValType = Ptr->getType();
+  // FIXME: __atomic_load allows the first argument to be a pointer to const
+ // but not the second argument. We need to manually remove possible const
+ // qualifiers.
+
// The first argument --- the pointer --- has a fixed type; we
// deduce the types of the rest of the arguments accordingly. Walk
// the remaining arguments, converting them to the deduced value type.
@@ -1729,8 +1801,17 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
Ty = ByValType;
else if (Form == Arithmetic)
Ty = Context.getPointerDiffType();
- else
- Ty = Context.getPointerType(ValType.getUnqualifiedType());
+ else {
+ Expr *ValArg = TheCall->getArg(i);
+ unsigned AS = 0;
+ // Keep address space of non-atomic pointer type.
+ if (const PointerType *PtrTy =
+ ValArg->getType()->getAs<PointerType>()) {
+ AS = PtrTy->getPointeeType().getAddressSpace();
+ }
+ Ty = Context.getPointerType(
+ Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
+ }
break;
case 2:
// The third argument to compare_exchange / GNU exchange is a
@@ -2142,7 +2223,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// Get the decl for the concrete builtin from this, we can tell what the
// concrete integer type we should convert to is.
unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
- const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
+ const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
FunctionDecl *NewBuiltinDecl;
if (NewBuiltinID == BuiltinID)
NewBuiltinDecl = FDecl;
@@ -2209,6 +2290,78 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
return TheCallResult;
}
+/// SemaBuiltinNontemporalOverloaded - We have a call to
+/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
+/// overloaded function based on the pointer type of its last argument.
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
+ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
+ CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+ unsigned BuiltinID = FDecl->getBuiltinID();
+ assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
+ BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
+ "Unexpected nontemporal load/store builtin!");
+ bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
+ unsigned numArgs = isStore ? 2 : 1;
+
+ // Ensure that we have the proper number of arguments.
+ if (checkArgCount(*this, TheCall, numArgs))
+ return ExprError();
+
+ // Inspect the last argument of the nontemporal builtin. This should always
+  // be a pointer type, from which we infer the type of the memory access.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *PointerArg = TheCall->getArg(numArgs - 1);
+ ExprResult PointerArgResult =
+ DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return ExprError();
+ PointerArg = PointerArgResult.get();
+ TheCall->setArg(numArgs - 1, PointerArg);
+
+ const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return ExprError();
+ }
+
+ QualType ValType = pointerType->getPointeeType();
+
+ // Strip any qualifiers off ValType.
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType()) {
+ Diag(DRE->getLocStart(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return ExprError();
+ }
+
+ if (!isStore) {
+ TheCall->setType(ValType);
+ return TheCallResult;
+ }
+
+ ExprResult ValArg = TheCall->getArg(0);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return ExprError();
+
+ TheCall->setArg(0, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return TheCallResult;
+}
+
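A usage sketch (function and parameter names hypothetical):

    void stream_copy(float *dst, const float *src, int n) {
      for (int i = 0; i != n; ++i) {
        float v = __builtin_nontemporal_load(&src[i]); // typed by the pointee
        __builtin_nontemporal_store(v, &dst[i]);       // returns void
      }
    }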
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
@@ -2241,9 +2394,10 @@ bool Sema::CheckObjCString(Expr *Arg) {
return false;
}
-/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity.
-/// Emit an error and return true on failure, return false on success.
-bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
+/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
+/// for validity. Emit an error and return true on failure; return false
+/// on success.
+bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
Expr *Fn = TheCall->getCallee();
if (TheCall->getNumArgs() > 2) {
Diag(TheCall->getArg(2)->getLocStart(),
@@ -2321,6 +2475,48 @@ bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
return false;
}
+/// Check the arguments to '__builtin_va_start' for validity, and that
+/// it was called from a function of the native ABI.
+/// Emit an error and return true on failure; return false on success.
+bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
+ // On x86-64 Unix, don't allow this in Win64 ABI functions.
+ // On x64 Windows, don't allow this in System V ABI functions.
+ // (Yes, that means there's no corresponding way to support variadic
+ // System V ABI functions on Windows.)
+ if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64) {
+ unsigned OS = Context.getTargetInfo().getTriple().getOS();
+ clang::CallingConv CC = CC_C;
+ if (const FunctionDecl *FD = getCurFunctionDecl())
+ CC = FD->getType()->getAs<FunctionType>()->getCallConv();
+ if ((OS == llvm::Triple::Win32 && CC == CC_X86_64SysV) ||
+ (OS != llvm::Triple::Win32 && CC == CC_X86_64Win64))
+ return Diag(TheCall->getCallee()->getLocStart(),
+ diag::err_va_start_used_in_wrong_abi_function)
+ << (OS != llvm::Triple::Win32);
+ }
+ return SemaBuiltinVAStartImpl(TheCall);
+}
+
+/// Check the arguments to '__builtin_ms_va_start' for validity, and that
+/// it was called from a Win64 ABI function.
+/// Emit an error and return true on failure; return false on success.
+bool Sema::SemaBuiltinMSVAStart(CallExpr *TheCall) {
+ // This only makes sense for x86-64.
+ const llvm::Triple &TT = Context.getTargetInfo().getTriple();
+ Expr *Callee = TheCall->getCallee();
+ if (TT.getArch() != llvm::Triple::x86_64)
+ return Diag(Callee->getLocStart(), diag::err_x86_builtin_32_bit_tgt);
+ // Don't allow this in System V ABI functions.
+ clang::CallingConv CC = CC_C;
+ if (const FunctionDecl *FD = getCurFunctionDecl())
+ CC = FD->getType()->getAs<FunctionType>()->getCallConv();
+ if (CC == CC_X86_64SysV ||
+ (TT.getOS() != llvm::Triple::Win32 && CC != CC_X86_64Win64))
+ return Diag(Callee->getLocStart(),
+ diag::err_ms_va_start_used_in_sysv_function);
+ return SemaBuiltinVAStartImpl(TheCall);
+}
+
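A minimal sketch of what the ABI check permits, assuming an x86-64 target; the ms_abi attribute supplies the required Win64 calling convention:

    int __attribute__((ms_abi)) sum(int n, ...) {
      __builtin_ms_va_list ap;
      __builtin_ms_va_start(ap, n); // OK: Win64-ABI function
      int total = 0;
      for (int i = 0; i != n; ++i)
        total += __builtin_va_arg(ap, int);
      __builtin_ms_va_end(ap);
      return total;
    }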
bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
// void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
// const char *named_addr);
@@ -2784,26 +2980,6 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
return false;
}
-/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
-/// This checks that the target supports __builtin_cpu_supports and
-/// that the string argument is constant and valid.
-bool Sema::SemaBuiltinCpuSupports(CallExpr *TheCall) {
- Expr *Arg = TheCall->getArg(0);
-
- // Check if the argument is a string literal.
- if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
- << Arg->getSourceRange();
-
- // Check the contents of the string.
- StringRef Feature =
- cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!Context.getTargetInfo().validateCpuSupports(Feature))
- return Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
- << Arg->getSourceRange();
- return false;
-}
-
/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
@@ -4833,7 +5009,7 @@ static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
}
}
} else {
- FunctionName = S.Context.BuiltinInfo.GetName(AbsKind);
+ FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);
if (HeaderName) {
@@ -4909,7 +5085,7 @@ void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
// function call.
if (ArgType->isUnsignedIntegerType()) {
const char *FunctionName =
- IsStdAbs ? "std::abs" : Context.BuiltinInfo.GetName(AbsKind);
+ IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
Diag(Call->getExprLoc(), diag::note_remove_abs)
<< FunctionName
@@ -4917,6 +5093,19 @@ void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
return;
}
+  // Taking the absolute value of a pointer is very suspicious; the user
+  // probably wanted to index into an array, dereference a pointer, call a
+  // function, etc.
+ if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
+ unsigned DiagType = 0;
+ if (ArgType->isFunctionType())
+ DiagType = 1;
+ else if (ArgType->isArrayType())
+ DiagType = 2;
+
+ Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
+ return;
+ }
+
// std::abs has overloads which prevent most of the absolute value problems
// from occurring.
if (IsStdAbs)
@@ -5465,17 +5654,15 @@ CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType,
}
if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { //address of local var.
- S.Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
- : diag::warn_ret_stack_addr)
+ S.Diag(diagLoc, diag::warn_ret_stack_addr_ref) << lhsType->isReferenceType()
<< DR->getDecl()->getDeclName() << diagRange;
} else if (isa<BlockExpr>(stackE)) { // local block.
S.Diag(diagLoc, diag::err_ret_local_block) << diagRange;
} else if (isa<AddrLabelExpr>(stackE)) { // address of label.
S.Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
} else { // local temporary.
- S.Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_local_temp_ref
- : diag::warn_ret_local_temp_addr)
- << diagRange;
+ S.Diag(diagLoc, diag::warn_ret_local_temp_addr_ref)
+ << lhsType->isReferenceType() << diagRange;
}
// Display the "trail" of reference variables that we followed until we
@@ -5750,6 +5937,11 @@ do {
return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars,ParentDecl);
}
+ case Stmt::OMPArraySectionExprClass: {
+ return EvalAddr(cast<OMPArraySectionExpr>(E)->getBase(), refVars,
+ ParentDecl);
+ }
+
case Stmt::ConditionalOperatorClass: {
// For conditional operators we need to see if either the LHS or RHS are
// non-NULL Expr's. If one is non-NULL, we return it.
@@ -7083,6 +7275,14 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
+
+ }
+ // ... or possibly if we're increasing rank, too
+ else if (TargetBT->getKind() > SourceBT->getKind()) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
}
return;
}
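For example (constants mine), the new branch fires on implicit widening to a higher floating-point rank:

    float area(float r) {
      return 3.14159265358979 * r * r; // 'r' is promoted to double here
    }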
@@ -7105,20 +7305,24 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
- // If the target is bool, warn if expr is a function or method call.
- if (Target->isSpecificBuiltinType(BuiltinType::Bool) &&
- isa<CallExpr>(E)) {
+  // Detect the case where a call result is converted from floating-point
+ // to bool, and the final argument to the call is converted from bool, to
+ // discover this typo:
+ //
+ // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
+ //
+ // FIXME: This is an incredibly special case; is there some more general
+ // way to detect this class of misplaced-parentheses bug?
+ if (Target->isBooleanType() && isa<CallExpr>(E)) {
// Check last argument of function call to see if it is an
// implicit cast from a type matching the type the result
// is being cast to.
CallExpr *CEx = cast<CallExpr>(E);
- unsigned NumArgs = CEx->getNumArgs();
- if (NumArgs > 0) {
+ if (unsigned NumArgs = CEx->getNumArgs()) {
Expr *LastA = CEx->getArg(NumArgs - 1);
Expr *InnerE = LastA->IgnoreParenImpCasts();
- const Type *InnerType =
- S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
- if (isa<ImplicitCastExpr>(LastA) && (InnerType == Target)) {
+ if (isa<ImplicitCastExpr>(LastA) &&
+ InnerE->getType()->isBooleanType()) {
// Warn on this floating-point to bool conversion
DiagnoseImpCast(S, E, T, CC,
diag::warn_impcast_floating_point_to_bool);
@@ -7301,18 +7505,16 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
CheckImplicitConversion(S, E, T, CC);
// Now continue drilling into this expression.
-
- if (PseudoObjectExpr * POE = dyn_cast<PseudoObjectExpr>(E)) {
- if (POE->getResultExpr())
- E = POE->getResultExpr();
- }
-
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
- if (OVE->getSourceExpr())
- AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
- return;
+
+ if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ // The bound subexpressions in a PseudoObjectExpr are not reachable
+ // as transitive children.
+ // FIXME: Use a more uniform representation for this.
+ for (auto *SE : POE->semantics())
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
+ AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
}
-
+
// Skip past explicit casts.
if (isa<ExplicitCastExpr>(E)) {
E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
@@ -7372,12 +7574,6 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
} // end anonymous namespace
-enum {
- AddressOf,
- FunctionPointer,
- ArrayPointer
-};
-
// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
@@ -7476,6 +7672,26 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
}
}
+ auto ComplainAboutNonnullParamOrCall = [&](bool IsParam) {
+ std::string Str;
+ llvm::raw_string_ostream S(Str);
+ E->printPretty(S, nullptr, getPrintingPolicy());
+ unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
+ : diag::warn_cast_nonnull_to_bool;
+ Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
+ << E->getSourceRange() << Range << IsEqual;
+ };
+
+ // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
+ if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
+ if (auto *Callee = Call->getDirectCallee()) {
+ if (Callee->hasAttr<ReturnsNonNullAttr>()) {
+ ComplainAboutNonnullParamOrCall(false);
+ return;
+ }
+ }
+ }
+
// Expect to find a single Decl. Skip anything more complicated.
ValueDecl *D = nullptr;
if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
@@ -7487,40 +7703,38 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
// Weak Decls can be null.
if (!D || D->isWeak())
return;
-
+
// Check for parameter decl with nonnull attribute
- if (const ParmVarDecl* PV = dyn_cast<ParmVarDecl>(D)) {
- if (getCurFunction() && !getCurFunction()->ModifiedNonNullParams.count(PV))
- if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
- unsigned NumArgs = FD->getNumParams();
- llvm::SmallBitVector AttrNonNull(NumArgs);
+ if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
+ if (getCurFunction() &&
+ !getCurFunction()->ModifiedNonNullParams.count(PV)) {
+ if (PV->hasAttr<NonNullAttr>()) {
+ ComplainAboutNonnullParamOrCall(true);
+ return;
+ }
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
+ auto ParamIter = std::find(FD->param_begin(), FD->param_end(), PV);
+ assert(ParamIter != FD->param_end());
+ unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
+
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
if (!NonNull->args_size()) {
- AttrNonNull.set(0, NumArgs);
- break;
- }
- for (unsigned Val : NonNull->args()) {
- if (Val >= NumArgs)
- continue;
- AttrNonNull.set(Val);
+ ComplainAboutNonnullParamOrCall(true);
+ return;
}
- }
- if (!AttrNonNull.empty())
- for (unsigned i = 0; i < NumArgs; ++i)
- if (FD->getParamDecl(i) == PV &&
- (AttrNonNull[i] || PV->hasAttr<NonNullAttr>())) {
- std::string Str;
- llvm::raw_string_ostream S(Str);
- E->printPretty(S, nullptr, getPrintingPolicy());
- unsigned DiagID = IsCompare ? diag::warn_nonnull_parameter_compare
- : diag::warn_cast_nonnull_to_bool;
- Diag(E->getExprLoc(), DiagID) << S.str() << E->getSourceRange()
- << Range << IsEqual;
+
+ for (unsigned ArgNo : NonNull->args()) {
+ if (ArgNo == ParamNo) {
+ ComplainAboutNonnullParamOrCall(true);
return;
}
+ }
+ }
}
}
-
+ }
+
QualType T = D->getType();
const bool IsArray = T->isArrayType();
const bool IsFunction = T->isFunctionType();
@@ -7541,7 +7755,11 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
: diag::warn_impcast_pointer_to_bool;
- unsigned DiagType;
+ enum {
+ AddressOf,
+ FunctionPointer,
+ ArrayPointer
+ } DiagType;
if (IsAddressOf)
DiagType = AddressOf;
else if (IsFunction)
@@ -8229,6 +8447,15 @@ bool Sema::CheckParmsForFunctionDef(ParmVarDecl *const *P,
}
}
}
+
+ // Parameters with the pass_object_size attribute only need to be marked
+ // constant at function definitions. Because we lack information about
+ // whether we're on a declaration or definition when we're instantiating the
+ // attribute, we need to check for constness here.
+ if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
+ if (!Param->getType().isConstQualified())
+ Diag(Param->getLocation(), diag::err_attribute_pointers_only)
+ << Attr->getSpelling() << 1;
}
return HasInvalidParm;
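A hypothetical definition the new constness check accepts; dropping the const on the parameter would now hit err_attribute_pointers_only:

    unsigned long remaining(char *const buf __attribute__((pass_object_size(0)))) {
      return __builtin_object_size(buf, 0); // uses the size computed at each call site
    }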
@@ -8348,7 +8575,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
return;
llvm::APSInt index;
- if (!IndexExpr->EvaluateAsInt(index, Context))
+ if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
return;
if (IndexNegated)
index = -index;
@@ -8462,6 +8689,13 @@ void Sema::CheckArrayAccess(const Expr *expr) {
AllowOnePastEnd > 0);
return;
}
+ case Stmt::OMPArraySectionExprClass: {
+ const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
+ if (ASE->getLowerBound())
+ CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
+ /*ASE=*/nullptr, AllowOnePastEnd > 0);
+ return;
+ }
case Stmt::UnaryOperatorClass: {
// Only unwrap the * and & unary operators
const UnaryOperator *UO = cast<UnaryOperator>(expr);
@@ -9672,4 +9906,3 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
<< ArgumentExpr->getSourceRange()
<< TypeTagExpr->getSourceRange();
}
-
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 86265275d05e..21cf62585142 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -494,6 +494,7 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
bool &AsNestedNameSpecifier) const {
AsNestedNameSpecifier = false;
+ auto *Named = ND;
ND = ND->getUnderlyingDecl();
// Skip unnamed entities.
@@ -526,14 +527,14 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
return false;
if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
- ((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
+ (isa<NamespaceDecl>(ND) &&
Filter != &ResultBuilder::IsNamespace &&
Filter != &ResultBuilder::IsNamespaceOrAlias &&
Filter != nullptr))
AsNestedNameSpecifier = true;
// Filter out any unwanted results.
- if (Filter && !(this->*Filter)(ND)) {
+ if (Filter && !(this->*Filter)(Named)) {
// Check whether it is interesting as a nested-name-specifier.
if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
IsNestedNameSpecifier(ND) &&
@@ -1142,14 +1143,12 @@ bool ResultBuilder::IsNamespace(const NamedDecl *ND) const {
/// \brief Determines whether the given declaration is a namespace or
/// namespace alias.
bool ResultBuilder::IsNamespaceOrAlias(const NamedDecl *ND) const {
- return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+ return isa<NamespaceDecl>(ND->getUnderlyingDecl());
}
/// \brief Determines whether the given declaration is a type.
bool ResultBuilder::IsType(const NamedDecl *ND) const {
- if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
- ND = Using->getTargetDecl();
-
+ ND = ND->getUnderlyingDecl();
return isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
}
@@ -1157,11 +1156,9 @@ bool ResultBuilder::IsType(const NamedDecl *ND) const {
/// "." or "->". Only value declarations, nested name specifiers, and
/// using declarations thereof should show up.
bool ResultBuilder::IsMember(const NamedDecl *ND) const {
- if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
- ND = Using->getTargetDecl();
-
+ ND = ND->getUnderlyingDecl();
return isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
- isa<ObjCPropertyDecl>(ND);
+ isa<ObjCPropertyDecl>(ND);
}
static bool isObjCReceiverType(ASTContext &C, QualType T) {
@@ -3036,6 +3033,7 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
case Decl::ParmVar: return CXCursor_ParmDecl;
case Decl::Typedef: return CXCursor_TypedefDecl;
case Decl::TypeAlias: return CXCursor_TypeAliasDecl;
+ case Decl::TypeAliasTemplate: return CXCursor_TypeAliasTemplateDecl;
case Decl::Var: return CXCursor_VarDecl;
case Decl::Namespace: return CXCursor_Namespace;
case Decl::NamespaceAlias: return CXCursor_NamespaceAlias;
@@ -3376,7 +3374,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_Statement:
case PCC_RecoveryInFunction:
if (S->getFnParent())
- AddPrettyFunctionResults(PP.getLangOpts(), Results);
+ AddPrettyFunctionResults(getLangOpts(), Results);
break;
case PCC_Namespace:
@@ -3520,7 +3518,7 @@ void Sema::CodeCompleteExpression(Scope *S,
if (S->getFnParent() &&
!Data.ObjCCollection &&
!Data.IntegralConstantExpression)
- AddPrettyFunctionResults(PP.getLangOpts(), Results);
+ AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
@@ -4051,7 +4049,7 @@ void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
// If expression's type is CXXRecordDecl, it may overload the function
// call operator, so we check if it does and add them as candidates.
// A complete type is needed to look up member function call operators.
- if (!RequireCompleteType(Loc, NakedFn->getType(), 0)) {
+ if (isCompleteType(Loc, NakedFn->getType())) {
DeclarationName OpName = Context.DeclarationNames
.getCXXOperatorName(OO_Call);
LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
@@ -4093,7 +4091,7 @@ void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
return;
// A complete type is needed to look up constructors.
- if (RequireCompleteType(Loc, Type, 0))
+ if (!isCompleteType(Loc, Type))
return;
CXXRecordDecl *RD = Type->getAsCXXRecordDecl();
@@ -4205,7 +4203,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Results.ExitScope();
if (S->getFnParent())
- AddPrettyFunctionResults(PP.getLangOpts(), Results);
+ AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false);
@@ -4912,7 +4910,7 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.AddResult(CodeCompletionResult("atomic"));
// Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
- if (getLangOpts().ObjCARCWeak || getLangOpts().getGC() != LangOptions::NonGC)
+ if (getLangOpts().ObjCWeak || getLangOpts().getGC() != LangOptions::NonGC)
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
Results.AddResult(CodeCompletionResult("weak"));
@@ -5925,8 +5923,8 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
}
}
-void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
- unsigned NumProtocols) {
+void Sema::CodeCompleteObjCProtocolReferences(
+ ArrayRef<IdentifierLocPair> Protocols) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
@@ -5937,9 +5935,9 @@ void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
// Tell the result set to ignore all of the protocols we have
// already seen.
// FIXME: This doesn't work when caching code-completion results.
- for (unsigned I = 0; I != NumProtocols; ++I)
- if (ObjCProtocolDecl *Protocol = LookupProtocol(Protocols[I].first,
- Protocols[I].second))
+ for (const IdentifierLocPair &Pair : Protocols)
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first,
+ Pair.second))
Results.Ignore(Protocol);
// Add all protocols.
@@ -7079,11 +7077,13 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
// If the result type was not already provided, add it to the
// pattern as (type).
- if (ReturnType.isNull())
- AddObjCPassingTypeChunk(Method->getSendResultType()
- .stripObjCKindOfType(Context),
+ if (ReturnType.isNull()) {
+ QualType ResTy = Method->getSendResultType().stripObjCKindOfType(Context);
+ AttributedType::stripOuterNullability(ResTy);
+ AddObjCPassingTypeChunk(ResTy,
Method->getObjCDeclQualifier(), Context, Policy,
Builder);
+ }
Selector Sel = Method->getSelector();
@@ -7114,6 +7114,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
ParamType = (*P)->getOriginalType();
ParamType = ParamType.substObjCTypeArgs(Context, {},
ObjCSubstitutionContext::Parameter);
+ AttributedType::stripOuterNullability(ParamType);
AddObjCPassingTypeChunk(ParamType,
(*P)->getObjCDeclQualifier(),
Context, Policy,
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
new file mode 100644
index 000000000000..4b4fd6b16a06
--- /dev/null
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -0,0 +1,448 @@
+//===--- SemaCoroutine.cpp - Semantic Analysis for Coroutines -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ Coroutines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Overload.h"
+using namespace clang;
+using namespace sema;
+
+/// Look up the std::coroutine_traits<...>::promise_type for the given
+/// function type.
+static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
+ SourceLocation Loc) {
+ // FIXME: Cache std::coroutine_traits once we've found it.
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
+ return QualType();
+ }
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_traits"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
+ return QualType();
+ }
+
+ ClassTemplateDecl *CoroTraits = Result.getAsSingle<ClassTemplateDecl>();
+ if (!CoroTraits) {
+ Result.suppressDiagnostics();
+ // We found something weird. Complain about the first thing we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
+ return QualType();
+ }
+
+ // Form template argument list for coroutine_traits<R, P1, P2, ...>.
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(TemplateArgumentLoc(
+ TemplateArgument(FnType->getReturnType()),
+ S.Context.getTrivialTypeSourceInfo(FnType->getReturnType(), Loc)));
+ // FIXME: If the function is a non-static member function, add the type
+ // of the implicit object parameter before the formal parameters.
+ for (QualType T : FnType->getParamTypes())
+ Args.addArgument(TemplateArgumentLoc(
+ TemplateArgument(T), S.Context.getTrivialTypeSourceInfo(T, Loc)));
+
+ // Build the template-id.
+ QualType CoroTrait =
+ S.CheckTemplateIdType(TemplateName(CoroTraits), Loc, Args);
+ if (CoroTrait.isNull())
+ return QualType();
+ if (S.RequireCompleteType(Loc, CoroTrait,
+ diag::err_coroutine_traits_missing_specialization))
+ return QualType();
+
+ CXXRecordDecl *RD = CoroTrait->getAsCXXRecordDecl();
+ assert(RD && "specialization of class template is not a class?");
+
+ // Look up the ::promise_type member.
+ LookupResult R(S, &S.PP.getIdentifierTable().get("promise_type"), Loc,
+ Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(R, RD);
+ auto *Promise = R.getAsSingle<TypeDecl>();
+ if (!Promise) {
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_found)
+ << RD;
+ return QualType();
+ }
+
+ // The promise type is required to be a class type.
+ QualType PromiseType = S.Context.getTypeDeclType(Promise);
+ if (!PromiseType->getAsCXXRecordDecl()) {
+ // Use the fully-qualified name of the type.
+ auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, Std);
+ NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
+ CoroTrait.getTypePtr());
+ PromiseType = S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
+
+ S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_class)
+ << PromiseType;
+ return QualType();
+ }
+
+ return PromiseType;
+}
+
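For orientation, a sketch of the traits shape this lookup expects under the Coroutines TS; only std::coroutine_traits itself is implied by the patch, and the default body below is illustrative:

    namespace std {
      template <typename Ret, typename... Args>
      struct coroutine_traits {
        using promise_type = typename Ret::promise_type;
      };
    }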
+/// Check that this is a context in which a coroutine suspension can appear.
+static FunctionScopeInfo *
+checkCoroutineContext(Sema &S, SourceLocation Loc, StringRef Keyword) {
+ // 'co_await' and 'co_yield' are not permitted in unevaluated operands.
+ if (S.isUnevaluatedContext()) {
+ S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
+ return nullptr;
+ }
+
+ // Any other usage must be within a function.
+ // FIXME: Reject a coroutine with a deduced return type.
+ auto *FD = dyn_cast<FunctionDecl>(S.CurContext);
+ if (!FD) {
+ S.Diag(Loc, isa<ObjCMethodDecl>(S.CurContext)
+ ? diag::err_coroutine_objc_method
+ : diag::err_coroutine_outside_function) << Keyword;
+ } else if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)) {
+ // Coroutines TS [special]/6:
+ // A special member function shall not be a coroutine.
+ //
+ // FIXME: We assume that this really means that a coroutine cannot
+ // be a constructor or destructor.
+ S.Diag(Loc, diag::err_coroutine_ctor_dtor)
+ << isa<CXXDestructorDecl>(FD) << Keyword;
+ } else if (FD->isConstexpr()) {
+ S.Diag(Loc, diag::err_coroutine_constexpr) << Keyword;
+ } else if (FD->isVariadic()) {
+ S.Diag(Loc, diag::err_coroutine_varargs) << Keyword;
+ } else {
+ auto *ScopeInfo = S.getCurFunction();
+ assert(ScopeInfo && "missing function scope for function");
+
+ // If we don't have a promise variable, build one now.
+ if (!ScopeInfo->CoroutinePromise) {
+ QualType T =
+ FD->getType()->isDependentType()
+ ? S.Context.DependentTy
+ : lookupPromiseType(S, FD->getType()->castAs<FunctionProtoType>(),
+ Loc);
+ if (T.isNull())
+ return nullptr;
+
+ // Create and default-initialize the promise.
+ ScopeInfo->CoroutinePromise =
+ VarDecl::Create(S.Context, FD, FD->getLocation(), FD->getLocation(),
+ &S.PP.getIdentifierTable().get("__promise"), T,
+ S.Context.getTrivialTypeSourceInfo(T, Loc), SC_None);
+ S.CheckVariableDeclarationType(ScopeInfo->CoroutinePromise);
+ if (!ScopeInfo->CoroutinePromise->isInvalidDecl())
+ S.ActOnUninitializedDecl(ScopeInfo->CoroutinePromise, false);
+ }
+
+ return ScopeInfo;
+ }
+
+ return nullptr;
+}
+
+/// Build a call to 'operator co_await' if there is a suitable operator for
+/// the given expression.
+static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, Scope *S,
+ SourceLocation Loc, Expr *E) {
+ UnresolvedSet<16> Functions;
+ SemaRef.LookupOverloadedOperatorName(OO_Coawait, S, E->getType(), QualType(),
+ Functions);
+ return SemaRef.CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
+}
+
+struct ReadySuspendResumeResult {
+ bool IsInvalid;
+ Expr *Results[3];
+};
+
+static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
+ StringRef Name,
+ MutableArrayRef<Expr *> Args) {
+ DeclarationNameInfo NameInfo(&S.PP.getIdentifierTable().get(Name), Loc);
+
+ // FIXME: Fix BuildMemberReferenceExpr to take a const CXXScopeSpec&.
+ CXXScopeSpec SS;
+ ExprResult Result = S.BuildMemberReferenceExpr(
+ Base, Base->getType(), Loc, /*IsPtr=*/false, SS,
+ SourceLocation(), nullptr, NameInfo, /*TemplateArgs=*/nullptr,
+ /*Scope=*/nullptr);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return S.ActOnCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
+}
+
+/// Build calls to await_ready, await_suspend, and await_resume for a co_await
+/// expression.
+static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, SourceLocation Loc,
+ Expr *E) {
+ // Assume invalid until we see otherwise.
+ ReadySuspendResumeResult Calls = {true, {}};
+
+ const StringRef Funcs[] = {"await_ready", "await_suspend", "await_resume"};
+ for (size_t I = 0, N = llvm::array_lengthof(Funcs); I != N; ++I) {
+ Expr *Operand = new (S.Context) OpaqueValueExpr(
+ Loc, E->getType(), VK_LValue, E->getObjectKind(), E);
+
+ // FIXME: Pass coroutine handle to await_suspend.
+ ExprResult Result = buildMemberCall(S, Operand, Loc, Funcs[I], None);
+ if (Result.isInvalid())
+ return Calls;
+ Calls.Results[I] = Result.get();
+ }
+
+ Calls.IsInvalid = false;
+ return Calls;
+}
+
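A minimal awaitable providing the three members those calls resolve against (note that await_suspend takes no arguments yet, matching the FIXME above; the type name is hypothetical):

    struct always_ready {
      bool await_ready() { return true; }
      void await_suspend() {}
      void await_resume() {}
    };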
+ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ ExprResult Awaitable = buildOperatorCoawaitCall(*this, S, Loc, E);
+ if (Awaitable.isInvalid())
+ return ExprError();
+ return BuildCoawaitExpr(Loc, Awaitable.get());
+}
+ExprResult Sema::BuildCoawaitExpr(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_await");
+ if (!Coroutine)
+ return ExprError();
+
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ if (E->getType()->isDependentType()) {
+ Expr *Res = new (Context) CoawaitExpr(Loc, Context.DependentTy, E);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+ }
+
+ // If the expression is a temporary, materialize it as an lvalue so that we
+ // can use it multiple times.
+ if (E->getValueKind() == VK_RValue)
+ E = new (Context) MaterializeTemporaryExpr(E->getType(), E, true);
+
+ // Build the await_ready, await_suspend, await_resume calls.
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(*this, Loc, E);
+ if (RSS.IsInvalid)
+ return ExprError();
+
+ Expr *Res = new (Context) CoawaitExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2]);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+static ExprResult buildPromiseCall(Sema &S, FunctionScopeInfo *Coroutine,
+ SourceLocation Loc, StringRef Name,
+ MutableArrayRef<Expr *> Args) {
+ assert(Coroutine->CoroutinePromise && "no promise for coroutine");
+
+ // Form a reference to the promise.
+ auto *Promise = Coroutine->CoroutinePromise;
+ ExprResult PromiseRef = S.BuildDeclRefExpr(
+ Promise, Promise->getType().getNonReferenceType(), VK_LValue, Loc);
+ if (PromiseRef.isInvalid())
+ return ExprError();
+
+  // Call the named member function of the promise, passing in Args.
+ return buildMemberCall(S, PromiseRef.get(), Loc, Name, Args);
+}
+
+ExprResult Sema::ActOnCoyieldExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_yield");
+ if (!Coroutine)
+ return ExprError();
+
+ // Build yield_value call.
+ ExprResult Awaitable =
+ buildPromiseCall(*this, Coroutine, Loc, "yield_value", E);
+ if (Awaitable.isInvalid())
+ return ExprError();
+
+ // Build 'operator co_await' call.
+ Awaitable = buildOperatorCoawaitCall(*this, S, Loc, Awaitable.get());
+ if (Awaitable.isInvalid())
+ return ExprError();
+
+ return BuildCoyieldExpr(Loc, Awaitable.get());
+}
+ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_yield");
+ if (!Coroutine)
+ return ExprError();
+
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return ExprError();
+ E = R.get();
+ }
+
+ if (E->getType()->isDependentType()) {
+ Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, E);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+ }
+
+ // If the expression is a temporary, materialize it as an lvalue so that we
+ // can use it multiple times.
+ if (E->getValueKind() == VK_RValue)
+ E = new (Context) MaterializeTemporaryExpr(E->getType(), E, true);
+
+ // Build the await_ready, await_suspend, await_resume calls.
+ ReadySuspendResumeResult RSS = buildCoawaitCalls(*this, Loc, E);
+ if (RSS.IsInvalid)
+ return ExprError();
+
+ Expr *Res = new (Context) CoyieldExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2]);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+StmtResult Sema::ActOnCoreturnStmt(SourceLocation Loc, Expr *E) {
+ return BuildCoreturnStmt(Loc, E);
+}
+StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_return");
+ if (!Coroutine)
+ return StmtError();
+
+ if (E && E->getType()->isPlaceholderType() &&
+ !E->getType()->isSpecificPlaceholderType(BuiltinType::Overload)) {
+ ExprResult R = CheckPlaceholderExpr(E);
+ if (R.isInvalid()) return StmtError();
+ E = R.get();
+ }
+
+ // FIXME: If the operand is a reference to a variable that's about to go out
+ // of scope, we should treat the operand as an xvalue for this overload
+ // resolution.
+ ExprResult PC;
+ if (E && !E->getType()->isVoidType()) {
+ PC = buildPromiseCall(*this, Coroutine, Loc, "return_value", E);
+ } else {
+ E = MakeFullDiscardedValueExpr(E).get();
+ PC = buildPromiseCall(*this, Coroutine, Loc, "return_void", None);
+ }
+ if (PC.isInvalid())
+ return StmtError();
+
+ Expr *PCE = ActOnFinishFullExpr(PC.get()).get();
+
+ Stmt *Res = new (Context) CoreturnStmt(Loc, E, PCE);
+ Coroutine->CoroutineStmts.push_back(Res);
+ return Res;
+}
+
+void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
+ FunctionScopeInfo *Fn = getCurFunction();
+ assert(Fn && !Fn->CoroutineStmts.empty() && "not a coroutine");
+
+ // Coroutines [stmt.return]p1:
+ // A return statement shall not appear in a coroutine.
+ if (Fn->FirstReturnLoc.isValid()) {
+ Diag(Fn->FirstReturnLoc, diag::err_return_in_coroutine);
+ auto *First = Fn->CoroutineStmts[0];
+ Diag(First->getLocStart(), diag::note_declared_coroutine_here)
+ << (isa<CoawaitExpr>(First) ? 0 :
+ isa<CoyieldExpr>(First) ? 1 : 2);
+ }
+
+ bool AnyCoawaits = false;
+ bool AnyCoyields = false;
+ for (auto *CoroutineStmt : Fn->CoroutineStmts) {
+ AnyCoawaits |= isa<CoawaitExpr>(CoroutineStmt);
+ AnyCoyields |= isa<CoyieldExpr>(CoroutineStmt);
+ }
+
+ if (!AnyCoawaits && !AnyCoyields)
+ Diag(Fn->CoroutineStmts.front()->getLocStart(),
+ diag::ext_coroutine_without_co_await_co_yield);
+
+ SourceLocation Loc = FD->getLocation();
+
+ // Form a declaration statement for the promise declaration, so that AST
+ // visitors can more easily find it.
+ StmtResult PromiseStmt =
+ ActOnDeclStmt(ConvertDeclToDeclGroup(Fn->CoroutinePromise), Loc, Loc);
+ if (PromiseStmt.isInvalid())
+ return FD->setInvalidDecl();
+
+ // Form and check implicit 'co_await p.initial_suspend();' statement.
+ ExprResult InitialSuspend =
+ buildPromiseCall(*this, Fn, Loc, "initial_suspend", None);
+ // FIXME: Support operator co_await here.
+ if (!InitialSuspend.isInvalid())
+ InitialSuspend = BuildCoawaitExpr(Loc, InitialSuspend.get());
+ InitialSuspend = ActOnFinishFullExpr(InitialSuspend.get());
+ if (InitialSuspend.isInvalid())
+ return FD->setInvalidDecl();
+
+ // Form and check implicit 'co_await p.final_suspend();' statement.
+ ExprResult FinalSuspend =
+ buildPromiseCall(*this, Fn, Loc, "final_suspend", None);
+ // FIXME: Support operator co_await here.
+ if (!FinalSuspend.isInvalid())
+ FinalSuspend = BuildCoawaitExpr(Loc, FinalSuspend.get());
+ FinalSuspend = ActOnFinishFullExpr(FinalSuspend.get());
+ if (FinalSuspend.isInvalid())
+ return FD->setInvalidDecl();
+
+ // FIXME: Perform analysis of set_exception call.
+
+ // FIXME: Try to form 'p.return_void();' expression statement to handle
+ // control flowing off the end of the coroutine.
+
+ // Build implicit 'p.get_return_object()' expression and form initialization
+ // of return type from it.
+ ExprResult ReturnObject =
+ buildPromiseCall(*this, Fn, Loc, "get_return_object", None);
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+ QualType RetType = FD->getReturnType();
+ if (!RetType->isDependentType()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeResult(Loc, RetType, false);
+ ReturnObject = PerformMoveOrCopyInitialization(Entity, nullptr, RetType,
+ ReturnObject.get());
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+ }
+ ReturnObject = ActOnFinishFullExpr(ReturnObject.get(), Loc);
+ if (ReturnObject.isInvalid())
+ return FD->setInvalidDecl();
+
+ // FIXME: Perform move-initialization of parameters into frame-local copies.
+ SmallVector<Expr*, 16> ParamMoves;
+
+ // Build body for the coroutine wrapper statement.
+ Body = new (Context) CoroutineBodyStmt(
+ Body, PromiseStmt.get(), InitialSuspend.get(), FinalSuspend.get(),
+ /*SetException*/nullptr, /*Fallthrough*/nullptr,
+ ReturnObject.get(), ParamMoves);
+}
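Tying the pieces together, a hypothetical minimal coroutine exercising the calls this function builds; it reuses the always_ready awaitable and the coroutine_traits sketch from earlier:

    struct task {
      struct promise_type {
        task get_return_object() { return {}; }
        always_ready initial_suspend() { return {}; }
        always_ready final_suspend() { return {}; }
        void return_void() {}
      };
    };
    task nop() { co_return; } // promise, initial/final suspend, return_void all found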
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index c694a20319b0..2c5516a48d64 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -33,7 +33,6 @@
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Lex/ModuleLoader.h" // TODO: Sema shouldn't depend on Lex
#include "clang/Lex/Preprocessor.h" // Included for isCodeCompletionEnabled()
-#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
@@ -111,6 +110,7 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
case tok::kw_wchar_t:
case tok::kw_bool:
case tok::kw___underlying_type:
+ case tok::kw___auto_type:
return true;
case tok::annot_typename:
@@ -795,7 +795,7 @@ Corrected:
}
// In C, we first see whether there is a tag type by the same name, in
- // which case it's likely that the user just forget to write "enum",
+ // which case it's likely that the user just forgot to write "enum",
// "struct", or "union".
if (!getLangOpts().CPlusPlus && !SecondTry &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
@@ -813,9 +813,8 @@ Corrected:
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
- NamedDecl *FirstDecl = Corrected.getCorrectionDecl();
- NamedDecl *UnderlyingFirstDecl
- = FirstDecl? FirstDecl->getUnderlyingDecl() : nullptr;
+ NamedDecl *FirstDecl = Corrected.getFoundDecl();
+ NamedDecl *UnderlyingFirstDecl = Corrected.getCorrectionDecl();
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
UnqualifiedDiag = diag::err_no_template_suggest;
@@ -1022,7 +1021,7 @@ Corrected:
if (FirstDecl->isCXXClassMember())
return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result,
- nullptr);
+ nullptr, S);
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
return BuildDeclarationNameExpr(SS, Result, ADL);
@@ -1089,7 +1088,9 @@ Sema::SkippedDefinitionContext Sema::ActOnTagStartSkippedDefinition(Scope *S,
auto Result = static_cast<SkippedDefinitionContext>(CurContext);
CurContext = cast<TagDecl>(D)->getDefinition();
assert(CurContext && "skipping definition of undefined tag");
- S->setEntity(CurContext);
+ // Start lookups from the parent of the current context; we don't want to look
+ // into the pre-existing complete definition.
+ S->setEntity(CurContext->getLookupParent());
return Result;
}
@@ -1733,20 +1734,18 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
if (Error) {
if (ForRedeclaration)
Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
- << getHeaderName(Error)
- << Context.BuiltinInfo.GetName(ID);
+ << getHeaderName(Error) << Context.BuiltinInfo.getName(ID);
return nullptr;
}
if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
- << Context.BuiltinInfo.GetName(ID)
- << R;
+ << Context.BuiltinInfo.getName(ID) << R;
if (Context.BuiltinInfo.getHeaderName(ID) &&
!Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
Diag(Loc, diag::note_include_header_or_declare)
<< Context.BuiltinInfo.getHeaderName(ID)
- << Context.BuiltinInfo.GetName(ID);
+ << Context.BuiltinInfo.getName(ID);
}
DeclContext *Parent = Context.getTranslationUnitDecl();
@@ -1796,37 +1795,6 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
return New;
}
-/// \brief Filter out any previous declarations that the given declaration
-/// should not consider because they are not permitted to conflict, e.g.,
-/// because they come from hidden sub-modules and do not refer to the same
-/// entity.
-static void filterNonConflictingPreviousDecls(Sema &S,
- NamedDecl *decl,
- LookupResult &previous){
- // This is only interesting when modules are enabled.
- if ((!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility) ||
- !S.getLangOpts().ModulesHideInternalLinkage)
- return;
-
- // Empty sets are uninteresting.
- if (previous.empty())
- return;
-
- LookupResult::Filter filter = previous.makeFilter();
- while (filter.hasNext()) {
- NamedDecl *old = filter.next();
-
- // Non-hidden declarations are never ignored.
- if (S.isVisible(old))
- continue;
-
- if (!old->isExternallyVisible())
- filter.erase();
- }
-
- filter.done();
-}
-
/// Typedef declarations don't have linkage, but they still denote the same
/// entity if their types are the same.
/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
@@ -1859,13 +1827,13 @@ static void filterNonConflictingPreviousTypedefDecls(Sema &S,
// If both declarations give a tag declaration a typedef name for linkage
// purposes, then they declare the same entity.
- if (OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) &&
+ if (S.getLangOpts().CPlusPlus &&
+ OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) &&
Decl->getAnonDeclWithTypedefName())
continue;
}
- if (!Old->isExternallyVisible())
- Filter.erase();
+ Filter.erase();
}
Filter.done();
@@ -1910,7 +1878,8 @@ bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
/// how to resolve this situation, merging decls or emitting
/// diagnostics as appropriate. If there was an error, set New to be invalid.
///
-void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
+void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
+ LookupResult &OldDecls) {
// If the new decl is known invalid already, don't bother doing any
// merging checks.
if (New->isInvalidDecl()) return;
@@ -1991,6 +1960,19 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
// Make the old tag definition visible.
makeMergedDefinitionVisible(Hidden, NewTag->getLocation());
+
+ // If this was an unscoped enumeration, yank all of its enumerators
+ // out of the scope.
+ if (isa<EnumDecl>(NewTag)) {
+ Scope *EnumScope = getNonFieldDeclScope(S);
+ for (auto *D : NewTag->decls()) {
+ auto *ED = cast<EnumConstantDecl>(D);
+ assert(EnumScope->isDeclScope(ED));
+ EnumScope->RemoveDecl(ED);
+ IdResolver.RemoveDecl(ED);
+ ED->getLexicalDeclContext()->removeDecl(ED);
+ }
+ }
}
}
@@ -2206,14 +2188,15 @@ static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
}
static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
- const InheritableAttr *Attr, bool Override) {
+ const InheritableAttr *Attr,
+ Sema::AvailabilityMergeKind AMK) {
InheritableAttr *NewAttr = nullptr;
unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
NewAttr = S.mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(),
AA->getIntroduced(), AA->getDeprecated(),
AA->getObsoleted(), AA->getUnavailable(),
- AA->getMessage(), Override,
+ AA->getMessage(), AMK,
AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
@@ -2246,11 +2229,22 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
+ else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
+ NewAttr = S.mergeInternalLinkageAttr(
+ D, InternalLinkageA->getRange(),
+ &S.Context.Idents.get(InternalLinkageA->getSpelling()),
+ AttrSpellingListIndex);
+ else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
+ NewAttr = S.mergeCommonAttr(D, CommonA->getRange(),
+ &S.Context.Idents.get(CommonA->getSpelling()),
+ AttrSpellingListIndex);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
NewAttr = nullptr;
- else if (isa<DeprecatedAttr>(Attr) && Override)
+ else if ((isa<DeprecatedAttr>(Attr) || isa<UnavailableAttr>(Attr)) &&
+ (AMK == Sema::AMK_Override ||
+ AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2303,9 +2297,17 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
const Attr *NewAttribute = NewAttributes[I];
if (isa<AliasAttr>(NewAttribute)) {
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New))
- S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def));
- else {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) {
+ Sema::SkipBodyInfo SkipBody;
+ S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def), &SkipBody);
+
+ // If we're skipping this definition, drop the "alias" attribute.
+ if (SkipBody.ShouldSkip) {
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ continue;
+ }
+ } else {
VarDecl *VD = cast<VarDecl>(New);
unsigned Diag = cast<VarDecl>(Def)->isThisDeclarationADefinition() ==
VarDecl::TentativeDefinition
@@ -2376,9 +2378,24 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (!Old->hasAttrs() && !New->hasAttrs())
return;
- // attributes declared post-definition are currently ignored
+ // Attributes declared post-definition are currently ignored.
checkNewAttributesAfterDef(*this, New, Old);
+ if (AsmLabelAttr *NewA = New->getAttr<AsmLabelAttr>()) {
+ if (AsmLabelAttr *OldA = Old->getAttr<AsmLabelAttr>()) {
+ if (OldA->getLabel() != NewA->getLabel()) {
+ // This redeclaration changes __asm__ label.
+ Diag(New->getLocation(), diag::err_different_asm_label);
+ Diag(OldA->getLocation(), diag::note_previous_declaration);
+ }
+ } else if (Old->isUsed()) {
+ // This redeclaration adds an __asm__ label to a declaration that has
+ // already been ODR-used.
+ Diag(New->getLocation(), diag::err_late_asm_label_name)
+ << isa<FunctionDecl>(Old) << New->getAttr<AsmLabelAttr>()->getRange();
+ }
+ }
+
if (!Old->hasAttrs())
return;
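A rough sketch of the inputs this hunk now rejects (hypothetical examples, not taken from the patch's tests):

  int f() __asm__("f_v1");
  int f() __asm__("f_v2");    // error: conflicting __asm__ label on redeclaration

  int g();
  int use() { return g(); }   // ODR-use of g
  int g() __asm__("g_impl");  // error: __asm__ label added after the declaration was used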
@@ -2389,8 +2406,8 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (!foundAny) New->setAttrs(AttrVec());
for (auto *I : Old->specific_attrs<InheritableAttr>()) {
- bool Override = false;
// Ignore deprecated/unavailable/availability attributes if requested.
+ AvailabilityMergeKind LocalAMK = AMK_None;
if (isa<DeprecatedAttr>(I) ||
isa<UnavailableAttr>(I) ||
isa<AvailabilityAttr>(I)) {
@@ -2399,10 +2416,9 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
continue;
case AMK_Redeclaration:
- break;
-
case AMK_Override:
- Override = true;
+ case AMK_ProtocolImplementation:
+ LocalAMK = AMK;
break;
}
}
@@ -2411,7 +2427,7 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (isa<UsedAttr>(I))
continue;
- if (mergeDeclAttribute(*this, New, I, Override))
+ if (mergeDeclAttribute(*this, New, I, LocalAMK))
foundAny = true;
}
@@ -2619,6 +2635,21 @@ static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS,
return false;
}
+static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
+ const FunctionDecl *B) {
+ assert(A->getNumParams() == B->getNumParams());
+
+ auto AttrEq = [](const ParmVarDecl *A, const ParmVarDecl *B) {
+ const auto *AttrA = A->getAttr<PassObjectSizeAttr>();
+ const auto *AttrB = B->getAttr<PassObjectSizeAttr>();
+ if (AttrA == AttrB)
+ return true;
+ return AttrA && AttrB && AttrA->getType() == AttrB->getType();
+ };
+
+ return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
+}
+
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
@@ -2685,6 +2716,13 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
}
+ if (New->hasAttr<InternalLinkageAttr>() &&
+ !Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->dropAttr<InternalLinkageAttr>();
+ }
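A hedged sketch of the redeclaration pattern this rejects, assuming the GNU-style internal_linkage spelling:

  void f();
  void f() __attribute__((internal_linkage));  // error: attribute missing from the
                                               // first declaration; it is dropped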
// If a function is first declared with a calling convention, but is later
// declared or defined without one, all following decls assume the calling
@@ -2790,7 +2828,17 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
Old->isInlined() && !Old->hasAttr<GNUInlineAttr>()) {
UndefinedButUsed.erase(Old->getCanonicalDecl());
}
-
+
+ // If pass_object_size params don't match up perfectly, this isn't a valid
+ // redeclaration.
+ if (Old->getNumParams() > 0 && Old->getNumParams() == New->getNumParams() &&
+ !hasIdenticalPassObjectSizeAttrs(Old, New)) {
+ Diag(New->getLocation(), diag::err_different_pass_object_size_params)
+ << New->getDeclName();
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
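For illustration, a hypothetical pair of declarations the new check rejects:

  void sink(void *p __attribute__((pass_object_size(0))));
  void sink(void *p);  // error: pass_object_size attributes don't match the previous declaration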
if (getLangOpts().CPlusPlus) {
// (C++98 13.1p2):
// Certain function declarations cannot be overloaded:
@@ -3115,7 +3163,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// remain visible, a single bogus local redeclaration (which is
// actually only a warning) could break all the downstream code.
if (!New->getLexicalDeclContext()->isFunctionOrMethod())
- New->getIdentifier()->setBuiltinID(Builtin::NotBuiltin);
+ New->getIdentifier()->revertBuiltin();
return false;
}
@@ -3179,8 +3227,11 @@ void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
// Merge the attributes, including deprecated/unavailable
AvailabilityMergeKind MergeKind =
- isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
- : AMK_Override;
+ isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
+ ? AMK_ProtocolImplementation
+ : isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
+ : AMK_Override;
+
mergeDeclAttributes(newMethod, oldMethod, MergeKind);
// Merge attributes from the parameters.
@@ -3331,6 +3382,9 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
if (New->isInvalidDecl())
return;
+ if (!shouldLinkPossiblyHiddenDecl(Previous, New))
+ return;
+
VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();
// Verify the old decl was also a variable or variable template.
@@ -3362,15 +3416,12 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
- if (!shouldLinkPossiblyHiddenDecl(Old, New))
- return;
-
// Ensure the template parameters are compatible.
if (NewTemplate &&
!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
/*Complain=*/true, TPL_TemplateMatch))
- return;
+ return New->setInvalidDecl();
// C++ [class.mem]p1:
// A member shall not be declared twice in the member-specification [...]
@@ -3395,6 +3446,14 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->dropAttr<WeakImportAttr>();
}
+ if (New->hasAttr<InternalLinkageAttr>() &&
+ !Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->dropAttr<InternalLinkageAttr>();
+ }
+
// Merge the types.
VarDecl *MostRecent = Old->getMostRecentDecl();
if (MostRecent != Old) {
@@ -3583,11 +3642,11 @@ void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD) {
- // Do nothing if the tag is not anonymous or already has an
- // associated typedef (from an earlier typedef in this decl group).
- if (TagFromDeclSpec->getIdentifier())
+ if (TagFromDeclSpec->isInvalidDecl())
return;
- if (TagFromDeclSpec->getTypedefNameForAnonDecl())
+
+ // Do nothing if the tag already has a name for linkage purposes.
+ if (TagFromDeclSpec->hasNameForLinkage())
return;
// A well-formed anonymous tag must always be a TUK_Definition.
@@ -3595,8 +3654,11 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
// The type must match the tag exactly; no qualifiers allowed.
if (!Context.hasSameType(NewTD->getUnderlyingType(),
- Context.getTagDeclType(TagFromDeclSpec)))
+ Context.getTagDeclType(TagFromDeclSpec))) {
+ if (getLangOpts().CPlusPlus)
+ Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
return;
+ }
// If we've already computed linkage for the anonymous tag, then
// adding a typedef name for the anonymous decl can change that
@@ -3695,6 +3757,14 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
return TagD;
}
+ if (DS.isConceptSpecified()) {
+ // C++ Concepts TS [dcl.spec.concept]p1: A concept definition refers to
+ // either a function concept and its definition or a variable concept and
+ // its initializer.
+ Diag(DS.getConceptSpecLoc(), diag::err_concept_wrong_decl_kind);
+ return TagD;
+ }
+
DiagnoseFunctionSpecifiers(DS);
if (DS.isFriendSpecified()) {
@@ -3709,10 +3779,15 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
bool IsExplicitSpecialization =
!TemplateParams.empty() && TemplateParams.back()->size() == 0;
if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
- !IsExplicitInstantiation && !IsExplicitSpecialization) {
+ !IsExplicitInstantiation && !IsExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(Tag)) {
// Per C++ [dcl.type.elab]p1, a class declaration cannot have a
// nested-name-specifier unless it is an explicit instantiation
// or an explicit specialization.
+ //
+ // FIXME: We allow class template partial specializations here too, per the
+ // obvious intent of DR1819.
+ //
// Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
@@ -3882,7 +3957,7 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
DeclContext *Owner,
DeclarationName Name,
SourceLocation NameLoc,
- unsigned diagnostic) {
+ bool IsUnion) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
Sema::ForRedeclaration);
if (!SemaRef.LookupName(R, S)) return false;
@@ -3897,7 +3972,8 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
return false;
- SemaRef.Diag(NameLoc, diagnostic) << Name;
+ SemaRef.Diag(NameLoc, diag::err_anonymous_record_member_redecl)
+ << IsUnion << Name;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
return true;
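A minimal sketch of what the merged diagnostic reports, with the struct/union spelling now selected by the bool:

  struct S {
    int x;
    union { int x; };  // error: member of anonymous union redeclares 'x'
  };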
@@ -3925,10 +4001,6 @@ static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
AccessSpecifier AS,
SmallVectorImpl<NamedDecl *> &Chaining,
bool MSAnonStruct) {
- unsigned diagKind
- = AnonRecord->isUnion() ? diag::err_anonymous_union_member_redecl
- : diag::err_anonymous_struct_member_redecl;
-
bool Invalid = false;
// Look at every FieldDecl and IndirectFieldDecl with a name.
@@ -3937,7 +4009,8 @@ static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
cast<NamedDecl>(D)->getDeclName()) {
ValueDecl *VD = cast<ValueDecl>(D);
if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
- VD->getLocation(), diagKind)) {
+ VD->getLocation(),
+ AnonRecord->isUnion())) {
// C++ [class.union]p2:
// The names of the members of an anonymous union shall be
// distinct from the names of any other entity in the
@@ -4131,7 +4204,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
assert(FD->getAccess() != AS_none);
if (FD->getAccess() != AS_public) {
Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
- << (int)Record->isUnion() << (int)(FD->getAccess() == AS_protected);
+ << Record->isUnion() << (FD->getAccess() == AS_protected);
Invalid = true;
}
@@ -4155,11 +4228,11 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Visual C++ allows type definition in anonymous struct or union.
if (getLangOpts().MicrosoftExt)
Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
- << (int)Record->isUnion();
+ << Record->isUnion();
else {
// This is a nested type declaration.
Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
- << (int)Record->isUnion();
+ << Record->isUnion();
Invalid = true;
}
} else {
@@ -4168,7 +4241,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// not part of standard C++.
Diag(MemRecord->getLocation(),
diag::ext_anonymous_record_with_anonymous_type)
- << (int)Record->isUnion();
+ << Record->isUnion();
}
} else if (isa<AccessSpecDecl>(Mem)) {
// Any access specifier is fine.
@@ -4189,10 +4262,9 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (getLangOpts().MicrosoftExt &&
DK == diag::err_anonymous_record_with_type)
Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
- << (int)Record->isUnion();
+ << Record->isUnion();
else {
- Diag(Mem->getLocation(), DK)
- << (int)Record->isUnion();
+ Diag(Mem->getLocation(), DK) << Record->isUnion();
Invalid = true;
}
}
@@ -4209,7 +4281,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (!Record->isUnion() && !Owner->isRecord()) {
Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
- << (int)getLangOpts().CPlusPlus;
+ << getLangOpts().CPlusPlus;
Invalid = true;
}
@@ -4885,6 +4957,23 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
+ if (D.getDeclSpec().isConceptSpecified()) {
+ // C++ Concepts TS [dcl.spec.concept]p1: The concept specifier shall be
+ // applied only to the definition of a function template or variable
+ // template, declared in namespace scope
+ if (!TemplateParamLists.size()) {
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+ diag::err_concept_wrong_decl_kind);
+ return nullptr;
+ }
+
+ if (!DC->getRedeclContext()->isFileContext()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_concept_decls_may_only_appear_in_namespace_scope);
+ return nullptr;
+ }
+ }
+
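A sketch of Concepts TS inputs these checks accept and reject (hypothetical, under the in-progress concept support):

  template<typename T> concept bool Small = sizeof(T) <= sizeof(int);  // OK: variable concept
  concept bool NotATemplate = true;   // error: 'concept' on a non-template declaration
  struct X {
    template<typename T> static concept bool C = true;  // error: concepts may only
                                                        // appear at namespace scope
  };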
NamedDecl *New;
bool AddToScope = true;
@@ -5102,6 +5191,9 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
if (D.getDeclSpec().isConstexprSpecified())
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 1;
+ if (D.getDeclSpec().isConceptSpecified())
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+ diag::err_concept_wrong_decl_kind);
if (D.getName().Kind != UnqualifiedId::IK_Identifier) {
Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
@@ -5174,7 +5266,7 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous);
if (!Previous.empty()) {
Redeclaration = true;
- MergeTypedefNameDecl(NewTD, Previous);
+ MergeTypedefNameDecl(S, NewTD, Previous);
}
// If this is the C FILE type, notify the AST context.
@@ -5343,14 +5435,26 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
}
}
- // dll attributes require external linkage.
if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
- if (!ND.isExternallyVisible()) {
+ // dll attributes require external linkage. Static locals may have external
+ // linkage but still cannot be explicitly imported or exported.
+ auto *VD = dyn_cast<VarDecl>(&ND);
+ if (!ND.isExternallyVisible() || (VD && VD->isStaticLocal())) {
S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
<< &ND << Attr;
ND.setInvalidDecl();
}
}
+
+ // Virtual functions cannot be marked as 'notail'.
+ if (auto *Attr = ND.getAttr<NotTailCalledAttr>())
+ if (auto *MD = dyn_cast<CXXMethodDecl>(&ND))
+ if (MD->isVirtual()) {
+ S.Diag(ND.getLocation(),
+ diag::err_invalid_attribute_on_virtual_function)
+ << Attr;
+ ND.dropAttr<NotTailCalledAttr>();
+ }
}
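Two hypothetical declarations these checks now flag (the dllimport one assumes -fms-extensions):

  void holder() {
    static __declspec(dllimport) int cache;  // error: dll attribute on a static local
  }
  struct V {
    virtual void f() __attribute__((not_tail_called));  // error: 'not_tail_called' on a virtual function
  };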
static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
@@ -5501,6 +5605,12 @@ static bool isIncompleteDeclExternC(Sema &S, const T *D) {
// In C++, the overloadable attribute negates the effects of extern "C".
if (!D->isInExternCContext() || D->template hasAttr<OverloadableAttr>())
return false;
+
+ // So do CUDA's host/device attributes if overloading is enabled.
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ (D->template hasAttr<CUDADeviceAttr>() ||
+ D->template hasAttr<CUDAHostAttr>()))
+ return false;
}
return D->isExternC();
}
@@ -5574,15 +5684,12 @@ bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
return true;
}
-/// \brief Returns true if given declaration is TU-scoped and externally
-/// visible.
-static bool isDeclTUScopedExternallyVisible(const Decl *D) {
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- return (FD->getDeclContext()->isTranslationUnit() || FD->isExternC()) &&
- FD->hasExternalFormalLinkage();
- else if (auto *VD = dyn_cast<VarDecl>(D))
- return (VD->getDeclContext()->isTranslationUnit() || VD->isExternC()) &&
- VD->hasExternalFormalLinkage();
+/// \brief Returns true if given declaration has external C language linkage.
+static bool isDeclExternC(const Decl *D) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->isExternC();
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ return VD->isExternC();
llvm_unreachable("Unknown type of decl!");
}
@@ -5646,7 +5753,8 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Suppress the warning in system macros, it's used in macros in some
// popular C system headers, such as in glibc's htonl() macro.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::warn_deprecated_register)
+ getLangOpts().CPlusPlus1z ? diag::ext_register_storage_class
+ : diag::warn_deprecated_register)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
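A one-line sketch of the affected input:

  register int r = 0;  // inside a function: warn_deprecated_register before C++1z,
                       // ext_register_storage_class under C++1z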
@@ -5670,12 +5778,6 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
if (getLangOpts().OpenCL) {
- // Set up the special work-group-local storage class for variables in the
- // OpenCL __local address space.
- if (R.getAddressSpace() == LangAS::opencl_local) {
- SC = SC_OpenCLWorkGroupLocal;
- }
-
// OpenCL v1.2 s6.9.b p4:
// The sampler type cannot be used with the __local and __global address
// space qualifiers.
@@ -5712,7 +5814,10 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II,
R, TInfo, SC);
-
+
+ if (D.getDeclSpec().containsPlaceholderType() && R->getContainedAutoType())
+ ParsingInitForAutoVars.insert(NewVD);
+
if (D.isInvalidType())
NewVD->setInvalidDecl();
} else {
@@ -5742,8 +5847,6 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
break;
case SC_PrivateExtern:
llvm_unreachable("C storage class in c++!");
- case SC_OpenCLWorkGroupLocal:
- llvm_unreachable("OpenCL storage class in c++!");
}
}
@@ -5860,11 +5963,31 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
NewVD->setTemplateParameterListsInfo(
- Context, TemplateParamLists.size() - VDTemplateParamLists,
- TemplateParamLists.data());
+ Context, TemplateParamLists.drop_back(VDTemplateParamLists));
if (D.getDeclSpec().isConstexprSpecified())
NewVD->setConstexpr(true);
+
+ if (D.getDeclSpec().isConceptSpecified()) {
+ NewVD->setConcept(true);
+
+ // C++ Concepts TS [dcl.spec.concept]p2: A concept definition shall not
+ // be declared with the thread_local, inline, friend, or constexpr
+ // specifiers, [...]
+ if (D.getDeclSpec().getThreadStorageClassSpec() == TSCS_thread_local) {
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 0 << 0;
+ NewVD->setInvalidDecl(true);
+ }
+
+ if (D.getDeclSpec().isConstexprSpecified()) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 0 << 3;
+ NewVD->setInvalidDecl(true);
+ }
+ }
}
// Set the lexical context. If the declarator has a C++ scope specifier, the
@@ -5990,19 +6113,31 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
break;
case SC_Register:
// Local named register variable.
- if (!Context.getTargetInfo().isValidGCCRegisterName(Label))
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
+ DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
break;
case SC_Static:
case SC_Extern:
case SC_PrivateExtern:
- case SC_OpenCLWorkGroupLocal:
break;
}
} else if (SC == SC_Register) {
// Global named register variable.
- if (!Context.getTargetInfo().isValidGCCRegisterName(Label))
- Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
+ const auto &TI = Context.getTargetInfo();
+ bool HasSizeMismatch;
+
+ if (!TI.isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ else if (!TI.validateGlobalRegisterVariable(Label,
+ Context.getTypeSize(R),
+ HasSizeMismatch))
+ Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
+ else if (HasSizeMismatch)
+ Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
+ }
+
if (!R->isIntegralType(Context) && !R->isPointerType()) {
Diag(D.getLocStart(), diag::err_asm_bad_register_type);
NewVD->setInvalidDecl(true);
@@ -6011,13 +6146,16 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
Context, Label, 0));
- } else if (!ExtnameUndeclaredIdentifiers.empty() &&
- isDeclTUScopedExternallyVisible(NewVD)) {
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
- NewVD->addAttr(I->second);
- ExtnameUndeclaredIdentifiers.erase(I);
+ if (isDeclExternC(NewVD)) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/1 << NewVD;
}
}
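A hedged sketch of the new behavior, assuming a C++ translation unit:

  #pragma redefine_extname fn fn_impl
  int fn();   // C++ language linkage, not extern "C": now warns that the pragma is not applied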
@@ -6118,6 +6256,22 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
+ // Special handling of a variable named 'main'.
+ if (Name.isIdentifier() && Name.getAsIdentifierInfo()->isStr("main") &&
+ NewVD->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
+ !getLangOpts().Freestanding && !NewVD->getDescribedVarTemplate()) {
+
+ // C++ [basic.start.main]p3
+ // A program that declares a variable main at global scope is ill-formed.
+ if (getLangOpts().CPlusPlus)
+ Diag(D.getLocStart(), diag::err_main_global_variable);
+
+ // In C, an external-linkage variable named main results in undefined
+ // behavior.
+ else if (NewVD->hasExternalFormalLinkage())
+ Diag(D.getLocStart(), diag::warn_main_redefined);
+ }
+
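Hypothetical inputs for the new check:

  int main;   // C++: error, a global variable may not be named 'main'
  // In C, the same declaration only warns, and only if it has external linkage.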
if (D.isRedeclaration() && !Previous.empty()) {
checkDLLAttributeRedeclaration(
*this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewVD,
@@ -6370,31 +6524,79 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// This includes arrays of objects with address space qualifiers, but not
// automatic variables that point to other address spaces.
// ISO/IEC TR 18037 S5.1.2
- if (NewVD->hasLocalStorage() && T.getAddressSpace() != 0) {
+ if (!getLangOpts().OpenCL
+ && NewVD->hasLocalStorage() && T.getAddressSpace() != 0) {
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl);
NewVD->setInvalidDecl();
return;
}
- // OpenCL v1.2 s6.5 - All program scope variables must be declared in the
- // __constant address space.
- if (getLangOpts().OpenCL && NewVD->isFileVarDecl()
- && T.getAddressSpace() != LangAS::opencl_constant
- && !T->isSamplerT()){
- Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space);
- NewVD->setInvalidDecl();
- return;
- }
-
// OpenCL v1.2 s6.8 -- The static qualifier is valid only in program
// scope.
- if ((getLangOpts().OpenCLVersion >= 120)
- && NewVD->isStaticLocal()) {
+ if (getLangOpts().OpenCLVersion == 120 &&
+ !getOpenCLOptions().cl_clang_storage_class_specifiers &&
+ NewVD->isStaticLocal()) {
Diag(NewVD->getLocation(), diag::err_static_function_scope);
NewVD->setInvalidDecl();
return;
}
+ // OpenCL v1.2 s6.5 - All program scope variables must be declared in the
+ // __constant address space.
+ // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // variables inside a function can also be declared in the global
+ // address space.
+ if (getLangOpts().OpenCL) {
+ if (NewVD->isFileVarDecl()) {
+ if (!T->isSamplerT() &&
+ !(T.getAddressSpace() == LangAS::opencl_constant ||
+ (T.getAddressSpace() == LangAS::opencl_global &&
+ getLangOpts().OpenCLVersion == 200))) {
+ if (getLangOpts().OpenCLVersion == 200)
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "global or constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "constant";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ } else {
+ // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // variables inside a function can also be declared in the global
+ // address space.
+ if (NewVD->isStaticLocal() &&
+ !(T.getAddressSpace() == LangAS::opencl_constant ||
+ (T.getAddressSpace() == LangAS::opencl_global &&
+ getLangOpts().OpenCLVersion == 200))) {
+ if (getLangOpts().OpenCLVersion == 200)
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "global or constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
+ << "constant";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ // OpenCL v1.1 s6.5.2 and s6.5.3 no local or constant variables
+ // in functions.
+ if (T.getAddressSpace() == LangAS::opencl_constant ||
+ T.getAddressSpace() == LangAS::opencl_local) {
+ FunctionDecl *FD = getCurFunctionDecl();
+ if (FD && !FD->hasAttr<OpenCLKernelAttr>()) {
+ if (T.getAddressSpace() == LangAS::opencl_constant)
+ Diag(NewVD->getLocation(), diag::err_opencl_non_kernel_variable)
+ << "constant";
+ else
+ Diag(NewVD->getLocation(), diag::err_opencl_non_kernel_variable)
+ << "local";
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+ }
+ }
+
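A sketch in OpenCL C of what the reworked address-space checks accept and reject (hypothetical, not from the patch's tests):

  __constant int cg = 1;   // OK at program scope in any OpenCL version
  int pg;                  // OpenCL 1.2: error, program scope requires __constant
  __global int g2 = 0;     // accepted only under OpenCL 2.0
  void helper() { __local int t; }    // error: __local variable in a non-kernel function
  kernel void k() { __local int t; }  // OK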
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>()) {
if (getLangOpts().getGC() != LangOptions::NonGC)
@@ -6506,9 +6708,6 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
checkForConflictWithNonVisibleExternC(*this, NewVD, Previous))
Previous.setShadowed();
- // Filter out any non-conflicting previous declarations.
- filterNonConflictingPreviousDecls(*this, NewVD, Previous);
-
if (!Previous.empty()) {
MergeVarDecl(NewVD, Previous);
return true;
@@ -6516,50 +6715,45 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
return false;
}
-/// \brief Data used with FindOverriddenMethod
-struct FindOverriddenMethodData {
+namespace {
+struct FindOverriddenMethod {
Sema *S;
CXXMethodDecl *Method;
-};
-/// \brief Member lookup function that determines whether a given C++
-/// method overrides a method in a base class, to be used with
-/// CXXRecordDecl::lookupInBases().
-static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- void *UserData) {
- RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+ /// Member lookup function that determines whether a given C++
+ /// method overrides a method in a base class, to be used with
+ /// CXXRecordDecl::lookupInBases().
+ bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->getAs<RecordType>()->getDecl();
- FindOverriddenMethodData *Data
- = reinterpret_cast<FindOverriddenMethodData*>(UserData);
-
- DeclarationName Name = Data->Method->getDeclName();
-
- // FIXME: Do we care about other names here too?
- if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
- // We really want to find the base class destructor here.
- QualType T = Data->S->Context.getTypeDeclType(BaseRecord);
- CanQualType CT = Data->S->Context.getCanonicalType(T);
-
- Name = Data->S->Context.DeclarationNames.getCXXDestructorName(CT);
- }
-
- for (Path.Decls = BaseRecord->lookup(Name);
- !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- NamedDecl *D = Path.Decls.front();
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD, false))
- return true;
+ DeclarationName Name = Method->getDeclName();
+
+ // FIXME: Do we care about other names here too?
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // We really want to find the base class destructor here.
+ QualType T = S->Context.getTypeDeclType(BaseRecord);
+ CanQualType CT = S->Context.getCanonicalType(T);
+
+ Name = S->Context.DeclarationNames.getCXXDestructorName(CT);
}
+
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ NamedDecl *D = Path.Decls.front();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isVirtual() && !S->IsOverload(Method, MD, false))
+ return true;
+ }
+ }
+
+ return false;
}
-
- return false;
-}
+};
+
+enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
+} // end anonymous namespace
-namespace {
- enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
-}
/// \brief Report an error regarding overriding, along with any relevant
/// overridden methods.
///
@@ -6587,13 +6781,13 @@ static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
// Look for methods in base classes that this method might override.
CXXBasePaths Paths;
- FindOverriddenMethodData Data;
- Data.Method = MD;
- Data.S = this;
+ FindOverriddenMethod FOM;
+ FOM.Method = MD;
+ FOM.S = this;
bool hasDeletedOverridenMethods = false;
bool hasNonDeletedOverridenMethods = false;
bool AddedAny = false;
- if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) {
+ if (DC->lookupInBases(FOM, Paths)) {
for (auto *I : Paths.found_decls()) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
MD->addOverriddenMethod(OldMD->getCanonicalDecl());
@@ -7200,7 +7394,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< DeclSpec::getSpecifierName(TSCS);
if (D.isFirstDeclarationOfMember())
- adjustMemberFunctionCC(R, D.isStaticMember());
+ adjustMemberFunctionCC(R, D.isStaticMember(), D.isCtorOrDtor(),
+ D.getIdentifierLoc());
bool isFriend = false;
FunctionTemplateDecl *FunctionTemplate = nullptr;
@@ -7236,6 +7431,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool isExplicit = D.getDeclSpec().isExplicitSpecified();
bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+ bool isConcept = D.getDeclSpec().isConceptSpecified();
isFriend = D.getDeclSpec().isFriendSpecified();
if (isFriend && !isInline && D.isFunctionDefinition()) {
// C++ [class.friend]p5
@@ -7309,17 +7505,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// For source fidelity, store the other template param lists.
if (TemplateParamLists.size() > 1) {
NewFD->setTemplateParameterListsInfo(Context,
- TemplateParamLists.size() - 1,
- TemplateParamLists.data());
+ TemplateParamLists.drop_back(1));
}
} else {
// This is a function template specialization.
isFunctionTemplateSpecialization = true;
// For source fidelity, store all the template param lists.
if (TemplateParamLists.size() > 0)
- NewFD->setTemplateParameterListsInfo(Context,
- TemplateParamLists.size(),
- TemplateParamLists.data());
+ NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
// C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
if (isFriend) {
@@ -7349,9 +7542,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// this is NOT (an explicit specialization of) a template.
if (TemplateParamLists.size() > 0)
// For source fidelity, store all the template param lists.
- NewFD->setTemplateParameterListsInfo(Context,
- TemplateParamLists.size(),
- TemplateParamLists.data());
+ NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
}
if (Invalid) {
@@ -7452,6 +7643,67 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor);
}
+ if (isConcept) {
+ // C++ Concepts TS [dcl.spec.concept]p1: The concept specifier shall be
+ // applied only to the definition of a function template [...]
+ if (!D.isFunctionDefinition()) {
+ Diag(D.getDeclSpec().getConceptSpecLoc(),
+ diag::err_function_concept_not_defined);
+ NewFD->setInvalidDecl();
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p1: [...] A function concept shall
+ // have no exception-specification and is treated as if it were specified
+ // with noexcept(true) (15.4). [...]
+ if (const FunctionProtoType *FPT = R->getAs<FunctionProtoType>()) {
+ if (FPT->hasExceptionSpec()) {
+ SourceRange Range;
+ if (D.isFunctionDeclarator())
+ Range = D.getFunctionTypeInfo().getExceptionSpecRange();
+ Diag(NewFD->getLocation(), diag::err_function_concept_exception_spec)
+ << FixItHint::CreateRemoval(Range);
+ NewFD->setInvalidDecl();
+ } else {
+ Context.adjustExceptionSpec(NewFD, EST_BasicNoexcept);
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p5: A function concept has the
+ // following restrictions:
+ // - The declaration's parameter list shall be equivalent to an empty
+ // parameter list.
+ if (FPT->getNumParams() > 0 || FPT->isVariadic())
+ Diag(NewFD->getLocation(), diag::err_function_concept_with_params);
+ }
+
+ // C++ Concepts TS [dcl.spec.concept]p2: Every concept definition is
+ // implicitly defined to be a constexpr declaration (implicitly inline).
+ NewFD->setImplicitlyInline();
+
+ // C++ Concepts TS [dcl.spec.concept]p2: A concept definition shall not
+ // be declared with the thread_local, inline, friend, or constexpr
+ // specifiers, [...]
+ if (isInline) {
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 1;
+ NewFD->setInvalidDecl(true);
+ }
+
+ if (isFriend) {
+ Diag(D.getDeclSpec().getFriendSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 2;
+ NewFD->setInvalidDecl(true);
+ }
+
+ if (isConstexpr) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_concept_decl_invalid_specifiers)
+ << 1 << 3;
+ NewFD->setInvalidDecl(true);
+ }
+ }
+
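Hypothetical Concepts TS function-concept declarations exercising these rules:

  template<typename T> concept bool IsSmall() { return sizeof(T) <= 4; }  // OK: implicitly
                                                                          // inline, noexcept(true)
  template<typename T> concept bool NoDef();                        // error: must be defined
  template<typename T> concept bool HasParam(int) { return true; }  // error: parameter list
                                                                    // must be empty
  template<typename T> constexpr concept bool Both() { return true; }  // error: 'constexpr'
                                                                       // with 'concept'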
// If __module_private__ was specified, mark the function accordingly.
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (isFunctionTemplateSpecialization) {
@@ -7539,13 +7791,16 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
StringLiteral *SE = cast<StringLiteral>(E);
NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
SE->getString(), 0));
- } else if (!ExtnameUndeclaredIdentifiers.empty() &&
- isDeclTUScopedExternallyVisible(NewFD)) {
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
- NewFD->addAttr(I->second);
- ExtnameUndeclaredIdentifiers.erase(I);
+ if (isDeclExternC(NewFD)) {
+ NewFD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ } else
+ Diag(NewFD->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/0 << NewFD;
}
}
@@ -8061,9 +8316,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
bool MergeTypeWithPrevious = !getLangOpts().CPlusPlus &&
!Previous.isShadowed();
- // Filter out any non-conflicting previous declarations.
- filterNonConflictingPreviousDecls(*this, NewFD, Previous);
-
bool Redeclaration = false;
NamedDecl *OldDecl = nullptr;
@@ -8075,7 +8327,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// there's no more work to do here; we'll just add the new
// function to the scope.
if (!AllowOverloadingOfFunction(Previous, Context)) {
- NamedDecl *Candidate = Previous.getFoundDecl();
+ NamedDecl *Candidate = Previous.getRepresentativeDecl();
if (shouldLinkPossiblyHiddenDecl(Candidate, NewFD)) {
Redeclaration = true;
OldDecl = Candidate;
@@ -8117,7 +8369,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// Check for a previous extern "C" declaration with this name.
if (!Redeclaration &&
checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) {
- filterNonConflictingPreviousDecls(*this, NewFD, Previous);
if (!Previous.empty()) {
// This is an extern "C" declaration with the same name as a previous
// declaration, and thus redeclares that entity...
@@ -8294,7 +8545,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) {
// The type of this function differs from the type of the builtin,
// so forget about the builtin entirely.
- Context.BuiltinInfo.ForgetBuiltin(BuiltinID, Context.Idents);
+ Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
}
}
@@ -8577,9 +8828,8 @@ namespace {
// Convert FieldDecls to their index number.
llvm::SmallVector<unsigned, 4> UsedFieldIndex;
- for (auto I = Fields.rbegin(), E = Fields.rend(); I != E; ++I) {
- UsedFieldIndex.push_back((*I)->getFieldIndex());
- }
+ for (const FieldDecl *I : llvm::reverse(Fields))
+ UsedFieldIndex.push_back(I->getFieldIndex());
// See if a warning is needed by checking the first difference in index
// numbers. If the field being used has an index less than the field being
@@ -8832,6 +9082,96 @@ namespace {
}
}
+QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
+ DeclarationName Name, QualType Type,
+ TypeSourceInfo *TSI,
+ SourceRange Range, bool DirectInit,
+ Expr *Init) {
+ bool IsInitCapture = !VDecl;
+ assert((!VDecl || !VDecl->isInitCapture()) &&
+ "init captures are expected to be deduced prior to initialization");
+
+ ArrayRef<Expr *> DeduceInits = Init;
+ if (DirectInit) {
+ if (auto *PL = dyn_cast<ParenListExpr>(Init))
+ DeduceInits = PL->exprs();
+ else if (auto *IL = dyn_cast<InitListExpr>(Init))
+ DeduceInits = IL->inits();
+ }
+
+ // Deduction only works if we have exactly one source expression.
+ if (DeduceInits.empty()) {
+ // It isn't possible to write this directly, but it is possible to
+ // end up in this situation with "auto x(some_pack...);"
+ Diag(Init->getLocStart(), IsInitCapture
+ ? diag::err_init_capture_no_expression
+ : diag::err_auto_var_init_no_expression)
+ << Name << Type << Range;
+ return QualType();
+ }
+
+ if (DeduceInits.size() > 1) {
+ Diag(DeduceInits[1]->getLocStart(),
+ IsInitCapture ? diag::err_init_capture_multiple_expressions
+ : diag::err_auto_var_init_multiple_expressions)
+ << Name << Type << Range;
+ return QualType();
+ }
+
+ Expr *DeduceInit = DeduceInits[0];
+ if (DirectInit && isa<InitListExpr>(DeduceInit)) {
+ Diag(Init->getLocStart(), IsInitCapture
+ ? diag::err_init_capture_paren_braces
+ : diag::err_auto_var_init_paren_braces)
+ << isa<InitListExpr>(Init) << Name << Type << Range;
+ return QualType();
+ }
+
+ // Expressions default to 'id' when we're in a debugger.
+ bool DefaultedAnyToId = false;
+ if (getLangOpts().DebuggerCastResultToId &&
+ Init->getType() == Context.UnknownAnyTy && !IsInitCapture) {
+ ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
+ if (Result.isInvalid()) {
+ return QualType();
+ }
+ Init = Result.get();
+ DefaultedAnyToId = true;
+ }
+
+ QualType DeducedType;
+ if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
+ if (!IsInitCapture)
+ DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
+ else if (isa<InitListExpr>(Init))
+ Diag(Range.getBegin(),
+ diag::err_init_capture_deduction_failure_from_init_list)
+ << Name
+ << (DeduceInit->getType().isNull() ? TSI->getType()
+ : DeduceInit->getType())
+ << DeduceInit->getSourceRange();
+ else
+ Diag(Range.getBegin(), diag::err_init_capture_deduction_failure)
+ << Name << TSI->getType()
+ << (DeduceInit->getType().isNull() ? TSI->getType()
+ : DeduceInit->getType())
+ << DeduceInit->getSourceRange();
+ }
+
+ // Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
+ // 'id' instead of a specific object type prevents most of our usual
+ // checks.
+ // We only want to warn outside of template instantiations, though:
+ // inside a template, the 'id' could have come from a parameter.
+ if (ActiveTemplateInstantiations.empty() && !DefaultedAnyToId &&
+ !IsInitCapture && !DeducedType.isNull() && DeducedType->isObjCIdType()) {
+ SourceLocation Loc = TSI->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::warn_auto_var_is_id) << Name << Range;
+ }
+
+ return DeducedType;
+}
+
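The deduction paths this factors out, on hypothetical inputs:

  auto a(42);    // OK: deduces int
  auto b(1, 2);  // error: multiple expressions in the initializer
  auto c({1});   // error: cannot deduce 'auto' from a parenthesized initializer list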
/// AddInitializerToDecl - Adds the initializer Init to the
/// declaration dcl. If DirectInit is true, this is C++ direct
/// initialization rather than copy initialization.
@@ -8859,79 +9199,27 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
RealDecl->setInvalidDecl();
return;
}
- ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (TypeMayContainAuto && VDecl->getType()->isUndeducedType()) {
// Attempt typo correction early so that the type of the init expression can
- // be deduced based on the chosen correction:if the original init contains a
+ // be deduced based on the chosen correction if the original init contains a
// TypoExpr.
ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
if (!Res.isUsable()) {
RealDecl->setInvalidDecl();
return;
}
+ Init = Res.get();
- if (Res.get() != Init) {
- Init = Res.get();
- if (CXXDirectInit)
- CXXDirectInit = dyn_cast<ParenListExpr>(Init);
- }
-
- Expr *DeduceInit = Init;
- // Initializer could be a C++ direct-initializer. Deduction only works if it
- // contains exactly one expression.
- if (CXXDirectInit) {
- if (CXXDirectInit->getNumExprs() == 0) {
- // It isn't possible to write this directly, but it is possible to
- // end up in this situation with "auto x(some_pack...);"
- Diag(CXXDirectInit->getLocStart(),
- VDecl->isInitCapture() ? diag::err_init_capture_no_expression
- : diag::err_auto_var_init_no_expression)
- << VDecl->getDeclName() << VDecl->getType()
- << VDecl->getSourceRange();
- RealDecl->setInvalidDecl();
- return;
- } else if (CXXDirectInit->getNumExprs() > 1) {
- Diag(CXXDirectInit->getExpr(1)->getLocStart(),
- VDecl->isInitCapture()
- ? diag::err_init_capture_multiple_expressions
- : diag::err_auto_var_init_multiple_expressions)
- << VDecl->getDeclName() << VDecl->getType()
- << VDecl->getSourceRange();
- RealDecl->setInvalidDecl();
- return;
- } else {
- DeduceInit = CXXDirectInit->getExpr(0);
- if (isa<InitListExpr>(DeduceInit))
- Diag(CXXDirectInit->getLocStart(),
- diag::err_auto_var_init_paren_braces)
- << VDecl->getDeclName() << VDecl->getType()
- << VDecl->getSourceRange();
- }
- }
-
- // Expressions default to 'id' when we're in a debugger.
- bool DefaultedToAuto = false;
- if (getLangOpts().DebuggerCastResultToId &&
- Init->getType() == Context.UnknownAnyTy) {
- ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
- if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
- return;
- }
- Init = Result.get();
- DefaultedToAuto = true;
- }
-
- QualType DeducedType;
- if (DeduceAutoType(VDecl->getTypeSourceInfo(), DeduceInit, DeducedType) ==
- DAR_Failed)
- DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
+ QualType DeducedType = deduceVarTypeFromInitializer(
+ VDecl, VDecl->getDeclName(), VDecl->getType(),
+ VDecl->getTypeSourceInfo(), VDecl->getSourceRange(), DirectInit, Init);
if (DeducedType.isNull()) {
RealDecl->setInvalidDecl();
return;
}
+
VDecl->setType(DeducedType);
assert(VDecl->isLinkageValid());
@@ -8939,38 +9227,18 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
- // Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
- // 'id' instead of a specific object type prevents most of our usual checks.
- // We only want to warn outside of template instantiations, though:
- // inside a template, the 'id' could have come from a parameter.
- if (ActiveTemplateInstantiations.empty() && !DefaultedToAuto &&
- DeducedType->isObjCIdType()) {
- SourceLocation Loc =
- VDecl->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
- Diag(Loc, diag::warn_auto_var_is_id)
- << VDecl->getDeclName() << DeduceInit->getSourceRange();
- }
-
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDecl()) {
// We never need to merge the type, because we cannot form an incomplete
// array of auto, nor deduce such a type.
- MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/false);
+ MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/ false);
}
// Check the deduced type is valid for a variable declaration.
CheckVariableDeclarationType(VDecl);
if (VDecl->isInvalidDecl())
return;
-
- // If all looks well, warn if this is a case that will change meaning when
- // we implement N3922.
- if (DirectInit && !CXXDirectInit && isa<InitListExpr>(Init)) {
- Diag(Init->getLocStart(),
- diag::warn_auto_var_direct_list_init)
- << FixItHint::CreateInsertion(Init->getLocStart(), "=");
- }
}
// dllimport cannot be used on variable definitions.
@@ -9059,7 +9327,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside
// a kernel function cannot be initialized."
- if (VDecl->getStorageClass() == SC_OpenCLWorkGroupLocal) {
+ if (VDecl->getType().getAddressSpace() == LangAS::opencl_local) {
Diag(VDecl->getLocation(), diag::err_local_cant_init);
VDecl->setInvalidDecl();
return;
@@ -9082,17 +9350,18 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
}
// Perform the initialization.
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
if (!VDecl->isInvalidDecl()) {
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
- InitializationKind Kind
- = DirectInit ?
- CXXDirectInit ? InitializationKind::CreateDirect(VDecl->getLocation(),
- Init->getLocStart(),
- Init->getLocEnd())
- : InitializationKind::CreateDirectList(
- VDecl->getLocation())
- : InitializationKind::CreateCopy(VDecl->getLocation(),
- Init->getLocStart());
+ InitializationKind Kind =
+ DirectInit
+ ? CXXDirectInit
+ ? InitializationKind::CreateDirect(VDecl->getLocation(),
+ Init->getLocStart(),
+ Init->getLocEnd())
+ : InitializationKind::CreateDirectList(VDecl->getLocation())
+ : InitializationKind::CreateCopy(VDecl->getLocation(),
+ Init->getLocStart());
MultiExprArg Args = Init;
if (CXXDirectInit)
@@ -9156,7 +9425,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
if (VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
Init->getLocStart()))
- getCurFunction()->markSafeWeakUse(Init);
+ getCurFunction()->markSafeWeakUse(Init);
}
// The initialization is usually a full-expression.
@@ -9409,6 +9678,15 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
return;
}
+ // C++ Concepts TS [dcl.spec.concept]p1: [...] A variable template
+ // definition having the concept specifier is called a variable concept. A
+ // concept definition refers to [...] a variable concept and its initializer.
+ if (Var->isConcept()) {
+ Diag(Var->getLocation(), diag::err_var_concept_not_initialized);
+ Var->setInvalidDecl();
+ return;
+ }
+
// OpenCL v1.1 s6.5.3: variables declared in the constant address space must
// be initialized.
if (!Var->isInvalidDecl() &&
@@ -9621,8 +9899,6 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) {
case SC_Register:
Error = 4;
break;
- case SC_OpenCLWorkGroupLocal:
- llvm_unreachable("Unexpected storage class");
}
if (Error != -1) {
Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
@@ -9665,9 +9941,9 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
- // In ARC, don't allow jumps past the implicit initialization of a
+ // In Objective-C, don't allow jumps past the implicit initialization of a
// local retaining variable.
- if (getLangOpts().ObjCAutoRefCount &&
+ if (getLangOpts().ObjC1 &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -9913,9 +10189,17 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
// dllimport/dllexport variables cannot be thread local, their TLS index
// isn't exported with the variable.
if (DLLAttr && VD->getTLSKind()) {
- Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
- << DLLAttr;
- VD->setInvalidDecl();
+ auto *F = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
+ if (F && getDLLAttr(F)) {
+ assert(VD->isStaticLocal());
+ // But if this is a static local in a dllimport/dllexport function, the
+ // function will never be inlined, which means the var would never be
+ // imported, so having it marked import/export is safe.
+ } else {
+ Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
+ << DLLAttr;
+ VD->setInvalidDecl();
+ }
}
if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
@@ -9988,8 +10272,9 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
handleTagNumbering(Tag, S);
- if (!Tag->hasNameForLinkage() && !Tag->hasDeclaratorForAnonDecl())
- Tag->setDeclaratorForAnonDecl(FirstDeclaratorInGroup);
+ if (FirstDeclaratorInGroup && !Tag->hasNameForLinkage() &&
+ getLangOpts().CPlusPlus)
+ Context.addDeclaratorForUnnamedTagDecl(Tag, FirstDeclaratorInGroup);
}
}
@@ -10028,7 +10313,7 @@ Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
} else if (DeducedCanon != UCanon) {
Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
diag::err_auto_different_deductions)
- << (AT->isDecltypeAuto() ? 1 : 0)
+ << (unsigned)AT->getKeyword()
<< Deduced << DeducedDecl->getDeclName()
<< U << D->getDeclName()
<< DeducedDecl->getInit()->getSourceRange()
@@ -10118,6 +10403,8 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (DS.isConstexprSpecified())
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 0;
+ if (DS.isConceptSpecified())
+ Diag(DS.getConceptSpecLoc(), diag::err_concept_wrong_decl_kind);
DiagnoseFunctionSpecifiers(DS);
@@ -10370,14 +10657,17 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
}
}
-Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D) {
+Decl *
+Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists,
+ SkipBodyInfo *SkipBody) {
assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
- Decl *DP = HandleDeclarator(ParentScope, D, MultiTemplateParamsArg());
- return ActOnStartOfFunctionDef(FnBodyScope, DP);
+ Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
+ return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
}
void Sema::ActOnFinishInlineMethodDef(CXXMethodDecl *D) {
@@ -10441,7 +10731,8 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
void
Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
- const FunctionDecl *EffectiveDefinition) {
+ const FunctionDecl *EffectiveDefinition,
+ SkipBodyInfo *SkipBody) {
// Don't complain if we're in GNU89 mode and the previous definition
// was an extern inline function.
const FunctionDecl *Definition = EffectiveDefinition;
@@ -10453,17 +10744,20 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
return;
// If we don't have a visible definition of the function, and it's inline or
- // a template, it's OK to form another definition of it.
- //
- // FIXME: Should we skip the body of the function and use the old definition
- // in this case? That may be necessary for functions that return local types
- // through a deduced return type, or instantiate templates with local types.
- if (!hasVisibleDefinition(Definition) &&
+ // a template, skip the new definition.
+ if (SkipBody && !hasVisibleDefinition(Definition) &&
(Definition->getFormalLinkage() == InternalLinkage ||
Definition->isInlined() ||
Definition->getDescribedFunctionTemplate() ||
- Definition->getNumTemplateParameterLists()))
+ Definition->getNumTemplateParameterLists())) {
+ SkipBody->ShouldSkip = true;
+ if (auto *TD = Definition->getDescribedFunctionTemplate())
+ makeMergedDefinitionVisible(TD, FD->getLocation());
+ else
+ makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition),
+ FD->getLocation());
return;
+ }
if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
Definition->getStorageClass() == SC_Extern)
@@ -10524,7 +10818,8 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
}
}
-Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
+Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
+ SkipBodyInfo *SkipBody) {
// Clear the last template instantiation error context.
LastTemplateInstantiationErrorContext = ActiveTemplateInstantiation();
@@ -10536,6 +10831,16 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
FD = FunTmpl->getTemplatedDecl();
else
FD = cast<FunctionDecl>(D);
+
+ // See if this is a redefinition.
+ if (!FD->isLateTemplateParsed()) {
+ CheckForFunctionRedefinition(FD, nullptr, SkipBody);
+
+ // If we're skipping the body, we're done. Don't enter the scope.
+ if (SkipBody && SkipBody->ShouldSkip)
+ return D;
+ }
+
// If we are instantiating a generic lambda call operator, push
// a LambdaScopeInfo onto the function stack. But use the information
// that's already been calculated (ActOnLambdaExpr) to prime the current
@@ -10555,10 +10860,6 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
// Enter a new function scope
PushFunctionScope();
- // See if this is a redefinition.
- if (!FD->isLateTemplateParsed())
- CheckForFunctionRedefinition(FD);
-
// Builtin functions cannot be defined.
if (unsigned BuiltinID = FD->getBuiltinID()) {
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
@@ -10734,6 +11035,9 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
+ if (getLangOpts().Coroutines && !getCurFunction()->CoroutineStmts.empty())
+ CheckCompletedCoroutineBody(FD, Body);
+
if (FD) {
FD->setBody(Body);
@@ -11073,7 +11377,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc,
EST_None,
- /*ESpecLoc=*/NoLoc,
+ /*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
@@ -11159,6 +11463,18 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads &&
+ Context.BuiltinInfo.isTSBuiltin(BuiltinID) &&
+ !FD->hasAttr<CUDADeviceAttr>() && !FD->hasAttr<CUDAHostAttr>()) {
+    // Assign the appropriate attribute depending on the CUDA compilation
+    // mode and which target the builtin belongs to. E.g. during host
+    // compilation, aux builtins are __device__, the rest are __host__.
+ if (getLangOpts().CUDAIsDevice !=
+ Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ FD->addAttr(CUDADeviceAttr::CreateImplicit(Context, FD->getLocation()));
+ else
+ FD->addAttr(CUDAHostAttr::CreateImplicit(Context, FD->getLocation()));
+ }
}
IdentifierInfo *Name = FD->getIdentifier();
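For reference, the host/device assignment above reduces to a small truth
table; the four cases, sketched in comment form:

    // CUDAIsDevice | isAuxBuiltinID | implicit attribute
    //    false     |     false      | __host__    (host builtin, host compile)
    //    false     |     true       | __device__  (aux builtin, host compile)
    //    true      |     false      | __device__
    //    true      |     true       | __host__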
@@ -11269,9 +11585,9 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
/// Check whether this is a valid redeclaration of a previous enumeration.
/// \return true if the redeclaration was invalid.
-bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
- QualType EnumUnderlyingTy,
- const EnumDecl *Prev) {
+bool Sema::CheckEnumRedeclaration(
+ SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy,
+ bool EnumUnderlyingIsImplicit, const EnumDecl *Prev) {
bool IsFixed = !EnumUnderlyingTy.isNull();
if (IsScoped != Prev->isScoped()) {
@@ -11293,6 +11609,10 @@ bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
<< Prev->getIntegerTypeRange();
return true;
}
+  } else if (IsFixed && !Prev->isFixed() && EnumUnderlyingIsImplicit) {
+    // The new declaration is fixed only because it picked up an implicit
+    // underlying type (e.g. Microsoft-ABI enums); tolerate the mismatch.
+    ;
+  } else if (!IsFixed && Prev->isFixed() && !Prev->getIntegerTypeSourceInfo()) {
+    // Likewise when only the previous declaration's fixed underlying type was
+    // implicit (it carries no written type-source-info).
+    ;
} else if (IsFixed != Prev->isFixed()) {
Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
<< Prev->isFixed();
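The two new empty branches tolerate mismatches that arise only because one
declaration acquired an implicitly fixed underlying type; a sketch, targeting
the Microsoft ABI:

    enum E { A };   // definition: implicitly fixed to 'int' under the MS ABI
    enum E;         // not fixed; previously err_enum_redeclare_fixed_mismatch,
                    // now accepted since the previous fixing was implicit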
@@ -11459,7 +11779,6 @@ static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S,
std::reverse(Namespaces.begin(), Namespaces.end());
for (auto *II : Namespaces)
OS << II->getName() << "::";
- OS.flush();
return FixItHint::CreateInsertion(NameLoc, Insertion);
}
@@ -11563,6 +11882,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
+ bool EnumUnderlyingIsImplicit = false;
if (Kind == TTK_Enum) {
if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
@@ -11584,9 +11904,13 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
UPPC_FixedUnderlyingType))
EnumUnderlying = Context.IntTy.getTypePtr();
- } else if (getLangOpts().MSVCCompat)
- // Microsoft enums are always of int type.
- EnumUnderlying = Context.IntTy.getTypePtr();
+ } else if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (getLangOpts().MSVCCompat || TUK == TUK_Definition) {
+ // Microsoft enums are always of int type.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ EnumUnderlyingIsImplicit = true;
+ }
+ }
}
DeclContext *SearchDC = CurContext;
@@ -11816,9 +12140,16 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// In C++, we need to do a redeclaration lookup to properly
// diagnose some problems.
+ // FIXME: redeclaration lookup is also used (with and without C++) to find a
+ // hidden declaration so that we don't get ambiguity errors when using a
+ // type declared by an elaborated-type-specifier. In C that is not correct
+ // and we should instead merge compatible types found by lookup.
if (getLangOpts().CPlusPlus) {
Previous.setRedeclarationKind(ForRedeclaration);
LookupQualifiedName(Previous, SearchDC);
+ } else {
+ Previous.setRedeclarationKind(ForRedeclaration);
+ LookupName(Previous, S);
}
}
@@ -11932,7 +12263,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// returning the previous declaration, unless this is a definition,
// in which case we want the caller to bail out.
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
- ScopedEnum, EnumUnderlyingTy, PrevEnum))
+ ScopedEnum, EnumUnderlyingTy,
+ EnumUnderlyingIsImplicit, PrevEnum))
return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
}
@@ -12203,9 +12535,7 @@ CreateNewDecl:
New->setQualifierInfo(SS.getWithLocInContext(Context));
if (TemplateParameterLists.size() > 0) {
- New->setTemplateParameterListsInfo(Context,
- TemplateParameterLists.size(),
- TemplateParameterLists.data());
+ New->setTemplateParameterListsInfo(Context, TemplateParameterLists);
}
}
else
@@ -12504,26 +12834,41 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
}
if (!FieldTy->isDependentType()) {
- uint64_t TypeSize = Context.getTypeSize(FieldTy);
- if (Value.getZExtValue() > TypeSize) {
- if (!getLangOpts().CPlusPlus || IsMsStruct ||
- Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- if (FieldName)
- return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size)
- << FieldName << (unsigned)Value.getZExtValue()
- << (unsigned)TypeSize;
-
- return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_size)
- << (unsigned)Value.getZExtValue() << (unsigned)TypeSize;
- }
-
+ uint64_t TypeStorageSize = Context.getTypeSize(FieldTy);
+ uint64_t TypeWidth = Context.getIntWidth(FieldTy);
+ bool BitfieldIsOverwide = Value.ugt(TypeWidth);
+
+ // Over-wide bitfields are an error in C or when using the MSVC bitfield
+ // ABI.
+ bool CStdConstraintViolation =
+ BitfieldIsOverwide && !getLangOpts().CPlusPlus;
+ bool MSBitfieldViolation =
+ Value.ugt(TypeStorageSize) &&
+ (IsMsStruct || Context.getTargetInfo().getCXXABI().isMicrosoft());
+ if (CStdConstraintViolation || MSBitfieldViolation) {
+ unsigned DiagWidth =
+ CStdConstraintViolation ? TypeWidth : TypeStorageSize;
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << !CStdConstraintViolation << DiagWidth;
+
+ return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
+ << (unsigned)Value.getZExtValue() << !CStdConstraintViolation
+ << DiagWidth;
+ }
+
+ // Warn on types where the user might conceivably expect to get all
+ // specified bits as value bits: that's all integral types other than
+ // 'bool'.
+ if (BitfieldIsOverwide && !FieldTy->isBooleanType()) {
if (FieldName)
- Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_size)
- << FieldName << (unsigned)Value.getZExtValue()
- << (unsigned)TypeSize;
+ Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << (unsigned)TypeWidth;
else
- Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_size)
- << (unsigned)Value.getZExtValue() << (unsigned)TypeSize;
+ Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_width)
+ << (unsigned)Value.getZExtValue() << (unsigned)TypeWidth;
}
}
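The rewritten check separates the type's value width (getIntWidth) from its
storage size (getTypeSize): C and MSVC-ABI violations stay errors, other C++
cases warn, and 'bool' stops warning entirely. A sketch, assuming 32-bit int
and the Itanium C++ ABI:

    struct S {
      int  a : 40;   // C: error; C++: warning (40 > 32 value bits)
      bool b : 16;   // C++: silent; padding bits are expected for bool
    };               // under the MSVC ABI, widths > storage size remain errors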
@@ -12866,9 +13211,8 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
SourceLocation Loc = FD->getLocation();
if (getSourceManager().isInSystemHeader(Loc)) {
if (!FD->hasAttr<UnavailableAttr>())
- FD->addAttr(UnavailableAttr::CreateImplicit(Context,
- "this system field has retaining ownership",
- Loc));
+ FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
return false;
}
}
@@ -12876,7 +13220,7 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
diag::err_illegal_union_or_anon_struct_member)
- << (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
+ << FD->getParent()->isUnion() << FD->getDeclName() << member;
DiagnoseNontrivial(RDecl, member);
return !getLangOpts().CPlusPlus11;
}
@@ -13237,9 +13581,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
SourceLocation loc = FD->getLocation();
if (getSourceManager().isInSystemHeader(loc)) {
if (!FD->hasAttr<UnavailableAttr>()) {
- FD->addAttr(UnavailableAttr::CreateImplicit(Context,
- "this system field has retaining ownership",
- loc));
+ FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCFieldWithOwnership, loc));
}
} else {
Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag)
@@ -13687,10 +14030,12 @@ Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName,
ForRedeclaration);
auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl);
+ if (!PrevECD)
+ return SkipBodyInfo();
+
+ EnumDecl *PrevED = cast<EnumDecl>(PrevECD->getDeclContext());
NamedDecl *Hidden;
- if (PrevECD &&
- !hasVisibleDefinition(cast<NamedDecl>(PrevECD->getDeclContext()),
- &Hidden)) {
+ if (!PrevED->getDeclName() && !hasVisibleDefinition(PrevED, &Hidden)) {
SkipBodyInfo Skip;
Skip.Previous = Hidden;
return Skip;
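After the restructuring, the body-skipping applies only when the prior
enumerator's parent enum is unnamed and its definition is not visible; a
sketch (hypothetical, with modules):

    // Present in an imported module and reached again textually:
    enum { kCacheLine = 64 };  // second body may be skipped; a named prior
                               // enum no longer takes this path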
@@ -13722,12 +14067,27 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
PrevDecl = nullptr;
}
+ // C++ [class.mem]p15:
+ // If T is the name of a class, then each of the following shall have a name
+ // different from T:
+ // - every enumerator of every member of class T that is an unscoped
+ // enumerated type
+ if (!TheEnumDecl->isScoped())
+ DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
+ DeclarationNameInfo(Id, IdLoc));
+
+ EnumConstantDecl *New =
+ CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
+ if (!New)
+ return nullptr;
+
if (PrevDecl) {
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
"Received TagDecl when not in C++!");
- if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
+ if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S) &&
+ shouldLinkPossiblyHiddenDecl(PrevDecl, New)) {
if (isa<EnumConstantDecl>(PrevDecl))
Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
else
@@ -13737,26 +14097,12 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
}
}
- // C++ [class.mem]p15:
- // If T is the name of a class, then each of the following shall have a name
- // different from T:
- // - every enumerator of every member of class T that is an unscoped
- // enumerated type
- if (!TheEnumDecl->isScoped())
- DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
- DeclarationNameInfo(Id, IdLoc));
-
- EnumConstantDecl *New =
- CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
+ // Process attributes.
+ if (Attr) ProcessDeclAttributeList(S, New, Attr);
- if (New) {
- // Process attributes.
- if (Attr) ProcessDeclAttributeList(S, New, Attr);
-
- // Register this decl in the current scope stack.
- New->setAccess(TheEnumDecl->getAccess());
- PushOnScopeChains(New, S);
- }
+ // Register this decl in the current scope stack.
+ New->setAccess(TheEnumDecl->getAccess());
+ PushOnScopeChains(New, S);
ActOnDocumentableDecl(New);
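Hoisting the [class.mem]p15 check ahead of CheckEnumConstant means the shadow
diagnostic is issued before the constant is created and linked. The rule it
enforces, sketched:

    struct T {
      enum { T };          // ill-formed: unscoped enumerator named like the class
      enum class E { T };  // OK: scoped enumerators do not shadow
    };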
@@ -13803,6 +14149,7 @@ static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) {
return false;
}
+namespace {
struct DupKey {
int64_t val;
bool isTombstoneOrEmptyKey;
@@ -13826,6 +14173,7 @@ struct DenseMapInfoDupKey {
LHS.val == RHS.val;
}
};
+} // end anonymous namespace
// Emits a warning when an element is implicitly set to a value that
// a previous element has already been set to.
@@ -13937,17 +14285,22 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
}
}
-bool
-Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
- bool AllowMask) const {
- FlagEnumAttr *FEAttr = ED->getAttr<FlagEnumAttr>();
- assert(FEAttr && "looking for value in non-flag enum");
+bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
+ bool AllowMask) const {
+ assert(ED->hasAttr<FlagEnumAttr>() && "looking for value in non-flag enum");
+ assert(ED->isCompleteDefinition() && "expected enum definition");
- llvm::APInt FlagMask = ~FEAttr->getFlagBits();
- unsigned Width = FlagMask.getBitWidth();
+ auto R = FlagBitsCache.insert(std::make_pair(ED, llvm::APInt()));
+ llvm::APInt &FlagBits = R.first->second;
- // We will try a zero-extended value for the regular check first.
- llvm::APInt ExtVal = Val.zextOrSelf(Width);
+ if (R.second) {
+ for (auto *E : ED->enumerators()) {
+ const auto &EVal = E->getInitVal();
+ // Only single-bit enumerators introduce new flag values.
+ if (EVal.isPowerOf2())
+ FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+ }
+ }
// A value is in a flag enum if either its bits are a subset of the enum's
// flag bits (the first condition) or we are allowing masks and the same is
@@ -13957,27 +14310,8 @@ Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
// While it's true that any value could be used as a mask, the assumption is
// that a mask will have all of the insignificant bits set. Anything else is
// likely a logic error.
- if (!(FlagMask & ExtVal))
- return true;
-
- if (AllowMask) {
- // Try a one-extended value instead. This can happen if the enum is wider
- // than the constant used, in C with extensions to allow for wider enums.
- // The mask will still have the correct behaviour, so we give the user the
- // benefit of the doubt.
- //
- // FIXME: This heuristic can cause weird results if the enum was extended
- // to a larger type and is signed, because then bit-masks of smaller types
- // that get extended will fall out of range (e.g. ~0x1u). We currently don't
- // detect that case and will get a false positive for it. In most cases,
- // though, it can be fixed by making it a signed type (e.g. ~0x1), so it may
- // be fine just to accept this as a warning.
- ExtVal |= llvm::APInt::getHighBitsSet(Width, Width - Val.getBitWidth());
- if (!(FlagMask & ~ExtVal))
- return true;
- }
-
- return false;
+ llvm::APInt FlagMask = ~FlagBits.zextOrTrunc(Val.getBitWidth());
+ return !(FlagMask & Val) || (AllowMask && !(FlagMask & ~Val));
}
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
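With the new FlagBitsCache, the flag bits are derived lazily from the
power-of-two enumerators instead of being stored on the attribute. The
resulting checking, sketched:

    enum __attribute__((flag_enum)) F {
      A = 1, B = 2, C = 4,  // single-bit enumerators introduce the flag bits
      AB = A | B,           // OK: subset of {1, 2, 4}
      Bad = A | 8           // warning: bit 8 was never introduced as a flag
    };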
@@ -14131,13 +14465,8 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
}
}
- FlagEnumAttr *FEAttr = Enum->getAttr<FlagEnumAttr>();
- if (FEAttr)
- FEAttr->getFlagBits() = llvm::APInt(BestWidth, 0);
-
// Loop over all of the enumerator constants, changing their types to match
- // the type of the enum if needed. If we have a flag type, we also prepare the
- // FlagBits cache.
+ // the type of the enum if needed.
for (auto *D : Elements) {
auto *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
@@ -14169,7 +14498,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
- goto flagbits;
+ continue;
} else {
NewTy = BestType;
NewWidth = BestWidth;
@@ -14196,37 +14525,26 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
ECD->setType(EnumType);
else
ECD->setType(NewTy);
-
-flagbits:
- // Check to see if we have a constant with exactly one bit set. Note that x
- // & (x - 1) will be nonzero if and only if x has more than one bit set.
- if (FEAttr) {
- llvm::APInt ExtVal = InitVal.zextOrSelf(BestWidth);
- if (ExtVal != 0 && !(ExtVal & (ExtVal - 1))) {
- FEAttr->getFlagBits() |= ExtVal;
- }
- }
}
- if (FEAttr) {
+ Enum->completeDefinition(BestType, BestPromotionType,
+ NumPositiveBits, NumNegativeBits);
+
+ CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
+
+ if (Enum->hasAttr<FlagEnumAttr>()) {
for (Decl *D : Elements) {
EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
llvm::APSInt InitVal = ECD->getInitVal();
- if (InitVal != 0 && !IsValueInFlagEnum(Enum, InitVal, true))
+ if (InitVal != 0 && !InitVal.isPowerOf2() &&
+ !IsValueInFlagEnum(Enum, InitVal, true))
Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range)
<< ECD << Enum;
}
}
-
-
- Enum->completeDefinition(BestType, BestPromotionType,
- NumPositiveBits, NumNegativeBits);
-
- CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
-
// Now that the enum type is defined, ensure it's not been underaligned.
if (Enum->hasAttrs())
CheckAlignasUnderalignment(Enum);
@@ -14245,17 +14563,15 @@ Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
}
static void checkModuleImportContext(Sema &S, Module *M,
- SourceLocation ImportLoc,
- DeclContext *DC) {
+ SourceLocation ImportLoc, DeclContext *DC,
+ bool FromInclude = false) {
+ SourceLocation ExternCLoc;
+
if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
switch (LSD->getLanguage()) {
case LinkageSpecDecl::lang_c:
- if (!M->IsExternC) {
- S.Diag(ImportLoc, diag::err_module_import_in_extern_c)
- << M->getFullModuleName();
- S.Diag(LSD->getLocStart(), diag::note_module_import_in_extern_c);
- return;
- }
+ if (ExternCLoc.isInvalid())
+ ExternCLoc = LSD->getLocStart();
break;
case LinkageSpecDecl::lang_cxx:
break;
@@ -14265,15 +14581,25 @@ static void checkModuleImportContext(Sema &S, Module *M,
while (isa<LinkageSpecDecl>(DC))
DC = DC->getParent();
+
if (!isa<TranslationUnitDecl>(DC)) {
- S.Diag(ImportLoc, diag::err_module_import_not_at_top_level)
- << M->getFullModuleName() << DC;
+ S.Diag(ImportLoc, (FromInclude && S.isModuleVisible(M))
+ ? diag::ext_module_import_not_at_top_level_noop
+ : diag::err_module_import_not_at_top_level_fatal)
+ << M->getFullModuleName() << DC;
S.Diag(cast<Decl>(DC)->getLocStart(),
- diag::note_module_import_not_at_top_level)
- << DC;
+ diag::note_module_import_not_at_top_level) << DC;
+ } else if (!M->IsExternC && ExternCLoc.isValid()) {
+ S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
+ << M->getFullModuleName();
+ S.Diag(ExternCLoc, diag::note_module_import_in_extern_c);
}
}
+void Sema::diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc) {
+ return checkModuleImportContext(*this, M, ImportLoc, CurContext);
+}
+
DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
SourceLocation ImportLoc,
ModuleIdPath Path) {
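With FromInclude threaded through, a #include that maps onto a module import
in a nested context degrades from a hard error to a warning where that is
safe. A sketch, assuming "SomeModularHeader.h" (hypothetical) belongs to a
module:

    extern "C" {
    #include "SomeModularHeader.h"  // ext_module_import_in_extern_c (warning)
    }
    namespace N {
    #include "SomeModularHeader.h"  // no-op warning if the module is already
    }                               // visible; otherwise a fatal error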
@@ -14318,7 +14644,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
}
void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
- checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
// Determine whether we're in the #include buffer for a module. The #includes
// in that buffer do not qualify as module imports; they're just an
@@ -14394,12 +14720,14 @@ void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
// 1) declares a function or a variable
// 2) has external linkage
// already exists, add a label attribute to it.
- if (PrevDecl &&
- (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)) &&
- PrevDecl->hasExternalFormalLinkage())
- PrevDecl->addAttr(Attr);
+ if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
+ if (isDeclExternC(PrevDecl))
+ PrevDecl->addAttr(Attr);
+ else
+ Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied)
+ << /*Variable*/(isa<FunctionDecl>(PrevDecl) ? 0 : 1) << PrevDecl;
  // Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
- else
+ } else
(void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr));
}
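The isDeclExternC check restricts the pragma to declarations that actually
have C language linkage; its effect in C++, sketched with hypothetical names:

    extern "C" int foo(void);
    #pragma redefine_extname foo bar   // applied: 'foo' is emitted as 'bar'

    int baz(int);
    #pragma redefine_extname baz qux   // warn_redefine_extname_not_applied:
                                       // 'baz' has C++ linkage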
@@ -14426,7 +14754,7 @@ void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
LookupOrdinaryName);
WeakInfo W = WeakInfo(Name, NameLoc);
- if (PrevDecl) {
+ if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
if (!PrevDecl->hasAttr<AliasAttr>())
if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
DeclApplyPragmaWeak(TUScope, ND, W);
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 191dbd05c9bd..5a0f0f84af7e 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -244,11 +244,12 @@ static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
/// \brief Diagnose mutually exclusive attributes when present on a given
/// declaration. Returns true if diagnosed.
template <typename AttrTy>
-static bool checkAttrMutualExclusion(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
+ IdentifierInfo *Ident) {
if (AttrTy *A = D->getAttr<AttrTy>()) {
- S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
- << Attr.getName() << A;
+ S.Diag(Range.getBegin(), diag::err_attributes_are_not_compatible) << Ident
+ << A;
+ S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
}
return false;
@@ -315,7 +316,7 @@ bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
Diag(Loc->Loc, diag::err_attribute_argument_type)
<< Attr.getName() << AANT_ArgumentString
<< FixItHint::CreateInsertion(Loc->Loc, "\"")
- << FixItHint::CreateInsertion(PP.getLocForEndOfToken(Loc->Loc), "\"");
+ << FixItHint::CreateInsertion(getLocForEndOfToken(Loc->Loc), "\"");
Str = Loc->Ident->getName();
if (ArgLocation)
*ArgLocation = Loc->Loc;
@@ -432,11 +433,10 @@ static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
// Else check if any base classes have a capability.
if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
CXXBasePaths BPaths(false, false);
- if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &P,
- void *) {
- return BS->getType()->getAs<RecordType>()
- ->getDecl()->hasAttr<CapabilityAttr>();
- }, nullptr, BPaths))
+ if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &) {
+ const auto *Type = BS->getType()->getAs<RecordType>();
+ return Type->getDecl()->hasAttr<CapabilityAttr>();
+ }, BPaths))
return true;
}
return false;
@@ -629,13 +629,10 @@ static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D,
// Check that this attribute only applies to lockable types.
QualType QT = cast<ValueDecl>(D)->getType();
- if (!QT->isDependentType()) {
- const RecordType *RT = getRecordType(QT);
- if (!RT || !RT->getDecl()->hasAttr<CapabilityAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
- << Attr.getName();
- return false;
- }
+ if (!QT->isDependentType() && !typeHasCapability(S, QT)) {
+ S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
+ << Attr.getName();
+ return false;
}
// Check that all arguments are lockable objects.
@@ -812,6 +809,43 @@ static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Attr.getAttributeSpellingListIndex()));
}
+static void handlePassObjectSizeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (D->hasAttr<PassObjectSizeAttr>()) {
+ S.Diag(D->getLocStart(), diag::err_attribute_only_once_per_parameter)
+ << Attr.getName();
+ return;
+ }
+
+ Expr *E = Attr.getArgAsExpr(0);
+ uint32_t Type;
+ if (!checkUInt32Argument(S, Attr, E, Type, /*Idx=*/1))
+ return;
+
+ // pass_object_size's argument is passed in as the second argument of
+ // __builtin_object_size. So, it has the same constraints as that second
+ // argument; namely, it must be in the range [0, 3].
+ if (Type > 3) {
+ S.Diag(E->getLocStart(), diag::err_attribute_argument_outof_range)
+ << Attr.getName() << 0 << 3 << E->getSourceRange();
+ return;
+ }
+
+ // pass_object_size is only supported on constant pointer parameters; as a
+ // kindness to users, we allow the parameter to be non-const for declarations.
+ // At this point, we have no clue if `D` belongs to a function declaration or
+ // definition, so we defer the constness check until later.
+ if (!cast<ParmVarDecl>(D)->getType()->isPointerType()) {
+ S.Diag(D->getLocStart(), diag::err_attribute_pointers_only)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ PassObjectSizeAttr(Attr.getRange(), S.Context, (int)Type,
+ Attr.getAttributeSpellingListIndex()));
+}
+
static void handleConsumableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
ConsumableAttr::ConsumedState DefaultState;
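The new handler accepts pass_object_size only once per parameter, only on
pointer parameters, and only with a type index in [0, 3], mirroring the second
argument of __builtin_object_size. Usage, sketched:

    void fill(char *buf __attribute__((pass_object_size(0))), char v);  // OK
    void bad1(int n __attribute__((pass_object_size(0))));   // error: pointer
                                                              // parameters only
    void bad2(char *p __attribute__((pass_object_size(4)))); // error: argument
                                                              // outside [0, 3]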
@@ -1039,17 +1073,14 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context,
Attr.getAttributeSpellingListIndex()));
else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
- // If the alignment is less than or equal to 8 bits, the packed attribute
- // has no effect.
+    // Warn that the offset may change: older compiler versions ignored the
+    // packed attribute on bit-fields whose type has single-byte alignment.
if (!FD->getType()->isDependentType() &&
- !FD->getType()->isIncompleteType() &&
+ !FD->getType()->isIncompleteType() && FD->isBitField() &&
S.Context.getTypeAlign(FD->getType()) <= 8)
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
- << Attr.getName() << FD->getType();
- else
- FD->addAttr(::new (S.Context)
- PackedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ S.Diag(Attr.getLoc(), diag::warn_attribute_packed_for_bitfield);
+
+ FD->addAttr(::new (S.Context) PackedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
} else
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
}
@@ -1165,10 +1196,12 @@ static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
SourceRange TypeRange,
bool isReturnValue = false) {
if (!S.isValidPointerAttrType(T)) {
- S.Diag(Attr.getLoc(), isReturnValue
- ? diag::warn_attribute_return_pointers_only
- : diag::warn_attribute_pointers_only)
- << Attr.getName() << AttrParmRange << TypeRange;
+ if (isReturnValue)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+ << Attr.getName() << AttrParmRange << TypeRange;
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_pointers_only)
+ << Attr.getName() << AttrParmRange << TypeRange << 0;
return false;
}
return true;
@@ -1312,6 +1345,17 @@ void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
AssumeAlignedAttr(AttrRange, Context, E, OE, SpellingListIndex));
}
+/// Normalize the attribute, __foo__ becomes foo.
+/// Returns true if normalization was applied.
+static bool normalizeName(StringRef &AttrName) {
+ if (AttrName.size() > 4 && AttrName.startswith("__") &&
+ AttrName.endswith("__")) {
+ AttrName = AttrName.drop_front(2).drop_back(2);
+ return true;
+ }
+ return false;
+}
+
static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
// This attribute must be applied to a function declaration. The first
// argument to the attribute must be an identifier, the name of the resource,
@@ -1353,11 +1397,8 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
IdentifierInfo *Module = AL.getArgAsIdent(0)->Ident;
- // Normalize the argument, __foo__ becomes foo.
StringRef ModuleName = Module->getName();
- if (ModuleName.startswith("__") && ModuleName.endswith("__") &&
- ModuleName.size() > 4) {
- ModuleName = ModuleName.drop_front(2).drop_back(2);
+ if (normalizeName(ModuleName)) {
Module = &S.PP.getIdentifierTable().get(ModuleName);
}
@@ -1519,7 +1560,7 @@ static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<HotAttr>(S, D, Attr))
+ if (checkAttrMutualExclusion<HotAttr>(S, D, Attr.getRange(), Attr.getName()))
return;
D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context,
@@ -1527,7 +1568,7 @@ static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr))
+ if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr.getRange(), Attr.getName()))
return;
D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context,
@@ -1569,12 +1610,22 @@ static void handleRestrictAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CPlusPlus) {
S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
- << Attr.getName() << AttributeLangSupport::Cpp;
+ << Attr.getName() << AttributeLangSupport::Cpp;
return;
}
- D->addAttr(::new (S.Context) CommonAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ if (CommonAttr *CA = S.mergeCommonAttr(D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(CA);
+}
+
+static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) NakedAttr(Attr.getRange(), S.Context,
+ Attr.getAttributeSpellingListIndex()));
}
static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
@@ -1613,7 +1664,7 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
!VD->getType()->isFunctionPointerType())) {
S.Diag(Attr.getLoc(),
Attr.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
+ : diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunctionMethodOrBlock;
return;
}
@@ -1697,6 +1748,26 @@ static void handleDependencyAttr(Sema &S, Scope *Scope, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+static void handleNotTailCalledAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<AlwaysInlineAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) NotTailCalledAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleDisableTailCallsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<NakedAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ D->addAttr(::new (S.Context) DisableTailCallsAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasLocalStorage()) {
@@ -1825,12 +1896,24 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
- bool Override,
+ AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex) {
VersionTuple MergedIntroduced = Introduced;
VersionTuple MergedDeprecated = Deprecated;
VersionTuple MergedObsoleted = Obsoleted;
bool FoundAny = false;
+ bool OverrideOrImpl = false;
+ switch (AMK) {
+ case AMK_None:
+ case AMK_Redeclaration:
+ OverrideOrImpl = false;
+ break;
+
+ case AMK_Override:
+ case AMK_ProtocolImplementation:
+ OverrideOrImpl = true;
+ break;
+ }
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
@@ -1847,30 +1930,46 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
continue;
}
+      // If there is an existing availability attribute for this platform that
+      // is explicit and the new one is implicit, use the explicit one and
+      // discard the new implicit attribute.
+ if (OldAA->getRange().isValid() && Range.isInvalid()) {
+ return nullptr;
+ }
+
+      // If there is an existing attribute for this platform that is implicit
+      // and the new attribute is explicit, then erase the old one and
+      // continue processing the attributes.
+ if (Range.isValid() && OldAA->getRange().isInvalid()) {
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ continue;
+ }
+
FoundAny = true;
VersionTuple OldIntroduced = OldAA->getIntroduced();
VersionTuple OldDeprecated = OldAA->getDeprecated();
VersionTuple OldObsoleted = OldAA->getObsoleted();
bool OldIsUnavailable = OldAA->getUnavailable();
- if (!versionsMatch(OldIntroduced, Introduced, Override) ||
- !versionsMatch(Deprecated, OldDeprecated, Override) ||
- !versionsMatch(Obsoleted, OldObsoleted, Override) ||
+ if (!versionsMatch(OldIntroduced, Introduced, OverrideOrImpl) ||
+ !versionsMatch(Deprecated, OldDeprecated, OverrideOrImpl) ||
+ !versionsMatch(Obsoleted, OldObsoleted, OverrideOrImpl) ||
!(OldIsUnavailable == IsUnavailable ||
- (Override && !OldIsUnavailable && IsUnavailable))) {
- if (Override) {
+ (OverrideOrImpl && !OldIsUnavailable && IsUnavailable))) {
+ if (OverrideOrImpl) {
int Which = -1;
VersionTuple FirstVersion;
VersionTuple SecondVersion;
- if (!versionsMatch(OldIntroduced, Introduced, Override)) {
+ if (!versionsMatch(OldIntroduced, Introduced, OverrideOrImpl)) {
Which = 0;
FirstVersion = OldIntroduced;
SecondVersion = Introduced;
- } else if (!versionsMatch(Deprecated, OldDeprecated, Override)) {
+ } else if (!versionsMatch(Deprecated, OldDeprecated, OverrideOrImpl)) {
Which = 1;
FirstVersion = Deprecated;
SecondVersion = OldDeprecated;
- } else if (!versionsMatch(Obsoleted, OldObsoleted, Override)) {
+ } else if (!versionsMatch(Obsoleted, OldObsoleted, OverrideOrImpl)) {
Which = 2;
FirstVersion = Obsoleted;
SecondVersion = OldObsoleted;
@@ -1879,15 +1978,20 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
if (Which == -1) {
Diag(OldAA->getLocation(),
diag::warn_mismatched_availability_override_unavail)
- << AvailabilityAttr::getPrettyPlatformName(Platform->getName());
+ << AvailabilityAttr::getPrettyPlatformName(Platform->getName())
+ << (AMK == AMK_Override);
} else {
Diag(OldAA->getLocation(),
diag::warn_mismatched_availability_override)
<< Which
<< AvailabilityAttr::getPrettyPlatformName(Platform->getName())
- << FirstVersion.getAsString() << SecondVersion.getAsString();
+ << FirstVersion.getAsString() << SecondVersion.getAsString()
+ << (AMK == AMK_Override);
}
- Diag(Range.getBegin(), diag::note_overridden_method);
+ if (AMK == AMK_Override)
+ Diag(Range.getBegin(), diag::note_overridden_method);
+ else
+ Diag(Range.getBegin(), diag::note_protocol_method);
} else {
Diag(OldAA->getLocation(), diag::warn_mismatched_availability);
Diag(Range.getBegin(), diag::note_previous_attribute);
@@ -1930,11 +2034,11 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
MergedObsoleted == Obsoleted)
return nullptr;
- // Only create a new attribute if !Override, but we want to do
+ // Only create a new attribute if !OverrideOrImpl, but we want to do
// the checking.
if (!checkAvailabilityAttr(*this, Range, Platform, MergedIntroduced,
MergedDeprecated, MergedObsoleted) &&
- !Override) {
+ !OverrideOrImpl) {
return ::new (Context) AvailabilityAttr(Range, Context, Platform,
Introduced, Deprecated,
Obsoleted, IsUnavailable, Message,
@@ -1975,10 +2079,78 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
Deprecated.Version,
Obsoleted.Version,
IsUnavailable, Str,
- /*Override=*/false,
+ Sema::AMK_None,
Index);
if (NewAttr)
D->addAttr(NewAttr);
+
+  // Transcribe "ios" to "watchos" (and add a new attribute), adjusting the
+  // version numbers to account for the start of the watchOS platform.
+ if (S.Context.getTargetInfo().getTriple().isWatchOS()) {
+ IdentifierInfo *NewII = nullptr;
+ if (II->getName() == "ios")
+ NewII = &S.Context.Idents.get("watchos");
+ else if (II->getName() == "ios_app_extension")
+ NewII = &S.Context.Idents.get("watchos_app_extension");
+
+ if (NewII) {
+ auto adjustWatchOSVersion = [](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+ auto Major = Version.getMajor();
+ auto NewMajor = Major >= 9 ? Major - 7 : 0;
+ if (NewMajor >= 2) {
+ if (Version.getMinor().hasValue()) {
+ if (Version.getSubminor().hasValue())
+ return VersionTuple(NewMajor, Version.getMinor().getValue(),
+ Version.getSubminor().getValue());
+ else
+ return VersionTuple(NewMajor, Version.getMinor().getValue());
+ }
+ }
+
+ return VersionTuple(2, 0);
+ };
+
+ auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
+ auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
+ auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
+ SourceRange(),
+ NewII,
+ NewIntroduced,
+ NewDeprecated,
+ NewObsoleted,
+ IsUnavailable, Str,
+ Sema::AMK_None,
+ Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
+ } else if (S.Context.getTargetInfo().getTriple().isTvOS()) {
+    // Transcribe "ios" to "tvos" (and add a new attribute); tvOS versioning
+    // tracks iOS, so the version numbers carry over unchanged.
+ IdentifierInfo *NewII = nullptr;
+ if (II->getName() == "ios")
+ NewII = &S.Context.Idents.get("tvos");
+ else if (II->getName() == "ios_app_extension")
+ NewII = &S.Context.Idents.get("tvos_app_extension");
+
+ if (NewII) {
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
+ SourceRange(),
+ NewII,
+ Introduced.Version,
+ Deprecated.Version,
+ Obsoleted.Version,
+ IsUnavailable, Str,
+ Sema::AMK_None,
+ Index);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
+ }
}
template <class T>
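So when targeting watchOS or tvOS, an 'ios' availability attribute now implies
one for the new platform: watchOS versions subtract 7 from iOS majors >= 9
(clamping at 2.0), while tvOS versions carry over unchanged. For example:

    __attribute__((availability(ios, introduced=9.3)))
    void f(void);
    // Targeting watchOS: treated as availability(watchos, introduced=2.3).
    // Targeting tvOS:    treated as availability(tvos, introduced=9.3).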
@@ -2492,17 +2664,17 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, IdxExpr, Idx))
return;
- // make sure the format string is really a string
+ // Make sure the format string is really a string.
QualType Ty = getFunctionOrMethodParamType(D, Idx);
- bool not_nsstring_type = !isNSStringType(Ty, S.Context);
- if (not_nsstring_type &&
+ bool NotNSStringTy = !isNSStringType(Ty, S.Context);
+ if (NotNSStringTy &&
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
- << (not_nsstring_type ? "a string type" : "an NSString")
- << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
+ << "a string type" << IdxExpr->getSourceRange()
+ << getFunctionOrMethodParamRange(D, 0);
return;
}
Ty = getFunctionOrMethodResultType(D);
@@ -2511,7 +2683,7 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
- << (not_nsstring_type ? "string type" : "NSString")
+ << (NotNSStringTy ? "string type" : "NSString")
<< IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
@@ -2588,7 +2760,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D,
if (prioritynum < 101 || prioritynum > 65535) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
- << E->getSourceRange();
+ << E->getSourceRange() << Attr.getName() << 101 << 65535;
Attr.setInvalid();
return;
}
@@ -2635,9 +2807,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
StringRef Format = II->getName();
- // Normalize the argument, __foo__ becomes foo.
- if (Format.startswith("__") && Format.endswith("__")) {
- Format = Format.substr(2, Format.size() - 4);
+ if (normalizeName(Format)) {
// If we've modified the string name, we need a new identifier for it.
II = &S.Context.Idents.get(Format);
}
@@ -2858,7 +3028,7 @@ void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
}
if (!E->isValueDependent()) {
- llvm::APSInt Alignment(32);
+ llvm::APSInt Alignment;
ExprResult ICE
= VerifyIntegerConstantExpression(E, &Alignment,
diag::err_align_value_attribute_argument_not_int,
@@ -2972,7 +3142,7 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
}
// FIXME: Cache the number on the Attr object?
- llvm::APSInt Alignment(32);
+ llvm::APSInt Alignment;
ExprResult ICE
= VerifyIntegerConstantExpression(E, &Alignment,
diag::err_aligned_attribute_argument_not_int,
@@ -2980,42 +3150,44 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
if (ICE.isInvalid())
return;
+ uint64_t AlignVal = Alignment.getZExtValue();
+
// C++11 [dcl.align]p2:
// -- if the constant expression evaluates to zero, the alignment
// specifier shall have no effect
// C11 6.7.5p6:
// An alignment specification of zero has no effect.
if (!(TmpAttr.isAlignas() && !Alignment)) {
- if(!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
+ if (!llvm::isPowerOf2_64(AlignVal)) {
Diag(AttrLoc, diag::err_alignment_not_power_of_two)
<< E->getSourceRange();
return;
}
- if (Context.getTargetInfo().isTLSSupported()) {
- if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
- if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (VD->getTLSKind()) {
- CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
- if (Alignment.getSExtValue() > MaxAlignChars.getQuantity()) {
- Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
- << (unsigned)Alignment.getZExtValue() << VD
- << (unsigned)MaxAlignChars.getQuantity();
- return;
- }
- }
- }
- }
- }
}
// Alignment calculations can wrap around if it's greater than 2**28.
- unsigned MaxValidAlignment = TmpAttr.isDeclspec() ? 8192 : 268435456;
- if (Alignment.getZExtValue() > MaxValidAlignment) {
+ unsigned MaxValidAlignment =
+ Context.getTargetInfo().getTriple().isOSBinFormatCOFF() ? 8192
+ : 268435456;
+ if (AlignVal > MaxValidAlignment) {
Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment
<< E->getSourceRange();
return;
}
+ if (Context.getTargetInfo().isTLSSupported()) {
+ unsigned MaxTLSAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getMaxTLSAlign())
+ .getQuantity();
+ auto *VD = dyn_cast<VarDecl>(D);
+ if (MaxTLSAlign && AlignVal > MaxTLSAlign && VD &&
+ VD->getTLSKind() != VarDecl::TLS_None) {
+ Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
+ << (unsigned)AlignVal << VD << MaxTLSAlign;
+ return;
+ }
+ }
+
AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, true,
ICE.get(), SpellingListIndex);
AA->setPackExpansion(IsPackExpansion);
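The reordered checks cap the requested alignment at 2^28 (8192 on COFF
targets) before applying the per-target TLS limit. Sketched:

    alignas(1u << 29) int a;           // error: exceeds the 2^28 maximum
                                       // (the cap is 8192 on COFF targets)
    thread_local alignas(4096) int t;  // error on targets whose maximum TLS
                                       // alignment is below 4096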
@@ -3098,40 +3270,31 @@ bool Sema::checkMSInheritanceAttrOnDefinition(
return true;
}
-/// handleModeAttr - This attribute modifies the width of a decl with primitive
-/// type.
-///
-/// Despite what would be logical, the mode attribute is a decl attribute, not a
-/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
-/// HImode, not an intermediate pointer.
-static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- // This attribute isn't documented, but glibc uses it. It changes
- // the width of an int or unsigned int to the specified size.
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
- << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierInfo *Name = Attr.getArgAsIdent(0)->Ident;
- StringRef Str = Name->getName();
-
- // Normalize the attribute name, __foo__ becomes foo.
- if (Str.startswith("__") && Str.endswith("__"))
- Str = Str.substr(2, Str.size() - 4);
-
- unsigned DestWidth = 0;
- bool IntegerMode = true;
- bool ComplexMode = false;
+/// parseModeAttrArg - Parses an attribute mode string and returns the width,
+/// integer-ness, and complex-ness it denotes via the out-parameters.
+static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
+ bool &IntegerMode, bool &ComplexMode) {
switch (Str.size()) {
case 2:
switch (Str[0]) {
- case 'Q': DestWidth = 8; break;
- case 'H': DestWidth = 16; break;
- case 'S': DestWidth = 32; break;
- case 'D': DestWidth = 64; break;
- case 'X': DestWidth = 96; break;
- case 'T': DestWidth = 128; break;
+ case 'Q':
+ DestWidth = 8;
+ break;
+ case 'H':
+ DestWidth = 16;
+ break;
+ case 'S':
+ DestWidth = 32;
+ break;
+ case 'D':
+ DestWidth = 64;
+ break;
+ case 'X':
+ DestWidth = 96;
+ break;
+ case 'T':
+ DestWidth = 128;
+ break;
}
if (Str[1] == 'F') {
IntegerMode = false;
@@ -3159,6 +3322,52 @@ static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
DestWidth = S.Context.getTargetInfo().getUnwindWordWidth();
break;
}
+}
+
+/// handleModeAttr - This attribute modifies the width of a decl with primitive
+/// type.
+///
+/// Despite what would be logical, the mode attribute is a decl attribute, not a
+/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
+/// HImode, not an intermediate pointer.
+static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // This attribute isn't documented, but glibc uses it. It changes
+ // the width of an int or unsigned int to the specified size.
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+ << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *Name = Attr.getArgAsIdent(0)->Ident;
+ StringRef Str = Name->getName();
+
+ normalizeName(Str);
+
+ unsigned DestWidth = 0;
+ bool IntegerMode = true;
+ bool ComplexMode = false;
+ llvm::APInt VectorSize(64, 0);
+ if (Str.size() >= 4 && Str[0] == 'V') {
+ // Minimal length of vector mode is 4: 'V' + NUMBER(>=1) + TYPE(>=2).
+ size_t StrSize = Str.size();
+ size_t VectorStringLength = 0;
+ while ((VectorStringLength + 1) < StrSize &&
+ isdigit(Str[VectorStringLength + 1]))
+ ++VectorStringLength;
+ if (VectorStringLength &&
+ !Str.substr(1, VectorStringLength).getAsInteger(10, VectorSize) &&
+ VectorSize.isPowerOf2()) {
+ parseModeAttrArg(S, Str.substr(VectorStringLength + 1), DestWidth,
+ IntegerMode, ComplexMode);
+ S.Diag(Attr.getLoc(), diag::warn_vector_mode_deprecated);
+ } else {
+ VectorSize = 0;
+ }
+ }
+
+ if (!VectorSize)
+ parseModeAttrArg(S, Str, DestWidth, IntegerMode, ComplexMode);
QualType OldTy;
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
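Splitting out parseModeAttrArg lets the handler parse the deprecated vector
forms 'V<lanes><mode>' by reusing it for the element mode. Accepted spellings,
sketched:

    typedef int   si   __attribute__((mode(SI)));    // 32-bit integer
    typedef float df   __attribute__((mode(DF)));    // 64-bit floating point
    typedef int   v4si __attribute__((mode(V4SI)));  // vector of 4 SI elements;
                                                     // warn_vector_mode_deprecated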
@@ -3217,7 +3426,10 @@ static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
QualType NewTy = NewElemTy;
- if (const VectorType *OldVT = OldTy->getAs<VectorType>()) {
+ if (VectorSize.getBoolValue()) {
+ NewTy = S.Context.getVectorType(NewTy, VectorSize.getZExtValue(),
+ VectorType::GenericVector);
+ } else if (const VectorType *OldVT = OldTy->getAs<VectorType>()) {
// Complex machine mode does not support base vector types.
if (ComplexMode) {
S.Diag(Attr.getLoc(), diag::err_complex_mode_vector_type);
@@ -3280,6 +3492,42 @@ AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
AttrSpellingListIndex);
}
+CommonAttr *Sema::mergeCommonAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, Range, Ident))
+ return nullptr;
+
+ return ::new (Context) CommonAttr(Range, Context, AttrSpellingListIndex);
+}
+
+InternalLinkageAttr *
+Sema::mergeInternalLinkageAttr(Decl *D, SourceRange Range,
+ IdentifierInfo *Ident,
+ unsigned AttrSpellingListIndex) {
+ if (auto VD = dyn_cast<VarDecl>(D)) {
+ // Attribute applies to Var but not any subclass of it (like ParmVar,
+ // ImplicitParm or VarTemplateSpecialization).
+ if (VD->getKind() != Decl::Var) {
+ Diag(Range.getBegin(), diag::warn_attribute_wrong_decl_type)
+ << Ident << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
+ return nullptr;
+ }
+ // Attribute does not apply to non-static local variables.
+ if (VD->hasLocalStorage()) {
+ Diag(VD->getLocation(), diag::warn_internal_linkage_local_storage);
+ return nullptr;
+ }
+ }
+
+ if (checkAttrMutualExclusion<CommonAttr>(*this, D, Range, Ident))
+ return nullptr;
+
+ return ::new (Context)
+ InternalLinkageAttr(Range, Context, AttrSpellingListIndex);
+}
+
MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex) {
if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) {
@@ -3316,6 +3564,10 @@ OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
static void handleAlwaysInlineAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr(
D, Attr.getRange(), Attr.getName(),
Attr.getAttributeSpellingListIndex()))
@@ -3349,6 +3601,7 @@ static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
D->addAttr(::new (S.Context)
CUDAGlobalAttr(Attr.getRange(), S.Context,
Attr.getAttributeSpellingListIndex()));
}
static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -3645,7 +3898,7 @@ static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
QualType BufferTy = getFunctionOrMethodParamType(D, ArgumentIdx);
if (!BufferTy->isPointerType()) {
S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
- << Attr.getName();
+ << Attr.getName() << 0;
}
}
@@ -3898,7 +4151,8 @@ static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
static void handleCFAuditedTransferAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr))
+ if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
return;
D->addAttr(::new (S.Context)
@@ -3908,7 +4162,8 @@ static void handleCFAuditedTransferAttr(Sema &S, Decl *D,
static void handleCFUnknownTransferAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr))
+ if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
return;
D->addAttr(::new (S.Context)
@@ -4231,14 +4486,86 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D,
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
+static void handleMipsInterruptAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Only one optional argument permitted.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+ if (Attr.getNumArgs() == 0)
+ Str = "";
+ else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
+ return;
+
+ // Semantic checks for a function with the 'interrupt' attribute for MIPS:
+ // a) Must be a function.
+ // b) Must have no parameters.
+ // c) Must have the 'void' return type.
+ // d) Cannot have the 'mips16' attribute, as that instruction set
+ // lacks the 'eret' instruction.
+ // e) The attribute itself must either have no argument or one of the
+ // valid interrupt types, see [MipsInterruptDocs].
+
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'interrupt'" << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+ S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
+ << 0;
+ return;
+ }
+
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ S.Diag(D->getLocation(), diag::warn_mips_interrupt_attribute)
+ << 1;
+ return;
+ }
+
+ if (checkAttrMutualExclusion<Mips16Attr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ MipsInterruptAttr::InterruptType Kind;
+ if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << "'" + std::string(Str) + "'";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) MipsInterruptAttr(
+ Attr.getLoc(), S.Context, Kind, Attr.getAttributeSpellingListIndex()));
+}
+
static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Dispatch the interrupt attribute based on the current target.
if (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::msp430)
handleMSP430InterruptAttr(S, D, Attr);
+ else if (S.Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::mipsel ||
+ S.Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::mips)
+ handleMipsInterruptAttr(S, D, Attr);
else
handleARMInterruptAttr(S, D, Attr);
}
+static void handleMips16Attribute(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<MipsInterruptAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+
+ handleSimpleAttribute<Mips16Attr>(S, D, Attr);
+}
+
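The MIPS handler enforces the checks (a)-(e) listed above; the valid interrupt
kinds are those documented in [MipsInterruptDocs]. Sketched:

    __attribute__((interrupt)) void isr0(void);               // OK: default kind
    __attribute__((interrupt("vector=sw0"))) void isr1(void); // OK: named kind
    __attribute__((interrupt)) void bad(int);          // warning: parameters
                                                       // are not allowed
    __attribute__((mips16, interrupt)) void both(void); // error: incompatible
                                                         // with 'mips16'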
static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
uint32_t NumRegs;
@@ -4334,6 +4661,14 @@ static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
}
}
+ if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ MD->getParent()->isLambda()) {
+      S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda)
+          << A.getName();
+ return;
+ }
+ }
+
unsigned Index = A.getAttributeSpellingListIndex();
Attr *NewAttr = A.getKind() == AttributeList::AT_DLLExport
? (Attr *)S.mergeDLLExportAttr(D, A.getRange(), Index)
@@ -4509,8 +4844,10 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
+ StringRef AttrName = Attr.getName()->getName();
+ normalizeName(AttrName);
std::string SanitizerName =
- llvm::StringSwitch<std::string>(Attr.getName()->getName())
+ llvm::StringSwitch<std::string>(AttrName)
.Case("no_address_safety_analysis", "address")
.Case("no_sanitize_address", "address")
.Case("no_sanitize_thread", "thread")
@@ -4520,6 +4857,14 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+static void handleInternalLinkageAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (InternalLinkageAttr *Internal =
+ S.mergeInternalLinkageAttr(D, Attr.getRange(), Attr.getName(),
+ Attr.getAttributeSpellingListIndex()))
+ D->addAttr(Internal);
+}
+
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the correct
/// number of arguments were passed, etc.
@@ -4583,7 +4928,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
// which do not apply to the current target architecture are treated as
// though they were unknown attributes.
if (Attr.getKind() == AttributeList::UnknownAttribute ||
- !Attr.existsInTarget(S.Context.getTargetInfo().getTriple())) {
+ !Attr.existsInTarget(S.Context.getTargetInfo())) {
S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute()
? diag::warn_unhandled_ms_attribute_ignored
: diag::warn_unknown_attribute_ignored)
@@ -4610,7 +4955,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleDLLAttr(S, D, Attr);
break;
case AttributeList::AT_Mips16:
- handleSimpleAttribute<Mips16Attr>(S, D, Attr);
+ handleMips16Attribute(S, D, Attr);
break;
case AttributeList::AT_NoMips16:
handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
@@ -4663,6 +5008,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_CUDAConstant:
handleSimpleAttribute<CUDAConstantAttr>(S, D, Attr);
break;
+ case AttributeList::AT_PassObjectSize:
+ handlePassObjectSizeAttr(S, D, Attr);
+ break;
case AttributeList::AT_Constructor:
handleConstructorAttr(S, D, Attr);
break;
@@ -4723,6 +5071,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_Mode:
handleModeAttr(S, D, Attr);
break;
+ case AttributeList::AT_NoAlias:
+ handleSimpleAttribute<NoAliasAttr>(S, D, Attr);
+ break;
case AttributeList::AT_NoCommon:
handleSimpleAttribute<NoCommonAttr>(S, D, Attr);
break;
@@ -4754,7 +5105,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleHotAttr(S, D, Attr);
break;
case AttributeList::AT_Naked:
- handleSimpleAttribute<NakedAttr>(S, D, Attr);
+ handleNakedAttr(S, D, Attr);
break;
case AttributeList::AT_NoReturn:
handleNoReturnAttr(S, D, Attr);
@@ -4874,6 +5225,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ReturnsTwice:
handleSimpleAttribute<ReturnsTwiceAttr>(S, D, Attr);
break;
+ case AttributeList::AT_NotTailCalled:
+ handleNotTailCalledAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_DisableTailCalls:
+ handleDisableTailCallsAttr(S, D, Attr);
+ break;
case AttributeList::AT_Used:
handleUsedAttr(S, D, Attr);
break;
@@ -4958,6 +5315,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_OpenCLImageAccess:
handleSimpleAttribute<OpenCLImageAccessAttr>(S, D, Attr);
break;
+ case AttributeList::AT_InternalLinkage:
+ handleInternalLinkageAttr(S, D, Attr);
+ break;
// Microsoft attributes:
case AttributeList::AT_MSNoVTable:
@@ -5299,26 +5659,50 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
}
/// Is the given declaration allowed to use a forbidden type?
-static bool isForbiddenTypeAllowed(Sema &S, Decl *decl) {
+/// If so, it'll still be annotated with an attribute that makes it
+/// illegal to actually use.
+static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
+ const DelayedDiagnostic &diag,
+ UnavailableAttr::ImplicitReason &reason) {
// Private ivars are always okay. Unfortunately, people don't
// always properly make their ivars private, even in system headers.
// Plus we need to make fields okay, too.
- // Function declarations in sys headers will be marked unavailable.
if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl) &&
!isa<FunctionDecl>(decl))
return false;
- // Require it to be declared in a system header.
- return S.Context.getSourceManager().isInSystemHeader(decl->getLocation());
+ // Silently accept unsupported uses of __weak in both user and system
+ // declarations when it's been disabled, for ease of integration with
+ // -fno-objc-arc files. We do have to take some care against attempts
+ // to define such things; for now, we've only done that for ivars
+ // and properties.
+ if ((isa<ObjCIvarDecl>(decl) || isa<ObjCPropertyDecl>(decl))) {
+ if (diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_disabled ||
+ diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_no_runtime) {
+ reason = UnavailableAttr::IR_ForbiddenWeak;
+ return true;
+ }
+ }
+
+ // Allow all sorts of things in system headers.
+ if (S.Context.getSourceManager().isInSystemHeader(decl->getLocation())) {
+ // Currently, all the failures dealt with this way are due to ARC
+ // restrictions.
+ reason = UnavailableAttr::IR_ARCForbiddenType;
+ return true;
+ }
+
+ return false;
}
/// Handle a delayed forbidden-type diagnostic.
static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
Decl *decl) {
- if (decl && isForbiddenTypeAllowed(S, decl)) {
- decl->addAttr(UnavailableAttr::CreateImplicit(S.Context,
- "this system declaration uses an unsupported type",
- diag.Loc));
+ auto reason = UnavailableAttr::IR_None;
+ if (decl && isForbiddenTypeAllowed(S, decl, diag, reason)) {
+ assert(reason && "didn't set reason?");
+ decl->addAttr(UnavailableAttr::CreateImplicit(S.Context, "", reason,
+ diag.Loc));
return;
}
if (S.getLangOpts().ObjCAutoRefCount)
@@ -5371,6 +5755,7 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
bool ObjCPropertyAccess) {
// Diagnostics for deprecated or unavailable.
unsigned diag, diag_message, diag_fwdclass_message;
+ unsigned diag_available_here = diag::note_availability_specified_here;
// Matches 'diag::note_property_attribute' options.
unsigned property_note_select;
@@ -5400,6 +5785,50 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
property_note_select = /* unavailable */ 1;
available_here_select_kind = /* unavailable */ 0;
+
+ if (auto attr = D->getAttr<UnavailableAttr>()) {
+ if (attr->isImplicit() && attr->getImplicitReason()) {
+ // Most of these failures are due to extra restrictions in ARC;
+ // reflect that in the primary diagnostic when applicable.
+ auto flagARCError = [&] {
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ S.getSourceManager().isInSystemHeader(D->getLocation()))
+ diag = diag::err_unavailable_in_arc;
+ };
+
+ switch (attr->getImplicitReason()) {
+ case UnavailableAttr::IR_None: break;
+
+ case UnavailableAttr::IR_ARCForbiddenType:
+ flagARCError();
+ diag_available_here = diag::note_arc_forbidden_type;
+ break;
+
+ case UnavailableAttr::IR_ForbiddenWeak:
+ if (S.getLangOpts().ObjCWeakRuntime)
+ diag_available_here = diag::note_arc_weak_disabled;
+ else
+ diag_available_here = diag::note_arc_weak_no_runtime;
+ break;
+
+ case UnavailableAttr::IR_ARCForbiddenConversion:
+ flagARCError();
+ diag_available_here = diag::note_performs_forbidden_arc_conversion;
+ break;
+
+ case UnavailableAttr::IR_ARCInitReturnsUnrelated:
+ flagARCError();
+ diag_available_here = diag::note_arc_init_returns_unrelated;
+ break;
+
+ case UnavailableAttr::IR_ARCFieldWithOwnership:
+ flagARCError();
+ diag_available_here = diag::note_arc_field_with_ownership;
+ break;
+ }
+ }
+ }
+
break;
case Sema::AD_Partial:
@@ -5426,7 +5855,7 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
}
- S.Diag(D->getLocation(), diag::note_availability_specified_here)
+ S.Diag(D->getLocation(), diag_available_here)
<< D << available_here_select_kind;
if (K == Sema::AD_Partial)
S.Diag(Loc, diag::note_partial_availability_silence) << D;
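Taken together, the SemaDeclAttr.cpp changes replace free-form message strings on implicit UnavailableAttrs with an enumerated reason that the use site can switch over to pick a precise note. A toy model of that pattern, with every name invented for illustration:
    // Producer records *why* a declaration became unavailable; the consumer
    // maps the reason to the most specific diagnostic text instead of
    // printing one canned string for every case.
    enum class Reason { None, ForbiddenWeak, ARCForbiddenType };
    struct ImplicitUnavailable { Reason Why; };
    const char *noteFor(const ImplicitUnavailable &A) {
      switch (A.Why) {
      case Reason::ForbiddenWeak:    return "__weak is disabled here";
      case Reason::ARCForbiddenType: return "type is forbidden under ARC";
      case Reason::None:             return "marked unavailable here";
      }
      return "unknown reason";
    }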
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 0d7cbf45e525..3f6c6b00d902 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -162,34 +162,31 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ // If we have a throw-all spec at this point, ignore the function.
+ if (ComputedEST == EST_None)
+ return;
+
+ switch (EST) {
// If this function can throw any exceptions, make a note of that.
- if (EST == EST_MSAny || EST == EST_None) {
+ case EST_MSAny:
+ case EST_None:
ClearExceptions();
ComputedEST = EST;
return;
- }
-
// FIXME: If the call to this decl is using any of its default arguments, we
// need to search them for potentially-throwing calls.
-
// If this function has a basic noexcept, it doesn't affect the outcome.
- if (EST == EST_BasicNoexcept)
- return;
-
- // If we have a throw-all spec at this point, ignore the function.
- if (ComputedEST == EST_None)
+ case EST_BasicNoexcept:
return;
-
// If we're still at noexcept(true) and there's a nothrow() callee,
// change to that specification.
- if (EST == EST_DynamicNone) {
+ case EST_DynamicNone:
if (ComputedEST == EST_BasicNoexcept)
ComputedEST = EST_DynamicNone;
return;
- }
-
// Check out noexcept specs.
- if (EST == EST_ComputedNoexcept) {
+ case EST_ComputedNoexcept: {
FunctionProtoType::NoexceptResult NR =
Proto->getNoexceptSpec(Self->Context);
assert(NR != FunctionProtoType::NR_NoNoexcept &&
@@ -197,7 +194,6 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
assert(NR != FunctionProtoType::NR_Dependent &&
"Should not generate implicit declarations for dependent cases, "
"and don't know how to handle them anyway.");
-
// noexcept(false) -> no spec on the new function
if (NR == FunctionProtoType::NR_Throw) {
ClearExceptions();
@@ -206,7 +202,9 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
// noexcept(true) won't change anything either.
return;
}
-
+ default:
+ break;
+ }
assert(EST == EST_Dynamic && "EST case not considered earlier.");
assert(ComputedEST != EST_None &&
"Shouldn't collect exceptions when throw-all is guaranteed.");
@@ -1232,9 +1230,9 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
Diag(Dcl->getLocation(),
OK ? diag::warn_cxx11_compat_constexpr_body_no_return
: diag::err_constexpr_body_no_return);
- return OK;
- }
- if (ReturnStmts.size() > 1) {
+ if (!OK)
+ return false;
+ } else if (ReturnStmts.size() > 1) {
Diag(ReturnStmts.back(),
getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_constexpr_body_multiple_return
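The reshuffled constexpr check above now reports a missing return statement and, when that is only a compatibility warning, continues on to the multiple-return check instead of returning early. Minimal inputs for both branches (a sketch; function names invented):
    // Strict C++11 allows exactly one return; Clang accepts the extra returns
    // as a C++14 extension and can flag them under -Wc++98-c++11-compat.
    constexpr int sign(int x) {
      if (x < 0) return -1;
      if (x > 0) return 1;
      return 0;
    }
    // No return at all: invalid in C++11, fine for a void constexpr in C++14.
    constexpr void touch(int &x) { x = 1; }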
@@ -1555,9 +1553,9 @@ NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
/// \brief Performs the actual work of attaching the given base class
/// specifiers to a C++ class.
-bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
- unsigned NumBases) {
- if (NumBases == 0)
+bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
+ MutableArrayRef<CXXBaseSpecifier *> Bases) {
+ if (Bases.empty())
return false;
// Used to keep track of which base types we have already seen, so
@@ -1573,7 +1571,7 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
// Copy non-redundant base specifiers into permanent storage.
unsigned NumGoodBases = 0;
bool Invalid = false;
- for (unsigned idx = 0; idx < NumBases; ++idx) {
+ for (unsigned idx = 0; idx < Bases.size(); ++idx) {
QualType NewBaseType
= Context.getCanonicalType(Bases[idx]->getType());
NewBaseType = NewBaseType.getLocalUnqualifiedType();
@@ -1599,7 +1597,7 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
Bases[NumGoodBases++] = Bases[idx];
// Note this base's direct & indirect bases, if there could be ambiguity.
- if (NumBases > 1)
+ if (Bases.size() > 1)
NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType);
if (const RecordType *Record = NewBaseType->getAs<RecordType>()) {
@@ -1621,7 +1619,7 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
}
// Attach the remaining base class specifiers to the derived class.
- Class->setBases(Bases, NumGoodBases);
+ Class->setBases(Bases.data(), NumGoodBases);
for (unsigned idx = 0; idx < NumGoodBases; ++idx) {
// Check whether this direct base is inaccessible due to ambiguity.
@@ -1656,21 +1654,21 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
/// ActOnBaseSpecifiers - Attach the given base specifiers to the
/// class, after checking whether there are any duplicate base
/// classes.
-void Sema::ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
- unsigned NumBases) {
- if (!ClassDecl || !Bases || !NumBases)
+void Sema::ActOnBaseSpecifiers(Decl *ClassDecl,
+ MutableArrayRef<CXXBaseSpecifier *> Bases) {
+ if (!ClassDecl || Bases.empty())
return;
AdjustDeclIfTemplate(ClassDecl);
- AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases, NumBases);
+ AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases);
}
/// \brief Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
-bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
+bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
if (!getLangOpts().CPlusPlus)
return false;
-
+
CXXRecordDecl *DerivedRD = Derived->getAsCXXRecordDecl();
if (!DerivedRD)
return false;
@@ -1684,13 +1682,18 @@ bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
if (BaseRD->isInvalidDecl() || DerivedRD->isInvalidDecl())
return false;
- // FIXME: instantiate DerivedRD if necessary. We need a PoI for this.
- return DerivedRD->hasDefinition() && DerivedRD->isDerivedFrom(BaseRD);
+ // FIXME: In a modules build, do we need the entire path to be visible for us
+ // to be able to use the inheritance relationship?
+ if (!isCompleteType(Loc, Derived) && !DerivedRD->isBeingDefined())
+ return false;
+
+ return DerivedRD->isDerivedFrom(BaseRD);
}
/// \brief Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
-bool Sema::IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths) {
+bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
+ CXXBasePaths &Paths) {
if (!getLangOpts().CPlusPlus)
return false;
@@ -1702,6 +1705,9 @@ bool Sema::IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths) {
if (!BaseRD)
return false;
+ if (!isCompleteType(Loc, Derived) && !DerivedRD->isBeingDefined())
+ return false;
+
return DerivedRD->isDerivedFrom(BaseRD, Paths);
}
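IsDerivedFrom now carries the location of the query so that isCompleteType can trigger instantiation of the derived class before the inheritance walk. A compilable sketch of why the location matters (names invented):
    struct Base { int tag; };
    template <typename T> struct Wrap : Base { T payload; };
    Base *upcast(Wrap<int> *w) {
      return w;  // derived-to-base conversion: Wrap<int> must be completed
    }            // (instantiated) at exactly this point for the check to pass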
@@ -1749,7 +1755,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
// explore multiple paths to determine if there is an ambiguity.
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
- bool DerivationOkay = IsDerivedFrom(Derived, Base, Paths);
+ bool DerivationOkay = IsDerivedFrom(Loc, Derived, Base, Paths);
assert(DerivationOkay &&
"Can only be used with a derived-to-base conversion");
(void)DerivationOkay;
@@ -1783,7 +1789,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
// performance isn't as much of an issue.
Paths.clear();
Paths.setRecordingPaths(true);
- bool StillOkay = IsDerivedFrom(Derived, Base, Paths);
+ bool StillOkay = IsDerivedFrom(Loc, Derived, Base, Paths);
assert(StillOkay && "Can only be used with a derived-to-base conversion");
(void)StillOkay;
@@ -2759,7 +2765,8 @@ static bool FindBaseInitializer(Sema &SemaRef,
// virtual base class.
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/false);
- if (SemaRef.IsDerivedFrom(SemaRef.Context.getTypeDeclType(ClassDecl),
+ if (SemaRef.IsDerivedFrom(ClassDecl->getLocation(),
+ SemaRef.Context.getTypeDeclType(ClassDecl),
BaseType, Paths)) {
for (CXXBasePaths::paths_iterator Path = Paths.begin();
Path != Paths.end(); ++Path) {
@@ -2982,10 +2989,15 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (BaseType.isNull()) {
BaseType = Context.getTypeDeclType(TyD);
MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
- if (SS.isSet())
- // FIXME: preserve source range information
+ if (SS.isSet()) {
BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(),
BaseType);
+ TInfo = Context.CreateTypeSourceInfo(BaseType);
+ ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ }
}
}
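BuildMemInitializer now builds real type-source information for a qualified base name rather than dropping it, resolving the old FIXME. Input that exercises the new path:
    namespace N { struct Base { Base(int); }; }
    struct D : N::Base {
      D() : N::Base(0) {}  // the 'N::' qualifier's source range is now kept
    };                     // in an ElaboratedTypeLoc on the initializer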
@@ -3483,7 +3495,8 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
/*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
MemberLookup,
- /*TemplateArgs=*/nullptr);
+ /*TemplateArgs=*/nullptr,
+ /*S*/nullptr);
if (CtorArg.isInvalid())
return true;
@@ -4140,7 +4153,7 @@ static void DiagnoseBaseOrMemInitializerOrder(
if (InitKey == IdealInitKeys[IdealIndex])
break;
- assert(IdealIndex != NumIdealInits &&
+ assert(IdealIndex < NumIdealInits &&
"initializer not found in initializer list");
}
@@ -4407,64 +4420,35 @@ void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
}
}
-bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
- unsigned DiagID, AbstractDiagSelID SelID) {
- class NonAbstractTypeDiagnoser : public TypeDiagnoser {
- unsigned DiagID;
- AbstractDiagSelID SelID;
-
- public:
- NonAbstractTypeDiagnoser(unsigned DiagID, AbstractDiagSelID SelID)
- : TypeDiagnoser(DiagID == 0), DiagID(DiagID), SelID(SelID) { }
-
- void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
- if (Suppressed) return;
- if (SelID == -1)
- S.Diag(Loc, DiagID) << T;
- else
- S.Diag(Loc, DiagID) << SelID << T;
- }
- } Diagnoser(DiagID, SelID);
-
- return RequireNonAbstractType(Loc, T, Diagnoser);
-}
-
-bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
- TypeDiagnoser &Diagnoser) {
+bool Sema::isAbstractType(SourceLocation Loc, QualType T) {
if (!getLangOpts().CPlusPlus)
return false;
- if (const ArrayType *AT = Context.getAsArrayType(T))
- return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);
-
- if (const PointerType *PT = T->getAs<PointerType>()) {
- // Find the innermost pointer type.
- while (const PointerType *T = PT->getPointeeType()->getAs<PointerType>())
- PT = T;
-
- if (const ArrayType *AT = Context.getAsArrayType(PT->getPointeeType()))
- return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);
- }
-
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ const auto *RD = Context.getBaseElementType(T)->getAsCXXRecordDecl();
+ if (!RD)
return false;
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ // FIXME: Per [temp.inst]p1, we are supposed to trigger instantiation of a
+ // class template specialization here, but doing so breaks a lot of code.
// We can't answer whether something is abstract until it has a
- // definition. If it's currently being defined, we'll walk back
+ // definition. If it's currently being defined, we'll walk back
// over all the declarations when we have a full definition.
const CXXRecordDecl *Def = RD->getDefinition();
if (!Def || Def->isBeingDefined())
return false;
- if (!RD->isAbstract())
+ return RD->isAbstract();
+}
+
+bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
+ TypeDiagnoser &Diagnoser) {
+ if (!isAbstractType(Loc, T))
return false;
+ T = Context.getBaseElementType(T);
Diagnoser.diagnose(*this, Loc, T);
- DiagnoseAbstractType(RD);
-
+ DiagnoseAbstractType(T->getAsCXXRecordDecl());
return true;
}
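RequireNonAbstractType is now a thin wrapper over the new isAbstractType predicate, using getBaseElementType so arrays are handled uniformly (the old pointer-chasing is gone). A sketch of what it classifies:
    struct Shape { virtual void draw() = 0; };  // abstract class
    // Shape s;          // ill-formed: object of abstract type
    // Shape grid[2][3]; // ill-formed: getBaseElementType strips both array
    //                   // levels and still finds the abstract class
    Shape *slots[8];     // fine: arrays of pointers are unaffected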
@@ -4684,6 +4668,60 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
}
}
+static void ReferenceDllExportedMethods(Sema &S, CXXRecordDecl *Class) {
+ Attr *ClassAttr = getDLLAttr(Class);
+ if (!ClassAttr)
+ return;
+
+ assert(ClassAttr->getKind() == attr::DLLExport);
+
+ TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();
+
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ // Don't go any further if this is just an explicit instantiation
+ // declaration.
+ return;
+
+ for (Decl *Member : Class->decls()) {
+ auto *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD)
+ continue;
+
+ if (Member->getAttr<DLLExportAttr>()) {
+ if (MD->isUserProvided()) {
+ // Instantiate non-default class member functions ...
+
+ // ... except for certain kinds of template specializations.
+ if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
+ continue;
+
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ // The function will be passed to the consumer when its definition is
+ // encountered.
+ } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
+ MD->isCopyAssignmentOperator() ||
+ MD->isMoveAssignmentOperator()) {
+ // Synthesize and instantiate non-trivial implicit methods, explicitly
+ // defaulted methods, and the copy and move assignment operators. The
+ // latter are exported even if they are trivial, because the address of
+ // an operator can be taken and should compare equal across libraries.
+ DiagnosticErrorTrap Trap(S.Diags);
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+ if (Trap.hasErrorOccurred()) {
+ S.Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
+ << Class->getName() << !S.getLangOpts().CPlusPlus11;
+ break;
+ }
+
+ // There is no later point when we will see the definition of this
+ // function, so pass it to the consumer now.
+ S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
+ }
+ }
+ }
+}
+
/// \brief Check class-level dllimport/dllexport attribute.
void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
@@ -4785,45 +4823,10 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
NewAttr->setInherited(true);
Member->addAttr(NewAttr);
}
-
- if (MD && ClassExported) {
- if (TSK == TSK_ExplicitInstantiationDeclaration)
- // Don't go any further if this is just an explicit instantiation
- // declaration.
- continue;
-
- if (MD->isUserProvided()) {
- // Instantiate non-default class member functions ...
-
- // .. except for certain kinds of template specializations.
- if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
- continue;
-
- MarkFunctionReferenced(Class->getLocation(), MD);
-
- // The function will be passed to the consumer when its definition is
- // encountered.
- } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
- MD->isCopyAssignmentOperator() ||
- MD->isMoveAssignmentOperator()) {
- // Synthesize and instantiate non-trivial implicit methods, explicitly
- // defaulted methods, and the copy and move assignment operators. The
- // latter are exported even if they are trivial, because the address of
- // an operator can be taken and should compare equal accross libraries.
- DiagnosticErrorTrap Trap(Diags);
- MarkFunctionReferenced(Class->getLocation(), MD);
- if (Trap.hasErrorOccurred()) {
- Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
- << Class->getName() << !getLangOpts().CPlusPlus11;
- break;
- }
-
- // There is no later point when we will see the definition of this
- // function, so pass it to the consumer now.
- Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
- }
- }
}
+
+ if (ClassExported)
+ DelayedDllExportClasses.push_back(Class);
}
/// \brief Perform propagation of DLL attributes from a derived class to a
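checkClassLevelDLLAttribute no longer references exported members inline; it queues the class on DelayedDllExportClasses, and the hoisted ReferenceDllExportedMethods runs once the outermost class is finished. A sketch of the kind of input whose work this defers (needs a Microsoft ABI target; names invented):
    // Defaulted members are synthesized and handed to the consumer; the copy
    // assignment is exported even though trivial so that its address compares
    // equal across libraries.
    struct __declspec(dllexport) Widget {
      Widget() = default;
      Widget &operator=(const Widget &) = default;
      int size() const { return n; }  // user-provided inline: only marked
      int n = 0;                      // referenced after the class completes
    };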
@@ -5623,7 +5626,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
/// having a particular direct or virtual base class.
bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
- return shouldDeleteForClassSubobject(BaseClass, Base, 0);
+ // If the program is correct, BaseClass cannot be null, but if it is, the
+ // error must be reported elsewhere.
+ return BaseClass && shouldDeleteForClassSubobject(BaseClass, Base, 0);
}
/// Check whether we should delete a special member function due to the class
@@ -6270,77 +6275,75 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
return true;
}
-/// \brief Data used with FindHiddenVirtualMethod
namespace {
- struct FindHiddenVirtualMethodData {
- Sema *S;
- CXXMethodDecl *Method;
- llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
- SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
- };
-}
-
-/// \brief Check whether any most overriden method from MD in Methods
-static bool CheckMostOverridenMethods(const CXXMethodDecl *MD,
- const llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
- if (MD->size_overridden_methods() == 0)
- return Methods.count(MD->getCanonicalDecl());
- for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
- E = MD->end_overridden_methods();
- I != E; ++I)
- if (CheckMostOverridenMethods(*I, Methods))
- return true;
- return false;
-}
+struct FindHiddenVirtualMethod {
+ Sema *S;
+ CXXMethodDecl *Method;
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
+ SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
-/// \brief Member lookup function that determines whether a given C++
-/// method overloads virtual methods in a base class without overriding any,
-/// to be used with CXXRecordDecl::lookupInBases().
-static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- void *UserData) {
- RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
-
- FindHiddenVirtualMethodData &Data
- = *static_cast<FindHiddenVirtualMethodData*>(UserData);
-
- DeclarationName Name = Data.Method->getDeclName();
- assert(Name.getNameKind() == DeclarationName::Identifier);
-
- bool foundSameNameMethod = false;
- SmallVector<CXXMethodDecl *, 8> overloadedMethods;
- for (Path.Decls = BaseRecord->lookup(Name);
- !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- NamedDecl *D = Path.Decls.front();
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- MD = MD->getCanonicalDecl();
- foundSameNameMethod = true;
- // Interested only in hidden virtual methods.
- if (!MD->isVirtual())
- continue;
- // If the method we are checking overrides a method from its base
- // don't warn about the other overloaded methods. Clang deviates from GCC
- // by only diagnosing overloads of inherited virtual functions that do not
- // override any other virtual functions in the base. GCC's
- // -Woverloaded-virtual diagnoses any derived function hiding a virtual
- // function from a base class. These cases may be better served by a
- // warning (not specific to virtual functions) on call sites when the call
- // would select a different function from the base class, were it visible.
- // See FIXME in test/SemaCXX/warn-overload-virtual.cpp for an example.
- if (!Data.S->IsOverload(Data.Method, MD, false))
+private:
+ /// Check whether any most overridden method from MD is in Methods
+ static bool CheckMostOverridenMethods(
+ const CXXMethodDecl *MD,
+ const llvm::SmallPtrSetImpl<const CXXMethodDecl *> &Methods) {
+ if (MD->size_overridden_methods() == 0)
+ return Methods.count(MD->getCanonicalDecl());
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ if (CheckMostOverridenMethods(*I, Methods))
return true;
- // Collect the overload only if its hidden.
- if (!CheckMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods))
- overloadedMethods.push_back(MD);
- }
+ return false;
}
- if (foundSameNameMethod)
- Data.OverloadedMethods.append(overloadedMethods.begin(),
- overloadedMethods.end());
- return foundSameNameMethod;
-}
+public:
+ /// Member lookup function that determines whether a given C++
+ /// method overloads virtual methods in a base class without overriding any,
+ /// to be used with CXXRecordDecl::lookupInBases().
+ bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ RecordDecl *BaseRecord =
+ Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName Name = Method->getDeclName();
+ assert(Name.getNameKind() == DeclarationName::Identifier);
+
+ bool foundSameNameMethod = false;
+ SmallVector<CXXMethodDecl *, 8> overloadedMethods;
+ for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
+ Path.Decls = Path.Decls.slice(1)) {
+ NamedDecl *D = Path.Decls.front();
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ MD = MD->getCanonicalDecl();
+ foundSameNameMethod = true;
+ // Interested only in hidden virtual methods.
+ if (!MD->isVirtual())
+ continue;
+ // If the method we are checking overrides a method from its base
+ // don't warn about the other overloaded methods. Clang deviates from
+ // GCC by only diagnosing overloads of inherited virtual functions that
+ // do not override any other virtual functions in the base. GCC's
+ // -Woverloaded-virtual diagnoses any derived function hiding a virtual
+ // function from a base class. These cases may be better served by a
+ // warning (not specific to virtual functions) on call sites when the
+ // call would select a different function from the base class, were it
+ // visible.
+ // See FIXME in test/SemaCXX/warn-overload-virtual.cpp for an example.
+ if (!S->IsOverload(Method, MD, false))
+ return true;
+ // Collect the overload only if it's hidden.
+ if (!CheckMostOverridenMethods(MD, OverridenAndUsingBaseMethods))
+ overloadedMethods.push_back(MD);
+ }
+ }
+
+ if (foundSameNameMethod)
+ OverloadedMethods.append(overloadedMethods.begin(),
+ overloadedMethods.end());
+ return foundSameNameMethod;
+ }
+};
+} // end anonymous namespace
/// \brief Add the most overriden methods from MD to Methods
static void AddMostOverridenMethods(const CXXMethodDecl *MD,
@@ -6363,9 +6366,9 @@ void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
/*bool RecordPaths=*/false,
/*bool DetectVirtual=*/false);
- FindHiddenVirtualMethodData Data;
- Data.Method = MD;
- Data.S = this;
+ FindHiddenVirtualMethod FHVM;
+ FHVM.Method = MD;
+ FHVM.S = this;
// Keep the base methods that were overriden or introduced in the subclass
// by 'using' in a set. A base method not in this set is hidden.
@@ -6376,11 +6379,11 @@ void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*I))
ND = shad->getTargetDecl();
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
- AddMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods);
+ AddMostOverridenMethods(MD, FHVM.OverridenAndUsingBaseMethods);
}
- if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths))
- OverloadedMethods = Data.OverloadedMethods;
+ if (DC->lookupInBases(FHVM, Paths))
+ OverloadedMethods = FHVM.OverloadedMethods;
}
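The callback-plus-void* pair has become a function object that lookupInBases can invoke directly; the -Woverloaded-virtual logic itself is unchanged. For reference, a minimal trigger:
    struct Base {
      virtual void handle(int);
    };
    struct Derived : Base {
      void handle(double);  // warns: hides Base::handle(int) without
    };                      // overriding it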
void Sema::NoteHiddenVirtualMethods(CXXMethodDecl *MD,
@@ -6897,7 +6900,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
return Context.getFunctionType(Context.VoidTy, None, EPI);
}
-static void extendLeft(SourceRange &R, const SourceRange &Before) {
+static void extendLeft(SourceRange &R, SourceRange Before) {
if (Before.isInvalid())
return;
R.setBegin(Before.getBegin());
@@ -6905,7 +6908,7 @@ static void extendLeft(SourceRange &R, const SourceRange &Before) {
R.setEnd(Before.getEnd());
}
-static void extendRight(SourceRange &R, const SourceRange &After) {
+static void extendRight(SourceRange &R, SourceRange After) {
if (After.isInvalid())
return;
if (R.getBegin().isInvalid())
@@ -7019,7 +7022,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// If we can provide a correct fix-it hint, do so.
if (After.isInvalid() && ConvTSI) {
SourceLocation InsertLoc =
- PP.getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd());
+ getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd());
DB << FixItHint::CreateInsertion(InsertLoc, " ")
<< FixItHint::CreateInsertionFromRange(
InsertLoc, CharSourceRange::getTokenRange(Before))
@@ -7102,7 +7105,7 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
if (ConvType == ClassType)
Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used)
<< ClassType;
- else if (IsDerivedFrom(ClassType, ConvType))
+ else if (IsDerivedFrom(Conversion->getLocation(), ClassType, ConvType))
Diag(Conversion->getLocation(), diag::warn_conv_to_base_not_used)
<< ClassType << ConvType;
} else if (ConvType->isVoidType()) {
@@ -7169,7 +7172,8 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
SourceLocation IdentLoc,
IdentifierInfo *II,
SourceLocation LBrace,
- AttributeList *AttrList) {
+ AttributeList *AttrList,
+ UsingDirectiveDecl *&UD) {
SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
// For anonymous namespace, take the location of the left brace.
SourceLocation Loc = II ? IdentLoc : LBrace;
@@ -7190,23 +7194,14 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// treated as an original-namespace-name.
//
// Since namespace names are unique in their scope, and we don't
- // look through using directives, just look for any ordinary names.
-
- const unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Member |
- Decl::IDNS_Type | Decl::IDNS_Using | Decl::IDNS_Tag |
- Decl::IDNS_Namespace;
- NamedDecl *PrevDecl = nullptr;
- DeclContext::lookup_result R = CurContext->getRedeclContext()->lookup(II);
- for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
- ++I) {
- if ((*I)->getIdentifierNamespace() & IDNS) {
- PrevDecl = *I;
- break;
- }
- }
-
+ // look through using directives, just look for any ordinary names
+ // as if by qualified name lookup.
+ LookupResult R(*this, II, IdentLoc, LookupOrdinaryName, ForRedeclaration);
+ LookupQualifiedName(R, CurContext->getRedeclContext());
+ NamedDecl *PrevDecl =
+ R.isSingleResult() ? R.getRepresentativeDecl() : nullptr;
PrevNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl);
-
+
if (PrevNS) {
// This is an extended namespace definition.
if (IsInline != PrevNS->isInline())
@@ -7293,14 +7288,13 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// namespace internal linkage.
if (!PrevNS) {
- UsingDirectiveDecl* UD
- = UsingDirectiveDecl::Create(Context, Parent,
- /* 'using' */ LBrace,
- /* 'namespace' */ SourceLocation(),
- /* qualifier */ NestedNameSpecifierLoc(),
- /* identifier */ SourceLocation(),
- Namespc,
- /* Ancestor */ Parent);
+ UD = UsingDirectiveDecl::Create(Context, Parent,
+ /* 'using' */ LBrace,
+ /* 'namespace' */ SourceLocation(),
+ /* qualifier */ NestedNameSpecifierLoc(),
+ /* identifier */ SourceLocation(),
+ Namespc,
+ /* Ancestor */ Parent);
UD->setImplicit();
Parent->addDecl(UD);
}
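ActOnStartNamespaceDef now hands the implicit using-directive back through the new UD out-parameter instead of creating and discarding it locally. As a reminder, the language rule that directive implements:
    namespace {        // acts as: namespace <unique> {} using namespace <unique>;
      int counter;     // the implicit using-directive is what makes 'counter'
    }                  // visible in the enclosing scope
    int next() { return ++counter; }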
@@ -7538,7 +7532,7 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
S.PDiag(diag::err_using_directive_suggest) << Ident,
S.PDiag(diag::note_namespace_defined_here));
}
- R.addDecl(Corrected.getCorrectionDecl());
+ R.addDecl(Corrected.getFoundDecl());
return true;
}
return false;
@@ -7556,7 +7550,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
assert(IdentLoc.isValid() && "Invalid NamespceName location.");
// This can only happen along a recovery path.
- while (S->getFlags() & Scope::TemplateParamScope)
+ while (S->isTemplateParamScope())
S = S->getParent();
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
@@ -7586,9 +7580,9 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
}
if (!R.empty()) {
- NamedDecl *Named = R.getFoundDecl();
- assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named))
- && "expected namespace decl");
+ NamedDecl *Named = R.getRepresentativeDecl();
+ NamespaceDecl *NS = R.getAsSingle<NamespaceDecl>();
+ assert(NS && "expected namespace decl");
// The use of a nested name specifier may trigger deprecation warnings.
DiagnoseUseOfDecl(Named, IdentLoc);
@@ -7605,7 +7599,6 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
// Find enclosing context containing both using-directive and
// nominated namespace.
- NamespaceDecl *NS = getNamespaceDecl(Named);
DeclContext *CommonAncestor = cast<DeclContext>(NS);
while (CommonAncestor && !CommonAncestor->Encloses(CurContext))
CommonAncestor = CommonAncestor->getParent();
@@ -7805,7 +7798,8 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
FoundEquivalentDecl = true;
}
- (isa<TagDecl>(D) ? Tag : NonTag) = D;
+ if (isVisible(D))
+ (isa<TagDecl>(D) ? Tag : NonTag) = D;
}
if (FoundEquivalentDecl)
@@ -8014,6 +8008,10 @@ public:
// FIXME: Check that the base class member is accessible?
}
+ } else {
+ auto *FoundRecord = dyn_cast<CXXRecordDecl>(ND);
+ if (FoundRecord && FoundRecord->isInjectedClassName())
+ return false;
}
if (isa<TypeDecl>(ND))
@@ -8377,7 +8375,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
} else {
// Convert 'using X::Y;' to 'typedef X::Y Y;'.
SourceLocation InsertLoc =
- PP.getLocForEndOfToken(NameInfo.getLocEnd());
+ getLocForEndOfToken(NameInfo.getLocEnd());
Diag(InsertLoc, diag::note_using_decl_class_member_workaround)
<< 1 // typedef declaration
<< FixItHint::CreateReplacement(UsingLoc, "typedef")
@@ -8470,40 +8468,26 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
// in the UsingDecl and UsingShadowDecl so that these checks didn't
// need to be repeated.
- struct UserData {
- llvm::SmallPtrSet<const CXXRecordDecl*, 4> Bases;
-
- static bool collect(const CXXRecordDecl *Base, void *OpaqueData) {
- UserData *Data = reinterpret_cast<UserData*>(OpaqueData);
- Data->Bases.insert(Base);
- return true;
- }
-
- bool hasDependentBases(const CXXRecordDecl *Class) {
- return !Class->forallBases(collect, this);
- }
-
- /// Returns true if the base is dependent or is one of the
- /// accumulated base classes.
- static bool doesNotContain(const CXXRecordDecl *Base, void *OpaqueData) {
- UserData *Data = reinterpret_cast<UserData*>(OpaqueData);
- return !Data->Bases.count(Base);
- }
-
- bool mightShareBases(const CXXRecordDecl *Class) {
- return Bases.count(Class) || !Class->forallBases(doesNotContain, this);
- }
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> Bases;
+ auto Collect = [&Bases](const CXXRecordDecl *Base) {
+ Bases.insert(Base);
+ return true;
};
- UserData Data;
-
- // Returns false if we find a dependent base.
- if (Data.hasDependentBases(cast<CXXRecordDecl>(CurContext)))
+ // Collect all bases. Return false if we find a dependent base.
+ if (!cast<CXXRecordDecl>(CurContext)->forallBases(Collect))
return false;
- // Returns false if the class has a dependent base or if it or one
+ // Returns true if the base is dependent or is one of the accumulated base
+ // classes.
+ auto IsNotBase = [&Bases](const CXXRecordDecl *Base) {
+ return !Bases.count(Base);
+ };
+
+ // Return false if the class has a dependent base or if it or one
// of its bases is present in the base set of the current context.
- if (Data.mightShareBases(cast<CXXRecordDecl>(NamedContext)))
+ if (Bases.count(cast<CXXRecordDecl>(NamedContext)) ||
+ !cast<CXXRecordDecl>(NamedContext)->forallBases(IsNotBase))
return false;
Diag(SS.getRange().getBegin(),
@@ -8524,7 +8508,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
TypeResult Type,
Decl *DeclFromDeclSpec) {
// Skip up to the relevant declaration scope.
- while (S->getFlags() & Scope::TemplateParamScope)
+ while (S->isTemplateParamScope())
S = S->getParent();
assert((S->getFlags() & Scope::DeclScope) &&
"got alias-declaration outside of declaration scope");
@@ -8685,28 +8669,41 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
}
}
assert(!R.isAmbiguous() && !R.empty());
+ NamedDecl *ND = R.getRepresentativeDecl();
// Check if we have a previous declaration with the same name.
- NamedDecl *PrevDecl = LookupSingleName(S, Alias, AliasLoc, LookupOrdinaryName,
- ForRedeclaration);
- if (PrevDecl && !isDeclInScope(PrevDecl, CurContext, S))
- PrevDecl = nullptr;
+ LookupResult PrevR(*this, Alias, AliasLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ LookupName(PrevR, S);
- NamedDecl *ND = R.getFoundDecl();
+ // Check we're not shadowing a template parameter.
+ if (PrevR.isSingleResult() && PrevR.getFoundDecl()->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(AliasLoc, PrevR.getFoundDecl());
+ PrevR.clear();
+ }
- if (PrevDecl) {
+ // Filter out any other lookup result from an enclosing scope.
+ FilterLookupForScope(PrevR, CurContext, S, /*ConsiderLinkage*/false,
+ /*AllowInlineNamespace*/false);
+
+ // Find the previous declaration and check that we can redeclare it.
+ NamespaceAliasDecl *Prev = nullptr;
+ if (PrevR.isSingleResult()) {
+ NamedDecl *PrevDecl = PrevR.getRepresentativeDecl();
if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) {
// We already have an alias with the same name that points to the same
// namespace; check that it matches.
- if (!AD->getNamespace()->Equals(getNamespaceDecl(ND))) {
+ if (AD->getNamespace()->Equals(getNamespaceDecl(ND))) {
+ Prev = AD;
+ } else if (isVisible(PrevDecl)) {
Diag(AliasLoc, diag::err_redefinition_different_namespace_alias)
<< Alias;
- Diag(PrevDecl->getLocation(), diag::note_previous_namespace_alias)
+ Diag(AD->getLocation(), diag::note_previous_namespace_alias)
<< AD->getNamespace();
return nullptr;
}
- } else {
- unsigned DiagID = isa<NamespaceDecl>(PrevDecl)
+ } else if (isVisible(PrevDecl)) {
+ unsigned DiagID = isa<NamespaceDecl>(PrevDecl->getUnderlyingDecl())
? diag::err_redefinition
: diag::err_redefinition_different_kind;
Diag(AliasLoc, DiagID) << Alias;
@@ -8722,8 +8719,8 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc,
Alias, SS.getWithLocInContext(Context),
IdentLoc, ND);
- if (PrevDecl)
- AliasDecl->setPreviousDecl(cast<NamespaceAliasDecl>(PrevDecl));
+ if (Prev)
+ AliasDecl->setPreviousDecl(Prev);
PushOnScopeChains(AliasDecl, S);
return AliasDecl;
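Namespace-alias redeclaration now goes through a LookupResult, diagnoses template-parameter shadowing, and only reports mismatches when the previous declaration is visible (a modules consideration). The basic cases, with invented namespace names:
    namespace Long { int v; }
    namespace Other { int w; }
    namespace L1 = Long;      // first declaration
    namespace L1 = Long;      // OK: redeclares the alias with the same target
    // namespace L1 = Other;  // error: redefinition with a different namespace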
@@ -9497,7 +9494,7 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
}
}
-void Sema::ActOnFinishCXXMemberDefaultArgs(Decl *D) {
+void Sema::ActOnFinishCXXNonNestedClass(Decl *D) {
auto *RD = dyn_cast<CXXRecordDecl>(D);
// Default constructors that are annotated with __declspec(dllexport) which
@@ -9505,6 +9502,15 @@ void Sema::ActOnFinishCXXMemberDefaultArgs(Decl *D) {
// wrapped with a thunk called the default constructor closure.
if (RD && Context.getTargetInfo().getCXXABI().isMicrosoft())
getDefaultArgExprsForConstructors(*this, RD);
+
+ if (!DelayedDllExportClasses.empty()) {
+ // Calling ReferenceDllExportedMethods might cause the current function to
+ // be called again, so use a local copy of DelayedDllExportClasses.
+ SmallVector<CXXRecordDecl *, 4> WorkList;
+ std::swap(DelayedDllExportClasses, WorkList);
+ for (CXXRecordDecl *Class : WorkList)
+ ReferenceDllExportedMethods(*this, Class);
+ }
}
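The drain above swaps the delayed-export list into a local copy because ReferenceDllExportedMethods can re-enter this path and append more classes. The same idiom in isolation (a generic sketch, not Clang code):
    #include <utility>
    #include <vector>
    std::vector<int> Pending;        // stand-in for DelayedDllExportClasses
    void handle(int Item) {
      if (Item > 0)
        Pending.push_back(Item - 1); // re-entrant growth lands in 'Pending'
    }
    void finish() {
      std::vector<int> WorkList;
      std::swap(Pending, WorkList);  // iterate a stable local copy
      for (int Item : WorkList)
        handle(Item);
    }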
void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
@@ -9618,7 +9624,7 @@ public:
Expr *build(Sema &S, SourceLocation Loc) const override {
return assertNotNull(S.BuildMemberReferenceExpr(
Builder.build(S, Loc), Type, Loc, IsArrow, SS, SourceLocation(),
- nullptr, MemberLookup, nullptr).get());
+ nullptr, MemberLookup, nullptr, nullptr).get());
}
MemberBuilder(const ExprBuilder &Builder, QualType Type, bool IsArrow,
@@ -9828,7 +9834,7 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
SS, /*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
OpLookup,
- /*TemplateArgs=*/nullptr,
+ /*TemplateArgs=*/nullptr, /*S*/nullptr,
/*SuppressQualifierCheck=*/true);
if (OpEqualRef.isInvalid())
return StmtError();
@@ -12207,7 +12213,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
diag::ext_unelaborated_friend_type)
<< (unsigned) RD->getTagKind()
<< T
- << FixItHint::CreateInsertion(PP.getLocForEndOfToken(FriendLoc),
+ << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
InsertionText);
} else {
Diag(FriendLoc,
@@ -12673,15 +12679,30 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
DC = CurContext;
assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?");
}
-
+
if (!DC->isRecord()) {
+ int DiagArg = -1;
+ switch (D.getName().getKind()) {
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ case UnqualifiedId::IK_ConstructorName:
+ DiagArg = 0;
+ break;
+ case UnqualifiedId::IK_DestructorName:
+ DiagArg = 1;
+ break;
+ case UnqualifiedId::IK_ConversionFunctionId:
+ DiagArg = 2;
+ break;
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_TemplateId:
+ break;
+ }
// This implies that it has to be an operator or function.
- if (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ||
- D.getName().getKind() == UnqualifiedId::IK_DestructorName ||
- D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId) {
- Diag(Loc, diag::err_introducing_special_friend) <<
- (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ? 0 :
- D.getName().getKind() == UnqualifiedId::IK_DestructorName ? 1 : 2);
+ if (DiagArg >= 0) {
+ Diag(Loc, diag::err_introducing_special_friend) << DiagArg;
return nullptr;
}
}
@@ -12983,7 +13004,7 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
if (!Context.hasSameUnqualifiedType(NewClassTy, OldClassTy)) {
// Check if the new class derives from the old class.
- if (!IsDerivedFrom(NewClassTy, OldClassTy)) {
+ if (!IsDerivedFrom(New->getLocation(), NewClassTy, OldClassTy)) {
Diag(New->getLocation(), diag::err_covariant_return_not_derived)
<< New->getDeclName() << NewTy << OldTy
<< New->getReturnTypeSourceRange();
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index f42c4b7546ce..a2f41a7cc30a 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -15,12 +15,11 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
-#include "clang/AST/DataRecursiveASTVisitor.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Lookup.h"
@@ -100,9 +99,8 @@ bool Sema::checkInitMethod(ObjCMethodDecl *method,
// If we're in a system header, and this is not a call, just make
// the method unusable.
if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
- method->addAttr(UnavailableAttr::CreateImplicit(Context,
- "init method returns a type unrelated to its receiver type",
- loc));
+ method->addAttr(UnavailableAttr::CreateImplicit(Context, "",
+ UnavailableAttr::IR_ARCInitReturnsUnrelated, loc));
return true;
}
@@ -449,7 +447,7 @@ class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
ObjCInterfaceDecl *CurrentIDecl;
};
-}
+} // end anonymous namespace
static void diagnoseUseOfProtocols(Sema &TheSema,
ObjCContainerDecl *CD,
@@ -484,7 +482,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
if (TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(SuperName, SuperLoc),
LookupOrdinaryName, TUScope,
- NULL, llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl),
+ nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl),
CTK_ErrorRecovery)) {
diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
<< SuperName << ClassName);
@@ -507,7 +505,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
SuperClassType = Context.getObjCInterfaceType(SuperClassDecl);
}
- if (PrevDecl && SuperClassDecl == 0) {
+ if (PrevDecl && !SuperClassDecl) {
// The previous declaration was not a class decl. Check if we have a
// typedef. If we do, get the underlying class type.
if (const TypedefNameDecl *TDecl =
@@ -548,7 +546,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
SuperClassDecl->getDeclName(),
ClassName,
SourceRange(AtInterfaceLoc, ClassLoc))) {
- SuperClassDecl = 0;
+ SuperClassDecl = nullptr;
SuperClassType = QualType();
}
}
@@ -608,7 +606,7 @@ DeclResult Sema::actOnObjCTypeParam(Scope *S,
} else if (typeBound->isObjCObjectType()) {
// The user forgot the * on an Objective-C pointer type, e.g.,
// "T : NSView".
- SourceLocation starLoc = PP.getLocForEndOfToken(
+ SourceLocation starLoc = getLocForEndOfToken(
typeBoundInfo->getTypeLoc().getEndLoc());
Diag(typeBoundInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_param_bound_missing_pointer)
@@ -638,20 +636,44 @@ DeclResult Sema::actOnObjCTypeParam(Scope *S,
typeBoundInfo = nullptr;
}
- // Type bounds cannot have explicit nullability.
+ // Type bounds cannot have qualifiers (even indirectly) or explicit
+ // nullability.
if (typeBoundInfo) {
- // Type arguments cannot explicitly specify nullability.
- if (auto nullability = AttributedType::stripOuterNullability(typeBound)) {
- // Look at the type location information to find the nullability
- // specifier so we can zap it.
- SourceLocation nullabilityLoc
- = typeBoundInfo->getTypeLoc().findNullabilityLoc();
- SourceLocation diagLoc
- = nullabilityLoc.isValid()? nullabilityLoc
- : typeBoundInfo->getTypeLoc().getLocStart();
- Diag(diagLoc, diag::err_type_param_bound_explicit_nullability)
- << paramName << typeBoundInfo->getType()
- << FixItHint::CreateRemoval(nullabilityLoc);
+ QualType typeBound = typeBoundInfo->getType();
+ TypeLoc qual = typeBoundInfo->getTypeLoc().findExplicitQualifierLoc();
+ if (qual || typeBound.hasQualifiers()) {
+ bool diagnosed = false;
+ SourceRange rangeToRemove;
+ if (qual) {
+ if (auto attr = qual.getAs<AttributedTypeLoc>()) {
+ rangeToRemove = attr.getLocalSourceRange();
+ if (attr.getTypePtr()->getImmediateNullability()) {
+ Diag(attr.getLocStart(),
+ diag::err_objc_type_param_bound_explicit_nullability)
+ << paramName << typeBound
+ << FixItHint::CreateRemoval(rangeToRemove);
+ diagnosed = true;
+ }
+ }
+ }
+
+ if (!diagnosed) {
+ Diag(qual ? qual.getLocStart()
+ : typeBoundInfo->getTypeLoc().getLocStart(),
+ diag::err_objc_type_param_bound_qualified)
+ << paramName << typeBound << typeBound.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
+ }
+
+ // If the type bound has qualifiers other than CVR, we need to strip
+ // them or we'll probably assert later when trying to apply new
+ // qualifiers.
+ Qualifiers quals = typeBound.getQualifiers();
+ quals.removeCVRQualifiers();
+ if (!quals.empty()) {
+ typeBoundInfo =
+ Context.getTrivialTypeSourceInfo(typeBound.getUnqualifiedType());
+ }
}
}
}
@@ -722,7 +744,7 @@ namespace {
Category,
Extension
};
-}
+} // end anonymous namespace
/// Check consistency between two Objective-C type parameter lists, e.g.,
/// between a category/extension and an \@interface or between an \@class and an
@@ -737,7 +759,7 @@ static bool checkTypeParamListConsistency(Sema &S,
if (newTypeParams->size() > prevTypeParams->size()) {
diagLoc = newTypeParams->begin()[prevTypeParams->size()]->getLocation();
} else {
- diagLoc = S.PP.getLocForEndOfToken(newTypeParams->back()->getLocEnd());
+ diagLoc = S.getLocForEndOfToken(newTypeParams->back()->getLocEnd());
}
S.Diag(diagLoc, diag::err_objc_type_param_arity_mismatch)
@@ -852,7 +874,7 @@ static bool checkTypeParamListConsistency(Sema &S,
newContext == TypeParamListContext::Definition) {
// Diagnose this problem for forward declarations and definitions.
SourceLocation insertionLoc
- = S.PP.getLocForEndOfToken(newTypeParam->getLocation());
+ = S.getLocForEndOfToken(newTypeParam->getLocation());
std::string newCode
= " : " + prevTypeParam->getUnderlyingType().getAsString(
S.Context.getPrintingPolicy());
@@ -1184,26 +1206,23 @@ static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
/// protocol declarations in its 'Protocols' argument.
void
Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
- const IdentifierLocPair *ProtocolId,
- unsigned NumProtocols,
+ ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols) {
- for (unsigned i = 0; i != NumProtocols; ++i) {
- ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
- ProtocolId[i].second);
+ for (const IdentifierLocPair &Pair : ProtocolId) {
+ ObjCProtocolDecl *PDecl = LookupProtocol(Pair.first, Pair.second);
if (!PDecl) {
TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
+ DeclarationNameInfo(Pair.first, Pair.second),
LookupObjCProtocolName, TUScope, nullptr,
llvm::make_unique<DeclFilterCCC<ObjCProtocolDecl>>(),
CTK_ErrorRecovery);
if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
- << ProtocolId[i].first);
+ << Pair.first);
}
if (!PDecl) {
- Diag(ProtocolId[i].second, diag::err_undeclared_protocol)
- << ProtocolId[i].first;
+ Diag(Pair.second, diag::err_undeclared_protocol) << Pair.first;
continue;
}
// If this is a forward protocol declaration, get its definition.
@@ -1213,7 +1232,7 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
// For an objc container, delay protocol reference checking until after we
// can set the objc decl as the availability context, otherwise check now.
if (!ForObjCContainer) {
- (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second);
+ (void)DiagnoseUseOfDecl(PDecl, Pair.second);
}
// If this is a forward declaration and we are supposed to warn in this
@@ -1223,8 +1242,7 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
if (WarnOnDeclarations &&
NestedProtocolHasNoDefinition(PDecl, UndefinedProtocol)) {
- Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
- << ProtocolId[i].first;
+ Diag(Pair.second, diag::warn_undef_protocolref) << Pair.first;
Diag(UndefinedProtocol->getLocation(), diag::note_protocol_decl_undefined)
<< UndefinedProtocol;
}
@@ -1388,8 +1406,8 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
if (allProtocolsDeclared) {
Diag(firstClassNameLoc, diag::warn_objc_redundant_qualified_class_type)
<< baseClass->getDeclName() << SourceRange(lAngleLoc, rAngleLoc)
- << FixItHint::CreateInsertion(
- PP.getLocForEndOfToken(firstClassNameLoc), " *");
+ << FixItHint::CreateInsertion(getLocForEndOfToken(firstClassNameLoc),
+ " *");
}
}
@@ -1469,15 +1487,15 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
if (type->getAs<ObjCInterfaceType>()) {
- SourceLocation starLoc = PP.getLocForEndOfToken(loc);
+ SourceLocation starLoc = getLocForEndOfToken(loc);
ParsedAttributes parsedAttrs(attrFactory);
D.AddTypeInfo(DeclaratorChunk::getPointer(/*typeQuals=*/0, starLoc,
SourceLocation(),
SourceLocation(),
SourceLocation(),
SourceLocation()),
- parsedAttrs,
- starLoc);
+ parsedAttrs,
+ starLoc);
// Diagnose the missing '*'.
Diag(loc, diag::err_objc_type_arg_missing_star)
@@ -1655,17 +1673,16 @@ void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
/// ActOnForwardProtocolDeclaration - Handle \@protocol foo;
Sema::DeclGroupPtrTy
Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
- const IdentifierLocPair *IdentList,
- unsigned NumElts,
+ ArrayRef<IdentifierLocPair> IdentList,
AttributeList *attrList) {
SmallVector<Decl *, 8> DeclsInGroup;
- for (unsigned i = 0; i != NumElts; ++i) {
- IdentifierInfo *Ident = IdentList[i].first;
- ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentList[i].second,
+ for (const IdentifierLocPair &IdentPair : IdentList) {
+ IdentifierInfo *Ident = IdentPair.first;
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentPair.second,
ForRedeclaration);
ObjCProtocolDecl *PDecl
= ObjCProtocolDecl::Create(Context, CurContext, Ident,
- IdentList[i].second, AtProtocolLoc,
+ IdentPair.second, AtProtocolLoc,
PrevDecl);
PushOnScopeChains(PDecl, TUScope);
@@ -1850,6 +1867,8 @@ Decl *Sema::ActOnStartClassImplementation(
Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
} else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
+ // FIXME: This will produce an error if the definition of the interface has
+ // been imported from a module but is not visible.
RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
diag::warn_undef_interface);
} else {
@@ -2247,7 +2266,7 @@ static bool CheckMethodOverrideReturn(Sema &S,
DiagID =
IsOverridingMode ? diag::warn_non_covariant_overriding_ret_types
- : diag::warn_non_covariant_ret_types;
+ : diag::warn_non_covariant_ret_types;
}
}
@@ -2331,7 +2350,7 @@ static bool CheckMethodOverrideParam(Sema &S,
DiagID =
IsOverridingMode ? diag::warn_non_contravariant_overriding_param_types
- : diag::warn_non_contravariant_param_types;
+ : diag::warn_non_contravariant_param_types;
}
}
@@ -2340,7 +2359,7 @@ static bool CheckMethodOverrideParam(Sema &S,
<< MethodImpl->getDeclName() << IfaceTy << ImplTy;
S.Diag(IfaceVar->getLocation(),
(IsOverridingMode ? diag::note_previous_declaration
- : diag::note_previous_definition))
+ : diag::note_previous_definition))
<< getTypeRange(IfaceVar->getTypeSourceInfo());
return false;
}
@@ -2749,7 +2768,8 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!WarnCategoryMethodImpl) {
for (auto *Cat : I->visible_categories())
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
- IMPDecl, Cat, IncompleteImpl, false,
+ IMPDecl, Cat, IncompleteImpl,
+ ImmediateClass && Cat->IsClassExtension(),
WarnCategoryMethodImpl);
} else {
// Also methods in class extensions need be looked at next.
@@ -2825,6 +2845,20 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
for (const auto *I : IMPDecl->instance_methods())
InsMap.insert(I->getSelector());
+ // Add the selectors for getters/setters of @dynamic properties.
+ for (const auto *PImpl : IMPDecl->property_impls()) {
+ // We only care about @dynamic implementations.
+ if (PImpl->getPropertyImplementation() != ObjCPropertyImplDecl::Dynamic)
+ continue;
+
+ const auto *P = PImpl->getPropertyDecl();
+ if (!P) continue;
+
+ InsMap.insert(P->getGetterName());
+ if (!P->getSetterName().isNull())
+ InsMap.insert(P->getSetterName());
+ }
+
// Check and see if properties declared in the interface have either 1)
// an implementation or 2) there is a @synthesize/@dynamic implementation
// of the property in the @implementation.
@@ -2866,9 +2900,6 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
for (auto *PI : I->all_referenced_protocols())
CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl,
InsMap, ClsMap, I, ExplicitImplProtocols);
- // Check class extensions (unnamed categories)
- for (auto *Ext : I->visible_extensions())
- ImplMethodsVsClassMethods(S, IMPDecl, Ext, IncompleteImpl);
} else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
// For extended class, unimplemented methods in its protocols will
// be reported in the primary class.
@@ -3478,6 +3509,23 @@ void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
}
}
+/// Diagnose attempts to define ARC-__weak ivars when __weak is disabled.
+static void DiagnoseWeakIvars(Sema &S, ObjCImplementationDecl *ID) {
+ if (S.getLangOpts().ObjCWeak) return;
+
+ for (auto ivar = ID->getClassInterface()->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ if (ivar->isInvalidDecl()) continue;
+ if (ivar->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (S.getLangOpts().ObjCWeakRuntime) {
+ S.Diag(ivar->getLocation(), diag::err_arc_weak_disabled);
+ } else {
+ S.Diag(ivar->getLocation(), diag::err_arc_weak_no_runtime);
+ }
+ }
+ }
+}
+
Sema::ObjCContainerKind Sema::getObjCContainerKind() const {
switch (CurContext->getDeclKind()) {
case Decl::ObjCInterface:
@@ -3590,7 +3638,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
// user-defined setter/getter. It also synthesizes setter/getter methods
// and adds them to the DeclContext and global method pools.
for (auto *I : CDecl->properties())
- ProcessPropertyDecl(I, CDecl);
+ ProcessPropertyDecl(I);
CDecl->setAtEndRange(AtEnd);
}
if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
@@ -3627,6 +3675,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
DiagnoseUnusedBackingIvarInAccessor(S, IC);
if (IDecl->hasDesignatedInitializers())
DiagnoseMissingDesignatedInitOverrides(IC, IDecl);
+ DiagnoseWeakIvars(*this, IC);
bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
if (IDecl->getSuperClass() == nullptr) {
@@ -3700,7 +3749,6 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
return ClassDecl;
}
-
/// CvtQTToAstBitMask - utility routine to produce an AST bitmask for
/// objective-c's type qualifier from the parser version of the same info.
static Decl::ObjCDeclQualifier
@@ -3867,7 +3915,6 @@ private:
search(Interface);
}
-
void search(const ObjCProtocolList &protocols) {
for (ObjCProtocolList::iterator i = protocols.begin(), e = protocols.end();
i != e; ++i)
@@ -3895,7 +3942,7 @@ private:
searchFromContainer(container);
}
};
-}
+} // end anonymous namespace
void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
@@ -4490,7 +4537,6 @@ void Sema::DiagnoseUseOfUnimplementedSelectors() {
if (!LookupImplementedMethodInGlobalPool(Sel))
Diag(Loc, diag::warn_unimplemented_selector) << Sel;
}
- return;
}
ObjCIvarDecl *
@@ -4522,7 +4568,7 @@ namespace {
/// Used by Sema::DiagnoseUnusedBackingIvarInAccessor to check if a property
/// accessor references the backing ivar.
class UnusedBackingIvarChecker :
- public DataRecursiveASTVisitor<UnusedBackingIvarChecker> {
+ public RecursiveASTVisitor<UnusedBackingIvarChecker> {
public:
Sema &S;
const ObjCMethodDecl *Method;
@@ -4553,7 +4599,7 @@ namespace {
return true;
}
};
-}
+} // end anonymous namespace
void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD) {
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 2cf02b484cb4..f12bf2415dba 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -68,7 +68,7 @@ bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
///
/// \param[in,out] T The exception type. This will be decayed to a pointer type
/// when the input is an array or a function type.
-bool Sema::CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range) {
+bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
// C++11 [except.spec]p2:
// A type cv T, "array of T", or "function returning T" denoted
// in an exception-specification is adjusted to type T, "pointer to T", or
@@ -232,7 +232,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) {
Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch)
<< hasImplicitExceptionSpec(Old);
- if (!Old->getLocation().isInvalid())
+ if (Old->getLocation().isValid())
Diag(Old->getLocation(), diag::note_previous_declaration);
}
return false;
@@ -270,16 +270,35 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = OldProto->exceptions();
- } else if (ESI.Type == EST_ComputedNoexcept) {
- // FIXME: We can't just take the expression from the old prototype. It
- // likely contains references to the old prototype's parameters.
}
- // Update the type of the function with the appropriate exception
- // specification.
- New->setType(Context.getFunctionType(
- NewProto->getReturnType(), NewProto->getParamTypes(),
- NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
+ if (ESI.Type == EST_ComputedNoexcept) {
+ // For computed noexcept, we can't just take the expression from the old
+ // prototype. It likely contains references to the old prototype's
+ // parameters.
+ New->setInvalidDecl();
+ } else {
+ // Update the type of the function with the appropriate exception
+ // specification.
+ New->setType(Context.getFunctionType(
+ NewProto->getReturnType(), NewProto->getParamTypes(),
+ NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
+ }
+
+ unsigned DiagID;
+ bool ReturnValueOnError;
+ if (getLangOpts().MicrosoftExt && ESI.Type != EST_ComputedNoexcept) {
+ // Allow missing exception specifications in redeclarations as an extension.
+ DiagID = diag::ext_ms_missing_exception_specification;
+ ReturnValueOnError = false;
+ } else if (New->isReplaceableGlobalAllocationFunction() &&
+ ESI.Type != EST_ComputedNoexcept) {
+ // Allow missing exception specifications in redeclarations as an extension,
+ // when declaring a replaceable global allocation function.
+ DiagID = diag::ext_missing_exception_specification;
+ ReturnValueOnError = false;
+ } else {
+ DiagID = diag::err_missing_exception_specification;
+ ReturnValueOnError = true;
+ }
// Warn about the lack of exception specification.
SmallString<128> ExceptionSpecString;
@@ -318,30 +337,30 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
default:
llvm_unreachable("This spec type is compatible with none.");
}
- OS.flush();
SourceLocation FixItLoc;
if (TypeSourceInfo *TSInfo = New->getTypeSourceInfo()) {
TypeLoc TL = TSInfo->getTypeLoc().IgnoreParens();
- if (FunctionTypeLoc FTLoc = TL.getAs<FunctionTypeLoc>())
- FixItLoc = getLocForEndOfToken(FTLoc.getLocalRangeEnd());
+ // FIXME: Preserve enough information so that we can produce a correct fixit
+ // location when there is a trailing return type.
+ if (auto FTLoc = TL.getAs<FunctionProtoTypeLoc>())
+ if (!FTLoc.getTypePtr()->hasTrailingReturn())
+ FixItLoc = getLocForEndOfToken(FTLoc.getLocalRangeEnd());
}
if (FixItLoc.isInvalid())
- Diag(New->getLocation(), diag::warn_missing_exception_specification)
+ Diag(New->getLocation(), DiagID)
<< New << OS.str();
else {
- // FIXME: This will get more complicated with C++0x
- // late-specified return types.
- Diag(New->getLocation(), diag::warn_missing_exception_specification)
+ Diag(New->getLocation(), DiagID)
<< New << OS.str()
<< FixItHint::CreateInsertion(FixItLoc, " " + OS.str().str());
}
- if (!Old->getLocation().isInvalid())
+ if (Old->getLocation().isValid())
Diag(Old->getLocation(), diag::note_previous_declaration);
- return false;
+ return ReturnValueOnError;
}
/// CheckEquivalentExceptionSpec - Check if the two types have equivalent
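For illustration, a hypothetical translation unit (not part of this patch) that reaches the new diagnostic selection above:

// Hypothetical example: a redeclaration that drops the exception spec.
void f() throw(int);   // old declaration, EST_Dynamic
void f();              // err_missing_exception_specification by default;
                       // under -fms-extensions this becomes only the
                       // ext_ms_missing_exception_specification warning, and
                       // ReturnValueOnError=false lets Sema keep the decl.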
@@ -694,7 +713,7 @@ bool Sema::CheckExceptionSpecSubset(
continue;
Paths.clear();
- if (!IsDerivedFrom(CanonicalSubT, CanonicalSuperT, Paths))
+ if (!IsDerivedFrom(SubLoc, CanonicalSubT, CanonicalSuperT, Paths))
continue;
if (Paths.isAmbiguous(Context.getCanonicalType(CanonicalSuperT)))
@@ -979,8 +998,9 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::LambdaExprClass: {
const LambdaExpr *Lambda = cast<LambdaExpr>(E);
CanThrowResult CT = CT_Cannot;
- for (LambdaExpr::capture_init_iterator Cap = Lambda->capture_init_begin(),
- CapEnd = Lambda->capture_init_end();
+ for (LambdaExpr::const_capture_init_iterator
+ Cap = Lambda->capture_init_begin(),
+ CapEnd = Lambda->capture_init_end();
Cap != CapEnd; ++Cap)
CT = mergeCanThrow(CT, canThrow(*Cap));
return CT;
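The iterator change here is mechanical, but the loop's effect is easy to miss: canThrow on a lambda folds in the capture initializers, not just the body. A hypothetical sketch:

struct Handle {
  Handle();
  Handle(const Handle &);        // potentially-throwing copy constructor
};
void demo(Handle h) {
  bool b = noexcept([h] {});     // false: copy-initializing the capture 'h'
                                 // may throw even though the body cannot
}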
@@ -1043,8 +1063,10 @@ CanThrowResult Sema::canThrow(const Expr *E) {
// Many other things have subexpressions, so we have to test those.
// Some are simple:
+ case Expr::CoawaitExprClass:
case Expr::ConditionalOperatorClass:
case Expr::CompoundLiteralExprClass:
+ case Expr::CoyieldExprClass:
case Expr::CXXConstCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXStdInitializerListExprClass:
@@ -1065,6 +1087,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
// Some might be dependent for other reasons.
case Expr::ArraySubscriptExprClass:
+ case Expr::OMPArraySectionExprClass:
case Expr::BinaryOperatorClass:
case Expr::CompoundAssignOperatorClass:
case Expr::CStyleCastExprClass:
@@ -1156,6 +1179,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
return CT_Cannot;
case Expr::MSPropertyRefExprClass:
+ case Expr::MSPropertySubscriptExprClass:
llvm_unreachable("Invalid class for expression");
#define STMT(CLASS, PARENT) case Expr::CLASS##Class:
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 6499cb5d19eb..5d0c6057f54f 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/PartialDiagnostic.h"
@@ -326,18 +327,16 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
// If there were any diagnostics suppressed by template argument deduction,
// emit them now.
- SuppressedDiagnosticsMap::iterator
- Pos = SuppressedDiagnostics.find(D->getCanonicalDecl());
+ auto Pos = SuppressedDiagnostics.find(D->getCanonicalDecl());
if (Pos != SuppressedDiagnostics.end()) {
- SmallVectorImpl<PartialDiagnosticAt> &Suppressed = Pos->second;
- for (unsigned I = 0, N = Suppressed.size(); I != N; ++I)
- Diag(Suppressed[I].first, Suppressed[I].second);
+ for (const PartialDiagnosticAt &Suppressed : Pos->second)
+ Diag(Suppressed.first, Suppressed.second);
// Clear out the list of suppressed diagnostics, so that we don't emit
// them again for this specialization. However, we don't obsolete this
// entry from the table, because we want to avoid ever emitting these
// diagnostics again.
- Suppressed.clear();
+ Pos->second.clear();
}
// C++ [basic.start.main]p3:
@@ -348,8 +347,10 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
// See if this is an auto-typed variable whose initializer we are parsing.
if (ParsingInitForAutoVars.count(D)) {
+ const AutoType *AT = cast<VarDecl>(D)->getType()->getContainedAutoType();
+
Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
- << D->getDeclName();
+ << D->getDeclName() << (unsigned)AT->getKeyword();
return true;
}
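The extra << (unsigned)AT->getKeyword() argument lets the diagnostic spell whichever deduced-type keyword the user actually wrote; hypothetically:

auto a = a;             // error: 'a' cannot appear in its own initializer,
                        // worded for 'auto'
decltype(auto) b = b;   // same diagnostic, now worded for 'decltype(auto)'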
@@ -464,7 +465,7 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
// 'nil' for ObjC methods, where it's much more likely that the
// variadic arguments form a list of object pointers.
SourceLocation MissingNilLoc
- = PP.getLocForEndOfToken(sentinelExpr->getLocEnd());
+ = getLocForEndOfToken(sentinelExpr->getLocEnd());
std::string NullValue;
if (calleeType == CT_Method && PP.isMacroDefined("nil"))
NullValue = "nil";
@@ -493,7 +494,7 @@ SourceRange Sema::getExprRange(Expr *E) const {
//===----------------------------------------------------------------------===//
/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
-ExprResult Sema::DefaultFunctionArrayConversion(Expr *E) {
+ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) {
// Handle any placeholder expressions which made it here.
if (E->getType()->isPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
@@ -508,9 +509,16 @@ ExprResult Sema::DefaultFunctionArrayConversion(Expr *E) {
// If we are here, we are not calling a function but taking
// its address (which is not allowed in OpenCL v1.0 s6.8.a.3).
if (getLangOpts().OpenCL) {
- Diag(E->getExprLoc(), diag::err_opencl_taking_function_address);
+ if (Diagnose)
+ Diag(E->getExprLoc(), diag::err_opencl_taking_function_address);
return ExprError();
}
+
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl()))
+ if (!checkAddressOfFunctionIsAvailable(FD, Diagnose, E->getExprLoc()))
+ return ExprError();
+
E = ImpCastExprToType(E, Context.getPointerType(Ty),
CK_FunctionToPointerDecay).get();
} else if (Ty->isArrayType()) {
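A hypothetical trigger for the new checkAddressOfFunctionIsAvailable call during function-to-pointer decay (enable_if is one case where a function's address is unavailable):

int pick(int n) __attribute__((enable_if(n > 0, "n must be positive")));
int (*fp)(int) = pick;   // rejected during decay: 'pick' has a
                         // non-tautological enable_if condition, so its
                         // address cannot be taken (silently if !Diagnose)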
@@ -579,7 +587,7 @@ static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
&S.Context.Idents.get("object_setClass"),
SourceLocation(), S.LookupOrdinaryName);
if (ObjectSetClass) {
- SourceLocation RHSLocEnd = S.PP.getLocForEndOfToken(RHS->getLocEnd());
+ SourceLocation RHSLocEnd = S.getLocForEndOfToken(RHS->getLocEnd());
S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign) <<
FixItHint::CreateInsertion(OIRE->getLocStart(), "object_setClass(") <<
FixItHint::CreateReplacement(SourceRange(OIRE->getOpLoc(),
@@ -676,6 +684,11 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (T.hasQualifiers())
T = T.getUnqualifiedType();
+ // Under the MS ABI, lock down the inheritance model now.
+ if (T->isMemberPointerType() &&
+ Context.getTargetInfo().getCXXABI().isMicrosoft())
+ (void)isCompleteType(E->getExprLoc(), T);
+
UpdateMarkingForLValueToRValue(E);
// Loading a __weak object implicitly retains the value, so we need a cleanup to
@@ -699,8 +712,8 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
return Res;
}
-ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E) {
- ExprResult Res = DefaultFunctionArrayConversion(E);
+ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose) {
+ ExprResult Res = DefaultFunctionArrayConversion(E, Diagnose);
if (Res.isInvalid())
return ExprError();
Res = DefaultLvalueConversion(Res.get());
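The new isCompleteType call above is subtle: under the Microsoft ABI the representation of a pointer-to-member depends on the class's inheritance model, so an rvalue use must pin the model down. A hypothetical sketch, assuming an MS ABI target:

struct S;                // not yet defined: inheritance model still open
int S::*get();
int S::*copy() {
  int S::*mp = get();
  return mp;             // the lvalue-to-rvalue read of 'mp' locks S's
                         // inheritance model (and hence mp's size) here
}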
@@ -1349,11 +1362,13 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
ArrayRef<Expr *> Exprs) {
unsigned NumAssocs = Types.size();
assert(NumAssocs == Exprs.size());
- if (ControllingExpr->getType()->isPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(ControllingExpr);
- if (result.isInvalid()) return ExprError();
- ControllingExpr = result.get();
- }
+
+ // Decay and strip qualifiers for the controlling expression type, and handle
+ // placeholder type replacement. See committee discussion from WG14 DR423.
+ ExprResult R = DefaultFunctionArrayLvalueConversion(ControllingExpr);
+ if (R.isInvalid())
+ return ExprError();
+ ControllingExpr = R.get();
// The controlling expression is an unevaluated operand, so side effects are
// likely unintended.
@@ -1445,12 +1460,11 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_multi_match)
<< ControllingExpr->getSourceRange() << ControllingExpr->getType()
<< (unsigned) CompatIndices.size();
- for (SmallVectorImpl<unsigned>::iterator I = CompatIndices.begin(),
- E = CompatIndices.end(); I != E; ++I) {
- Diag(Types[*I]->getTypeLoc().getBeginLoc(),
+ for (unsigned I : CompatIndices) {
+ Diag(Types[I]->getTypeLoc().getBeginLoc(),
diag::note_compat_assoc)
- << Types[*I]->getTypeLoc().getSourceRange()
- << Types[*I]->getType();
+ << Types[I]->getTypeLoc().getSourceRange()
+ << Types[I]->getType();
}
return ExprError();
}
@@ -1533,8 +1547,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
return ExprError();
SmallVector<SourceLocation, 4> StringTokLocs;
- for (unsigned i = 0; i != StringToks.size(); ++i)
- StringTokLocs.push_back(StringToks[i].getLocation());
+ for (const Token &Tok : StringToks)
+ StringTokLocs.push_back(Tok.getLocation());
QualType CharTy = Context.CharTy;
StringLiteral::StringKind Kind = StringLiteral::Ascii;
@@ -1697,7 +1711,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
MarkDeclRefReferenced(E);
- if (getLangOpts().ObjCARCWeak && isa<VarDecl>(D) &&
+ if (getLangOpts().ObjCWeak && isa<VarDecl>(D) &&
Ty.getObjCLifetime() == Qualifiers::OCL_Weak &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getLocStart()))
recordUseOfEvaluatedWeak(E);
@@ -1762,10 +1776,9 @@ static void emitEmptyLookupTypoDiagnostic(
std::string CorrectedStr = TC.getAsString(SemaRef.getLangOpts());
bool DroppedSpecifier =
TC.WillReplaceSpecifier() && Typo.getAsString() == CorrectedStr;
- unsigned NoteID =
- (TC.getCorrectionDecl() && isa<ImplicitParamDecl>(TC.getCorrectionDecl()))
- ? diag::note_implicit_param_decl
- : diag::note_previous_decl;
+ unsigned NoteID = TC.getCorrectionDeclAs<ImplicitParamDecl>()
+ ? diag::note_implicit_param_decl
+ : diag::note_previous_decl;
if (!Ctx)
SemaRef.diagnoseTypo(TC, SemaRef.PDiag(DiagnosticSuggestID) << Typo,
SemaRef.PDiag(NoteID));
@@ -1799,8 +1812,7 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
- DeclContext *DC = (SS.isEmpty() && !CallsUndergoingInstantiation.empty())
- ? CurContext : nullptr;
+ DeclContext *DC = SS.isEmpty() ? CurContext : nullptr;
while (DC) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@@ -1819,7 +1831,6 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
bool isInstance = CurMethod &&
CurMethod->isInstance() &&
DC == CurMethod->getParent() && !isDefaultArgument;
-
// Give a code modification hint to insert 'this->'.
// TODO: fixit for inserting 'Base<T>::' in the other cases.
@@ -1829,46 +1840,14 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
if (isInstance) {
Diag(R.getNameLoc(), diagnostic) << Name
<< FixItHint::CreateInsertion(R.getNameLoc(), "this->");
- UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(
- CallsUndergoingInstantiation.back()->getCallee());
-
- CXXMethodDecl *DepMethod;
- if (CurMethod->isDependentContext())
- DepMethod = CurMethod;
- else if (CurMethod->getTemplatedKind() ==
- FunctionDecl::TK_FunctionTemplateSpecialization)
- DepMethod = cast<CXXMethodDecl>(CurMethod->getPrimaryTemplate()->
- getInstantiatedFromMemberTemplate()->getTemplatedDecl());
- else
- DepMethod = cast<CXXMethodDecl>(
- CurMethod->getInstantiatedFromMemberFunction());
- assert(DepMethod && "No template pattern found");
-
- QualType DepThisType = DepMethod->getThisType(Context);
CheckCXXThisCapture(R.getNameLoc());
- CXXThisExpr *DepThis = new (Context) CXXThisExpr(
- R.getNameLoc(), DepThisType, false);
- TemplateArgumentListInfo TList;
- if (ULE->hasExplicitTemplateArgs())
- ULE->copyTemplateArgumentsInto(TList);
-
- CXXScopeSpec SS;
- SS.Adopt(ULE->getQualifierLoc());
- CXXDependentScopeMemberExpr *DepExpr =
- CXXDependentScopeMemberExpr::Create(
- Context, DepThis, DepThisType, true, SourceLocation(),
- SS.getWithLocInContext(Context),
- ULE->getTemplateKeywordLoc(), nullptr,
- R.getLookupNameInfo(),
- ULE->hasExplicitTemplateArgs() ? &TList : nullptr);
- CallsUndergoingInstantiation.back()->setCallee(DepExpr);
} else {
Diag(R.getNameLoc(), diagnostic) << Name;
}
// Do we really want to note all of these?
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
- Diag((*I)->getLocation(), diag::note_dependent_var_use);
+ for (NamedDecl *D : R)
+ Diag(D->getLocation(), diag::note_dependent_var_use);
// Return true if we are inside a default argument instantiation
// and the found name refers to an instance member function, otherwise
@@ -1923,28 +1902,26 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
bool AcceptableWithRecovery = false;
bool AcceptableWithoutRecovery = false;
- NamedDecl *ND = Corrected.getCorrectionDecl();
+ NamedDecl *ND = Corrected.getFoundDecl();
if (ND) {
if (Corrected.isOverloaded()) {
OverloadCandidateSet OCS(R.getNameLoc(),
OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet::iterator Best;
- for (TypoCorrection::decl_iterator CD = Corrected.begin(),
- CDEnd = Corrected.end();
- CD != CDEnd; ++CD) {
+ for (NamedDecl *CD : Corrected) {
if (FunctionTemplateDecl *FTD =
- dyn_cast<FunctionTemplateDecl>(*CD))
+ dyn_cast<FunctionTemplateDecl>(CD))
AddTemplateOverloadCandidate(
FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs,
Args, OCS);
- else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD))
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(CD))
if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0)
AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none),
Args, OCS);
}
switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) {
case OR_Success:
- ND = Best->Function;
+ ND = Best->FoundDecl;
Corrected.setCorrectionDecl(ND);
break;
default:
@@ -1966,15 +1943,16 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
R.setNamingClass(Record);
}
- AcceptableWithRecovery =
- isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND);
+ auto *UnderlyingND = ND->getUnderlyingDecl();
+ AcceptableWithRecovery = isa<ValueDecl>(UnderlyingND) ||
+ isa<FunctionTemplateDecl>(UnderlyingND);
// FIXME: If we ended up with a typo for a type name or
// Objective-C class name, we're in trouble because the parser
// is in the wrong place to recover. Suggest the typo
// correction, but don't make it a fix-it since we're not going
// to recover well anyway.
AcceptableWithoutRecovery =
- isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
+ isa<TypeDecl>(UnderlyingND) || isa<ObjCInterfaceDecl>(UnderlyingND);
} else {
// FIXME: We found a keyword. Suggest it, but don't provide a fix-it
// because we aren't able to recover.
@@ -1982,8 +1960,7 @@ Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
}
if (AcceptableWithRecovery || AcceptableWithoutRecovery) {
- unsigned NoteID = (Corrected.getCorrectionDecl() &&
- isa<ImplicitParamDecl>(Corrected.getCorrectionDecl()))
+ unsigned NoteID = Corrected.getCorrectionDeclAs<ImplicitParamDecl>()
? diag::note_implicit_param_decl
: diag::note_previous_decl;
if (SS.isEmpty())
@@ -2278,7 +2255,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
if (MightBeImplicitMember)
return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
- R, TemplateArgs);
+ R, TemplateArgs, S);
}
if (TemplateArgs || TemplateKWLoc.isValid()) {
@@ -2302,11 +2279,9 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
/// declaration name, generally during template instantiation.
/// There's a large number of things which don't need to be done along
/// this path.
-ExprResult
-Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand,
- TypeSourceInfo **RecoveryTSI) {
+ExprResult Sema::BuildQualifiedDeclarationNameExpr(
+ CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
DeclContext *DC = computeDeclContext(SS, false);
if (!DC)
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
@@ -2373,7 +2348,7 @@ Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
return BuildPossibleImplicitMemberExpr(SS,
/*TemplateKWLoc=*/SourceLocation(),
- R, /*TemplateArgs=*/nullptr);
+ R, /*TemplateArgs=*/nullptr, S);
return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
}
@@ -2615,7 +2590,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
// In C++98, the qualifier type doesn't actually have to be a base
// type of the object type, in which case we just ignore it.
// Otherwise build the appropriate casts.
- if (IsDerivedFrom(FromRecordType, QRecordType)) {
+ if (IsDerivedFrom(FromLoc, FromRecordType, QRecordType)) {
CXXCastPath BasePath;
if (CheckDerivedToBaseConversion(FromRecordType, QRecordType,
FromLoc, FromRange, &BasePath))
@@ -2651,7 +2626,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
// We only need to do this if the naming-class to declaring-class
// conversion is non-trivial.
if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) {
- assert(IsDerivedFrom(FromRecordType, URecordType));
+ assert(IsDerivedFrom(FromLoc, FromRecordType, URecordType));
CXXCastPath BasePath;
if (CheckDerivedToBaseConversion(FromRecordType, URecordType,
FromLoc, FromRange, &BasePath))
@@ -2698,9 +2673,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
// Turn off ADL when we find certain kinds of declarations during
// normal lookup:
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
- NamedDecl *D = *I;
-
+ for (NamedDecl *D : R) {
// C++0x [basic.lookup.argdep]p3:
// -- a declaration of a class member
// Since using decls preserve this property, we check this on the
@@ -3355,13 +3328,6 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Get the value in the widest-possible width.
unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
- // The microsoft literal suffix extensions support 128-bit literals, which
- // may be wider than [u]intmax_t.
- // FIXME: Actually, they don't. We seem to have accidentally invented the
- // i128 suffix.
- if (Literal.MicrosoftInteger == 128 && MaxWidth < 128 &&
- Context.getTargetInfo().hasInt128Type())
- MaxWidth = 128;
llvm::APInt ResultVal(MaxWidth, 0);
if (Literal.GetIntegerValue(ResultVal)) {
@@ -3384,12 +3350,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Microsoft specific integer suffixes are explicitly sized.
if (Literal.MicrosoftInteger) {
- if (Literal.MicrosoftInteger > MaxWidth) {
- // If this target doesn't support __int128, error and force to ull.
- Diag(Tok.getLocation(), diag::err_int128_unsupported);
- Width = MaxWidth;
- Ty = Context.getIntMaxType();
- } else if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) {
+ if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) {
Width = 8;
Ty = Context.CharTy;
} else {
@@ -3726,7 +3687,7 @@ static bool CheckAlignOfExpr(Sema &S, Expr *E) {
return false;
if (E->getObjectKind() == OK_BitField) {
- S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield)
+ S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield)
<< 1 << E->getSourceRange();
return true;
}
@@ -3828,7 +3789,7 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
Diag(E->getExprLoc(), diag::err_openmp_default_simd_align_expr);
isInvalid = true;
} else if (E->refersToBitField()) { // C99 6.5.3.4p1.
- Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield) << 0;
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 0;
isInvalid = true;
} else {
isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
@@ -3854,7 +3815,7 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
ExprResult
Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind, bool IsType,
- void *TyOrEx, const SourceRange &ArgRange) {
+ void *TyOrEx, SourceRange ArgRange) {
// If error parsing type, ignore.
if (!TyOrEx) return ExprError();
@@ -3940,9 +3901,21 @@ static bool checkArithmeticOnObjCPointer(Sema &S,
return true;
}
+static bool isMSPropertySubscriptExpr(Sema &S, Expr *Base) {
+ auto *BaseNoParens = Base->IgnoreParens();
+ if (auto *MSProp = dyn_cast<MSPropertyRefExpr>(BaseNoParens))
+ return MSProp->getPropertyDecl()->getType()->isArrayType();
+ return isa<MSPropertySubscriptExpr>(BaseNoParens);
+}
+
ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
Expr *idx, SourceLocation rbLoc) {
+ if (base && !base->getType().isNull() &&
+ base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
+ return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
+ /*Length=*/nullptr, rbLoc);
+
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
ExprResult result = MaybeConvertParenListExprToParenExpr(S, base);
@@ -3955,10 +3928,15 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// operand might be an overloadable type, in which case the overload
// resolution for the operator overload should get the first crack
// at the overload.
+ bool IsMSPropertySubscript = false;
if (base->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(base);
- if (result.isInvalid()) return ExprError();
- base = result.get();
+ IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
+ if (!IsMSPropertySubscript) {
+ ExprResult result = CheckPlaceholderExpr(base);
+ if (result.isInvalid())
+ return ExprError();
+ base = result.get();
+ }
}
if (idx->getType()->isNonOverloadPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(idx);
@@ -3973,6 +3951,21 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
VK_LValue, OK_Ordinary, rbLoc);
}
+ // MSDN, property (C++)
+ // https://msdn.microsoft.com/en-us/library/yhfk0thd(v=vs.120).aspx
+ // This attribute can also be used in the declaration of an empty array in a
+ // class or structure definition. For example:
+ // __declspec(property(get=GetX, put=PutX)) int x[];
+ // The above statement indicates that x[] can be used with one or more array
+ // indices. In this case, i=p->x[a][b] will be turned into i=p->GetX(a, b),
+ // and p->x[a][b] = i will be turned into p->PutX(a, b, i);
+ if (IsMSPropertySubscript) {
+ // Build MS property subscript expression if base is MS property reference
+ // or MS property subscript.
+ return new (Context) MSPropertySubscriptExpr(
+ base, idx, Context.PseudoObjectTy, VK_LValue, OK_Ordinary, rbLoc);
+ }
+
// Use C++ overloaded-operator rules if either operand has record
// type. The spec says to do this if either type is *overloadable*,
// but enum types can't declare subscript operators or conversion
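A hypothetical class exercising the new MSPropertySubscriptExpr path, mirroring the MSDN example cited above (requires -fms-extensions):

struct Grid {
  __declspec(property(get = GetX, put = PutX)) int x[];
  int GetX(int a, int b);
  void PutX(int a, int b, int v);
};
void demo(Grid *p, int i) {
  i = p->x[2][3];    // rewritten as i = p->GetX(2, 3)
  p->x[2][3] = i;    // rewritten as p->PutX(2, 3, i)
}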
@@ -3991,6 +3984,139 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
}
+ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLoc, Expr *Length,
+ SourceLocation RBLoc) {
+ if (Base->getType()->isPlaceholderType() &&
+ !Base->getType()->isSpecificPlaceholderType(
+ BuiltinType::OMPArraySection)) {
+ ExprResult Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(LowerBound);
+ if (Result.isInvalid())
+ return ExprError();
+ LowerBound = Result.get();
+ }
+ if (Length && Length->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Length);
+ if (Result.isInvalid())
+ return ExprError();
+ Length = Result.get();
+ }
+
+ // Build an unanalyzed expression if either operand is type-dependent.
+ if (Base->isTypeDependent() ||
+ (LowerBound &&
+ (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
+ (Length && (Length->isTypeDependent() || Length->isValueDependent()))) {
+ return new (Context)
+ OMPArraySectionExpr(Base, LowerBound, Length, Context.DependentTy,
+ VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ }
+
+ // Perform default conversions.
+ QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base);
+ QualType ResultTy;
+ if (OriginalTy->isAnyPointerType()) {
+ ResultTy = OriginalTy->getPointeeType();
+ } else if (OriginalTy->isArrayType()) {
+ ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType();
+ } else {
+ return ExprError(
+ Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value)
+ << Base->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (LowerBound) {
+ auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(),
+ LowerBound);
+ if (Res.isInvalid())
+ return ExprError(Diag(LowerBound->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 0 << LowerBound->getSourceRange());
+ LowerBound = Res.get();
+
+ if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char)
+ << 0 << LowerBound->getSourceRange();
+ }
+ if (Length) {
+ auto Res =
+ PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length);
+ if (Res.isInvalid())
+ return ExprError(Diag(Length->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Length->getSourceRange());
+ Length = Res.get();
+
+ if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Length->getSourceRange();
+ }
+
+ // C99 6.5.2.1p1: "shall have type 'pointer to *object* type'". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultTy->isFunctionType()) {
+ Diag(Base->getExprLoc(), diag::err_omp_section_function_type)
+ << ResultTy << Base->getSourceRange();
+ return ExprError();
+ }
+
+ if (RequireCompleteType(Base->getExprLoc(), ResultTy,
+ diag::err_omp_section_incomplete_type, Base))
+ return ExprError();
+
+ if (LowerBound) {
+ llvm::APSInt LowerBoundValue;
+ if (LowerBound->EvaluateAsInt(LowerBoundValue, Context)) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // The lower-bound and length must evaluate to non-negative integers.
+ if (LowerBoundValue.isNegative()) {
+ Diag(LowerBound->getExprLoc(), diag::err_omp_section_negative)
+ << 0 << LowerBoundValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << LowerBound->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
+ if (Length) {
+ llvm::APSInt LengthValue;
+ if (Length->EvaluateAsInt(LengthValue, Context)) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // The lower-bound and length must evaluate to non-negative integers.
+ if (LengthValue.isNegative()) {
+ Diag(Length->getExprLoc(), diag::err_omp_section_negative)
+ << 1 << LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << Length->getSourceRange();
+ return ExprError();
+ }
+ }
+ } else if (ColonLoc.isValid() &&
+ (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
+ !OriginalTy->isVariableArrayType()))) {
+ // OpenMP 4.0, [2.4 Array Sections]
+ // When the size of the array dimension is not known, the length must be
+ // specified explicitly.
+ Diag(ColonLoc, diag::err_omp_section_length_undefined)
+ << (!OriginalTy.isNull() && OriginalTy->isArrayType());
+ return ExprError();
+ }
+
+ return new (Context)
+ OMPArraySectionExpr(Base, LowerBound, Length, Context.OMPArraySectionTy,
+ VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+}
+
ExprResult
Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
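Hypothetical OpenMP 4.0 array sections that ActOnOMPArraySectionExpr accepts and rejects (assuming -fopenmp and a directive that takes sections, e.g. a map clause):

void saxpy(float *x, float *y, int n) {
#pragma omp target map(to : x[0:n]) map(tofrom : y[0:n])  // [lower : length]
  for (int i = 0; i < n; ++i)
    y[i] += 2.0f * x[i];
}
// x[0:]   -> err_omp_section_length_undefined: the length is mandatory when
//            the dimension size is unknown (x is a pointer, not a sized array)
// x[-1:n] -> err_omp_section_negative for the lower bound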
@@ -4275,29 +4401,27 @@ static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
llvm::make_unique<FunctionCallCCC>(S, FuncName.getAsIdentifierInfo(),
Args.size(), ME),
Sema::CTK_ErrorRecovery)) {
- if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ if (NamedDecl *ND = Corrected.getFoundDecl()) {
if (Corrected.isOverloaded()) {
OverloadCandidateSet OCS(NameLoc, OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet::iterator Best;
- for (TypoCorrection::decl_iterator CD = Corrected.begin(),
- CDEnd = Corrected.end();
- CD != CDEnd; ++CD) {
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD))
+ for (NamedDecl *CD : Corrected) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(CD))
S.AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args,
OCS);
}
switch (OCS.BestViableFunction(S, NameLoc, Best)) {
case OR_Success:
- ND = Best->Function;
+ ND = Best->FoundDecl;
Corrected.setCorrectionDecl(ND);
break;
default:
break;
}
}
- if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
+ ND = ND->getUnderlyingDecl();
+ if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND))
return Corrected;
- }
}
}
return TypoCorrection();
@@ -4433,7 +4557,7 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
bool IsListInitialization) {
unsigned NumParams = Proto->getNumParams();
bool Invalid = false;
- unsigned ArgIx = 0;
+ size_t ArgIx = 0;
// Continue to check argument types (even if we have too few/many args).
for (unsigned i = FirstParam; i < NumParams; i++) {
QualType ProtoArgType = Proto->getParamType(i);
@@ -4503,26 +4627,25 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
// return __unknown_anytype aren't *really* variadic.
if (Proto->getReturnType() == Context.UnknownAnyTy && FDecl &&
FDecl->isExternC()) {
- for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) {
+ for (Expr *A : Args.slice(ArgIx)) {
QualType paramType; // ignored
- ExprResult arg = checkUnknownAnyArg(CallLoc, Args[i], paramType);
+ ExprResult arg = checkUnknownAnyArg(CallLoc, A, paramType);
Invalid |= arg.isInvalid();
AllArgs.push_back(arg.get());
}
// Otherwise do argument promotion, (C99 6.5.2.2p7).
} else {
- for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) {
- ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], CallType,
- FDecl);
+ for (Expr *A : Args.slice(ArgIx)) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(A, CallType, FDecl);
Invalid |= Arg.isInvalid();
AllArgs.push_back(Arg.get());
}
}
// Check for array bounds violations.
- for (unsigned i = ArgIx, e = Args.size(); i != e; ++i)
- CheckArrayAccess(Args[i]);
+ for (Expr *A : Args.slice(ArgIx))
+ CheckArrayAccess(A);
}
return Invalid;
}
@@ -4623,7 +4746,9 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
// These are always invalid as call arguments and should be reported.
case BuiltinType::BoundMember:
case BuiltinType::BuiltinFn:
+ case BuiltinType::OMPArraySection:
return true;
+
}
llvm_unreachable("bad builtin type kind");
}
@@ -4647,7 +4772,7 @@ static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
}
/// If a builtin function has a pointer argument with no explicit address
-/// space, than it should be able to accept a pointer to any address
+/// space, then it should be able to accept a pointer to any address
/// space as input. In order to do this, we need to replace the
/// standard builtin declaration with one that uses the same address space
/// as the call.
@@ -4745,7 +4870,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
// Pseudo-destructor calls should not have any arguments.
Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
<< FixItHint::CreateRemoval(
- SourceRange(ArgExprs[0]->getLocStart(),
+ SourceRange(ArgExprs.front()->getLocStart(),
ArgExprs.back()->getLocEnd()));
}
@@ -4802,14 +4927,10 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
// We aren't supposed to apply this logic for if there's an '&' involved.
if (!find.HasFormOfMemberPointer) {
OverloadExpr *ovl = find.Expression;
- if (isa<UnresolvedLookupExpr>(ovl)) {
- UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(ovl);
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs,
RParenLoc, ExecConfig);
- } else {
- return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs,
- RParenLoc);
- }
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc);
}
}
@@ -4832,7 +4953,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
FunctionDecl *FDecl = dyn_cast<FunctionDecl>(NDecl);
if (FDecl && FDecl->getBuiltinID()) {
- // Rewrite the function decl for this builtin by replacing paramaters
+ // Rewrite the function decl for this builtin by replacing parameters
// with no explicit address space with the address space of the arguments
// in ArgExprs.
if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) {
@@ -4949,7 +5070,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (!Result.isUsable()) return ExprError();
TheCall = dyn_cast<CallExpr>(Result.get());
if (!TheCall) return Result;
- Args = ArrayRef<Expr *>(TheCall->getArgs(), TheCall->getNumArgs());
+ Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
}
// Bail out early if calling a builtin with custom typechecking.
@@ -5098,8 +5219,7 @@ ExprResult
Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
SourceLocation RParenLoc, Expr *InitExpr) {
assert(Ty && "ActOnCompoundLiteral(): missing type");
- // FIXME: put back this assert when initializers are worked out.
- //assert((InitExpr != 0) && "ActOnCompoundLiteral(): missing expression");
+ assert(InitExpr && "ActOnCompoundLiteral(): missing expression");
TypeSourceInfo *TInfo;
QualType literalType = GetTypeFromParser(Ty, &TInfo);
@@ -5280,13 +5400,13 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
return CK_IntegralToFloating;
case Type::STK_IntegralComplex:
Src = ImpCastExprToType(Src.get(),
- DestTy->castAs<ComplexType>()->getElementType(),
- CK_IntegralCast);
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralCast);
return CK_IntegralRealToComplex;
case Type::STK_FloatingComplex:
Src = ImpCastExprToType(Src.get(),
- DestTy->castAs<ComplexType>()->getElementType(),
- CK_IntegralToFloating);
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralToFloating);
return CK_FloatingRealToComplex;
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
@@ -5401,36 +5521,54 @@ static bool breakDownVectorType(QualType type, uint64_t &len,
return true;
}
-static bool VectorTypesMatch(Sema &S, QualType srcTy, QualType destTy) {
+/// Are the two types lax-compatible vector types? That is, given
+/// that one of them is a vector, do they have equal storage sizes,
+/// where the storage size is the number of elements times the element
+/// size?
+///
+/// This will also return false if either of the types is neither a
+/// vector nor a real type.
+bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) {
+ assert(destTy->isVectorType() || srcTy->isVectorType());
+
+ // Disallow lax conversions between scalars and ExtVectors (these
+ // conversions are allowed for other vector types because common headers
+ // depend on them). Most scalar OP ExtVector cases are handled by the
+ // splat path anyway, which does what we want (convert, not bitcast).
+ // What this rules out for ExtVectors is crazy things like char4*float.
+ if (srcTy->isScalarType() && destTy->isExtVectorType()) return false;
+ if (destTy->isScalarType() && srcTy->isExtVectorType()) return false;
+
uint64_t srcLen, destLen;
- QualType srcElt, destElt;
- if (!breakDownVectorType(srcTy, srcLen, srcElt)) return false;
- if (!breakDownVectorType(destTy, destLen, destElt)) return false;
+ QualType srcEltTy, destEltTy;
+ if (!breakDownVectorType(srcTy, srcLen, srcEltTy)) return false;
+ if (!breakDownVectorType(destTy, destLen, destEltTy)) return false;
// ASTContext::getTypeSize will return the size rounded up to a
// power of 2, so instead of using that, we need to use the raw
// element size multiplied by the element count.
- uint64_t srcEltSize = S.Context.getTypeSize(srcElt);
- uint64_t destEltSize = S.Context.getTypeSize(destElt);
+ uint64_t srcEltSize = Context.getTypeSize(srcEltTy);
+ uint64_t destEltSize = Context.getTypeSize(destEltTy);
return (srcLen * srcEltSize == destLen * destEltSize);
}
-/// Is this a legal conversion between two known vector types?
+/// Is this a legal conversion between two types, one of which is
+/// known to be a vector type?
bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) {
assert(destTy->isVectorType() || srcTy->isVectorType());
if (!Context.getLangOpts().LaxVectorConversions)
return false;
- return VectorTypesMatch(*this, srcTy, destTy);
+ return areLaxCompatibleVectorTypes(srcTy, destTy);
}
bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind) {
assert(VectorTy->isVectorType() && "Not a vector type!");
- if (Ty->isVectorType() || Ty->isIntegerType()) {
- if (!VectorTypesMatch(*this, Ty, VectorTy))
+ if (Ty->isVectorType() || Ty->isIntegralType(Context)) {
+ if (!areLaxCompatibleVectorTypes(Ty, VectorTy))
return Diag(R.getBegin(),
Ty->isVectorType() ?
diag::err_invalid_conversion_between_vectors :
@@ -5456,7 +5594,7 @@ ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
// In OpenCL, casts between vectors of different types are not allowed.
// (See OpenCL 6.2).
if (SrcTy->isVectorType()) {
- if (!VectorTypesMatch(*this, SrcTy, DestTy)
+ if (!areLaxCompatibleVectorTypes(SrcTy, DestTy)
|| (getLangOpts().OpenCL &&
(DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) {
Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors)
@@ -6358,7 +6496,7 @@ QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
static void SuggestParentheses(Sema &Self, SourceLocation Loc,
const PartialDiagnostic &Note,
SourceRange ParenRange) {
- SourceLocation EndLoc = Self.PP.getLocForEndOfToken(ParenRange.getEnd());
+ SourceLocation EndLoc = Self.getLocForEndOfToken(ParenRange.getEnd());
if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() &&
EndLoc.isValid()) {
Self.Diag(Loc, Note)
@@ -6371,7 +6509,9 @@ static void SuggestParentheses(Sema &Self, SourceLocation Loc,
}
static bool IsArithmeticOp(BinaryOperatorKind Opc) {
- return Opc >= BO_Mul && Opc <= BO_Shr;
+ return BinaryOperator::isAdditiveOp(Opc) ||
+ BinaryOperator::isMultiplicativeOp(Opc) ||
+ BinaryOperator::isShiftOp(Opc);
}
/// IsArithmeticBinaryExpr - Returns true if E is an arithmetic binary
@@ -6417,10 +6557,6 @@ static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
return false;
}
-static bool IsLogicOp(BinaryOperatorKind Opc) {
- return (Opc >= BO_LT && Opc <= BO_NE) || (Opc >= BO_LAnd && Opc <= BO_LOr);
-}
-
/// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type
/// or is a logical expression such as (x==y) which has int type, but is
/// commonly interpreted as boolean.
@@ -6430,7 +6566,7 @@ static bool ExprLooksBoolean(Expr *E) {
if (E->getType()->isBooleanType())
return true;
if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E))
- return IsLogicOp(OP->getOpcode());
+ return OP->isComparisonOp() || OP->isLogicalOp();
if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E))
return OP->getOpcode() == UO_LNot;
if (E->getType()->isPointerType())
@@ -6753,7 +6889,7 @@ Sema::CheckAssignmentConstraints(SourceLocation Loc,
ExprResult RHSPtr = &RHSExpr;
CastKind K = CK_Invalid;
- return CheckAssignmentConstraints(LHSType, RHSPtr, K);
+ return CheckAssignmentConstraints(LHSType, RHSPtr, K, /*ConvertRHS=*/false);
}
/// CheckAssignmentConstraints (C99 6.5.16) - This routine currently
@@ -6775,7 +6911,7 @@ Sema::CheckAssignmentConstraints(SourceLocation Loc,
/// Sets 'Kind' for any result kind except Incompatible.
Sema::AssignConvertType
Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
- CastKind &Kind) {
+ CastKind &Kind, bool ConvertRHS) {
QualType RHSType = RHS.get()->getType();
QualType OrigLHSType = LHSType;
@@ -6797,7 +6933,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind);
if (result != Compatible)
return result;
- if (Kind != CK_NoOp)
+ if (Kind != CK_NoOp && ConvertRHS)
RHS = ImpCastExprToType(RHS.get(), AtomicTy->getValueType(), Kind);
Kind = CK_NonAtomicToAtomic;
return Compatible;
@@ -6827,7 +6963,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// CK_VectorSplat does T -> vector T, so first cast to the
// element type.
QualType elType = cast<ExtVectorType>(LHSType)->getElementType();
- if (elType != RHSType) {
+ if (elType != RHSType && ConvertRHS) {
Kind = PrepareScalarCast(RHS, elType);
RHS = ImpCastExprToType(RHS.get(), elType, Kind);
}
@@ -6860,7 +6996,8 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// Arithmetic conversions.
if (LHSType->isArithmeticType() && RHSType->isArithmeticType() &&
!(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) {
- Kind = PrepareScalarCast(RHS, LHSType);
+ if (ConvertRHS)
+ Kind = PrepareScalarCast(RHS, LHSType);
return Compatible;
}
@@ -6985,7 +7122,8 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// Only under strict condition T^ is compatible with an Objective-C pointer.
if (RHSType->isBlockPointerType() &&
LHSType->isBlockCompatibleObjCPointerType(Context)) {
- maybeExtendBlockObject(RHS);
+ if (ConvertRHS)
+ maybeExtendBlockObject(RHS);
Kind = CK_BlockPointerToObjCPointerCast;
return Compatible;
}
@@ -7111,9 +7249,16 @@ Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
}
Sema::AssignConvertType
-Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
+Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
bool Diagnose,
- bool DiagnoseCFAudited) {
+ bool DiagnoseCFAudited,
+ bool ConvertRHS) {
+ // If ConvertRHS is false, we want to leave the caller's RHS untouched. Sadly,
+ // we can't avoid *all* modifications at the moment, so we need somewhere to
+ // put the updated value.
+ ExprResult LocalRHS = CallerRHS;
+ ExprResult &RHS = ConvertRHS ? CallerRHS : LocalRHS;
+
if (getLangOpts().CPlusPlus) {
if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
// C++ 5.17p3: If the left operand is not of class type, the
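The CallerRHS/LocalRHS pairing above is plain reference re-seating; in miniature (a hypothetical helper):

void maybePublish(int &CallerV, bool Convert) {
  int LocalV = CallerV;                  // scratch copy
  int &V = Convert ? CallerV : LocalV;   // pick the destination once
  V = 42;                                // later code writes unconditionally;
                                         // the caller sees it only if Convert
}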
@@ -7151,6 +7296,15 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// structures.
// FIXME: We also fall through for atomics; not sure what should
// happen there, though.
+ } else if (RHS.get()->getType() == Context.OverloadTy) {
+ // As a set of extensions to C, we support overloading on functions. These
+ // functions need to be resolved here.
+ DeclAccessPair DAP;
+ if (FunctionDecl *FD = ResolveAddressOfOverloadedFunction(
+ RHS.get(), LHSType, /*Complain=*/false, DAP))
+ RHS = FixOverloadedFunctionReference(RHS.get(), DAP, FD);
+ else
+ return Incompatible;
}
// C99 6.5.16.1p1: the left operand is a pointer and the right is
@@ -7162,7 +7316,8 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
CastKind Kind;
CXXCastPath Path;
CheckPointerConversion(RHS.get(), LHSType, Kind, Path, false);
- RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
return Compatible;
}
@@ -7173,7 +7328,8 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
//
// Suppress this for references: C++ 8.5.3p5.
if (!LHSType->isReferenceType()) {
- RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ // FIXME: We potentially allocate here even if ConvertRHS is false.
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get(), Diagnose);
if (RHS.isInvalid())
return Incompatible;
}
@@ -7189,7 +7345,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
CastKind Kind = CK_Invalid;
Sema::AssignConvertType result =
- CheckAssignmentConstraints(LHSType, RHS, Kind);
+ CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS);
// C99 6.5.16.1p2: The value of the right operand is converted to the
// type of the assignment expression.
@@ -7211,7 +7367,8 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Compatible;
}
- RHS = ImpCastExprToType(E, Ty, Kind);
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(E, Ty, Kind);
}
return result;
}
@@ -7374,6 +7531,18 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
+ // OpenCL V1.1 6.2.6.p1:
+ // If the operands are of more than one vector type, then an error shall
+ // occur. Implicit conversions between vector types are not permitted, per
+ // section 6.2.1.
+ if (getLangOpts().OpenCL &&
+ RHSVecType && isa<ExtVectorType>(RHSVecType) &&
+ LHSVecType && isa<ExtVectorType>(LHSVecType)) {
+ Diag(Loc, diag::err_opencl_implicit_vector_conversion) << LHSType
+ << RHSType;
+ return QualType();
+ }
+
// Otherwise, use the generic diagnostic.
Diag(Loc, diag::err_typecheck_vector_not_convertable)
<< LHSType << RHSType
@@ -7420,6 +7589,18 @@ static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}
+static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc, bool IsDiv) {
+ // Check for division/remainder by zero.
+ llvm::APSInt RHSValue;
+ if (!RHS.get()->isValueDependent() &&
+ RHS.get()->EvaluateAsInt(RHSValue, S.Context) && RHSValue == 0)
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_remainder_division_by_zero)
+ << IsDiv << RHS.get()->getSourceRange());
+}
+
QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign, bool IsDiv) {
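Hypothetical triggers for the merged check; the single warn_remainder_division_by_zero text selects "division" or "remainder" through the IsDiv operand:

int div0(int x) { return x / 0; }   // warning: division by zero is undefined
int rem0(int x) { return x % 0; }   // warning: remainder by zero is undefined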
@@ -7438,15 +7619,8 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (compType.isNull() || !compType->isArithmeticType())
return InvalidOperands(Loc, LHS, RHS);
-
- // Check for division by zero.
- llvm::APSInt RHSValue;
- if (IsDiv && !RHS.get()->isValueDependent() &&
- RHS.get()->EvaluateAsInt(RHSValue, Context) && RHSValue == 0)
- DiagRuntimeBehavior(Loc, RHS.get(),
- PDiag(diag::warn_division_by_zero)
- << RHS.get()->getSourceRange());
-
+ if (IsDiv)
+ DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv);
return compType;
}
@@ -7470,15 +7644,7 @@ QualType Sema::CheckRemainderOperands(
if (compType.isNull() || !compType->isIntegerType())
return InvalidOperands(Loc, LHS, RHS);
-
- // Check for remainder by zero.
- llvm::APSInt RHSValue;
- if (!RHS.get()->isValueDependent() &&
- RHS.get()->EvaluateAsInt(RHSValue, Context) && RHSValue == 0)
- DiagRuntimeBehavior(Loc, RHS.get(),
- PDiag(diag::warn_remainder_by_zero)
- << RHS.get()->getSourceRange());
-
+ DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, false /* IsDiv */);
return compType;
}
@@ -7596,7 +7762,7 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
// if both are pointers check if operation is valid wrt address spaces
- if (isLHSPointer && isRHSPointer) {
+ if (S.getLangOpts().OpenCL && isLHSPointer && isRHSPointer) {
const PointerType *lhsPtr = LHSExpr->getType()->getAs<PointerType>();
const PointerType *rhsPtr = RHSExpr->getType()->getAs<PointerType>();
if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) {
@@ -7669,7 +7835,7 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
// Only print a fixit for "str" + int, not for int + "str".
if (IndexExpr == RHSExpr) {
- SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd());
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
<< FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
<< FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
@@ -7719,7 +7885,7 @@ static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
// Only print a fixit for str + char, not for char + str.
if (isa<CharacterLiteral>(RHSExpr->IgnoreImpCasts())) {
- SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd());
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
<< FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
<< FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
@@ -7739,9 +7905,10 @@ static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
<< RHSExpr->getSourceRange();
}
-QualType Sema::CheckAdditionOperands( // C99 6.5.6
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
- QualType* CompLHSTy) {
+// C99 6.5.6
+QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, BinaryOperatorKind Opc,
+ QualType* CompLHSTy) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
@@ -7917,7 +8084,7 @@ static bool isScopedEnumerationType(QualType T) {
}
static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, unsigned Opc,
+ SourceLocation Loc, BinaryOperatorKind Opc,
QualType LHSType) {
// OpenCL 6.3j: shift values are effectively % word size of LHS (more defined),
// so skip remaining warnings as we don't want to modify values within Sema.
@@ -8060,7 +8227,7 @@ static QualType checkOpenCLVectorShift(Sema &S,
// C99 6.5.7
QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, unsigned Opc,
+ SourceLocation Loc, BinaryOperatorKind Opc,
bool IsCompAssign) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
@@ -8365,9 +8532,9 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
if (BinaryOperator::isEqualityOp(Opc) &&
hasIsEqualMethod(S, LHS.get(), RHS.get())) {
SourceLocation Start = LHS.get()->getLocStart();
- SourceLocation End = S.PP.getLocForEndOfToken(RHS.get()->getLocEnd());
+ SourceLocation End = S.getLocForEndOfToken(RHS.get()->getLocEnd());
CharSourceRange OpRange =
- CharSourceRange::getCharRange(Loc, S.PP.getLocForEndOfToken(Loc));
+ CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
S.Diag(Loc, diag::note_objc_literal_comparison_isequal)
<< FixItHint::CreateInsertion(Start, Opc == BO_EQ ? "[" : "![")
@@ -8379,20 +8546,17 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
ExprResult &RHS,
SourceLocation Loc,
- unsigned OpaqueOpc) {
- // This checking requires bools.
- if (!S.getLangOpts().Bool) return;
-
+ BinaryOperatorKind Opc) {
// Check that left hand side is !something.
UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS.get()->IgnoreImpCasts());
if (!UO || UO->getOpcode() != UO_LNot) return;
// Only check if the right hand side is non-bool arithmetic type.
- if (RHS.get()->getType()->isBooleanType()) return;
+ if (RHS.get()->isKnownToHaveBooleanValue()) return;
// Make sure that the something in !something is not bool.
Expr *SubExpr = UO->getSubExpr()->IgnoreImpCasts();
- if (SubExpr->getType()->isBooleanType()) return;
+ if (SubExpr->isKnownToHaveBooleanValue()) return;
// Emit warning.
S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_comparison)
@@ -8401,7 +8565,7 @@ static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
// First note suggest !(x < y)
SourceLocation FirstOpen = SubExpr->getLocStart();
SourceLocation FirstClose = RHS.get()->getLocEnd();
- FirstClose = S.getPreprocessor().getLocForEndOfToken(FirstClose);
+ FirstClose = S.getLocForEndOfToken(FirstClose);
if (FirstClose.isInvalid())
FirstOpen = SourceLocation();
S.Diag(UO->getOperatorLoc(), diag::note_logical_not_fix)
@@ -8411,7 +8575,7 @@ static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
// Second note suggests (!x) < y
SourceLocation SecondOpen = LHS.get()->getLocStart();
SourceLocation SecondClose = LHS.get()->getLocEnd();
- SecondClose = S.getPreprocessor().getLocForEndOfToken(SecondClose);
+ SecondClose = S.getLocForEndOfToken(SecondClose);
if (SecondClose.isInvalid())
SecondOpen = SourceLocation();
S.Diag(UO->getOperatorLoc(), diag::note_logical_not_silence_with_parens)
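A minimal sketch of what this diagnoses (hypothetical user code). With the
change above, the check no longer bails out when 'bool' is unavailable, and
isKnownToHaveBooleanValue() asks whether the expression is known to yield a
0/1 value rather than whether its static type is 'bool':

    int i, j;
    if (!i < j) {}   // warn: logical not applied only to 'i'
                     // notes suggest "!(i < j)" or "(!i) < j"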
@@ -8437,12 +8601,10 @@ static ValueDecl *getCompareDecl(Expr *E) {
// C99 6.5.8, C++ [expr.rel]
QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, unsigned OpaqueOpc,
+ SourceLocation Loc, BinaryOperatorKind Opc,
bool IsRelational) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
- BinaryOperatorKind Opc = (BinaryOperatorKind) OpaqueOpc;
-
// Handle vector comparisons separately.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
@@ -8455,7 +8617,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts();
checkEnumComparison(*this, Loc, LHS.get(), RHS.get());
- diagnoseLogicalNotOnLHSofComparison(*this, LHS, RHS, Loc, OpaqueOpc);
+ diagnoseLogicalNotOnLHSofComparison(*this, LHS, RHS, Loc, Opc);
if (!LHSType->hasFloatingRepresentation() &&
!(LHSType->isBlockPointerType() && IsRelational) &&
@@ -8628,12 +8790,15 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false);
}
if (LCanPointeeTy != RCanPointeeTy) {
- const PointerType *lhsPtr = LHSType->getAs<PointerType>();
- if (!lhsPtr->isAddressSpaceOverlapping(*RHSType->getAs<PointerType>())) {
- Diag(Loc,
- diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
- << LHSType << RHSType << 0 /* comparison */
- << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ // In OpenCL, a NULL constant is address-space-agnostic, so skip the
+ // address-space overlap check when either operand is null.
+ if (getLangOpts().OpenCL && !LHSIsNull && !RHSIsNull) {
+ const PointerType *LHSPtr = LHSType->getAs<PointerType>();
+ if (!LHSPtr->isAddressSpaceOverlapping(*RHSType->getAs<PointerType>())) {
+ Diag(Loc,
+ diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSType << RHSType << 0 /* comparison */
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ }
}
unsigned AddrSpaceL = LCanPointeeTy.getAddressSpace();
unsigned AddrSpaceR = RCanPointeeTy.getAddressSpace();
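A rough OpenCL sketch of the NULL special case (hypothetical kernel code,
assuming the usual address-space qualifiers):

    __global int *g;
    __local  int *l;
    if (g == l)    ...  // error: pointers into non-overlapping address spaces
    if (g == NULL) ...  // accepted: a NULL constant skips the overlap check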
@@ -8941,9 +9106,10 @@ inline QualType Sema::CheckBitwiseOperands(
return InvalidOperands(Loc, LHS, RHS);
}
-inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc) {
-
+// C99 6.5.[13,14]
+inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
// Check vector operands differently.
if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
return CheckVectorLogicalOperands(LHS, RHS, Loc);
@@ -8972,18 +9138,14 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
<< FixItHint::CreateReplacement(SourceRange(
- Loc, Lexer::getLocForEndOfToken(Loc, 0, getSourceManager(),
- getLangOpts())),
+ Loc, getLocForEndOfToken(Loc)),
Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
<< FixItHint::CreateRemoval(
- SourceRange(
- Lexer::getLocForEndOfToken(LHS.get()->getLocEnd(),
- 0, getSourceManager(),
- getLangOpts()),
- RHS.get()->getLocEnd()));
+ SourceRange(getLocForEndOfToken(LHS.get()->getLocEnd()),
+ RHS.get()->getLocEnd()));
}
}
@@ -9161,7 +9323,7 @@ static void DiagnoseConstAssignment(Sema &S, const Expr *E,
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
// Function calls
const FunctionDecl *FD = CE->getDirectCallee();
- if (!IsTypeModifiable(FD->getReturnType(), IsDereference)) {
+ if (FD && !IsTypeModifiable(FD->getReturnType(), IsDereference)) {
if (!DiagnosticEmitted) {
S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange
<< ConstFunction << FD;
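The added null check matters for calls with no direct callee; a sketch
(hypothetical user code):

    int *(*fp)(void);
    *fp() = 0;   // CE->getDirectCallee() returns null for a call through a
                 // function pointer, so FD must be checked before use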
@@ -9510,7 +9672,9 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
return QualType();
}
// Increment of bool sets it to true, but is deprecated.
- S.Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
+ S.Diag(OpLoc, S.getLangOpts().CPlusPlus1z ? diag::ext_increment_bool
+ : diag::warn_increment_bool)
+ << Op->getSourceRange();
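What changes for user code (sketch): C++1z removes increment of bool, so
the same operation moves from a deprecation warning to an extension
diagnostic:

    bool b = false;
    ++b;   // C++14 and earlier: warning (deprecated)
           // C++1z: diagnosed as an extension (ext_increment_bool)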
} else if (S.getLangOpts().CPlusPlus && ResType->isEnumeralType()) {
// Error on enum increments and decrements in C++ mode
S.Diag(OpLoc, diag::err_increment_decrement_enum) << IsInc << ResType;
@@ -9710,6 +9874,12 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
// expressions here, but the result of one is always an lvalue anyway.
}
ValueDecl *dcl = getPrimaryDecl(op);
+
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(dcl))
+ if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ op->getLocStart()))
+ return QualType();
+
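A plausible sketch of what this rejects (hypothetical code, assuming the
availability check covers enable_if-constrained functions):

    void f(int n) __attribute__((enable_if(n > 0, "")));
    void (*p)(int) = &f;   // error: cannot take the address of 'f'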
Expr::LValueClassification lval = op->ClassifyLValue(Context);
unsigned AddressOfError = AO_No_Error;
@@ -9763,8 +9933,9 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
QualType MPTy = Context.getMemberPointerType(
op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr());
+ // Under the MS ABI, lock down the inheritance model now.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
- RequireCompleteType(OpLoc, MPTy, 0);
+ (void)isCompleteType(OpLoc, MPTy);
return MPTy;
} else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
// C99 6.5.3.2p1
@@ -9819,8 +9990,9 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
QualType MPTy = Context.getMemberPointerType(
op->getType(),
Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+ // Under the MS ABI, lock down the inheritance model now.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
- RequireCompleteType(OpLoc, MPTy, 0);
+ (void)isCompleteType(OpLoc, MPTy);
return MPTy;
}
}
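A rough sketch of why the MS ABI cares (hypothetical): the size of a
pointer-to-member depends on the class's inheritance model, so forming
&C::m has to pin the model down; isCompleteType does that without emitting
a spurious incomplete-type error the way RequireCompleteType would:

    struct C { int m; };
    auto mp = &C::m;   // under the MS ABI this locks in C's inheritance
                       // model, fixing the representation of 'int C::*'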
@@ -10121,6 +10293,20 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
return ExprError();
}
+ if (getLangOpts().OpenCL) {
+ // OpenCL C v2.0 s6.13.11.1 allows atomic variables to be initialized by
+ // the ATOMIC_VAR_INIT macro.
+ if (LHSExpr->getType()->isAtomicType() ||
+ RHSExpr->getType()->isAtomicType()) {
+ SourceRange SR(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ if (BO_Assign == Opc)
+ Diag(OpLoc, diag::err_atomic_init_constant) << SR;
+ else
+ ResultTy = InvalidOperands(OpLoc, LHS, RHS);
+ return ExprError();
+ }
+ }
+
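A sketch in OpenCL C of what this enforces (hypothetical kernel code):

    atomic_int guard = ATOMIC_VAR_INIT(0);  // OK: initialization
    guard = 1;   // error: atomics may only be initialized, not assigned
    guard + 1;   // error: invalid operands to a binary expression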
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
@@ -10211,7 +10397,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_AndAssign:
case BO_OrAssign: // fallthrough
- DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
case BO_XorAssign:
CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, true);
CompLHSTy = CompResultTy;
@@ -10238,7 +10424,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
&Context.Idents.get("object_setClass"),
SourceLocation(), LookupOrdinaryName);
if (ObjectSetClass && isa<ObjCIsaExpr>(LHS.get())) {
- SourceLocation RHSLocEnd = PP.getLocForEndOfToken(RHS.get()->getLocEnd());
+ SourceLocation RHSLocEnd = getLocForEndOfToken(RHS.get()->getLocEnd());
Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign) <<
FixItHint::CreateInsertion(LHS.get()->getLocStart(), "object_setClass(") <<
FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc), ",") <<
@@ -10274,17 +10460,17 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHSExpr);
BinaryOperator *RHSBO = dyn_cast<BinaryOperator>(RHSExpr);
- // Check that one of the sides is a comparison operator.
+ // Check that one of the sides is a comparison operator and the other isn't.
bool isLeftComp = LHSBO && LHSBO->isComparisonOp();
bool isRightComp = RHSBO && RHSBO->isComparisonOp();
- if (!isLeftComp && !isRightComp)
+ if (isLeftComp == isRightComp)
return;
// Bitwise operations are sometimes used as eager logical ops.
// Don't diagnose this.
bool isLeftBitwise = LHSBO && LHSBO->isBitwiseOp();
bool isRightBitwise = RHSBO && RHSBO->isBitwiseOp();
- if ((isLeftComp || isLeftBitwise) && (isRightComp || isRightBitwise))
+ if (isLeftBitwise || isRightBitwise)
return;
SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(),
@@ -10306,21 +10492,6 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
ParensRange);
}
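A sketch of how the isLeftComp == isRightComp test above changes behavior
(hypothetical user code):

    x & y == 0     // still warns: '==' binds tighter than '&'
    x < 1 & y > 2  // no longer warns: both sides are comparisons, so this
                   // is likely an intentional eager logical AND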
-/// \brief It accepts a '&' expr that is inside a '|' one.
-/// Emit a diagnostic together with a fixit hint that wraps the '&' expression
-/// in parentheses.
-static void
-EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc,
- BinaryOperator *Bop) {
- assert(Bop->getOpcode() == BO_And);
- Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or)
- << Bop->getSourceRange() << OpLoc;
- SuggestParentheses(Self, Bop->getOperatorLoc(),
- Self.PDiag(diag::note_precedence_silence)
- << Bop->getOpcodeStr(),
- Bop->getSourceRange());
-}
-
/// \brief It accepts a '&&' expr that is inside a '||' one.
/// Emit a diagnostic together with a fixit hint that wraps the '&&' expression
/// in parentheses.
@@ -10389,12 +10560,21 @@ static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
}
}
-/// \brief Look for '&' in the left or right hand of a '|' expr.
-static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc,
- Expr *OrArg) {
- if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrArg)) {
- if (Bop->getOpcode() == BO_And)
- return EmitDiagnosticForBitwiseAndInBitwiseOr(S, OpLoc, Bop);
+/// \brief Look for a bitwise op in the left or right hand of a bitwise op
+/// with lower precedence and emit a diagnostic together with a fixit hint
+/// that wraps the higher-precedence subexpression in parentheses.
+static void DiagnoseBitwiseOpInBitwiseOp(Sema &S, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *SubExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(SubExpr)) {
+ if (Bop->isBitwiseOp() && Bop->getOpcode() < Opc) {
+ S.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_op_in_bitwise_op)
+ << Bop->getOpcodeStr() << BinaryOperator::getOpcodeStr(Opc)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(S, Bop->getOperatorLoc(),
+ S.PDiag(diag::note_precedence_silence)
+ << Bop->getOpcodeStr(),
+ Bop->getSourceRange());
+ }
}
}
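A sketch of the generalization (hypothetical user code); the opcode
comparison relies on BinaryOperatorKind ordering '&' before '^' before '|',
matching their precedence:

    a & b | c   // still warns, suggesting (a & b) | c
    a ^ b | c   // now also warns, suggesting (a ^ b) | c
    a & b ^ c   // now also warns, suggesting (a & b) ^ c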
@@ -10449,9 +10629,10 @@ static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
DiagnoseBitwisePrecedence(Self, Opc, OpLoc, LHSExpr, RHSExpr);
// Diagnose "arg1 & arg2 | arg3"
- if (Opc == BO_Or && !OpLoc.isMacroID()/* Don't warn in macros. */) {
- DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, LHSExpr);
- DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, RHSExpr);
+ if ((Opc == BO_Or || Opc == BO_Xor) &&
+ !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, LHSExpr);
+ DiagnoseBitwiseOpInBitwiseOp(Self, Opc, OpLoc, RHSExpr);
}
// Warn about arg1 || arg2 && arg3, as GCC 4.3+ does.
@@ -10593,6 +10774,14 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
QualType resultType;
+ if (getLangOpts().OpenCL) {
+ // The only legal unary operation for atomics is '&'.
+ if (Opc != UO_AddrOf && InputExpr->getType()->isAtomicType()) {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << InputExpr->getType()
+ << Input.get()->getSourceRange());
+ }
+ }
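A sketch of the OpenCL rule (hypothetical kernel code):

    atomic_int a;
    &a;    // OK: address-of is the only unary operator allowed on an atomic
    ++a;   // error: invalid argument type to unary expression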
switch (Opc) {
case UO_PreInc:
case UO_PreDec:
@@ -10735,6 +10924,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
break;
case UO_Extension:
+ case UO_Coawait:
resultType = Input.get()->getType();
VK = Input.get()->getValueKind();
OK = Input.get()->getObjectKind();
@@ -10778,10 +10968,8 @@ static bool isQualifiedMemberAccess(Expr *E) {
if (!ULE->getQualifier())
return false;
- for (UnresolvedLookupExpr::decls_iterator D = ULE->decls_begin(),
- DEnd = ULE->decls_end();
- D != DEnd; ++D) {
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*D)) {
+ for (NamedDecl *D : ULE->decls()) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
if (Method->isInstance())
return true;
} else {
@@ -10971,8 +11159,7 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
- OffsetOfComponent *CompPtr,
- unsigned NumComponents,
+ ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc) {
QualType ArgTy = TInfo->getType();
bool Dependent = ArgTy->isDependentType();
@@ -10996,17 +11183,15 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// GCC extension, diagnose them.
// FIXME: This diagnostic isn't actually visible because the location is in
// a system header!
- if (NumComponents != 1)
+ if (Components.size() != 1)
Diag(BuiltinLoc, diag::ext_offsetof_extended_field_designator)
- << SourceRange(CompPtr[1].LocStart, CompPtr[NumComponents-1].LocEnd);
+ << SourceRange(Components[1].LocStart, Components.back().LocEnd);
bool DidWarnAboutNonPOD = false;
QualType CurrentType = ArgTy;
- typedef OffsetOfExpr::OffsetOfNode OffsetOfNode;
SmallVector<OffsetOfNode, 4> Comps;
SmallVector<Expr*, 4> Exprs;
- for (unsigned i = 0; i != NumComponents; ++i) {
- const OffsetOfComponent &OC = CompPtr[i];
+ for (const OffsetOfComponent &OC : Components) {
if (OC.isBrackets) {
// Offset of an array sub-field. TODO: Should we allow vector elements?
if (!CurrentType->isDependentType()) {
@@ -11074,7 +11259,7 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
if (!IsSafe && !DidWarnAboutNonPOD &&
DiagRuntimeBehavior(BuiltinLoc, nullptr,
PDiag(DiagID)
- << SourceRange(CompPtr[0].LocStart, OC.LocEnd)
+ << SourceRange(Components[0].LocStart, OC.LocEnd)
<< CurrentType))
DidWarnAboutNonPOD = true;
}
@@ -11113,7 +11298,8 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// If the member was found in a base class, introduce OffsetOfNodes for
// the base class indirections.
CXXBasePaths Paths;
- if (IsDerivedFrom(CurrentType, Context.getTypeDeclType(Parent), Paths)) {
+ if (IsDerivedFrom(OC.LocStart, CurrentType, Context.getTypeDeclType(Parent),
+ Paths)) {
if (Paths.getDetectedVirtual()) {
Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base)
<< MemberDecl->getDeclName()
@@ -11122,9 +11308,8 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
}
CXXBasePath &Path = Paths.front();
- for (CXXBasePath::iterator B = Path.begin(), BEnd = Path.end();
- B != BEnd; ++B)
- Comps.push_back(OffsetOfNode(B->Base));
+ for (const CXXBasePathElement &B : Path)
+ Comps.push_back(OffsetOfNode(B.Base));
}
if (IndirectMemberDecl) {
@@ -11147,8 +11332,7 @@ ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
- OffsetOfComponent *CompPtr,
- unsigned NumComponents,
+ ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc) {
TypeSourceInfo *ArgTInfo;
@@ -11159,8 +11343,7 @@ ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
if (!ArgTInfo)
ArgTInfo = Context.getTrivialTypeSourceInfo(ArgTy, TypeLoc);
- return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, CompPtr, NumComponents,
- RParenLoc);
+ return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, Components, RParenLoc);
}
@@ -11394,16 +11577,14 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// Set the captured variables on the block.
// FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
SmallVector<BlockDecl::Capture, 4> Captures;
- for (unsigned i = 0, e = BSI->Captures.size(); i != e; i++) {
- CapturingScopeInfo::Capture &Cap = BSI->Captures[i];
+ for (CapturingScopeInfo::Capture &Cap : BSI->Captures) {
if (Cap.isThisCapture())
continue;
BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
Cap.isNested(), Cap.getInitExpr());
Captures.push_back(NewCap);
}
- BSI->TheDecl->setCaptures(Context, Captures.begin(), Captures.end(),
- BSI->CXXThisCaptureIndex != 0);
+ BSI->TheDecl->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
@@ -11495,43 +11676,57 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
Expr *E, TypeSourceInfo *TInfo,
SourceLocation RPLoc) {
Expr *OrigExpr = E;
+ bool IsMS = false;
+
+ // It might be a __builtin_ms_va_list. (But don't ever mark a va_arg()
+ // as Microsoft ABI on an actual Microsoft platform, where
+ // __builtin_ms_va_list and __builtin_va_list are the same.)
+ if (!E->isTypeDependent() && Context.getTargetInfo().hasBuiltinMSVaList() &&
+ Context.getTargetInfo().getBuiltinVaListKind() != TargetInfo::CharPtrBuiltinVaList) {
+ QualType MSVaListType = Context.getBuiltinMSVaListType();
+ if (Context.hasSameType(MSVaListType, E->getType())) {
+ if (CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ IsMS = true;
+ }
+ }
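A rough sketch of the case this enables (hypothetical code on an x86-64
SysV target, where __builtin_ms_va_list differs from __builtin_va_list):

    void __attribute__((ms_abi)) f(int n, ...) {
      __builtin_ms_va_list ap;
      __builtin_ms_va_start(ap, n);
      int i = __builtin_va_arg(ap, int);  // 'ap' is recognized as MS ABI
      __builtin_ms_va_end(ap);
    }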
// Get the va_list type
QualType VaListType = Context.getBuiltinVaListType();
- if (VaListType->isArrayType()) {
- // Deal with implicit array decay; for example, on x86-64,
- // va_list is an array, but it's supposed to decay to
- // a pointer for va_arg.
- VaListType = Context.getArrayDecayedType(VaListType);
- // Make sure the input expression also decays appropriately.
- ExprResult Result = UsualUnaryConversions(E);
- if (Result.isInvalid())
- return ExprError();
- E = Result.get();
- } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) {
- // If va_list is a record type and we are compiling in C++ mode,
- // check the argument using reference binding.
- InitializedEntity Entity
- = InitializedEntity::InitializeParameter(Context,
- Context.getLValueReferenceType(VaListType), false);
- ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E);
- if (Init.isInvalid())
- return ExprError();
- E = Init.getAs<Expr>();
- } else {
- // Otherwise, the va_list argument must be an l-value because
- // it is modified by va_arg.
- if (!E->isTypeDependent() &&
- CheckForModifiableLvalue(E, BuiltinLoc, *this))
- return ExprError();
+ if (!IsMS) {
+ if (VaListType->isArrayType()) {
+ // Deal with implicit array decay; for example, on x86-64,
+ // va_list is an array, but it's supposed to decay to
+ // a pointer for va_arg.
+ VaListType = Context.getArrayDecayedType(VaListType);
+ // Make sure the input expression also decays appropriately.
+ ExprResult Result = UsualUnaryConversions(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.get();
+ } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) {
+ // If va_list is a record type and we are compiling in C++ mode,
+ // check the argument using reference binding.
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, Context.getLValueReferenceType(VaListType), false);
+ ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E);
+ if (Init.isInvalid())
+ return ExprError();
+ E = Init.getAs<Expr>();
+ } else {
+ // Otherwise, the va_list argument must be an l-value because
+ // it is modified by va_arg.
+ if (!E->isTypeDependent() &&
+ CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ }
}
- if (!E->isTypeDependent() &&
- !Context.hasSameType(VaListType, E->getType())) {
+ if (!IsMS && !E->isTypeDependent() &&
+ !Context.hasSameType(VaListType, E->getType()))
return ExprError(Diag(E->getLocStart(),
diag::err_first_argument_to_va_arg_not_of_type_va_list)
<< OrigExpr->getType() << E->getSourceRange());
- }
if (!TInfo->getType()->isDependentType()) {
if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
@@ -11573,7 +11768,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
}
QualType T = TInfo->getType().getNonLValueExprType(Context);
- return new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T);
+ return new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T, IsMS);
}
ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
@@ -11627,6 +11822,25 @@ Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) {
return true;
}
+static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
+ const Expr *SrcExpr) {
+ if (!DstType->isFunctionPointerType() ||
+ !SrcExpr->getType()->isFunctionType())
+ return false;
+
+ auto *DRE = dyn_cast<DeclRefExpr>(SrcExpr->IgnoreParenImpCasts());
+ if (!DRE)
+ return false;
+
+ auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FD)
+ return false;
+
+ return !S.checkAddressOfFunctionIsAvailable(FD,
+ /*Complain=*/true,
+ SrcExpr->getLocStart());
+}
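A plausible sketch of what this improves (hypothetical code, assuming the
enable_if-style availability check): instead of a generic incompatible-types
error, the address-of diagnostic explains why the function is unavailable:

    void g(int n) __attribute__((enable_if(n == 0, "")));
    void (*q)(int) = g;   // diagnosed via checkAddressOfFunctionIsAvailable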
+
bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
@@ -11759,6 +11973,12 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
DiagKind = diag::err_arc_weak_unavailable_assign;
break;
case Incompatible:
+ if (maybeDiagnoseAssignmentToFunction(*this, DstType, SrcExpr)) {
+ if (Complained)
+ *Complained = true;
+ return true;
+ }
+
DiagKind = diag::err_typecheck_convert_incompatible;
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
@@ -11797,9 +12017,8 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
// If we can fix the conversion, suggest the FixIts.
assert(ConvHints.isNull() || Hint.isNull());
if (!ConvHints.isNull()) {
- for (std::vector<FixItHint>::iterator HI = ConvHints.Hints.begin(),
- HE = ConvHints.Hints.end(); HI != HE; ++HI)
- FDiag << *HI;
+ for (FixItHint &H : ConvHints.Hints)
+ FDiag << H;
} else {
FDiag << Hint;
}
@@ -11816,7 +12035,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (SecondType == Context.OverloadTy)
NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression,
- FirstType);
+ FirstType, /*TakingAddress=*/true);
if (CheckInferredResultType)
EmitRelatedResultTypeNote(SrcExpr);
@@ -11974,16 +12193,16 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
if (!Folded || !AllowFold) {
if (!Diagnoser.Suppress) {
Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange());
- for (unsigned I = 0, N = Notes.size(); I != N; ++I)
- Diag(Notes[I].first, Notes[I].second);
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
}
return ExprError();
}
Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange());
- for (unsigned I = 0, N = Notes.size(); I != N; ++I)
- Diag(Notes[I].first, Notes[I].second);
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
if (Result)
*Result = EvalResult.Val.getInt();
@@ -12417,10 +12636,15 @@ static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDec
// Compute the type of an expression that refers to this variable.
DeclRefType = CaptureType.getNonReferenceType();
-
+
+ // Similarly to mutable captures in lambda, all the OpenMP captures by copy
+ // are mutable in the sense that user can change their value - they are
+ // private instances of the captured declarations.
const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var);
if (Cap.isCopyCapture() &&
- !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable))
+ !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable) &&
+ !(isa<CapturedRegionScopeInfo>(CSI) &&
+ cast<CapturedRegionScopeInfo>(CSI)->CapRegionKind == CR_OpenMP))
DeclRefType.addConst();
return true;
}
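A sketch of the OpenMP side of this rule (hypothetical user code, treating
a by-copy capture as a firstprivate-style private instance):

    int x = 0;
    #pragma omp parallel firstprivate(x)
    { x = 1; }   // OK: the by-copy capture is a private instance, so no
                 // 'const' is added to its type in the region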
@@ -12608,9 +12832,17 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
// By default, capture variables by reference.
bool ByRef = true;
// Using an LValue reference type is consistent with Lambdas (see below).
- if (S.getLangOpts().OpenMP && S.IsOpenMPCapturedVar(Var))
- DeclRefType = DeclRefType.getUnqualifiedType();
- CaptureType = S.Context.getLValueReferenceType(DeclRefType);
+ if (S.getLangOpts().OpenMP) {
+ ByRef = S.IsOpenMPCapturedByRef(Var, RSI);
+ if (S.IsOpenMPCapturedVar(Var))
+ DeclRefType = DeclRefType.getUnqualifiedType();
+ }
+
+ if (ByRef)
+ CaptureType = S.Context.getLValueReferenceType(DeclRefType);
+ else
+ CaptureType = DeclRefType;
+
Expr *CopyExpr = nullptr;
if (BuildAndDiagnose) {
// The current implementation assumes that all variables are captured
@@ -12836,21 +13068,6 @@ bool Sema::tryCaptureVariable(
if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType,
DeclRefType))
break;
- if (getLangOpts().OpenMP) {
- if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
- // OpenMP private variables should not be captured in outer scope, so
- // just break here.
- if (RSI->CapRegionKind == CR_OpenMP) {
- if (isOpenMPPrivateVar(Var, OpenMPLevel)) {
- Nested = true;
- DeclRefType = DeclRefType.getUnqualifiedType();
- CaptureType = Context.getLValueReferenceType(DeclRefType);
- break;
- }
- ++OpenMPLevel;
- }
- }
- }
// If we are instantiating a generic lambda call operator body,
// we do not want to capture new variables. What was captured
// during either a lambdas transformation or initial parsing
@@ -12996,6 +13213,29 @@ bool Sema::tryCaptureVariable(
} while (!QTy.isNull() && QTy->isVariablyModifiedType());
}
+ if (getLangOpts().OpenMP) {
+ if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
+ // OpenMP private variables should not be captured in outer scope, so
+ // just break here. Similarly, global variables that are captured in a
+ // target region should not be captured outside the scope of the region.
+ if (RSI->CapRegionKind == CR_OpenMP) {
+ auto isTargetCap = isOpenMPTargetCapturedVar(Var, OpenMPLevel);
+ // When we detect target captures we are looking from inside the
+ // target region, so we need to propagate the capture from the
+ // enclosing region; the capture is therefore not initially nested.
+ if (isTargetCap)
+ FunctionScopesIndex--;
+
+ if (isTargetCap || isOpenMPPrivateVar(Var, OpenMPLevel)) {
+ Nested = !isTargetCap;
+ DeclRefType = DeclRefType.getUnqualifiedType();
+ CaptureType = Context.getLValueReferenceType(DeclRefType);
+ break;
+ }
+ ++OpenMPLevel;
+ }
+ }
+ }
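A rough sketch of the intent (hypothetical user code): a global referenced
inside a target region is captured by the target region itself and should
not escape into the enclosing scopes:

    int g;
    void h() {
    #pragma omp target
      { g += 1; }   // 'g' is captured by the target region only
    }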
if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) {
// No capture-default, and this is not an explicit capture
// so cannot capture this variable.
@@ -13152,15 +13392,13 @@ ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
}
void Sema::CleanupVarDeclMarking() {
- for (llvm::SmallPtrSetIterator<Expr*> i = MaybeODRUseExprs.begin(),
- e = MaybeODRUseExprs.end();
- i != e; ++i) {
+ for (Expr *E : MaybeODRUseExprs) {
VarDecl *Var;
SourceLocation Loc;
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*i)) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
Var = cast<VarDecl>(DRE->getDecl());
Loc = DRE->getLocation();
- } else if (MemberExpr *ME = dyn_cast<MemberExpr>(*i)) {
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
Var = cast<VarDecl>(ME->getMemberDecl());
Loc = ME->getMemberLoc();
} else {
@@ -13217,7 +13455,7 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
if (!isTemplateInstantiation(TSK))
- return;
+ return;
// Instantiate, but do not mark as odr-used, variable templates.
MarkODRUsed = false;
@@ -13316,7 +13554,8 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
if (!MD)
return;
// Only attempt to devirtualize if this is truly a virtual call.
- bool IsVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+ bool IsVirtualCall = MD->isVirtual() &&
+ ME->performsVirtualDispatch(SemaRef.getLangOpts());
if (!IsVirtualCall)
return;
const Expr *Base = ME->getBase();
@@ -13350,7 +13589,7 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
// expression, is odr-used, unless it is a pure virtual function and its
// name is not explicitly qualified.
bool OdrUse = true;
- if (!E->hasQualifier()) {
+ if (E->performsVirtualDispatch(getLangOpts())) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getMemberDecl()))
if (Method->isPure())
OdrUse = false;
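A sketch of the distinction performsVirtualDispatch captures (hypothetical;
notably, -fapple-kext keeps vtable dispatch even for qualified calls):

    struct B { virtual void f(); };
    void call(B *p) { p->B::f(); }  // ordinarily a direct, devirtualized
                                    // call; under -fapple-kext it still
                                    // goes through the vtable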
@@ -13633,7 +13872,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
Diag(Loc, diagnostic) << E->getSourceRange();
SourceLocation Open = E->getLocStart();
- SourceLocation Close = PP.getLocForEndOfToken(E->getSourceRange().getEnd());
+ SourceLocation Close = getLocForEndOfToken(E->getSourceRange().getEnd());
Diag(Loc, diag::note_condition_assign_silence)
<< FixItHint::CreateInsertion(Open, "(")
<< FixItHint::CreateInsertion(Close, ")");
@@ -14309,6 +14548,11 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
return ExprError();
}
+ // Expressions of unknown type.
+ case BuiltinType::OMPArraySection:
+ Diag(E->getLocStart(), diag::err_omp_array_section_use);
+ return ExprError();
+
// Everything else should be impossible.
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id:
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 9ad5aa59b68a..2ad595f3a814 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -1031,6 +1031,11 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc);
}
+ // C++14 [expr.type.conv]p2: The expression T(), where T is a
+ // simple-type-specifier or typename-specifier for a non-array complete
+ // object type or the (possibly cv-qualified) void type, creates a prvalue
+ // of the specified type, whose value is that produced by value-initializing
+ // an object of type T.
QualType ElemTy = Ty;
if (Ty->isArrayType()) {
if (!ListInitialization)
@@ -1039,6 +1044,10 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
ElemTy = Context.getBaseElementType(Ty);
}
+ if (!ListInitialization && Ty->isFunctionType())
+ return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_function_type)
+ << FullRange);
+
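A sketch of what the new check rejects (hypothetical user code):

    using F = void();
    F();     // error: a function type cannot be value-initialized
    int();   // OK: prvalue of type 'int', value-initialized to 0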
if (!Ty->isVoidType() &&
RequireCompleteType(TyBeginLoc, ElemTy,
diag::err_invalid_incomplete_type_use, FullRange))
@@ -2256,6 +2265,9 @@ FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
"found an unexpected usual deallocation function");
}
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads)
+ EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+
assert(Matches.size() == 1 &&
"unexpectedly have multiple usual deallocation functions");
return Matches.front();
@@ -2287,6 +2299,9 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Matches.push_back(F.getPair());
}
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads)
+ EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+
// There's exactly one suitable operator; pick it.
if (Matches.size() == 1) {
Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl());
@@ -2702,6 +2717,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
return ExprError(Diag(StartLoc, diag::err_delete_operand)
<< Type << Ex.get()->getSourceRange());
} else if (!Pointee->isDependentType()) {
+ // FIXME: This can result in errors if the definition was imported from a
+ // module but is hidden.
if (!RequireCompleteType(StartLoc, Pointee,
diag::warn_delete_incomplete, Ex.get())) {
if (const RecordType *RT = PointeeElem->getAs<RecordType>())
@@ -2712,7 +2729,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (Pointee->isArrayType() && !ArrayForm) {
Diag(StartLoc, diag::warn_delete_array_type)
<< Type << Ex.get()->getSourceRange()
- << FixItHint::CreateInsertion(PP.getLocForEndOfToken(StartLoc), "[]");
+ << FixItHint::CreateInsertion(getLocForEndOfToken(StartLoc), "[]");
ArrayForm = true;
}
@@ -2777,7 +2794,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (!OperatorDelete)
// Look for a global declaration.
OperatorDelete = FindUsualDeallocationFunction(
- StartLoc, !RequireCompleteType(StartLoc, Pointee, 0) &&
+ StartLoc, isCompleteType(StartLoc, Pointee) &&
(!ArrayForm || UsualArrayDeleteWantsSize ||
Pointee.isDestructedType()),
DeleteName);
@@ -3103,6 +3120,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ToType = ToAtomic->getValueType();
}
+ QualType InitialFromType = FromType;
// Perform the first implicit conversion.
switch (SCS.First) {
case ICK_Identity:
@@ -3293,8 +3311,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// We may not have been able to figure out what this member pointer resolved
// to up until this exact point. Attempt to lock-in it's inheritance model.
if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- RequireCompleteType(From->getExprLoc(), From->getType(), 0);
- RequireCompleteType(From->getExprLoc(), ToType, 0);
+ (void)isCompleteType(From->getExprLoc(), From->getType());
+ (void)isCompleteType(From->getExprLoc(), ToType);
}
From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
@@ -3432,6 +3450,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Function_To_Pointer:
case ICK_Qualification:
case ICK_Num_Conversion_Kinds:
+ case ICK_C_Only_Conversion:
llvm_unreachable("Improper second standard conversion");
}
@@ -3472,6 +3491,12 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
VK_RValue, nullptr, CCK).get();
}
+ // If this conversion sequence succeeded and involved implicitly converting a
+ // _Nullable type to a _Nonnull one, complain.
+ if (CCK == CCK_ImplicitConversion)
+ diagnoseNullableToNonnullConversion(ToType, InitialFromType,
+ From->getLocStart());
+
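A sketch of the conversion this complains about (hypothetical code using
the nullability qualifiers):

    int * _Nullable p;
    int * _Nonnull  q = p;  // warning: implicit conversion from a nullable
                            // pointer to a non-null pointer type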
return From;
}
@@ -3534,27 +3559,43 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsVolatile:
case UTT_IsSigned:
case UTT_IsUnsigned:
+
+ // This type trait always returns false, checking the type is moot.
+ case UTT_IsInterfaceClass:
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // If T is a non-union class type, T shall be a complete type.
+ case UTT_IsEmpty:
+ case UTT_IsPolymorphic:
+ case UTT_IsAbstract:
+ if (const auto *RD = ArgTy->getAsCXXRecordDecl())
+ if (!RD->isUnion())
+ return !S.RequireCompleteType(
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // If T is a class type, T shall be a complete type.
+ case UTT_IsFinal:
+ case UTT_IsSealed:
+ if (ArgTy->getAsCXXRecordDecl())
+ return !S.RequireCompleteType(
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
return true;
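A sketch of the completeness rule above (hypothetical user code): only
non-union class types must be complete, so an incomplete union is still
acceptable:

    struct S;        // incomplete class
    __is_empty(S);   // error: incomplete type used in type trait
    union U;         // incomplete union
    __is_empty(U);   // OK: unions are never empty, so no definition needed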
- // C++0x [meta.unary.prop] Table 49 requires the following traits to be
- // applied to a complete type.
+ // C++0x [meta.unary.prop] Table 49 requires the following traits to be
+ // applied to a complete type.
case UTT_IsTrivial:
case UTT_IsTriviallyCopyable:
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
- case UTT_IsEmpty:
- case UTT_IsPolymorphic:
- case UTT_IsAbstract:
- case UTT_IsInterfaceClass:
+
case UTT_IsDestructible:
case UTT_IsNothrowDestructible:
// Fall-through
- // These traits require a complete type.
- case UTT_IsFinal:
- case UTT_IsSealed:
-
// These trait expressions are designed to help implement predicates in
// [meta.unary.prop] despite not being named the same. They are specified
// by both GCC and the Embarcadero C++ compiler, and require the complete
@@ -3698,39 +3739,36 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsVolatile:
return T.isVolatileQualified();
case UTT_IsTrivial:
- return T.isTrivialType(Self.Context);
+ return T.isTrivialType(C);
case UTT_IsTriviallyCopyable:
- return T.isTriviallyCopyableType(Self.Context);
+ return T.isTriviallyCopyableType(C);
case UTT_IsStandardLayout:
return T->isStandardLayoutType();
case UTT_IsPOD:
- return T.isPODType(Self.Context);
+ return T.isPODType(C);
case UTT_IsLiteral:
- return T->isLiteralType(Self.Context);
+ return T->isLiteralType(C);
case UTT_IsEmpty:
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return !RD->isUnion() && RD->isEmpty();
return false;
case UTT_IsPolymorphic:
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
- return RD->isPolymorphic();
+ return !RD->isUnion() && RD->isPolymorphic();
return false;
case UTT_IsAbstract:
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
- return RD->isAbstract();
+ return !RD->isUnion() && RD->isAbstract();
return false;
+ // __is_interface_class only returns true when CL is invoked in /CLR mode,
+ // and even then only when it is used with the 'interface struct ...' syntax.
+ // Clang doesn't support /CLR, which makes this type trait moot.
case UTT_IsInterfaceClass:
- if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
- return RD->isInterface();
return false;
case UTT_IsFinal:
- if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
- return RD->hasAttr<FinalAttr>();
- return false;
case UTT_IsSealed:
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
- if (FinalAttr *FA = RD->getAttr<FinalAttr>())
- return FA->isSpelledAsSealed();
+ return RD->hasAttr<FinalAttr>();
return false;
case UTT_IsSigned:
return T->isSignedIntegerType();
@@ -3757,7 +3795,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// If __is_pod (type) is true then the trait is true, else if type is
// a cv class or union type (or array thereof) with a trivial default
// constructor ([class.ctor]) then the trait is true, else it is false.
- if (T.isPODType(Self.Context))
+ if (T.isPODType(C))
return true;
if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
return RD->hasTrivialDefaultConstructor() &&
@@ -3767,7 +3805,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// This trait is implemented by MSVC 2012 and needed to parse the
// standard library headers. Specifically this is used as the logic
// behind std::is_trivially_move_constructible (20.9.4.3).
- if (T.isPODType(Self.Context))
+ if (T.isPODType(C))
return true;
if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
return RD->hasTrivialMoveConstructor() && !RD->hasNonTrivialMoveConstructor();
@@ -3778,7 +3816,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// the trait is true, else if type is a cv class or union type
// with a trivial copy constructor ([class.copy]) then the trait
// is true, else it is false.
- if (T.isPODType(Self.Context) || T->isReferenceType())
+ if (T.isPODType(C) || T->isReferenceType())
return true;
if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return RD->hasTrivialCopyConstructor() &&
@@ -3788,7 +3826,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// This trait is implemented by MSVC 2012 and needed to parse the
// standard library headers. Specifically it is used as the logic
// behind std::is_trivially_move_assignable (20.9.4.3)
- if (T.isPODType(Self.Context))
+ if (T.isPODType(C))
return true;
if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
return RD->hasTrivialMoveAssignment() && !RD->hasNonTrivialMoveAssignment();
@@ -3808,7 +3846,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (T.isConstQualified())
return false;
- if (T.isPODType(Self.Context))
+ if (T.isPODType(C))
return true;
if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return RD->hasTrivialCopyAssignment() &&
@@ -3816,8 +3854,47 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
case UTT_IsDestructible:
case UTT_IsNothrowDestructible:
- // FIXME: Implement UTT_IsDestructible and UTT_IsNothrowDestructible.
- // For now, let's fall through.
+ // C++14 [meta.unary.prop]:
+ // For reference types, is_destructible<T>::value is true.
+ if (T->isReferenceType())
+ return true;
+
+ // Objective-C++ ARC: autorelease types don't require destruction.
+ if (T->isObjCLifetimeType() &&
+ T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
+ return true;
+
+ // C++14 [meta.unary.prop]:
+ // For incomplete types and function types, is_destructible<T>::value is
+ // false.
+ if (T->isIncompleteType() || T->isFunctionType())
+ return false;
+
+ // C++14 [meta.unary.prop]:
+ // For object types and given U equal to remove_all_extents_t<T>, if the
+ // expression std::declval<U&>().~U() is well-formed when treated as an
+ // unevaluated operand (Clause 5), then is_destructible<T>::value is true
+ if (auto *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) {
+ CXXDestructorDecl *Destructor = Self.LookupDestructor(RD);
+ if (!Destructor)
+ return false;
+ // C++14 [dcl.fct.def.delete]p2:
+ // A program that refers to a deleted function implicitly or
+ // explicitly, other than to declare it, is ill-formed.
+ if (Destructor->isDeleted())
+ return false;
+ if (C.getLangOpts().AccessControl && Destructor->getAccess() != AS_public)
+ return false;
+ if (UTT == UTT_IsNothrowDestructible) {
+ const FunctionProtoType *CPT =
+ Destructor->getType()->getAs<FunctionProtoType>();
+ CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
+ if (!CPT || !CPT->isNothrow(C))
+ return false;
+ }
+ }
+ return true;
+
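A few sketch cases matching the logic above (hypothetical user code):

    static_assert(__is_destructible(int&), "");      // reference: true
    static_assert(!__is_destructible(void()), "");   // function type: false
    struct D { ~D() = delete; };
    static_assert(!__is_destructible(D), "");        // deleted dtor: false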
case UTT_HasTrivialDestructor:
// http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
// If __is_pod (type) is true or type is a reference type
@@ -3825,7 +3902,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// type (or array thereof) with a trivial destructor
// ([class.dtor]) then the trait is true, else it is
// false.
- if (T.isPODType(Self.Context) || T->isReferenceType())
+ if (T.isPODType(C) || T->isReferenceType())
return true;
// Objective-C++ ARC: autorelease types don't require destruction.
@@ -3849,7 +3926,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
if (T->isReferenceType())
return false;
- if (T.isPODType(Self.Context) || T->isObjCLifetimeType())
+ if (T.isPODType(C) || T->isObjCLifetimeType())
return true;
if (const RecordType *RT = T->getAs<RecordType>())
@@ -3862,7 +3939,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// This trait is implemented by MSVC 2012 and needed to parse the
// standard library headers. Specifically this is used as the logic
// behind std::is_nothrow_move_assignable (20.9.4.3).
- if (T.isPODType(Self.Context))
+ if (T.isPODType(C))
return true;
if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>())
@@ -3886,15 +3963,13 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
bool FoundConstructor = false;
unsigned FoundTQs;
- DeclContext::lookup_result R = Self.LookupConstructors(RD);
- for (DeclContext::lookup_iterator Con = R.begin(),
- ConEnd = R.end(); Con != ConEnd; ++Con) {
+ for (const auto *ND : Self.LookupConstructors(RD)) {
// A template constructor is never a copy constructor.
// FIXME: However, it may actually be selected at the actual overload
// resolution point.
- if (isa<FunctionTemplateDecl>(*Con))
+ if (isa<FunctionTemplateDecl>(ND))
continue;
- CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
if (Constructor->isCopyConstructor(FoundTQs)) {
FoundConstructor = true;
const FunctionProtoType *CPT
@@ -3904,7 +3979,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
// TODO: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
- if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 1)
+ if (!CPT->isNothrow(C) || CPT->getNumParams() > 1)
return false;
}
}
@@ -3926,13 +4001,11 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return true;
bool FoundConstructor = false;
- DeclContext::lookup_result R = Self.LookupConstructors(RD);
- for (DeclContext::lookup_iterator Con = R.begin(),
- ConEnd = R.end(); Con != ConEnd; ++Con) {
+ for (const auto *ND : Self.LookupConstructors(RD)) {
// FIXME: In C++0x, a constructor template can be a default constructor.
- if (isa<FunctionTemplateDecl>(*Con))
+ if (isa<FunctionTemplateDecl>(ND))
continue;
- CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
if (Constructor->isDefaultConstructor()) {
FoundConstructor = true;
const FunctionProtoType *CPT
@@ -3942,7 +4015,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
// FIXME: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
- if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 0)
+ if (!CPT->isNothrow(C) || CPT->getNumParams() > 0)
return false;
}
}
@@ -4023,8 +4096,8 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
// Precondition: T and all types in the parameter pack Args shall be
// complete types, (possibly cv-qualified) void, or arrays of
// unknown bound.
- for (unsigned I = 0, N = Args.size(); I != N; ++I) {
- QualType ArgTy = Args[I]->getType();
+ for (const auto *TSI : Args) {
+ QualType ArgTy = TSI->getType();
if (ArgTy->isVoidType() || ArgTy->isIncompleteArrayType())
continue;
@@ -4033,12 +4106,13 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
return false;
}
- // Make sure the first argument is a complete type.
- if (Args[0]->getType()->isIncompleteType())
+ // Make sure the first argument is not incomplete nor a function type.
+ QualType T = Args[0]->getType();
+ if (T->isIncompleteType() || T->isFunctionType())
return false;
// Make sure the first argument is not an abstract type.
- CXXRecordDecl *RD = Args[0]->getType()->getAsCXXRecordDecl();
+ CXXRecordDecl *RD = T->getAsCXXRecordDecl();
if (RD && RD->isAbstract())
return false;
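A sketch of the strengthened precondition (hypothetical user code):

    __is_trivially_constructible(void());  // now false: function type
    __is_trivially_constructible(int[]);   // false: array of unknown bound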
@@ -4046,13 +4120,13 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
SmallVector<Expr *, 2> ArgExprs;
ArgExprs.reserve(Args.size() - 1);
for (unsigned I = 1, N = Args.size(); I != N; ++I) {
- QualType T = Args[I]->getType();
- if (T->isObjectType() || T->isFunctionType())
- T = S.Context.getRValueReferenceType(T);
+ QualType ArgTy = Args[I]->getType();
+ if (ArgTy->isObjectType() || ArgTy->isFunctionType())
+ ArgTy = S.Context.getRValueReferenceType(ArgTy);
OpaqueArgExprs.push_back(
- OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
- T.getNonLValueExprType(S.Context),
- Expr::getValueKindForType(T)));
+ OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ ArgTy.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(ArgTy)));
}
for (Expr &E : OpaqueArgExprs)
ArgExprs.push_back(&E);
@@ -4083,7 +4157,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
// Under Objective-C ARC, if the destination has non-trivial Objective-C
// lifetime, this is a non-trivial construction.
if (S.getLangOpts().ObjCAutoRefCount &&
- hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType()))
+ hasNontrivialObjCLifetime(T.getNonReferenceType()))
return false;
// The initialization succeeded; now make sure there are no non-trivial
@@ -4219,8 +4293,7 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
return LhsT->isVoidType();
// A function definition requires a complete, non-abstract return type.
- if (Self.RequireCompleteType(KeyLoc, RhsT, 0) ||
- Self.RequireNonAbstractType(KeyLoc, RhsT, 0))
+ if (!Self.isCompleteType(KeyLoc, RhsT) || Self.isAbstractType(KeyLoc, RhsT))
return false;
// Compute the result of add_rvalue_reference.
@@ -4506,7 +4579,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
- if (!IsDerivedFrom(LHSType, Class)) {
+ if (!IsDerivedFrom(Loc, LHSType, Class)) {
Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
<< (int)isIndirect << LHS.get()->getType();
return QualType();
@@ -4634,9 +4707,9 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
const RecordType *FRec = FTy->getAs<RecordType>();
const RecordType *TRec = TTy->getAs<RecordType>();
bool FDerivedFromT = FRec && TRec && FRec != TRec &&
- Self.IsDerivedFrom(FTy, TTy);
- if (FRec && TRec &&
- (FRec == TRec || FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) {
+ Self.IsDerivedFrom(QuestionLoc, FTy, TTy);
+ if (FRec && TRec && (FRec == TRec || FDerivedFromT ||
+ Self.IsDerivedFrom(QuestionLoc, TTy, FTy))) {
// E1 can be converted to match E2 if the class of T2 is the
// same type as, or a base class of, the class of T1, and
// [cv2 > cv1].
@@ -5671,9 +5744,14 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
//
// This also indicates that we could be parsing a pseudo-destructor-name.
// Note that Objective-C class and object types can be pseudo-destructor
- // expressions or normal member (ivar or property) access expressions.
+ // expressions or normal member (ivar or property) access expressions, and
+ // it's legal for the type to be incomplete if this is a pseudo-destructor
+ // call. We'll do more incomplete-type checks later in the lookup process,
+ // so just skip this check for ObjC types.
if (BaseType->isObjCObjectOrInterfaceType()) {
+ ObjectType = ParsedType::make(BaseType);
MayBePseudoDestructor = true;
+ return Base;
} else if (!BaseType->isRecordType()) {
ObjectType = ParsedType();
MayBePseudoDestructor = true;
@@ -6331,7 +6409,7 @@ static ExprResult attemptRecovery(Sema &SemaRef,
else if (SS && !TC.WillReplaceSpecifier())
NewSS = *SS;
- if (auto *ND = TC.getCorrectionDecl()) {
+ if (auto *ND = TC.getFoundDecl()) {
R.setLookupName(ND->getDeclName());
R.addDecl(ND);
if (ND->isCXXClassMember()) {
@@ -6364,7 +6442,7 @@ static ExprResult attemptRecovery(Sema &SemaRef,
if (MightBeImplicitMember)
return SemaRef.BuildPossibleImplicitMemberExpr(
NewSS, /*TemplateKWLoc*/ SourceLocation(), R,
- /*TemplateArgs*/ nullptr);
+ /*TemplateArgs*/ nullptr, /*S*/ nullptr);
} else if (auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
return SemaRef.LookupInObjCMethod(R, Consumer.getScope(),
Ivar->getIdentifier());
@@ -6452,9 +6530,9 @@ class TransformTypos : public TreeTransform<TransformTypos> {
if (!E)
return nullptr;
if (auto *DRE = dyn_cast<DeclRefExpr>(E))
- return DRE->getDecl();
+ return DRE->getFoundDecl();
if (auto *ME = dyn_cast<MemberExpr>(E))
- return ME->getMemberDecl();
+ return ME->getFoundDecl();
// FIXME: Add any other expr types that could be seen by the delayed typo
// correction TreeTransform for which the corresponding TypoCorrection could
// contain multiple decls.
@@ -6494,6 +6572,8 @@ public:
ExprResult TransformLambdaExpr(LambdaExpr *E) { return Owned(E); }
+ ExprResult TransformBlockExpr(BlockExpr *E) { return Owned(E); }
+
ExprResult Transform(Expr *E) {
ExprResult Res;
while (true) {
@@ -6557,7 +6637,7 @@ public:
// For the first TypoExpr and an uncached TypoExpr, find the next likely
// typo correction and return it.
while (TypoCorrection TC = State.Consumer->getNextCorrection()) {
- if (InitDecl && TC.getCorrectionDecl() == InitDecl)
+ if (InitDecl && TC.getFoundDecl() == InitDecl)
continue;
ExprResult NE = State.RecoveryHandler ?
State.RecoveryHandler(SemaRef, E, TC) :
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index a9f1919e18a8..9c345f8a69a3 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -27,18 +27,15 @@ using namespace clang;
using namespace sema;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> BaseSet;
-static bool BaseIsNotInSet(const CXXRecordDecl *Base, void *BasesPtr) {
- const BaseSet &Bases = *reinterpret_cast<const BaseSet*>(BasesPtr);
- return !Bases.count(Base->getCanonicalDecl());
-}
/// Determines if the given class is provably not derived from all of
/// the prospective base classes.
static bool isProvablyNotDerivedFrom(Sema &SemaRef, CXXRecordDecl *Record,
const BaseSet &Bases) {
- void *BasesPtr = const_cast<void*>(reinterpret_cast<const void*>(&Bases));
- return BaseIsNotInSet(Record, BasesPtr) &&
- Record->forallBases(BaseIsNotInSet, BasesPtr);
+ auto BaseIsNotInSet = [&Bases](const CXXRecordDecl *Base) {
+ return !Bases.count(Base->getCanonicalDecl());
+ };
+ return BaseIsNotInSet(Record) && Record->forallBases(BaseIsNotInSet);
}
enum IMAKind {
@@ -105,8 +102,9 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
bool hasNonInstance = false;
bool isField = false;
BaseSet Classes;
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
- NamedDecl *D = *I;
+ for (NamedDecl *D : R) {
+ // Look through any using decls.
+ D = D->getUnderlyingDecl();
if (D->isCXXInstanceMember()) {
isField |= isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) ||
@@ -114,8 +112,7 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
Classes.insert(R->getCanonicalDecl());
- }
- else
+ } else
hasNonInstance = true;
}
@@ -237,15 +234,17 @@ ExprResult
Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S) {
switch (ClassifyImplicitMemberAccess(*this, R)) {
case IMA_Instance:
- return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true);
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true, S);
case IMA_Mixed:
case IMA_Mixed_Unrelated:
case IMA_Unresolved:
- return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false);
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false,
+ S);
case IMA_Field_Uneval_Context:
Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
@@ -633,6 +632,16 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
DeclarationName Typo = R.getLookupName();
SourceLocation TypoLoc = R.getNameLoc();
+
+ struct QueryState {
+ Sema &SemaRef;
+ DeclarationNameInfo NameInfo;
+ Sema::LookupNameKind LookupKind;
+ Sema::RedeclarationKind Redecl;
+ };
+ QueryState Q = {R.getSema(), R.getLookupNameInfo(), R.getLookupKind(),
+ R.isForRedeclaration() ? Sema::ForRedeclaration
+ : Sema::NotForRedeclaration};
TE = SemaRef.CorrectTypoDelayed(
R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS,
llvm::make_unique<RecordMemberExprValidatorCCC>(RTy),
@@ -651,6 +660,7 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
}
},
[=](Sema &SemaRef, TypoExpr *TE, TypoCorrection TC) mutable {
+ LookupResult R(Q.SemaRef, Q.NameInfo, Q.LookupKind, Q.Redecl);
R.clear(); // Ensure there's no decls lingering in the shared state.
R.suppressDiagnostics();
R.setLookupName(TC.getCorrection());
@@ -659,7 +669,7 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
R.resolveKind();
return SemaRef.BuildMemberReferenceExpr(
BaseExpr, BaseExpr->getType(), OpLoc, IsArrow, SS, SourceLocation(),
- nullptr, R, nullptr);
+ nullptr, R, nullptr, nullptr);
},
Sema::CTK_ErrorRecovery, DC);
@@ -679,6 +689,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs) {
if (BaseType->isDependentType() ||
(SS.isSet() && isDependentScopeSpecifier(SS)))
@@ -725,7 +736,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
return BuildMemberReferenceExpr(Base, BaseType,
OpLoc, IsArrow, SS, TemplateKWLoc,
- FirstQualifierInScope, R, TemplateArgs,
+ FirstQualifierInScope, R, TemplateArgs, S,
false, ExtraArgs);
}
@@ -877,6 +888,18 @@ static MemberExpr *BuildMemberExpr(
return E;
}
+/// \brief Determine if the given scope is within a function-try-block handler.
+static bool IsInFnTryBlockHandler(const Scope *S) {
+ // Walk the scope stack until finding a FnTryCatchScope, or leave the
+ // function scope. If a FnTryCatchScope is found, check whether the TryScope
+ // flag is set. If it is not, it's a function-try-block handler.
+ for (; S != S->getFnParent(); S = S->getParent()) {
+ if (S->getFlags() & Scope::FnTryCatchScope)
+ return (S->getFlags() & Scope::TryScope) != Scope::TryScope;
+ }
+ return false;
+}
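A sketch of the undefined behavior this warns about (hypothetical code):

    struct X {
      int m;
      X() try : m(0) {
      } catch (...) {
        m = 1;  // warning: referring to a member of the object in the
      }         // handler of a constructor's function-try-block is UB
    };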
+
ExprResult
Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation OpLoc, bool IsArrow,
@@ -885,6 +908,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
+ const Scope *S,
bool SuppressQualifierCheck,
ActOnMemberAccessExtraArgs *ExtraArgs) {
QualType BaseType = BaseExprType;
@@ -948,6 +972,17 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
if (R.isAmbiguous())
return ExprError();
+ // [except.handle]p10: Referring to any non-static member or base class of an
+ // object in the handler for a function-try-block of a constructor or
+ // destructor for that object results in undefined behavior.
+ const auto *FD = getCurFunctionDecl();
+ if (S && BaseExpr && FD &&
+ (isa<CXXDestructorDecl>(FD) || isa<CXXConstructorDecl>(FD)) &&
+ isa<CXXThisExpr>(BaseExpr->IgnoreImpCasts()) &&
+ IsInFnTryBlockHandler(S))
+ Diag(MemberLoc, diag::warn_cdtor_function_try_handler_mem_expr)
+ << isa<CXXDestructorDecl>(FD);
+
if (R.empty()) {
// Rederive where we looked up.
DeclContext *DC = (SS.isSet()
@@ -1042,16 +1077,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
}
- bool ShouldCheckUse = true;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MemberDecl)) {
- // Don't diagnose the use of a virtual member function unless it's
- // explicitly qualified.
- if (MD->isVirtual() && !SS.isSet())
- ShouldCheckUse = false;
- }
-
// Check the use of this member.
- if (ShouldCheckUse && DiagnoseUseOfDecl(MemberDecl, MemberLoc))
+ if (DiagnoseUseOfDecl(MemberDecl, MemberLoc))
return ExprError();
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
@@ -1645,7 +1672,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS,
TemplateKWLoc, FirstQualifierInScope,
- NameInfo, TemplateArgs, &ExtraArgs);
+ NameInfo, TemplateArgs, S, &ExtraArgs);
}
static ExprResult
@@ -1718,7 +1745,7 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
- bool IsKnownInstance) {
+ bool IsKnownInstance, const Scope *S) {
assert(!R.empty() && !R.isAmbiguous());
SourceLocation loc = R.getNameLoc();
@@ -1743,5 +1770,5 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
/*IsArrow*/ true,
SS, TemplateKWLoc,
/*FirstQualifierInScope*/ nullptr,
- R, TemplateArgs);
+ R, TemplateArgs, S);
}
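
For illustration (not part of this patch), a minimal C++ snippet of the code the
new warn_cdtor_function_try_handler_mem_expr diagnostic targets; the Scope
parameter threaded through BuildMemberReferenceExpr above exists so this check
can see whether a FnTryCatchScope is on the scope stack:

    struct S {
      int M;
      S() try : M(42) {
        throw 0;
      } catch (...) {
        // [except.handle]p10: referring to a non-static member of the object
        // in the handler of a ctor/dtor function-try-block is undefined
        // behavior. '(void)M' is an implicit this->M, so Sema now warns here.
        (void)M;
      }
    };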
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index 6cd062621951..65f10816924f 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -32,24 +32,21 @@ using namespace sema;
using llvm::makeArrayRef;
ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
- Expr **strings,
- unsigned NumStrings) {
- StringLiteral **Strings = reinterpret_cast<StringLiteral**>(strings);
-
+ ArrayRef<Expr *> Strings) {
// Most ObjC strings are formed out of a single piece. However, we *can*
// have strings formed out of multiple @ strings with multiple pptokens in
// each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
// StringLiteral for ObjCStringLiteral to hold onto.
- StringLiteral *S = Strings[0];
+ StringLiteral *S = cast<StringLiteral>(Strings[0]);
// If we have a multi-part string, merge it all together.
- if (NumStrings != 1) {
+ if (Strings.size() != 1) {
// Concatenate objc strings.
SmallString<128> StrBuf;
SmallVector<SourceLocation, 8> StrLocs;
- for (unsigned i = 0; i != NumStrings; ++i) {
- S = Strings[i];
+ for (Expr *E : Strings) {
+ S = cast<StringLiteral>(E);
// ObjC strings can't be wide or UTF.
if (!S->isAscii()) {
@@ -168,6 +165,77 @@ static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
return true;
}
+/// \brief Maps ObjCLiteralKind to NSClassIdKindKind
+static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
+ Sema::ObjCLiteralKind LiteralKind) {
+ switch (LiteralKind) {
+ case Sema::LK_Array:
+ return NSAPI::ClassId_NSArray;
+ case Sema::LK_Dictionary:
+ return NSAPI::ClassId_NSDictionary;
+ case Sema::LK_Numeric:
+ return NSAPI::ClassId_NSNumber;
+ case Sema::LK_String:
+ return NSAPI::ClassId_NSString;
+ case Sema::LK_Boxed:
+ return NSAPI::ClassId_NSValue;
+
+  // There is no corresponding mapping between
+  // LK_None/LK_Block and NSClassIdKindKind.
+ case Sema::LK_Block:
+ case Sema::LK_None:
+ break;
+ }
+ llvm_unreachable("LiteralKind can't be converted into a ClassKind");
+}
+
+/// \brief Validates ObjCInterfaceDecl availability.
+/// ObjCInterfaceDecl, used to create ObjC literals, should be defined
+/// unless clang is in debugger mode.
+static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
+ SourceLocation Loc,
+ Sema::ObjCLiteralKind LiteralKind) {
+ if (!Decl) {
+ NSAPI::NSClassIdKindKind Kind = ClassKindFromLiteralKind(LiteralKind);
+ IdentifierInfo *II = S.NSAPIObj->getNSClassId(Kind);
+ S.Diag(Loc, diag::err_undeclared_objc_literal_class)
+ << II->getName() << LiteralKind;
+ return false;
+ } else if (!Decl->hasDefinition() && !S.getLangOpts().DebuggerObjCLiteral) {
+ S.Diag(Loc, diag::err_undeclared_objc_literal_class)
+ << Decl->getName() << LiteralKind;
+ S.Diag(Decl->getLocation(), diag::note_forward_class);
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Looks up the ObjCInterfaceDecl for a given NSClassIdKindKind.
+/// Used to create ObjC literals, such as NSDictionary (@{}),
+/// NSArray (@[]) and boxed expressions (@()).
+static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
+ SourceLocation Loc,
+ Sema::ObjCLiteralKind LiteralKind) {
+ NSAPI::NSClassIdKindKind ClassKind = ClassKindFromLiteralKind(LiteralKind);
+ IdentifierInfo *II = S.NSAPIObj->getNSClassId(ClassKind);
+ NamedDecl *IF = S.LookupSingleName(S.TUScope, II, Loc,
+ Sema::LookupOrdinaryName);
+ ObjCInterfaceDecl *ID = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!ID && S.getLangOpts().DebuggerObjCLiteral) {
+ ASTContext &Context = S.Context;
+ TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ ID = ObjCInterfaceDecl::Create (Context, TU, SourceLocation(), II,
+ nullptr, nullptr, SourceLocation());
+ }
+
+ if (!ValidateObjCLiteralInterfaceDecl(S, ID, Loc, LiteralKind)) {
+ ID = nullptr;
+ }
+
+ return ID;
+}
+
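
For illustration (hypothetical caller, not part of this patch), every literal
kind now funnels through one lookup pattern; the mapping comes from
ClassKindFromLiteralKind above:

    // The helper emits err_undeclared_objc_literal_class (or the forward
    // declaration note) itself, so callers only need to bail out.
    if (!S.NSArrayDecl) {
      S.NSArrayDecl = LookupObjCInterfaceDeclForLiteral(S, Loc, Sema::LK_Array);
      if (!S.NSArrayDecl)
        return ExprError();
    }
    // Literal kind -> class whose declaration (and normally definition) is
    // required:
    //   LK_String     @"..."  -> NSString
    //   LK_Numeric    @42     -> NSNumber
    //   LK_Array      @[...]  -> NSArray
    //   LK_Dictionary @{...}  -> NSDictionary
    //   LK_Boxed      @(...)  -> NSValue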
/// \brief Retrieve the NSNumber factory method that should be used to create
/// an Objective-C literal for the given type.
static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
@@ -197,26 +265,9 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
// Look up the NSNumber class, if we haven't done so already. It's cached
// in the Sema instance.
if (!S.NSNumberDecl) {
- IdentifierInfo *NSNumberId =
- S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber);
- NamedDecl *IF = S.LookupSingleName(S.TUScope, NSNumberId,
- Loc, Sema::LookupOrdinaryName);
- S.NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ S.NSNumberDecl = LookupObjCInterfaceDeclForLiteral(S, Loc,
+ Sema::LK_Numeric);
if (!S.NSNumberDecl) {
- if (S.getLangOpts().DebuggerObjCLiteral) {
- // Create a stub definition of NSNumber.
- S.NSNumberDecl = ObjCInterfaceDecl::Create(CX,
- CX.getTranslationUnitDecl(),
- SourceLocation(), NSNumberId,
- nullptr, nullptr,
- SourceLocation());
- } else {
- // Otherwise, require a declaration of NSNumber.
- S.Diag(Loc, diag::err_undeclared_nsnumber);
- return nullptr;
- }
- } else if (!S.NSNumberDecl->hasDefinition()) {
- S.Diag(Loc, diag::err_undeclared_nsnumber);
return nullptr;
}
}
@@ -457,6 +508,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
if (RValue.isInvalid()) {
return ExprError();
}
+ SourceLocation Loc = SR.getBegin();
ValueExpr = RValue.get();
QualType ValueType(ValueExpr->getType());
if (const PointerType *PT = ValueType->getAs<PointerType>()) {
@@ -464,29 +516,11 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
if (Context.hasSameUnqualifiedType(PointeeType, Context.CharTy)) {
if (!NSStringDecl) {
- IdentifierInfo *NSStringId =
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSString);
- NamedDecl *Decl = LookupSingleName(TUScope, NSStringId,
- SR.getBegin(), LookupOrdinaryName);
- NSStringDecl = dyn_cast_or_null<ObjCInterfaceDecl>(Decl);
+ NSStringDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_String);
if (!NSStringDecl) {
- if (getLangOpts().DebuggerObjCLiteral) {
- // Support boxed expressions in the debugger w/o NSString declaration.
- DeclContext *TU = Context.getTranslationUnitDecl();
- NSStringDecl = ObjCInterfaceDecl::Create(Context, TU,
- SourceLocation(),
- NSStringId,
- nullptr, nullptr,
- SourceLocation());
- } else {
- Diag(SR.getBegin(), diag::err_undeclared_nsstring);
- return ExprError();
- }
- } else if (!NSStringDecl->hasDefinition()) {
- Diag(SR.getBegin(), diag::err_undeclared_nsstring);
return ExprError();
}
- assert(NSStringDecl && "NSStringDecl should not be NULL");
QualType NSStringObject = Context.getObjCInterfaceType(NSStringDecl);
NSStringPointer = Context.getObjCObjectPointerType(NSStringObject);
}
@@ -520,7 +554,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = M;
}
- if (!validateBoxingMethod(*this, SR.getBegin(), NSStringDecl,
+ if (!validateBoxingMethod(*this, Loc, NSStringDecl,
stringWithUTF8String, BoxingMethod))
return ExprError();
@@ -563,16 +597,16 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
// FIXME: Do I need to do anything special with BoolTy expressions?
// Look for the appropriate method within NSNumber.
- BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(), ValueType);
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc, ValueType);
BoxedType = NSNumberPointer;
} else if (const EnumType *ET = ValueType->getAs<EnumType>()) {
if (!ET->getDecl()->isComplete()) {
- Diag(SR.getBegin(), diag::err_objc_incomplete_boxed_expression_type)
+ Diag(Loc, diag::err_objc_incomplete_boxed_expression_type)
<< ValueType << ValueExpr->getSourceRange();
return ExprError();
}
- BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(),
+ BoxingMethod = getNSNumberFactoryMethod(*this, Loc,
ET->getDecl()->getIntegerType());
BoxedType = NSNumberPointer;
} else if (ValueType->isObjCBoxableRecordType()) {
@@ -582,29 +616,12 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
// Look up the NSValue class, if we haven't done so already. It's cached
// in the Sema instance.
if (!NSValueDecl) {
- IdentifierInfo *NSValueId =
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSValue);
- NamedDecl *IF = LookupSingleName(TUScope, NSValueId,
- SR.getBegin(), Sema::LookupOrdinaryName);
- NSValueDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ NSValueDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Boxed);
if (!NSValueDecl) {
- if (getLangOpts().DebuggerObjCLiteral) {
- // Create a stub definition of NSValue.
- DeclContext *TU = Context.getTranslationUnitDecl();
- NSValueDecl = ObjCInterfaceDecl::Create(Context, TU,
- SourceLocation(), NSValueId,
- nullptr, nullptr,
- SourceLocation());
- } else {
- // Otherwise, require a declaration of NSValue.
- Diag(SR.getBegin(), diag::err_undeclared_nsvalue);
- return ExprError();
- }
- } else if (!NSValueDecl->hasDefinition()) {
- Diag(SR.getBegin(), diag::err_undeclared_nsvalue);
return ExprError();
}
-
+
// generate the pointer to NSValue type.
QualType NSValueObject = Context.getObjCInterfaceType(NSValueDecl);
NSValuePointer = Context.getObjCObjectPointerType(NSValueObject);
@@ -663,7 +680,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = M;
}
- if (!validateBoxingMethod(*this, SR.getBegin(), NSValueDecl,
+ if (!validateBoxingMethod(*this, Loc, NSValueDecl,
ValueWithBytesObjCType, BoxingMethod))
return ExprError();
@@ -671,8 +688,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
}
if (!ValueType.isTriviallyCopyableType(Context)) {
- Diag(SR.getBegin(),
- diag::err_objc_non_trivially_copyable_boxed_expression_type)
+ Diag(Loc, diag::err_objc_non_trivially_copyable_boxed_expression_type)
<< ValueType << ValueExpr->getSourceRange();
return ExprError();
}
@@ -682,12 +698,12 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
}
if (!BoxingMethod) {
- Diag(SR.getBegin(), diag::err_objc_illegal_boxed_expression_type)
+ Diag(Loc, diag::err_objc_illegal_boxed_expression_type)
<< ValueType << ValueExpr->getSourceRange();
return ExprError();
}
- DiagnoseUseOfDecl(BoxingMethod, SR.getBegin());
+ DiagnoseUseOfDecl(BoxingMethod, Loc);
ExprResult ConvertedValueExpr;
if (ValueType->isObjCBoxableRecordType()) {
@@ -746,26 +762,16 @@ ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
}
ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
- // Look up the NSArray class, if we haven't done so already.
- if (!NSArrayDecl) {
- NamedDecl *IF = LookupSingleName(TUScope,
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
- SR.getBegin(),
- LookupOrdinaryName);
- NSArrayDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (!NSArrayDecl && getLangOpts().DebuggerObjCLiteral)
- NSArrayDecl = ObjCInterfaceDecl::Create (Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
- nullptr, nullptr, SourceLocation());
+ SourceLocation Loc = SR.getBegin();
+ if (!NSArrayDecl) {
+ NSArrayDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Array);
if (!NSArrayDecl) {
- Diag(SR.getBegin(), diag::err_undeclared_nsarray);
return ExprError();
}
}
-
+
// Find the arrayWithObjects:count: method, if we haven't done so already.
QualType IdT = Context.getObjCIdType();
if (!ArrayWithObjectsMethod) {
@@ -801,7 +807,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
Method->setMethodParams(Context, Params, None);
}
- if (!validateBoxingMethod(*this, SR.getBegin(), NSArrayDecl, Sel, Method))
+ if (!validateBoxingMethod(*this, Loc, NSArrayDecl, Sel, Method))
return ExprError();
// Dig out the type that all elements should be converted to.
@@ -859,28 +865,18 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
ArrayWithObjectsMethod, SR));
}
-ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
- ObjCDictionaryElement *Elements,
- unsigned NumElements) {
- // Look up the NSDictionary class, if we haven't done so already.
- if (!NSDictionaryDecl) {
- NamedDecl *IF = LookupSingleName(TUScope,
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
- SR.getBegin(), LookupOrdinaryName);
- NSDictionaryDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
- if (!NSDictionaryDecl && getLangOpts().DebuggerObjCLiteral)
- NSDictionaryDecl = ObjCInterfaceDecl::Create (Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
- nullptr, nullptr, SourceLocation());
+ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
+ MutableArrayRef<ObjCDictionaryElement> Elements) {
+ SourceLocation Loc = SR.getBegin();
+ if (!NSDictionaryDecl) {
+ NSDictionaryDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
+ Sema::LK_Dictionary);
if (!NSDictionaryDecl) {
- Diag(SR.getBegin(), diag::err_undeclared_nsdictionary);
- return ExprError();
+ return ExprError();
}
}
-
+
// Find the dictionaryWithObjects:forKeys:count: method, if we haven't done
// so already.
QualType IdT = Context.getObjCIdType();
@@ -1007,31 +1003,31 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
// Check that each of the keys and values provided is valid in a collection
// literal, performing conversions as necessary.
bool HasPackExpansions = false;
- for (unsigned I = 0, N = NumElements; I != N; ++I) {
+ for (ObjCDictionaryElement &Element : Elements) {
// Check the key.
- ExprResult Key = CheckObjCCollectionLiteralElement(*this, Elements[I].Key,
+ ExprResult Key = CheckObjCCollectionLiteralElement(*this, Element.Key,
KeyT);
if (Key.isInvalid())
return ExprError();
// Check the value.
ExprResult Value
- = CheckObjCCollectionLiteralElement(*this, Elements[I].Value, ValueT);
+ = CheckObjCCollectionLiteralElement(*this, Element.Value, ValueT);
if (Value.isInvalid())
return ExprError();
- Elements[I].Key = Key.get();
- Elements[I].Value = Value.get();
+ Element.Key = Key.get();
+ Element.Value = Value.get();
- if (Elements[I].EllipsisLoc.isInvalid())
+ if (Element.EllipsisLoc.isInvalid())
continue;
- if (!Elements[I].Key->containsUnexpandedParameterPack() &&
- !Elements[I].Value->containsUnexpandedParameterPack()) {
- Diag(Elements[I].EllipsisLoc,
+ if (!Element.Key->containsUnexpandedParameterPack() &&
+ !Element.Value->containsUnexpandedParameterPack()) {
+ Diag(Element.EllipsisLoc,
diag::err_pack_expansion_without_parameter_packs)
- << SourceRange(Elements[I].Key->getLocStart(),
- Elements[I].Value->getLocEnd());
+ << SourceRange(Element.Key->getLocStart(),
+ Element.Value->getLocEnd());
return ExprError();
}
@@ -1043,7 +1039,7 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
= Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(NSDictionaryDecl));
return MaybeBindToTemporary(ObjCDictionaryLiteral::Create(
- Context, makeArrayRef(Elements, NumElements), HasPackExpansions, Ty,
+ Context, Elements, HasPackExpansions, Ty,
DictionaryWithObjectsMethod, SR));
}
@@ -1092,7 +1088,7 @@ ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc,
QualType EncodedType = GetTypeFromParser(ty, &TInfo);
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(EncodedType,
- PP.getLocForEndOfToken(LParenLoc));
+ getLocForEndOfToken(LParenLoc));
return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc);
}
@@ -1657,7 +1653,8 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
// Objective-C pointer type, we may need to extend the lifetime
// of the block object.
if (typeArgs && Args[i]->isRValue() && paramType->isBlockPointerType() &&
- origParamType->isBlockCompatibleObjCPointerType(Context)) {
+ Args[i]->getType()->isBlockPointerType() &&
+ origParamType->isObjCObjectPointerType()) {
ExprResult arg = Args[i];
maybeExtendBlockObject(arg);
Args[i] = arg.get();
@@ -1778,8 +1775,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
diag::err_property_not_found_forward_class,
MemberName, BaseRange))
return ExprError();
-
- // Search for a declared property first.
+
if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) {
// Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc))
@@ -2726,6 +2722,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// Try to complete the type. Under ARC, this is a hard error from which
// we don't try to recover.
+ // FIXME: In the non-ARC case, this will still be a hard error if the
+ // definition is found in a module that's not visible.
const ObjCInterfaceDecl *forwardClass = nullptr;
if (RequireCompleteType(Loc, OCIType->getPointeeType(),
getLangOpts().ObjCAutoRefCount
@@ -3397,7 +3395,7 @@ static void addFixitForObjCARCConversion(Sema &S,
DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
BridgeCall));
DiagB.AddFixItHint(FixItHint::CreateInsertion(
- S.PP.getLocForEndOfToken(range.getEnd()),
+ S.getLocForEndOfToken(range.getEnd()),
")"));
}
return;
@@ -3430,7 +3428,7 @@ static void addFixitForObjCARCConversion(Sema &S,
DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
castCode));
DiagB.AddFixItHint(FixItHint::CreateInsertion(
- S.PP.getLocForEndOfToken(range.getEnd()),
+ S.getLocForEndOfToken(range.getEnd()),
")"));
}
}
@@ -3471,7 +3469,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
(castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
if (S.makeUnavailableInSystemHeader(loc,
- "converts between Objective-C and C pointers in -fobjc-arc"))
+ UnavailableAttr::IR_ARCForbiddenConversion))
return;
QualType castExprType = castExpr->getType();
@@ -3498,7 +3496,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
}
// Check whether this could be fixed with a bridge cast.
- SourceLocation afterLParen = S.PP.getLocForEndOfToken(castRange.getBegin());
+ SourceLocation afterLParen = S.getLocForEndOfToken(castRange.getBegin());
SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;
// Bridge from an ARC type to a CF type.
@@ -3901,7 +3899,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
ExpressionString += RelatedClass->getNameAsString();
ExpressionString += " ";
ExpressionString += ClassMethod->getSelector().getAsString();
- SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd());
+ SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
// Provide a fixit: [RelatedClass ClassMethod SrcExpr]
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << ClassMethod->getSelector() << false
@@ -3926,7 +3924,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
// Implicit conversion from ObjC type to CF object is needed.
if (InstanceMethod) {
std::string ExpressionString;
- SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd());
+ SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
if (InstanceMethod->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = InstanceMethod->findPropertyDecl()) {
      // fixit: ObjectExpr.propertyname when it is a property accessor.
diff --git a/lib/Sema/SemaFixItUtils.cpp b/lib/Sema/SemaFixItUtils.cpp
index 2e327ecf231f..714fbedf09bc 100644
--- a/lib/Sema/SemaFixItUtils.cpp
+++ b/lib/Sema/SemaFixItUtils.cpp
@@ -42,7 +42,7 @@ bool ConversionFixItGenerator::compareTypesSimple(CanQualType From,
const CanQualType FromUnq = From.getUnqualifiedType();
const CanQualType ToUnq = To.getUnqualifiedType();
- if ((FromUnq == ToUnq || (S.IsDerivedFrom(FromUnq, ToUnq)) ) &&
+ if ((FromUnq == ToUnq || (S.IsDerivedFrom(Loc, FromUnq, ToUnq)) ) &&
To.isAtLeastAsQualifiedAs(From))
return true;
return false;
@@ -58,8 +58,8 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
const CanQualType FromQTy = S.Context.getCanonicalType(FromTy);
const CanQualType ToQTy = S.Context.getCanonicalType(ToTy);
const SourceLocation Begin = FullExpr->getSourceRange().getBegin();
- const SourceLocation End = S.PP.getLocForEndOfToken(FullExpr->getSourceRange()
- .getEnd());
+ const SourceLocation End = S.getLocForEndOfToken(FullExpr->getSourceRange()
+ .getEnd());
// Strip the implicit casts - those are implied by the compiler, not the
// original source code.
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index adec512693f3..c3a89463dc69 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -443,8 +443,11 @@ ExprResult InitListChecker::PerformEmptyInit(Sema &SemaRef,
if (!VerifyOnly) {
SemaRef.Diag(CtorDecl->getLocation(),
diag::warn_invalid_initializer_from_system_header);
- SemaRef.Diag(Entity.getDecl()->getLocation(),
- diag::note_used_in_initialization_here);
+ if (Entity.getKind() == InitializedEntity::EK_Member)
+ SemaRef.Diag(Entity.getDecl()->getLocation(),
+ diag::note_used_in_initialization_here);
+ else if (Entity.getKind() == InitializedEntity::EK_ArrayElement)
+ SemaRef.Diag(Loc, diag::note_used_in_initialization_here);
}
}
}
@@ -802,7 +805,8 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1);
// Update the structured sub-object initializer so that it's ending
// range corresponds with the end of the last initializer it used.
- if (EndIndex < ParentIList->getNumInits()) {
+ if (EndIndex < ParentIList->getNumInits() &&
+ ParentIList->getInit(EndIndex)) {
SourceLocation EndLoc
= ParentIList->getInit(EndIndex)->getSourceRange().getEnd();
StructuredSubobjectInitList->setRBraceLoc(EndLoc);
@@ -3008,6 +3012,7 @@ bool InitializationSequence::isAmbiguous() const {
case FK_VariableLengthArrayHasInitializer:
case FK_PlaceholderType:
case FK_ExplicitConstructor:
+ case FK_AddressOfUnaddressableFunction:
return false;
case FK_ReferenceInitOverloadFailed:
@@ -3323,7 +3328,7 @@ static bool TryInitializerListConstruction(Sema &S,
if (!S.isStdInitializerList(DestType, &E))
return false;
- if (S.RequireCompleteType(List->getExprLoc(), E, 0)) {
+ if (!S.isCompleteType(List->getExprLoc(), E)) {
Sequence.setIncompleteTypeFailure(E);
return true;
}
@@ -3433,7 +3438,7 @@ static void TryConstructorInitialization(Sema &S,
"IsListInit must come with a single initializer list argument.");
// The type we're constructing needs to be complete.
- if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ if (!S.isCompleteType(Kind.getLocation(), DestType)) {
Sequence.setIncompleteTypeFailure(DestType);
return;
}
@@ -3674,7 +3679,7 @@ static void TryListInitialization(Sema &S,
}
if (DestType->isRecordType() &&
- S.RequireCompleteType(InitList->getLocStart(), DestType, 0)) {
+ !S.isCompleteType(InitList->getLocStart(), DestType)) {
Sequence.setIncompleteTypeFailure(DestType);
return;
}
@@ -3694,7 +3699,7 @@ static void TryListInitialization(Sema &S,
if (DestType->isRecordType()) {
QualType InitType = InitList->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
- S.IsDerivedFrom(InitType, DestType)) {
+ S.IsDerivedFrom(InitList->getLocStart(), InitType, DestType)) {
Expr *InitAsExpr = InitList->getInit(0);
TryConstructorInitialization(S, Entity, Kind, InitAsExpr, DestType,
Sequence, /*InitListSyntax*/ false,
@@ -3728,7 +3733,9 @@ static void TryListInitialization(Sema &S,
// C++11 [dcl.init.list]p3:
// - If T is an aggregate, aggregate initialization is performed.
- if (DestType->isRecordType() && !DestType->isAggregateType()) {
+ if ((DestType->isRecordType() && !DestType->isAggregateType()) ||
+ (S.getLangOpts().CPlusPlus11 &&
+ S.isStdInitializerList(DestType, nullptr))) {
if (S.getLangOpts().CPlusPlus11) {
// - Otherwise, if the initializer list has no elements and T is a
// class type with a default constructor, the object is
@@ -3834,7 +3841,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
const RecordType *T1RecordType = nullptr;
if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
- !S.RequireCompleteType(Kind.getLocation(), T1, 0)) {
+ S.isCompleteType(Kind.getLocation(), T1)) {
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
@@ -3870,7 +3877,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
const RecordType *T2RecordType = nullptr;
if ((T2RecordType = T2->getAs<RecordType>()) &&
- !S.RequireCompleteType(Kind.getLocation(), T2, 0)) {
+ S.isCompleteType(Kind.getLocation(), T2)) {
// The type we're converting from is a class type, enumerate its conversion
// functions.
CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
@@ -4455,7 +4462,7 @@ static void TryUserDefinedConversion(Sema &S,
= cast<CXXRecordDecl>(DestRecordType->getDecl());
// Try to complete the type we're converting to.
- if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ if (S.isCompleteType(Kind.getLocation(), DestType)) {
DeclContext::lookup_result R = S.LookupConstructors(DestRecordDecl);
// The container holding the constructors can under certain conditions
// be changed while iterating. To be safe we copy the lookup results
@@ -4501,7 +4508,7 @@ static void TryUserDefinedConversion(Sema &S,
// We can only enumerate the conversion functions for a complete type; if
// the type isn't complete, simply skip this step.
- if (!S.RequireCompleteType(DeclLoc, SourceType, 0)) {
+ if (S.isCompleteType(DeclLoc, SourceType)) {
CXXRecordDecl *SourceRecordDecl
= cast<CXXRecordDecl>(SourceRecordType->getDecl());
@@ -4798,6 +4805,17 @@ InitializationSequence::InitializationSequence(Sema &S,
InitializeFrom(S, Entity, Kind, Args, TopLevelOfInitList);
}
+/// Tries to get a FunctionDecl out of `E`. If it succeeds and we cannot take
+/// the address of that function, this returns true. Otherwise, it returns
+/// false.
+static bool isExprAnUnaddressableFunction(Sema &S, const Expr *E) {
+ auto *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE || !isa<FunctionDecl>(DRE->getDecl()))
+ return false;
+
+ return !S.checkAddressOfFunctionIsAvailable(
+ cast<FunctionDecl>(DRE->getDecl()));
+}
+
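
A sketch of what "unaddressable" means here, assuming the pass_object_size
attribute (which checkAddressOfFunctionIsAvailable rejects taking the address
of); names are hypothetical:

    // pass_object_size arguments are computed at each call site, so the
    // function's address cannot be taken. Initializing a function pointer
    // from such a function now fails with FK_AddressOfUnaddressableFunction
    // rather than a generic conversion failure.
    void fill(char *buf __attribute__((pass_object_size(0))), char v);
    void (*fp)(char *, char) = fill;  // error, diagnosed precisely below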
void InitializationSequence::InitializeFrom(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4979,7 +4997,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
}
assert(S.getLangOpts().CPlusPlus);
-
+
// - If the destination type is a (possibly cv-qualified) class type:
if (DestType->isRecordType()) {
// - If the initialization is direct-initialization, or if it is
@@ -4989,7 +5007,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (Kind.getKind() == InitializationKind::IK_Direct ||
(Kind.getKind() == InitializationKind::IK_Copy &&
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
- S.IsDerivedFrom(SourceType, DestType))))
+ S.IsDerivedFrom(Initializer->getLocStart(), SourceType, DestType))))
TryConstructorInitialization(S, Entity, Kind, Args,
DestType, *this);
// - Otherwise (i.e., for the remaining copy-initialization cases),
@@ -5018,7 +5036,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
bool NeedAtomicConversion = false;
if (const AtomicType *Atomic = DestType->getAs<AtomicType>()) {
if (Context.hasSameUnqualifiedType(SourceType, Atomic->getValueType()) ||
- S.IsDerivedFrom(SourceType, Atomic->getValueType())) {
+ S.IsDerivedFrom(Initializer->getLocStart(), SourceType,
+ Atomic->getValueType())) {
DestType = Atomic->getValueType();
NeedAtomicConversion = true;
}
@@ -5076,6 +5095,9 @@ void InitializationSequence::InitializeFrom(Sema &S,
!S.ResolveAddressOfOverloadedFunction(Initializer, DestType,
false, dap))
SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else if (Initializer->getType()->isFunctionType() &&
+ isExprAnUnaddressableFunction(S, Initializer))
+ SetFailed(InitializationSequence::FK_AddressOfUnaddressableFunction);
else
SetFailed(InitializationSequence::FK_ConversionFailed);
} else {
@@ -6357,7 +6379,7 @@ InitializationSequence::Perform(Sema &S,
CastKind = CK_ConstructorConversion;
QualType Class = S.Context.getTypeDeclType(Constructor->getParent());
if (S.Context.hasSameUnqualifiedType(SourceType, Class) ||
- S.IsDerivedFrom(SourceType, Class))
+ S.IsDerivedFrom(Loc, SourceType, Class))
IsCopy = true;
CreatedObject = true;
@@ -6596,6 +6618,8 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment: {
QualType SourceType = CurInit.get()->getType();
+ // Save off the initial CurInit in case we need to emit a diagnostic
+ ExprResult InitialCurInit = CurInit;
ExprResult Result = CurInit;
Sema::AssignConvertType ConvTy =
S.CheckSingleAssignmentConstraints(Step->Type, Result, true,
@@ -6618,7 +6642,7 @@ InitializationSequence::Perform(Sema &S,
bool Complained;
if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
Step->Type, SourceType,
- CurInit.get(),
+ InitialCurInit.get(),
getAssignmentAction(Entity, true),
&Complained)) {
PrintInitLocationNote(S, Entity);
@@ -6921,6 +6945,13 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
}
+ case FK_AddressOfUnaddressableFunction: {
+ auto *FD = cast<FunctionDecl>(cast<DeclRefExpr>(Args[0])->getDecl());
+ S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ Args[0]->getLocStart());
+ break;
+ }
+
case FK_ReferenceInitOverloadFailed:
case FK_UserConversionOverloadFailed:
switch (FailedOverloadResult) {
@@ -6943,6 +6974,7 @@ bool InitializationSequence::Diagnose(Sema &S,
diag::err_typecheck_nonviable_condition_incomplete,
Args[0]->getType(), Args[0]->getSourceRange()))
S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
+ << (Entity.getKind() == InitializedEntity::EK_Result)
<< Args[0]->getType() << Args[0]->getSourceRange()
<< DestType.getNonReferenceType();
@@ -7043,10 +7075,12 @@ bool InitializationSequence::Diagnose(Sema &S,
SourceRange R;
auto *InitList = dyn_cast<InitListExpr>(Args[0]);
- if (InitList && InitList->getNumInits() == 1)
+ if (InitList && InitList->getNumInits() >= 1) {
R = SourceRange(InitList->getInit(0)->getLocEnd(), InitList->getLocEnd());
- else
+ } else {
+ assert(Args.size() > 1 && "Expected multiple initializers!");
R = SourceRange(Args.front()->getLocEnd(), Args.back()->getLocEnd());
+ }
R.setBegin(S.getLocForEndOfToken(R.getBegin()));
if (Kind.isCStyleOrFunctionalCast())
@@ -7240,6 +7274,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "array requires initializer list";
break;
+ case FK_AddressOfUnaddressableFunction:
+ OS << "address of unaddressable function was taken";
+ break;
+
case FK_ArrayNeedsInitListOrStringLiteral:
OS << "array requires initializer list or string literal";
break;
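
A small illustrative case (not from the patch) for the std::initializer_list
branch added to TryListInitialization earlier in this file's diff:

    #include <initializer_list>
    // Even if the library's initializer_list happens to be declared as an
    // aggregate, the destination is now routed through the C++11
    // list-initialization rules for class types rather than falling through
    // to aggregate initialization.
    std::initializer_list<int> il = {1, 2, 3};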
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index 8220641166b2..884add26e43a 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -226,15 +226,16 @@ getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
if (LSI->GLTemplateParameterList)
return LSI->GLTemplateParameterList;
- if (LSI->AutoTemplateParams.size()) {
+ if (!LSI->AutoTemplateParams.empty()) {
SourceRange IntroRange = LSI->IntroducerRange;
SourceLocation LAngleLoc = IntroRange.getBegin();
SourceLocation RAngleLoc = IntroRange.getEnd();
LSI->GLTemplateParameterList = TemplateParameterList::Create(
SemaRef.Context,
/*Template kw loc*/ SourceLocation(), LAngleLoc,
- (NamedDecl **)LSI->AutoTemplateParams.data(),
- LSI->AutoTemplateParams.size(), RAngleLoc);
+ llvm::makeArrayRef((NamedDecl *const *)LSI->AutoTemplateParams.data(),
+ LSI->AutoTemplateParams.size()),
+ RAngleLoc);
}
return LSI->GLTemplateParameterList;
}
@@ -685,7 +686,8 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
QualType ReturnType =
(RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType();
- if (Context.hasSameType(ReturnType, CSI.ReturnType))
+ if (Context.getCanonicalFunctionResultType(ReturnType) ==
+ Context.getCanonicalFunctionResultType(CSI.ReturnType))
continue;
// FIXME: This is a poor diagnostic for ReturnStmts without expressions.
@@ -698,18 +700,11 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
}
}
-QualType Sema::performLambdaInitCaptureInitialization(SourceLocation Loc,
- bool ByRef,
- IdentifierInfo *Id,
- Expr *&Init) {
-
- // We do not need to distinguish between direct-list-initialization
- // and copy-list-initialization here, because we will always deduce
- // std::initializer_list<T>, and direct- and copy-list-initialization
- // always behave the same for such a type.
- // FIXME: We should model whether an '=' was present.
- const bool IsDirectInit = isa<ParenListExpr>(Init) || isa<InitListExpr>(Init);
-
+QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
+ bool ByRef,
+ IdentifierInfo *Id,
+ bool IsDirectInit,
+ Expr *&Init) {
// Create an 'auto' or 'auto&' TypeSourceInfo that we can use to
// deduce against.
QualType DeductType = Context.getAutoDeductType();
@@ -722,50 +717,16 @@ QualType Sema::performLambdaInitCaptureInitialization(SourceLocation Loc,
}
TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType);
- // Are we a non-list direct initialization?
- ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
-
- Expr *DeduceInit = Init;
- // Initializer could be a C++ direct-initializer. Deduction only works if it
- // contains exactly one expression.
- if (CXXDirectInit) {
- if (CXXDirectInit->getNumExprs() == 0) {
- Diag(CXXDirectInit->getLocStart(), diag::err_init_capture_no_expression)
- << DeclarationName(Id) << TSI->getType() << Loc;
- return QualType();
- } else if (CXXDirectInit->getNumExprs() > 1) {
- Diag(CXXDirectInit->getExpr(1)->getLocStart(),
- diag::err_init_capture_multiple_expressions)
- << DeclarationName(Id) << TSI->getType() << Loc;
- return QualType();
- } else {
- DeduceInit = CXXDirectInit->getExpr(0);
- if (isa<InitListExpr>(DeduceInit))
- Diag(CXXDirectInit->getLocStart(), diag::err_init_capture_paren_braces)
- << DeclarationName(Id) << Loc;
- }
- }
-
- // Now deduce against the initialization expression and store the deduced
- // type below.
- QualType DeducedType;
- if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
- if (isa<InitListExpr>(Init))
- Diag(Loc, diag::err_init_capture_deduction_failure_from_init_list)
- << DeclarationName(Id)
- << (DeduceInit->getType().isNull() ? TSI->getType()
- : DeduceInit->getType())
- << DeduceInit->getSourceRange();
- else
- Diag(Loc, diag::err_init_capture_deduction_failure)
- << DeclarationName(Id) << TSI->getType()
- << (DeduceInit->getType().isNull() ? TSI->getType()
- : DeduceInit->getType())
- << DeduceInit->getSourceRange();
- }
+ // Deduce the type of the init capture.
+ QualType DeducedType = deduceVarTypeFromInitializer(
+ /*VarDecl*/nullptr, DeclarationName(Id), DeductType, TSI,
+ SourceRange(Loc, Loc), IsDirectInit, Init);
if (DeducedType.isNull())
return QualType();
+ // Are we a non-list direct initialization?
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+
// Perform initialization analysis and ensure any implicit conversions
// (such as lvalue-to-rvalue) are enforced.
InitializedEntity Entity =
@@ -802,9 +763,10 @@ QualType Sema::performLambdaInitCaptureInitialization(SourceLocation Loc,
return DeducedType;
}
-VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
- QualType InitCaptureType, IdentifierInfo *Id, Expr *Init) {
-
+VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
+ QualType InitCaptureType,
+ IdentifierInfo *Id,
+ unsigned InitStyle, Expr *Init) {
TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType,
Loc);
// Create a dummy variable representing the init-capture. This is not actually
@@ -815,6 +777,8 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
Loc, Id, InitCaptureType, TSI, SC_Auto);
NewVD->setInitCapture(true);
NewVD->setReferenced(true);
+ // FIXME: Pass in a VarDecl::InitializationStyle.
+ NewVD->setInitStyle(static_cast<VarDecl::InitializationStyle>(InitStyle));
NewVD->markUsed(Context);
NewVD->setInit(Init);
return NewVD;
@@ -1013,8 +977,23 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// in this case.
if (C->InitCaptureType.get().isNull())
continue;
- Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
- C->Id, C->Init.get());
+
+ unsigned InitStyle;
+ switch (C->InitKind) {
+ case LambdaCaptureInitKind::NoInit:
+ llvm_unreachable("not an init-capture?");
+ case LambdaCaptureInitKind::CopyInit:
+ InitStyle = VarDecl::CInit;
+ break;
+ case LambdaCaptureInitKind::DirectInit:
+ InitStyle = VarDecl::CallInit;
+ break;
+ case LambdaCaptureInitKind::ListInit:
+ InitStyle = VarDecl::ListInit;
+ break;
+ }
+ Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
+ C->Id, InitStyle, C->Init.get());
// C++1y [expr.prim.lambda]p11:
// An init-capture behaves as if it declares and explicitly
// captures a variable [...] whose declarative region is the
@@ -1022,6 +1001,9 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (Var)
PushOnScopeChains(Var, CurScope, false);
} else {
+ assert(C->InitKind == LambdaCaptureInitKind::NoInit &&
+ "init capture has valid but null init?");
+
// C++11 [expr.prim.lambda]p8:
// If a lambda-capture includes a capture-default that is &, the
// identifiers in the lambda-capture shall not be preceded by &.
@@ -1160,6 +1142,12 @@ static void addFunctionPointerConversion(Sema &S,
SourceRange IntroducerRange,
CXXRecordDecl *Class,
CXXMethodDecl *CallOperator) {
+ // This conversion is explicitly disabled if the lambda's function has
+ // pass_object_size attributes on any of its parameters.
+ if (std::any_of(CallOperator->param_begin(), CallOperator->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>)))
+ return;
+
// Add the conversion to function pointer.
const FunctionProtoType *CallOpProto =
CallOperator->getType()->getAs<FunctionProtoType>();
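
A hypothetical example of the suppression above:

    // The call operator takes a pass_object_size parameter, so its address
    // cannot be taken; the conversion to function pointer would require
    // exactly that, hence it is no longer added to the closure type.
    auto L = [](char *p __attribute__((pass_object_size(0)))) {};
    // void (*fp)(char *) = L;  // would now fail: no viable conversion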
@@ -1700,8 +1688,7 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SC_None);
BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false,
/*Nested=*/false, /*Copy=*/Init.get());
- Block->setCaptures(Context, &Capture, &Capture + 1,
- /*CapturesCXXThis=*/false);
+ Block->setCaptures(Context, Capture, /*CapturesCXXThis=*/false);
// Add a fake function body to the block. IR generation is responsible
// for filling in the actual body, which cannot be expressed as an AST.
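
For reference, a sketch (hypothetical) of the three init-capture forms and the
VarDecl::InitializationStyle each one now records:

    auto a = [x = 1] {};  // LambdaCaptureInitKind::CopyInit   -> VarDecl::CInit
    auto b = [x(2)] {};   // LambdaCaptureInitKind::DirectInit -> VarDecl::CallInit
    auto c = [x{3}] {};   // LambdaCaptureInitKind::ListInit   -> VarDecl::ListInit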
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 2e7f89191580..481ae6cd55b1 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -11,6 +11,7 @@
// Objective-C++.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Sema/Lookup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -153,7 +154,7 @@ namespace {
// by its using directives, transitively) as if they appeared in
// the given effective context.
void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) {
- SmallVector<DeclContext*,4> queue;
+ SmallVector<DeclContext*, 4> queue;
while (true) {
for (auto UD : DC->using_directives()) {
DeclContext *NS = UD->getNominatedNamespace();
@@ -204,7 +205,7 @@ namespace {
UnqualUsingEntry::Comparator()));
}
};
-}
+} // end anonymous namespace
// Retrieve the set of identifier namespaces that correspond to a
// specific kind of name lookup.
@@ -354,13 +355,114 @@ static DeclContext *getContextForScopeMatching(Decl *D) {
return D->getDeclContext()->getRedeclContext();
}
+/// \brief Determine whether \p D is a better lookup result than \p Existing,
+/// given that they declare the same entity.
+static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
+ NamedDecl *D, NamedDecl *Existing) {
+ // When looking up redeclarations of a using declaration, prefer a using
+ // shadow declaration over any other declaration of the same entity.
+ if (Kind == Sema::LookupUsingDeclName && isa<UsingShadowDecl>(D) &&
+ !isa<UsingShadowDecl>(Existing))
+ return true;
+
+ auto *DUnderlying = D->getUnderlyingDecl();
+ auto *EUnderlying = Existing->getUnderlyingDecl();
+
+ // If they have different underlying declarations, prefer a typedef over the
+ // original type (this happens when two type declarations denote the same
+ // type), per a generous reading of C++ [dcl.typedef]p3 and p4. The typedef
+ // might carry additional semantic information, such as an alignment override.
+ // However, per C++ [dcl.typedef]p5, when looking up a tag name, prefer a tag
+ // declaration over a typedef.
+ if (DUnderlying->getCanonicalDecl() != EUnderlying->getCanonicalDecl()) {
+ assert(isa<TypeDecl>(DUnderlying) && isa<TypeDecl>(EUnderlying));
+ bool HaveTag = isa<TagDecl>(EUnderlying);
+ bool WantTag = Kind == Sema::LookupTagName;
+ return HaveTag != WantTag;
+ }
+
+ // Pick the function with more default arguments.
+ // FIXME: In the presence of ambiguous default arguments, we should keep both,
+ // so we can diagnose the ambiguity if the default argument is needed.
+ // See C++ [over.match.best]p3.
+ if (auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
+ auto *EFD = cast<FunctionDecl>(EUnderlying);
+ unsigned DMin = DFD->getMinRequiredArguments();
+ unsigned EMin = EFD->getMinRequiredArguments();
+ // If D has more default arguments, it is preferred.
+ if (DMin != EMin)
+ return DMin < EMin;
+ // FIXME: When we track visibility for default function arguments, check
+ // that we pick the declaration with more visible default arguments.
+ }
+
+ // Pick the template with more default template arguments.
+ if (auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
+ auto *ETD = cast<TemplateDecl>(EUnderlying);
+ unsigned DMin = DTD->getTemplateParameters()->getMinRequiredArguments();
+ unsigned EMin = ETD->getTemplateParameters()->getMinRequiredArguments();
+    // If D has more default arguments, it is preferred. Note that default
+    // arguments (and their visibility) increase monotonically across the
+    // redeclaration chain, so this is a quick proxy for "is more recent".
+ if (DMin != EMin)
+ return DMin < EMin;
+ // If D has more *visible* default arguments, it is preferred. Note, an
+ // earlier default argument being visible does not imply that a later
+ // default argument is visible, so we can't just check the first one.
+ for (unsigned I = DMin, N = DTD->getTemplateParameters()->size();
+ I != N; ++I) {
+ if (!S.hasVisibleDefaultArgument(
+ ETD->getTemplateParameters()->getParam(I)) &&
+ S.hasVisibleDefaultArgument(
+ DTD->getTemplateParameters()->getParam(I)))
+ return true;
+ }
+ }
+
+ // For most kinds of declaration, it doesn't really matter which one we pick.
+ if (!isa<FunctionDecl>(DUnderlying) && !isa<VarDecl>(DUnderlying)) {
+ // If the existing declaration is hidden, prefer the new one. Otherwise,
+ // keep what we've got.
+ return !S.isVisible(Existing);
+ }
+
+ // Pick the newer declaration; it might have a more precise type.
+ for (Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl())
+ if (Prev == EUnderlying)
+ return true;
+ return false;
+}
+
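
Two hypothetical redeclaration pairs illustrating the preferences encoded
above (relevant when the same entity is found along different paths, e.g. in
modules builds):

    // Same function seen twice: the declaration with more default arguments
    // is the preferred lookup result.
    void f(int a, int b);
    void f(int a, int b = 0);   // preferred

    // Same type via tag and typedef: ordinary lookup prefers the typedef,
    // while LookupTagName prefers the TagDecl, per [dcl.typedef]p5.
    struct X {};
    typedef struct X X;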
+/// Determine whether \p D can hide a tag declaration.
+static bool canHideTag(NamedDecl *D) {
+ // C++ [basic.scope.declarative]p4:
+ // Given a set of declarations in a single declarative region [...]
+ // exactly one declaration shall declare a class name or enumeration name
+ // that is not a typedef name and the other declarations shall all refer to
+ // the same variable or enumerator, or all refer to functions and function
+ // templates; in this case the class name or enumeration name is hidden.
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of a
+ // variable, data member, function, or enumerator declared in the same
+ // scope.
+ D = D->getUnderlyingDecl();
+ return isa<VarDecl>(D) || isa<EnumConstantDecl>(D) || isa<FunctionDecl>(D) ||
+ isa<FunctionTemplateDecl>(D) || isa<FieldDecl>(D);
+}
+
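
The classic case this rule covers (sketch, not part of the patch):

    struct stat { /* ... */ };
    extern "C" int stat(const char *path, struct stat *buf);
    // Ordinary lookup of 'stat' finds the function; the tag is hidden but
    // stays reachable through the elaborated-type-specifier 'struct stat'.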
/// Resolves the result kind of this lookup.
void LookupResult::resolveKind() {
unsigned N = Decls.size();
// Fast case: no possible ambiguity.
if (N == 0) {
- assert(ResultKind == NotFound || ResultKind == NotFoundInCurrentInstantiation);
+ assert(ResultKind == NotFound ||
+ ResultKind == NotFoundInCurrentInstantiation);
return;
}
@@ -378,12 +480,15 @@ void LookupResult::resolveKind() {
// Don't do any extra resolution if we've already resolved as ambiguous.
if (ResultKind == Ambiguous) return;
- llvm::SmallPtrSet<NamedDecl*, 16> Unique;
- llvm::SmallPtrSet<QualType, 16> UniqueTypes;
+ llvm::SmallDenseMap<NamedDecl*, unsigned, 16> Unique;
+ llvm::SmallDenseMap<QualType, unsigned, 16> UniqueTypes;
bool Ambiguous = false;
- bool HasTag = false, HasFunction = false, HasNonFunction = false;
+ bool HasTag = false, HasFunction = false;
bool HasFunctionTemplate = false, HasUnresolved = false;
+ NamedDecl *HasNonFunction = nullptr;
+
+ llvm::SmallVector<NamedDecl*, 4> EquivalentNonFunctions;
unsigned UniqueTagIndex = 0;
@@ -393,34 +498,43 @@ void LookupResult::resolveKind() {
D = cast<NamedDecl>(D->getCanonicalDecl());
// Ignore an invalid declaration unless it's the only one left.
- if (D->isInvalidDecl() && I < N-1) {
+ if (D->isInvalidDecl() && !(I == 0 && N == 1)) {
Decls[I] = Decls[--N];
continue;
}
+ llvm::Optional<unsigned> ExistingI;
+
// Redeclarations of types via typedef can occur both within a scope
// and, through using declarations and directives, across scopes. There is
// no ambiguity if they all refer to the same type, so unique based on the
// canonical type.
if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
- if (!TD->getDeclContext()->isRecord()) {
- QualType T = getSema().Context.getTypeDeclType(TD);
- if (!UniqueTypes.insert(getSema().Context.getCanonicalType(T)).second) {
- // The type is not unique; pull something off the back and continue
- // at this index.
- Decls[I] = Decls[--N];
- continue;
- }
+ QualType T = getSema().Context.getTypeDeclType(TD);
+ auto UniqueResult = UniqueTypes.insert(
+ std::make_pair(getSema().Context.getCanonicalType(T), I));
+ if (!UniqueResult.second) {
+ // The type is not unique.
+ ExistingI = UniqueResult.first->second;
+ }
+ }
+
+ // For non-type declarations, check for a prior lookup result naming this
+ // canonical declaration.
+ if (!ExistingI) {
+ auto UniqueResult = Unique.insert(std::make_pair(D, I));
+ if (!UniqueResult.second) {
+ // We've seen this entity before.
+ ExistingI = UniqueResult.first->second;
}
}
- if (!Unique.insert(D).second) {
- // If it's not unique, pull something off the back (and
- // continue at this index).
- // FIXME: This is wrong. We need to take the more recent declaration in
- // order to get the right type, default arguments, etc. We also need to
- // prefer visible declarations to hidden ones (for redeclaration lookup
- // in modules builds).
+ if (ExistingI) {
+ // This is not a unique lookup result. Pick one of the results and
+ // discard the other.
+ if (isPreferredLookupResult(getSema(), getLookupKind(), Decls[I],
+ Decls[*ExistingI]))
+ Decls[*ExistingI] = Decls[I];
Decls[I] = Decls[--N];
continue;
}
@@ -440,9 +554,21 @@ void LookupResult::resolveKind() {
} else if (isa<FunctionDecl>(D)) {
HasFunction = true;
} else {
- if (HasNonFunction)
+ if (HasNonFunction) {
+ // If we're about to create an ambiguity between two declarations that
+ // are equivalent, but one is an internal linkage declaration from one
+ // module and the other is an internal linkage declaration from another
+ // module, just skip it.
+ if (getSema().isEquivalentInternalLinkageDeclaration(HasNonFunction,
+ D)) {
+ EquivalentNonFunctions.push_back(D);
+ Decls[I] = Decls[--N];
+ continue;
+ }
+
Ambiguous = true;
- HasNonFunction = true;
+ }
+ HasNonFunction = D;
}
I++;
}
@@ -456,15 +582,24 @@ void LookupResult::resolveKind() {
// wherever the object, function, or enumerator name is visible.
// But it's still an error if there are distinct tag types found,
// even if they're not visible. (ref?)
- if (HideTags && HasTag && !Ambiguous &&
+ if (N > 1 && HideTags && HasTag && !Ambiguous &&
(HasFunction || HasNonFunction || HasUnresolved)) {
- if (getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
- getContextForScopeMatching(Decls[UniqueTagIndex ? 0 : N - 1])))
+ NamedDecl *OtherDecl = Decls[UniqueTagIndex ? 0 : N - 1];
+ if (isa<TagDecl>(Decls[UniqueTagIndex]->getUnderlyingDecl()) &&
+ getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
+ getContextForScopeMatching(OtherDecl)) &&
+ canHideTag(OtherDecl))
Decls[UniqueTagIndex] = Decls[--N];
else
Ambiguous = true;
}
+ // FIXME: This diagnostic should really be delayed until we're done with
+ // the lookup result, in case the ambiguity is resolved by the caller.
+ if (!EquivalentNonFunctions.empty() && !Ambiguous)
+ getSema().diagnoseEquivalentInternalLinkageDeclarations(
+ getNameLoc(), HasNonFunction, EquivalentNonFunctions);
+
Decls.set_size(N);
if (HasNonFunction && (HasFunction || HasUnresolved))
@@ -534,6 +669,11 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
R.addDecl(S.getASTContext().getFloat128StubType());
return true;
}
+ if (S.getLangOpts().CPlusPlus && NameKind == Sema::LookupOrdinaryName &&
+ II == S.getASTContext().getMakeIntegerSeqName()) {
+ R.addDecl(S.getASTContext().getMakeIntegerSeqDecl());
+ return true;
+ }
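
A sketch of the clang-specific builtin template this lookup exposes:

    template <class T, T... Ns> struct Seq {};
    // __make_integer_seq<Seq, int, 3> names Seq<int, 0, 1, 2>; the code above
    // makes the name visible to ordinary C++ name lookup.
    using S3 = __make_integer_seq<Seq, int, 3>;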
// If this is a builtin on this (or all) targets, create the decl.
if (unsigned BuiltinID = II->getBuiltinID()) {
@@ -875,7 +1015,7 @@ struct FindLocalExternScope {
LookupResult &R;
bool OldFindLocalExtern;
};
-}
+} // end anonymous namespace
bool Sema::CppLookupName(LookupResult &R, Scope *S) {
assert(getLangOpts().CPlusPlus && "Can perform only C++ lookup");
@@ -1333,22 +1473,22 @@ bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
/// your module can see, including those later on in your module).
bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
assert(D->isHidden() && "should not call this: not in slow case");
- Module *DeclModule = SemaRef.getOwningModule(D);
- if (!DeclModule) {
- // getOwningModule() may have decided the declaration should not be hidden.
- assert(!D->isHidden() && "hidden decl not from a module");
- return true;
- }
-
- // If the owning module is visible, and the decl is not module private,
- // then the decl is visible too. (Module private is ignored within the same
- // top-level module.)
- if (!D->isFromASTFile() || !D->isModulePrivate()) {
- if (SemaRef.isModuleVisible(DeclModule))
+ Module *DeclModule = nullptr;
+
+ if (SemaRef.getLangOpts().ModulesLocalVisibility) {
+ DeclModule = SemaRef.getOwningModule(D);
+ if (!DeclModule) {
+      // getOwningModule() may have decided the declaration should not be
+      // hidden.
+ assert(!D->isHidden() && "hidden decl not from a module");
return true;
- // Also check merged definitions.
- if (SemaRef.getLangOpts().ModulesLocalVisibility &&
- SemaRef.hasVisibleMergedDefinition(D))
+ }
+
+ // If the owning module is visible, and the decl is not module private,
+ // then the decl is visible too. (Module private is ignored within the same
+ // top-level module.)
+ if ((!D->isFromASTFile() || !D->isModulePrivate()) &&
+ (SemaRef.isModuleVisible(DeclModule) ||
+ SemaRef.hasVisibleMergedDefinition(D)))
return true;
}
@@ -1380,6 +1520,11 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
if (LookupModules.empty())
return false;
+ if (!DeclModule) {
+ DeclModule = SemaRef.getOwningModule(D);
+ assert(DeclModule && "hidden decl not from a module");
+ }
+
// If our lookup set contains the decl's module, it's visible.
if (LookupModules.count(DeclModule))
return true;
@@ -1390,18 +1535,22 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
// Check whether DeclModule is transitively exported to an import of
// the lookup set.
- for (llvm::DenseSet<Module *>::iterator I = LookupModules.begin(),
- E = LookupModules.end();
- I != E; ++I)
- if ((*I)->isModuleVisible(DeclModule))
- return true;
- return false;
+ return std::any_of(LookupModules.begin(), LookupModules.end(),
+ [&](Module *M) { return M->isModuleVisible(DeclModule); });
}
bool Sema::isVisibleSlow(const NamedDecl *D) {
return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D));
}
+bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
+ for (auto *D : R) {
+ if (isVisible(D))
+ return true;
+ }
+ return New->isExternallyVisible();
+}
+
/// \brief Retrieve the visible declaration corresponding to D, if any.
///
/// This routine determines whether the declaration D is visible in the current
@@ -1679,12 +1828,10 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
/// \brief Callback that looks for any member of a class with the given name.
static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
- CXXBasePath &Path,
- void *Name) {
+ CXXBasePath &Path, DeclarationName Name) {
RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
- DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
- Path.Decls = BaseRecord->lookup(N);
+ Path.Decls = BaseRecord->lookup(Name);
return !Path.Decls.empty();
}
@@ -1756,7 +1903,18 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
"Declaration context must already be complete!");
- // Perform qualified name lookup into the LookupCtx.
+ struct QualifiedLookupInScope {
+ bool oldVal;
+ DeclContext *Context;
+    // Set a flag in the DeclContext to inform the debugger that we're
+    // looking up a qualified name.
+ QualifiedLookupInScope(DeclContext *ctx) : Context(ctx) {
+ oldVal = ctx->setUseQualifiedLookup();
+ }
+ ~QualifiedLookupInScope() {
+ Context->setUseQualifiedLookup(oldVal);
+ }
+ } QL(LookupCtx);
+
if (LookupDirect(*this, R, LookupCtx)) {
R.resolveKind();
if (isa<CXXRecordDecl>(LookupCtx))
@@ -1802,7 +1960,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
Paths.setOrigin(LookupRec);
// Look for this member in our base classes
- CXXRecordDecl::BaseMatchesCallback *BaseCallback = nullptr;
+ bool (*BaseCallback)(const CXXBaseSpecifier *Specifier, CXXBasePath &Path,
+ DeclarationName Name) = nullptr;
switch (R.getLookupKind()) {
case LookupObjCImplicitSelfParam:
case LookupOrdinaryName:
@@ -1835,8 +1994,12 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
break;
}
- if (!LookupRec->lookupInBases(BaseCallback,
- R.getLookupName().getAsOpaquePtr(), Paths))
+ DeclarationName Name = R.getLookupName();
+ if (!LookupRec->lookupInBases(
+ [=](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return BaseCallback(Specifier, Path, Name);
+ },
+ Paths))
return false;
R.setNamingClass(LookupRec);
@@ -2013,17 +2176,30 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
///
/// @returns True if any decls were found (but possibly ambiguous)
bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
+ // The access-control rules we use here are essentially the rules for
+ // doing a lookup in Class that just magically skipped the direct
+ // members of Class itself. That is, the naming class is Class, and the
+ // access includes the access of the base.
for (const auto &BaseSpec : Class->bases()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(
BaseSpec.getType()->castAs<RecordType>()->getDecl());
LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
Result.setBaseObjectType(Context.getRecordType(Class));
LookupQualifiedName(Result, RD);
- for (auto *Decl : Result)
- R.addDecl(Decl);
+
+ // Copy the lookup results into the target, merging the base's access into
+ // the path access.
+ for (auto I = Result.begin(), E = Result.end(); I != E; ++I) {
+ R.addDecl(I.getDecl(),
+ CXXRecordDecl::MergeAccess(BaseSpec.getAccessSpecifier(),
+ I.getAccess()));
+ }
+
+ Result.suppressDiagnostics();
}
R.resolveKind();
+ R.setNamingClass(Class);
return !R.empty();
}
@@ -2075,7 +2251,7 @@ void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
case LookupResult::AmbiguousTagHiding: {
Diag(NameLoc, diag::err_ambiguous_tag_hiding) << Name << LookupRange;
- llvm::SmallPtrSet<NamedDecl*,8> TagDecls;
+ llvm::SmallPtrSet<NamedDecl*, 8> TagDecls;
for (auto *D : Result)
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -2121,7 +2297,7 @@ namespace {
Sema::AssociatedClassSet &Classes;
SourceLocation InstantiationLoc;
};
-}
+} // end anonymous namespace
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T);
@@ -2252,7 +2428,8 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
}
// Only recurse into base classes for complete types.
- if (!Class->hasDefinition())
+ if (!Result.S.isCompleteType(Result.InstantiationLoc,
+ Result.S.Context.getRecordType(Class)))
return;
// Add direct and indirect base classes along with their associated
@@ -2345,10 +2522,8 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// classes. Its associated namespaces are the namespaces in
// which its associated classes are defined.
case Type::Record: {
- Result.S.RequireCompleteType(Result.InstantiationLoc, QualType(T, 0),
- /*no diagnostic*/ 0);
- CXXRecordDecl *Class
- = cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ CXXRecordDecl *Class =
+ cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
addAssociatedClassesAndNamespaces(Result, Class);
break;
}
@@ -3032,7 +3207,8 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
for (Decl *DI = D; DI; DI = DI->getPreviousDecl()) {
DeclContext *LexDC = DI->getLexicalDeclContext();
if (isa<CXXRecordDecl>(LexDC) &&
- AssociatedClasses.count(cast<CXXRecordDecl>(LexDC))) {
+ AssociatedClasses.count(cast<CXXRecordDecl>(LexDC)) &&
+ isVisible(cast<NamedDecl>(DI))) {
DeclaredInAssociatedClass = true;
break;
}
@@ -3129,9 +3305,6 @@ public:
} // end anonymous namespace
NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
- // Look through using declarations.
- ND = ND->getUnderlyingDecl();
-
unsigned IDNS = ND->getIdentifierNamespace();
std::list<ShadowMap>::reverse_iterator SM = ShadowMaps.rbegin();
for (std::list<ShadowMap>::reverse_iterator SMEnd = ShadowMaps.rend();
@@ -3704,7 +3877,12 @@ void TypoCorrectionConsumer::addNamespaces(
if (const Type *T = NNS->getAsType())
SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
}
- for (const auto *TI : SemaRef.getASTContext().types()) {
+ // Do not transform this into an iterator-based loop. The loop body can
+ // trigger the creation of further types (through lazy deserialization) and
+ // invalidate iterators into this list.
+ auto &Types = SemaRef.getASTContext().getTypes();
+ for (unsigned I = 0; I != Types.size(); ++I) {
+ const auto *TI = Types[I];
if (CXXRecordDecl *CD = TI->getAsCXXRecordDecl()) {
CD = CD->getCanonicalDecl();
if (!CD->isDependentType() && !CD->isAnonymousStructOrUnion() &&
@@ -3805,7 +3983,8 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
// current correction candidate is the name of that class, then skip
// it as it is unlikely a qualified version of the class' constructor
// is an appropriate correction.
- if (CXXRecordDecl *NSDecl = NSType ? NSType->getAsCXXRecordDecl() : 0) {
+ if (CXXRecordDecl *NSDecl = NSType ? NSType->getAsCXXRecordDecl() :
+ nullptr) {
if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
continue;
}
@@ -4540,7 +4719,7 @@ void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
if (isKeyword())
CorrectionDecls.clear();
- CorrectionDecls.push_back(CDecl->getUnderlyingDecl());
+ CorrectionDecls.push_back(CDecl);
if (!CorrectionName)
CorrectionName = CDecl->getDeclName();
@@ -4769,7 +4948,7 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
// Maybe we're just missing a module import.
if (Correction.requiresImport()) {
- NamedDecl *Decl = Correction.getCorrectionDecl();
+ NamedDecl *Decl = Correction.getFoundDecl();
assert(Decl && "import required but no declaration to import");
diagnoseMissingImport(Correction.getCorrectionRange().getBegin(), Decl,
@@ -4781,7 +4960,7 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
<< CorrectedQuotedStr << (ErrorRecovery ? FixTypo : FixItHint());
NamedDecl *ChosenDecl =
- Correction.isKeyword() ? nullptr : Correction.getCorrectionDecl();
+ Correction.isKeyword() ? nullptr : Correction.getFoundDecl();
if (PrevNote.getDiagID() && ChosenDecl)
Diag(ChosenDecl->getLocation(), PrevNote)
<< CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo);
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index f139c83c734b..1cb84e448067 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -61,8 +61,10 @@ static Qualifiers::ObjCLifetime getImpliedARCOwnership(
return Qualifiers::OCL_None;
}
-/// Check the internal consistency of a property declaration.
-static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
+/// Check the internal consistency of a property declaration with
+/// an explicit ownership qualifier.
+static void checkPropertyDeclWithOwnership(Sema &S,
+ ObjCPropertyDecl *property) {
if (property->isInvalidDecl()) return;
ObjCPropertyDecl::PropertyAttributeKind propertyKind
@@ -70,8 +72,7 @@ static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
Qualifiers::ObjCLifetime propertyLifetime
= property->getType().getObjCLifetime();
- // Nothing to do if we don't have a lifetime.
- if (propertyLifetime == Qualifiers::OCL_None) return;
+ assert(propertyLifetime != Qualifiers::OCL_None);
Qualifiers::ObjCLifetime expectedLifetime
= getImpliedARCOwnership(propertyKind, property->getType());
@@ -127,32 +128,71 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
CheckPropertyAgainstProtocol(S, Prop, P, Known);
}
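+/// Deduce an ownership property attribute from the property's type: the GC
+/// __weak qualifier in GC mode, or an explicit ownership qualifier in
+/// ARC/MRC.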
+static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
+ // In GC mode, just look for the __weak qualifier.
+ if (S.getLangOpts().getGC() != LangOptions::NonGC) {
+ if (T.isObjCGCWeak()) return ObjCDeclSpec::DQ_PR_weak;
+
+ // In ARC/MRC, map an explicit ownership qualifier on the property's
+ // type to the corresponding property attribute.
+ } else if (auto ownership = T.getObjCLifetime()) {
+ switch (ownership) {
+ case Qualifiers::OCL_Weak:
+ return ObjCDeclSpec::DQ_PR_weak;
+ case Qualifiers::OCL_Strong:
+ return ObjCDeclSpec::DQ_PR_strong;
+ case Qualifiers::OCL_ExplicitNone:
+ return ObjCDeclSpec::DQ_PR_unsafe_unretained;
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_None:
+ return 0;
+ }
+ llvm_unreachable("bad qualifier");
+ }
+
+ return 0;
+}
+
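+/// The property attributes that determine ownership semantics.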
+static const unsigned OwnershipMask =
+ (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_weak |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
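+/// Return the ownership-specifying subset of the given property attributes.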
+static unsigned getOwnershipRule(unsigned attr) {
+ unsigned result = attr & OwnershipMask;
+
+ // From an ownership perspective, assign and unsafe_unretained are
+ // identical; make sure one also implies the other.
+ if (result & (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained)) {
+ result |= ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ }
+
+ return result;
+}
+
Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
ObjCDeclSpec &ODS,
Selector GetterSel,
Selector SetterSel,
- bool *isOverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
FD.D.setObjCWeakProperty((Attributes & ObjCDeclSpec::DQ_PR_weak) != 0);
TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
QualType T = TSI->getType();
- Attributes |= deduceWeakPropertyFromType(T);
+ if (!getOwnershipRule(Attributes)) {
+ Attributes |= deducePropertyOwnershipFromType(*this, T);
+ }
bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
// default is readwrite!
!(Attributes & ObjCDeclSpec::DQ_PR_readonly));
- // property is defaulted to 'assign' if it is readwrite and is
- // not retain or copy
- bool isAssign = ((Attributes & ObjCDeclSpec::DQ_PR_assign) ||
- (isReadWrite &&
- !(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_copy) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_weak)));
// Proceed with constructing the ObjCPropertyDecls.
ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
@@ -161,11 +201,10 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
if (CDecl->IsClassExtension()) {
Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc,
FD, GetterSel, SetterSel,
- isAssign, isReadWrite,
+ isReadWrite,
Attributes,
ODS.getPropertyAttributes(),
- isOverridingProperty, T, TSI,
- MethodImplKind);
+ T, TSI, MethodImplKind);
if (!Res)
return nullptr;
}
@@ -173,7 +212,7 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
if (!Res) {
Res = CreatePropertyDecl(S, ClassDecl, AtLoc, LParenLoc, FD,
- GetterSel, SetterSel, isAssign, isReadWrite,
+ GetterSel, SetterSel, isReadWrite,
Attributes, ODS.getPropertyAttributes(),
T, TSI, MethodImplKind);
if (lexicalDC)
@@ -181,12 +220,13 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
}
// Validate the attributes on the @property.
- CheckObjCPropertyAttributes(Res, AtLoc, Attributes,
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes,
(isa<ObjCInterfaceDecl>(ClassDecl) ||
isa<ObjCProtocolDecl>(ClassDecl)));
- if (getLangOpts().ObjCAutoRefCount)
- checkARCPropertyDecl(*this, Res);
+ // Check consistency if the type has explicit ownership qualification.
+ if (Res->getType().getObjCLifetime())
+ checkPropertyDeclWithOwnership(*this, Res);
llvm::SmallPtrSet<ObjCProtocolDecl *, 16> KnownProtos;
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
@@ -220,8 +260,12 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
}
}
} else if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
- for (auto *P : Cat->protocols())
- CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ // We don't check class extensions, because properties in class extensions
+ // are meant to override some of the attributes, and the checking has
+ // already been done when the property in the class extension is constructed.
+ if (!Cat->IsClassExtension())
+ for (auto *P : Cat->protocols())
+ CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
} else {
ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(ClassDecl);
for (auto *P : Proto->protocols())
@@ -293,13 +337,73 @@ static bool LocPropertyAttribute( ASTContext &Context, const char *attrName,
}
-static unsigned getOwnershipRule(unsigned attr) {
- return attr & (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_weak |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+/// Check for a mismatch in the atomicity of the given properties.
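+/// If \p PropagateAtomicity is set and the new property did not write an
+/// explicit atomicity, the old property's atomicity is adopted instead of
+/// diagnosing a conflict.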
+static void checkAtomicPropertyMismatch(Sema &S,
+ ObjCPropertyDecl *OldProperty,
+ ObjCPropertyDecl *NewProperty,
+ bool PropagateAtomicity) {
+ // If the atomicity of both matches, we're done.
+ bool OldIsAtomic =
+ (OldProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ == 0;
+ bool NewIsAtomic =
+ (NewProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ == 0;
+ if (OldIsAtomic == NewIsAtomic) return;
+
+ // Determine whether the given property is readonly and implicitly
+ // atomic.
+ auto isImplicitlyReadonlyAtomic = [](ObjCPropertyDecl *Property) -> bool {
+ // Is it readonly?
+ auto Attrs = Property->getPropertyAttributes();
+ if ((Attrs & ObjCPropertyDecl::OBJC_PR_readonly) == 0) return false;
+
+ // Is it nonatomic?
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) return false;
+
+ // Was 'atomic' specified directly?
+ if (Property->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_atomic)
+ return false;
+
+ return true;
+ };
+
+ // If we're allowed to propagate atomicity, and the new property did
+ // not specify atomicity at all, propagate.
+ const unsigned AtomicityMask =
+ (ObjCPropertyDecl::OBJC_PR_atomic | ObjCPropertyDecl::OBJC_PR_nonatomic);
+ if (PropagateAtomicity &&
+ ((NewProperty->getPropertyAttributesAsWritten() & AtomicityMask) == 0)) {
+ unsigned Attrs = NewProperty->getPropertyAttributes();
+ Attrs = Attrs & ~AtomicityMask;
+ if (OldIsAtomic)
+ Attrs |= ObjCPropertyDecl::OBJC_PR_atomic;
+ else
+ Attrs |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+
+ NewProperty->overwritePropertyAttributes(Attrs);
+ return;
+ }
+
+ // One of the properties is atomic; if it's a readonly property, and
+ // 'atomic' wasn't explicitly specified, we're okay.
+ if ((OldIsAtomic && isImplicitlyReadonlyAtomic(OldProperty)) ||
+ (NewIsAtomic && isImplicitlyReadonlyAtomic(NewProperty)))
+ return;
+
+ // Diagnose the conflict.
+ const IdentifierInfo *OldContextName;
+ auto *OldDC = OldProperty->getDeclContext();
+ if (auto Category = dyn_cast<ObjCCategoryDecl>(OldDC))
+ OldContextName = Category->getClassInterface()->getIdentifier();
+ else
+ OldContextName = cast<ObjCContainerDecl>(OldDC)->getIdentifier();
+
+ S.Diag(NewProperty->getLocation(), diag::warn_property_attribute)
+ << NewProperty->getDeclName() << "atomic"
+ << OldContextName;
+ S.Diag(OldProperty->getLocation(), diag::note_property_declare);
}
ObjCPropertyDecl *
@@ -308,11 +412,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel, Selector SetterSel,
- const bool isAssign,
const bool isReadWrite,
- const unsigned Attributes,
+ unsigned &Attributes,
const unsigned AttributesAsWritten,
- bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind) {
@@ -322,80 +424,102 @@ Sema::HandlePropertyInClassExtension(Scope *S,
IdentifierInfo *PropertyId = FD.D.getIdentifier();
ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
- if (CCPrimary) {
- // Check for duplicate declaration of this property in current and
- // other class extensions.
- for (const auto *Ext : CCPrimary->known_extensions()) {
- if (ObjCPropertyDecl *prevDecl
- = ObjCPropertyDecl::findPropertyDecl(Ext, PropertyId)) {
- Diag(AtLoc, diag::err_duplicate_property);
- Diag(prevDecl->getLocation(), diag::note_property_declare);
- return nullptr;
- }
- }
- }
-
- // Create a new ObjCPropertyDecl with the DeclContext being
- // the class extension.
- // FIXME. We should really be using CreatePropertyDecl for this.
- ObjCPropertyDecl *PDecl =
- ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(),
- PropertyId, AtLoc, LParenLoc, T, TSI);
- PDecl->setPropertyAttributesAsWritten(
- makePropertyAttributesAsWritten(AttributesAsWritten));
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
- if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
- if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
- if (Attributes & ObjCDeclSpec::DQ_PR_nullability)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability);
- if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable);
-
- // Set setter/getter selector name. Needed later.
- PDecl->setGetterName(GetterSel);
- PDecl->setSetterName(SetterSel);
- ProcessDeclAttributes(S, PDecl, FD.D);
- DC->addDecl(PDecl);
-
// We need to look in the @interface to see if the @property was
// already declared.
if (!CCPrimary) {
Diag(CDecl->getLocation(), diag::err_continuation_class);
- *isOverridingProperty = true;
return nullptr;
}
- // Find the property in continuation class's primary class only.
+ // Find the property in the extended class's primary class or
+ // extensions.
ObjCPropertyDecl *PIDecl =
CCPrimary->FindPropertyVisibleInPrimaryClass(PropertyId);
+ // If we found a property in an extension, complain.
+ if (PIDecl && isa<ObjCCategoryDecl>(PIDecl->getDeclContext())) {
+ Diag(AtLoc, diag::err_duplicate_property);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+
+ // Check for consistency with the previous declaration, if there is one.
+ if (PIDecl) {
+ // A readonly property declared in the primary class can be refined
+ // by adding a readwrite property within an extension.
+ // Anything else is an error.
+ if (!(PIDecl->isReadOnly() && isReadWrite)) {
+ // Tailor the diagnostics for the common case where a readwrite
+ // property is declared both in the @interface and the continuation.
+ // This is a common error where the user often intended the original
+ // declaration to be readonly.
+ unsigned diag =
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
+ (PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
+ Diag(AtLoc, diag)
+ << CCPrimary->getDeclName();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ return nullptr;
+ }
+
+ // Check for consistency of getters.
+ if (PIDecl->getGetterName() != GetterSel) {
+ // If the getter was written explicitly, complain.
+ if (AttributesAsWritten & ObjCDeclSpec::DQ_PR_getter) {
+ Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
+ << PIDecl->getGetterName() << GetterSel;
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+
+ // Always adopt the getter from the original declaration.
+ GetterSel = PIDecl->getGetterName();
+ Attributes |= ObjCDeclSpec::DQ_PR_getter;
+ }
+
+ // Check consistency of ownership.
+ unsigned ExistingOwnership
+ = getOwnershipRule(PIDecl->getPropertyAttributes());
+ unsigned NewOwnership = getOwnershipRule(Attributes);
+ if (ExistingOwnership && NewOwnership != ExistingOwnership) {
+ // If the ownership was written explicitly, complain.
+ if (getOwnershipRule(AttributesAsWritten)) {
+ Diag(AtLoc, diag::warn_property_attr_mismatch);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+
+ // Take the ownership from the original property.
+ Attributes = (Attributes & ~OwnershipMask) | ExistingOwnership;
+ }
+
+ // If the redeclaration is 'weak' but the original property is not, warn.
+ if ((Attributes & ObjCPropertyDecl::OBJC_PR_weak) &&
+ !(PIDecl->getPropertyAttributesAsWritten()
+ & ObjCPropertyDecl::OBJC_PR_weak) &&
+ PIDecl->getType()->getAs<ObjCObjectPointerType>() &&
+ PIDecl->getType().getObjCLifetime() == Qualifiers::OCL_None) {
+ Diag(AtLoc, diag::warn_property_implicitly_mismatched);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+ }
+
+ // Create a new ObjCPropertyDecl with the DeclContext being
+ // the class extension.
+ ObjCPropertyDecl *PDecl = CreatePropertyDecl(S, CDecl, AtLoc, LParenLoc,
+ FD, GetterSel, SetterSel,
+ isReadWrite,
+ Attributes, AttributesAsWritten,
+ T, TSI, MethodImplKind, DC);
+
+ // If there was no declaration of a property with the same name in
+ // the primary class, we're done.
if (!PIDecl) {
- // No matching property found in the primary class. Just fall thru
- // and add property to continuation class's primary class.
- ObjCPropertyDecl *PrimaryPDecl =
- CreatePropertyDecl(S, CCPrimary, AtLoc, LParenLoc,
- FD, GetterSel, SetterSel, isAssign, isReadWrite,
- Attributes,AttributesAsWritten, T, TSI, MethodImplKind,
- DC);
-
- // A case of continuation class adding a new property in the class. This
- // is not what it was meant for. However, gcc supports it and so should we.
- // Make sure setter/getters are declared here.
- ProcessPropertyDecl(PrimaryPDecl, CCPrimary,
- /* redeclaredProperty = */ nullptr,
- /* lexicalDC = */ CDecl);
- PDecl->setGetterMethodDecl(PrimaryPDecl->getGetterMethodDecl());
- PDecl->setSetterMethodDecl(PrimaryPDecl->getSetterMethodDecl());
- if (ASTMutationListener *L = Context.getASTMutationListener())
- L->AddedObjCPropertyInClassExtension(PrimaryPDecl, /*OrigProp=*/nullptr,
- CDecl);
- return PrimaryPDecl;
+ ProcessPropertyDecl(PDecl);
+ return PDecl;
}
+
if (!Context.hasSameType(PIDecl->getType(), PDecl->getType())) {
bool IncompatibleObjC = false;
QualType ConvertedType;
@@ -418,103 +542,13 @@ Sema::HandlePropertyInClassExtension(Scope *S,
return nullptr;
}
}
-
- // The property 'PIDecl's readonly attribute will be over-ridden
- // with continuation class's readwrite property attribute!
- unsigned PIkind = PIDecl->getPropertyAttributesAsWritten();
- if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
- PIkind &= ~ObjCPropertyDecl::OBJC_PR_readonly;
- PIkind |= ObjCPropertyDecl::OBJC_PR_readwrite;
- PIkind |= deduceWeakPropertyFromType(PIDecl->getType());
- unsigned ClassExtensionMemoryModel = getOwnershipRule(Attributes);
- unsigned PrimaryClassMemoryModel = getOwnershipRule(PIkind);
- if (PrimaryClassMemoryModel && ClassExtensionMemoryModel &&
- (PrimaryClassMemoryModel != ClassExtensionMemoryModel)) {
- Diag(AtLoc, diag::warn_property_attr_mismatch);
- Diag(PIDecl->getLocation(), diag::note_property_declare);
- }
- else if (getLangOpts().ObjCAutoRefCount) {
- QualType PrimaryPropertyQT =
- Context.getCanonicalType(PIDecl->getType()).getUnqualifiedType();
- if (isa<ObjCObjectPointerType>(PrimaryPropertyQT)) {
- bool PropertyIsWeak = ((PIkind & ObjCPropertyDecl::OBJC_PR_weak) != 0);
- Qualifiers::ObjCLifetime PrimaryPropertyLifeTime =
- PrimaryPropertyQT.getObjCLifetime();
- if (PrimaryPropertyLifeTime == Qualifiers::OCL_None &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak) &&
- !PropertyIsWeak) {
- Diag(AtLoc, diag::warn_property_implicitly_mismatched);
- Diag(PIDecl->getLocation(), diag::note_property_declare);
- }
- }
- }
-
- DeclContext *DC = cast<DeclContext>(CCPrimary);
- if (!ObjCPropertyDecl::findPropertyDecl(DC,
- PIDecl->getDeclName().getAsIdentifierInfo())) {
- // In mrr mode, 'readwrite' property must have an explicit
- // memory attribute. If none specified, select the default (assign).
- if (!getLangOpts().ObjCAutoRefCount) {
- if (!(PIkind & (ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong |
- ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_weak)))
- PIkind |= ObjCPropertyDecl::OBJC_PR_assign;
- }
-
- // Protocol is not in the primary class. Must build one for it.
- ObjCDeclSpec ProtocolPropertyODS;
- // FIXME. Assuming that ObjCDeclSpec::ObjCPropertyAttributeKind
- // and ObjCPropertyDecl::PropertyAttributeKind have identical
- // values. Should consolidate both into one enum type.
- ProtocolPropertyODS.
- setPropertyAttributes((ObjCDeclSpec::ObjCPropertyAttributeKind)
- PIkind);
- // Must re-establish the context from class extension to primary
- // class context.
- ContextRAII SavedContext(*this, CCPrimary);
-
- Decl *ProtocolPtrTy =
- ActOnProperty(S, AtLoc, LParenLoc, FD, ProtocolPropertyODS,
- PIDecl->getGetterName(),
- PIDecl->getSetterName(),
- isOverridingProperty,
- MethodImplKind,
- /* lexicalDC = */ CDecl);
- PIDecl = cast<ObjCPropertyDecl>(ProtocolPtrTy);
- }
- PIDecl->makeitReadWriteAttribute();
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
- PIDecl->setSetterName(SetterSel);
- } else {
- // Tailor the diagnostics for the common case where a readwrite
- // property is declared both in the @interface and the continuation.
- // This is a common error where the user often intended the original
- // declaration to be readonly.
- unsigned diag =
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
- (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite)
- ? diag::err_use_continuation_class_redeclaration_readwrite
- : diag::err_use_continuation_class;
- Diag(AtLoc, diag)
- << CCPrimary->getDeclName();
- Diag(PIDecl->getLocation(), diag::note_property_declare);
- return nullptr;
- }
- *isOverridingProperty = true;
- // Make sure setter decl is synthesized, and added to primary class's list.
- ProcessPropertyDecl(PIDecl, CCPrimary, PDecl, CDecl);
- PDecl->setGetterMethodDecl(PIDecl->getGetterMethodDecl());
- PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl());
- if (ASTMutationListener *L = Context.getASTMutationListener())
- L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl);
+
+ // Check that atomicity of property in class extension matches the previous
+ // declaration.
+ checkAtomicPropertyMismatch(*this, PIDecl, PDecl, true);
+
+ // Make sure getter/setter are appropriately synthesized.
+ ProcessPropertyDecl(PDecl);
return PDecl;
}
@@ -525,7 +559,6 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
- const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
@@ -535,10 +568,23 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
DeclContext *lexicalDC){
IdentifierInfo *PropertyId = FD.D.getIdentifier();
- // Issue a warning if property is 'assign' as default and its object, which is
- // gc'able conforms to NSCopying protocol
+ // Property defaults to 'assign' if it is readwrite, unless this is ARC
+ // and the type is retainable.
+ bool isAssign;
+ if (Attributes & (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained)) {
+ isAssign = true;
+ } else if (getOwnershipRule(Attributes) || !isReadWrite) {
+ isAssign = false;
+ } else {
+ isAssign = (!getLangOpts().ObjCAutoRefCount ||
+ !T->isObjCRetainableType());
+ }
+
+ // Issue a warning if the property defaults to 'assign' and its object
+ // type, which is gc'able, conforms to the NSCopying protocol.
if (getLangOpts().getGC() != LangOptions::NonGC &&
- isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign))
+ isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign)) {
if (const ObjCObjectPointerType *ObjPtrTy =
T->getAs<ObjCObjectPointerType>()) {
ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
@@ -548,6 +594,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
if (IDecl->ClassImplementsProtocol(PNSCopying, true))
Diag(AtLoc, diag::warn_implements_nscopying) << PropertyId;
}
+ }
if (T->isObjCObjectType()) {
SourceLocation StarLoc = TInfo->getTypeLoc().getLocEnd();
@@ -663,8 +710,10 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
// We're fine if they match.
if (propertyLifetime == ivarLifetime) return;
- // These aren't valid lifetimes for object ivars; don't diagnose twice.
- if (ivarLifetime == Qualifiers::OCL_None ||
+ // None isn't a valid lifetime for an object ivar in ARC, and
+ // __autoreleasing is never valid; don't diagnose twice.
+ if ((ivarLifetime == Qualifiers::OCL_None &&
+ S.getLangOpts().ObjCAutoRefCount) ||
ivarLifetime == Qualifiers::OCL_Autoreleasing)
return;
@@ -797,6 +846,38 @@ DiagnosePropertyMismatchDeclInProtocols(Sema &S, SourceLocation AtLoc,
S.Diag(AtLoc, diag::note_property_synthesize);
}
+/// Determine whether any storage attributes were written on the property.
+static bool hasWrittenStorageAttribute(ObjCPropertyDecl *Prop) {
+ if (Prop->getPropertyAttributesAsWritten() & OwnershipMask) return true;
+
+ // If this is a readwrite property in a class extension that refines
+ // a readonly property in the original class definition, check it as
+ // well.
+
+ // If it's a readonly property, we're not interested.
+ if (Prop->isReadOnly()) return false;
+
+ // Is it declared in an extension?
+ auto Category = dyn_cast<ObjCCategoryDecl>(Prop->getDeclContext());
+ if (!Category || !Category->IsClassExtension()) return false;
+
+ // Find the corresponding property in the primary class definition.
+ auto OrigClass = Category->getClassInterface();
+ for (auto Found : OrigClass->lookup(Prop->getDeclName())) {
+ if (ObjCPropertyDecl *OrigProp = dyn_cast<ObjCPropertyDecl>(Found))
+ return OrigProp->getPropertyAttributesAsWritten() & OwnershipMask;
+ }
+
+ // Look through all of the protocols.
+ for (const auto *Proto : OrigClass->all_referenced_protocols()) {
+ if (ObjCPropertyDecl *OrigProp =
+ Proto->FindPropertyDeclaration(Prop->getIdentifier()))
+ return OrigProp->getPropertyAttributesAsWritten() & OwnershipMask;
+ }
+
+ return false;
+}
+
/// ActOnPropertyImplDecl - This routine performs semantic checks and
/// builds the AST node for a property implementation declaration; declared
/// as \@synthesize or \@dynamic.
@@ -953,18 +1034,49 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ObjCPropertyDecl::PropertyAttributeKind kind
= property->getPropertyAttributes();
- // Add GC __weak to the ivar type if the property is weak.
- if ((kind & ObjCPropertyDecl::OBJC_PR_weak) &&
- getLangOpts().getGC() != LangOptions::NonGC) {
- assert(!getLangOpts().ObjCAutoRefCount);
- if (PropertyIvarType.isObjCGCStrong()) {
- Diag(PropertyDiagLoc, diag::err_gc_weak_property_strong_type);
- Diag(property->getLocation(), diag::note_property_declare);
+ bool isARCWeak = false;
+ if (kind & ObjCPropertyDecl::OBJC_PR_weak) {
+ // Add GC __weak to the ivar type if the property is weak.
+ if (getLangOpts().getGC() != LangOptions::NonGC) {
+ assert(!getLangOpts().ObjCAutoRefCount);
+ if (PropertyIvarType.isObjCGCStrong()) {
+ Diag(PropertyDiagLoc, diag::err_gc_weak_property_strong_type);
+ Diag(property->getLocation(), diag::note_property_declare);
+ } else {
+ PropertyIvarType =
+ Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak);
+ }
+
+ // Otherwise, check whether ARC __weak is enabled and works with
+ // the property type.
} else {
- PropertyIvarType =
- Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak);
+ if (!getLangOpts().ObjCWeak) {
+ // Only complain here when synthesizing an ivar.
+ if (!Ivar) {
+ Diag(PropertyDiagLoc,
+ getLangOpts().ObjCWeakRuntime
+ ? diag::err_synthesizing_arc_weak_property_disabled
+ : diag::err_synthesizing_arc_weak_property_no_runtime);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ CompleteTypeErr = true; // suppress later diagnostics about the ivar
+ } else {
+ isARCWeak = true;
+ if (const ObjCObjectPointerType *ObjT =
+ PropertyIvarType->getAs<ObjCObjectPointerType>()) {
+ const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl();
+ if (ObjI && ObjI->isArcWeakrefUnavailable()) {
+ Diag(property->getLocation(),
+ diag::err_arc_weak_unavailable_property)
+ << PropertyIvarType;
+ Diag(ClassImpDecl->getLocation(), diag::note_implemented_by_class)
+ << ClassImpDecl->getName();
+ }
+ }
+ }
}
}
+
if (AtLoc.isInvalid()) {
// Check when default synthesizing a property that there is
// an ivar matching property name and issue warning; since this
@@ -987,13 +1099,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!Ivar) {
// In ARC, give the ivar a lifetime qualifier based on the
// property attributes.
- if (getLangOpts().ObjCAutoRefCount &&
+ if ((getLangOpts().ObjCAutoRefCount || isARCWeak) &&
!PropertyIvarType.getObjCLifetime() &&
PropertyIvarType->isObjCRetainableType()) {
// It's an error if we have to do this and the user didn't
// explicitly write an ownership attribute on the property.
- if (!property->hasWrittenStorageAttribute() &&
+ if (!hasWrittenStorageAttribute(property) &&
!(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
Diag(PropertyDiagLoc,
diag::err_arc_objc_property_default_assign_on_object);
@@ -1002,24 +1114,6 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Qualifiers::ObjCLifetime lifetime =
getImpliedARCOwnership(kind, PropertyIvarType);
assert(lifetime && "no lifetime for property?");
- if (lifetime == Qualifiers::OCL_Weak) {
- bool err = false;
- if (const ObjCObjectPointerType *ObjT =
- PropertyIvarType->getAs<ObjCObjectPointerType>()) {
- const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl();
- if (ObjI && ObjI->isArcWeakrefUnavailable()) {
- Diag(property->getLocation(),
- diag::err_arc_weak_unavailable_property) << PropertyIvarType;
- Diag(ClassImpDecl->getLocation(), diag::note_implemented_by_class)
- << ClassImpDecl->getName();
- err = true;
- }
- }
- if (!err && !getLangOpts().ObjCARCWeak) {
- Diag(PropertyDiagLoc, diag::err_arc_weak_no_runtime);
- Diag(property->getLocation(), diag::note_property_declare);
- }
- }
Qualifiers qs;
qs.addObjCLifetime(lifetime);
@@ -1027,13 +1121,6 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
}
- if (kind & ObjCPropertyDecl::OBJC_PR_weak &&
- !getLangOpts().ObjCAutoRefCount &&
- getLangOpts().getGC() == LangOptions::NonGC) {
- Diag(PropertyDiagLoc, diag::error_synthesize_weak_non_arc_or_gc);
- Diag(property->getLocation(), diag::note_property_declare);
- }
-
Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
PropertyIvarLoc,PropertyIvarLoc, PropertyIvar,
PropertyIvarType, /*Dinfo=*/nullptr,
@@ -1121,7 +1208,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Fall thru - see previous comment
}
}
- if (getLangOpts().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount || isARCWeak ||
+ Ivar->getType().getObjCLifetime())
checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
@@ -1349,12 +1437,10 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
}
}
- if ((CAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)
- != (SAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
- Diag(Property->getLocation(), diag::warn_property_attribute)
- << Property->getDeclName() << "atomic" << inheritedName;
- Diag(SuperProperty->getLocation(), diag::note_property_declare);
- }
+ // Check for nonatomic; note that nonatomic is effectively
+ // meaningless for readonly properties, so don't diagnose if the
+ // atomic property is 'readonly'.
+ checkAtomicPropertyMismatch(*this, SuperProperty, Property, false);
if (Property->getSetterName() != SuperProperty->getSetterName()) {
Diag(Property->getLocation(), diag::warn_property_attribute)
<< Property->getDeclName() << "setter" << inheritedName;
@@ -1395,12 +1481,11 @@ bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
QualType PropertyIvarType = property->getType().getNonReferenceType();
bool compat = Context.hasSameType(PropertyIvarType, GetterType);
if (!compat) {
- if (isa<ObjCObjectPointerType>(PropertyIvarType) &&
- isa<ObjCObjectPointerType>(GetterType))
- compat =
- Context.canAssignObjCInterfaces(
- GetterType->getAs<ObjCObjectPointerType>(),
- PropertyIvarType->getAs<ObjCObjectPointerType>());
+ const ObjCObjectPointerType *propertyObjCPtr = nullptr;
+ const ObjCObjectPointerType *getterObjCPtr = nullptr;
+ if ((propertyObjCPtr = PropertyIvarType->getAs<ObjCObjectPointerType>()) &&
+ (getterObjCPtr = GetterType->getAs<ObjCObjectPointerType>()))
+ compat = Context.canAssignObjCInterfaces(getterObjCPtr, propertyObjCPtr);
else if (CheckAssignmentConstraints(Loc, GetterType, PropertyIvarType)
!= Compatible) {
Diag(Loc, diag::error_property_accessor_type)
@@ -1438,6 +1523,11 @@ static void CollectImmediateProperties(ObjCContainerDecl *CDecl,
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (auto *Prop : IDecl->properties())
PropMap[Prop->getIdentifier()] = Prop;
+
+ // Collect the properties from visible extensions.
+ for (auto *Ext : IDecl->visible_extensions())
+ CollectImmediateProperties(Ext, PropMap, SuperPropMap, IncludeProtocols);
+
if (IncludeProtocols) {
// Scan through class's protocols.
for (auto *PI : IDecl->all_referenced_protocols())
@@ -1445,9 +1535,8 @@ static void CollectImmediateProperties(ObjCContainerDecl *CDecl,
}
}
if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) {
- if (!CATDecl->IsClassExtension())
- for (auto *Prop : CATDecl->properties())
- PropMap[Prop->getIdentifier()] = Prop;
+ for (auto *Prop : CATDecl->properties())
+ PropMap[Prop->getIdentifier()] = Prop;
if (IncludeProtocols) {
// Scan through class's protocols.
for (auto *PI : CATDecl->protocols())
@@ -1507,6 +1596,14 @@ Sema::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
(Property->getPropertyIvarDecl() == IV))
return true;
}
+ // Also look up the property declaration in class extensions where one of
+ // its accessors is implemented by this method.
+ for (const auto *Ext : IFace->known_extensions())
+ for (const auto *Property : Ext->properties())
+ if ((Property->getGetterName() == IMD->getSelector() ||
+ Property->getSetterName() == IMD->getSelector()) &&
+ (Property->getPropertyIvarDecl() == IV))
+ return true;
return false;
}
@@ -1563,7 +1660,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) {
Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property)
<< Prop->getIdentifier();
- if (!PID->getLocation().isInvalid())
+ if (PID->getLocation().isValid())
Diag(PID->getLocation(), diag::note_property_synthesize);
continue;
}
@@ -1791,11 +1888,20 @@ void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl)
void
Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
- ObjCContainerDecl* IDecl) {
+ ObjCInterfaceDecl* IDecl) {
// Rules apply in non-GC mode only
if (getLangOpts().getGC() != LangOptions::NonGC)
return;
- for (const auto *Property : IDecl->properties()) {
+ ObjCContainerDecl::PropertyMap PM;
+ for (auto *Prop : IDecl->properties())
+ PM[Prop->getIdentifier()] = Prop;
+ for (const auto *Ext : IDecl->known_extensions())
+ for (auto *Prop : Ext->properties())
+ PM[Prop->getIdentifier()] = Prop;
+
+ for (ObjCContainerDecl::PropertyMap::iterator I = PM.begin(), E = PM.end();
+ I != E; ++I) {
+ const ObjCPropertyDecl *Property = I->second;
ObjCMethodDecl *GetterMethod = nullptr;
ObjCMethodDecl *SetterMethod = nullptr;
bool LookedUpGetterSetter = false;
@@ -1842,30 +1948,23 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
<< Property->getIdentifier() << (GetterMethod != nullptr)
<< (SetterMethod != nullptr);
// fixit stuff.
- if (!AttributesAsWritten) {
- if (Property->getLParenLoc().isValid()) {
- // @property () ... case.
- SourceRange PropSourceRange(Property->getAtLoc(),
- Property->getLParenLoc());
- Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
- FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic");
- }
- else {
- //@property id etc.
- SourceLocation endLoc =
- Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
- endLoc = endLoc.getLocWithOffset(-1);
- SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
- Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
- FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic) ");
- }
- }
- else if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ if (Property->getLParenLoc().isValid() &&
+ !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
// @property () ... case.
- SourceLocation endLoc = Property->getLParenLoc();
- SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
- Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
- FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic, ");
+ SourceLocation AfterLParen =
+ getLocForEndOfToken(Property->getLParenLoc());
+ StringRef NonatomicStr = AttributesAsWritten ? "nonatomic, "
+ : "nonatomic";
+ Diag(Property->getLocation(),
+ diag::note_atomic_property_fixup_suggest)
+ << FixItHint::CreateInsertion(AfterLParen, NonatomicStr);
+ } else if (Property->getLParenLoc().isInvalid()) {
+ //@property id etc.
+ SourceLocation startLoc =
+ Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Property->getLocation(),
+ diag::note_atomic_property_fixup_suggest)
+ << FixItHint::CreateInsertion(startLoc, "(nonatomic) ");
}
else
Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
@@ -1950,10 +2049,16 @@ void Sema::DiagnoseMissingDesignatedInitOverrides(
I = DesignatedInits.begin(), E = DesignatedInits.end(); I != E; ++I) {
const ObjCMethodDecl *MD = *I;
if (!InitSelSet.count(MD->getSelector())) {
- Diag(ImplD->getLocation(),
- diag::warn_objc_implementation_missing_designated_init_override)
- << MD->getSelector();
- Diag(MD->getLocation(), diag::note_objc_designated_init_marked_here);
+ bool Ignore = false;
+ if (auto *IMD = IFD->getInstanceMethod(MD->getSelector())) {
+ Ignore = IMD->isUnavailable();
+ }
+ if (!Ignore) {
+ Diag(ImplD->getLocation(),
+ diag::warn_objc_implementation_missing_designated_init_override)
+ << MD->getSelector();
+ Diag(MD->getLocation(), diag::note_objc_designated_init_marked_here);
+ }
}
}
}
@@ -1974,20 +2079,28 @@ static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
/// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods
/// have the property type and issue diagnostics if they don't.
/// Also synthesize a getter/setter method if none exist (and update the
-/// appropriate lookup tables. FIXME: Should reconsider if adding synthesized
-/// methods is the "right" thing to do.
-void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
- ObjCContainerDecl *CD,
- ObjCPropertyDecl *redeclaredProperty,
- ObjCContainerDecl *lexicalDC) {
-
+/// appropriate lookup tables.
+void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
ObjCMethodDecl *GetterMethod, *SetterMethod;
-
+ ObjCContainerDecl *CD = cast<ObjCContainerDecl>(property->getDeclContext());
if (CD->isInvalidDecl())
return;
GetterMethod = CD->getInstanceMethod(property->getGetterName());
+ // If the setter or getter is not found in the class extension, it might
+ // be declared in the primary class.
+ if (!GetterMethod)
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD))
+ if (CatDecl->IsClassExtension())
+ GetterMethod = CatDecl->getClassInterface()->
+ getInstanceMethod(property->getGetterName());
+
SetterMethod = CD->getInstanceMethod(property->getSetterName());
+ if (!SetterMethod)
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD))
+ if (CatDecl->IsClassExtension())
+ SetterMethod = CatDecl->getClassInterface()->
+ getInstanceMethod(property->getSetterName());
DiagnosePropertyAccessorMismatch(property, GetterMethod,
property->getLocation());
@@ -2020,9 +2133,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
// No instance method of same name as property getter name was found.
// Declare a getter method and add it to the list of methods
// for this class.
- SourceLocation Loc = redeclaredProperty ?
- redeclaredProperty->getLocation() :
- property->getLocation();
+ SourceLocation Loc = property->getLocation();
// If the property is null_resettable, the getter returns nonnull.
QualType resultTy = property->getType();
@@ -2050,10 +2161,6 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
AddPropertyAttrs(*this, GetterMethod, property);
- // FIXME: Eventually this shouldn't be needed, as the lexical context
- // and the real context should be the same.
- if (lexicalDC)
- GetterMethod->setLexicalDeclContext(lexicalDC);
if (property->hasAttr<NSReturnsNotRetainedAttr>())
GetterMethod->addAttr(NSReturnsNotRetainedAttr::CreateImplicit(Context,
Loc));
@@ -2082,9 +2189,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
// No instance method of same name as property setter name was found.
// Declare a setter method and add it to the list of methods
// for this class.
- SourceLocation Loc = redeclaredProperty ?
- redeclaredProperty->getLocation() :
- property->getLocation();
+ SourceLocation Loc = property->getLocation();
SetterMethod =
ObjCMethodDecl::Create(Context, Loc, Loc,
@@ -2126,10 +2231,6 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
AddPropertyAttrs(*this, SetterMethod, property);
CD->addDecl(SetterMethod);
- // FIXME: Eventually this shouldn't be needed, as the lexical context
- // and the real context should be the same.
- if (lexicalDC)
- SetterMethod->setLexicalDeclContext(lexicalDC);
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
SetterMethod->addAttr(
SectionAttr::CreateImplicit(Context, SectionAttr::GNU_section,
@@ -2189,15 +2290,6 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl);
QualType PropertyTy = PropertyDecl->getType();
- unsigned PropertyOwnership = getOwnershipRule(Attributes);
-
- // 'readonly' property with no obvious lifetime.
- // its life time will be determined by its backing ivar.
- if (getLangOpts().ObjCAutoRefCount &&
- Attributes & ObjCDeclSpec::DQ_PR_readonly &&
- PropertyTy->isObjCRetainableType() &&
- !PropertyOwnership)
- return;
// Check for copy or retain on non-object types.
if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
@@ -2295,13 +2387,6 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (*nullability == NullabilityKind::NonNull)
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "nonnull" << "weak";
- } else {
- PropertyTy =
- Context.getAttributedType(
- AttributedType::getNullabilityAttrKind(NullabilityKind::Nullable),
- PropertyTy, PropertyTy);
- TypeSourceInfo *TSInfo = PropertyDecl->getTypeSourceInfo();
- PropertyDecl->setType(PropertyTy, TSInfo);
}
}
@@ -2314,16 +2399,14 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
// Warn if user supplied no assignment attribute, property is
// readwrite, and this is an object type.
- if (!(Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong |
- ObjCDeclSpec::DQ_PR_weak)) &&
- PropertyTy->isObjCObjectPointerType()) {
- if (getLangOpts().ObjCAutoRefCount)
- // With arc, @property definitions should default to (strong) when
- // not specified; including when property is 'readonly'.
- PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
- else if (!(Attributes & ObjCDeclSpec::DQ_PR_readonly)) {
+ if (!getOwnershipRule(Attributes) && PropertyTy->isObjCRetainableType()) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
+ // do nothing
+ } else if (getLangOpts().ObjCAutoRefCount) {
+ // With arc, @property definitions should default to strong when
+ // not specified.
+ PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ } else if (PropertyTy->isObjCObjectPointerType()) {
bool isAnyClassTy =
(PropertyTy->isObjCClassType() ||
PropertyTy->isObjCQualifiedClassType());
@@ -2342,7 +2425,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (getLangOpts().getGC() == LangOptions::NonGC)
Diag(Loc, diag::warn_objc_property_default_assign_on_object);
}
- }
+ }
// FIXME: Implement warning dependent on NSCopying being
// implemented. See also:
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index ca67a1c0bf24..5dd835498a9a 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -77,6 +77,11 @@ public:
ImplicitDSALoc() {}
};
+public:
+ struct MapInfo {
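+ /// \brief The expression by which the variable is mapped.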
+ Expr *RefExpr;
+ };
+
private:
struct DSAInfo {
OpenMPClauseKind Attributes;
@@ -84,33 +89,41 @@ private:
};
typedef llvm::SmallDenseMap<VarDecl *, DSAInfo, 64> DeclSAMapTy;
typedef llvm::SmallDenseMap<VarDecl *, DeclRefExpr *, 64> AlignedMapTy;
- typedef llvm::DenseSet<VarDecl *> LoopControlVariablesSetTy;
+ typedef llvm::DenseMap<VarDecl *, unsigned> LoopControlVariablesMapTy;
+ typedef llvm::SmallDenseMap<VarDecl *, MapInfo, 64> MappedDeclsTy;
+ typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
+ CriticalsWithHintsTy;
struct SharingMapTy {
DeclSAMapTy SharingMap;
AlignedMapTy AlignedMap;
- LoopControlVariablesSetTy LCVSet;
+ MappedDeclsTy MappedDecls;
+ LoopControlVariablesMapTy LCVMap;
DefaultDataSharingAttributes DefaultAttr;
SourceLocation DefaultAttrLoc;
OpenMPDirectiveKind Directive;
DeclarationNameInfo DirectiveName;
Scope *CurScope;
SourceLocation ConstructLoc;
- bool OrderedRegion;
+ /// \brief The pointer (Expr *) holds the optional argument of the
+ /// 'ordered' clause; the flag is true if the region has an 'ordered'
+ /// clause, false otherwise.
+ llvm::PointerIntPair<Expr *, 1, bool> OrderedRegion;
bool NowaitRegion;
- unsigned CollapseNumber;
+ bool CancelRegion;
+ unsigned AssociatedLoops;
SourceLocation InnerTeamsRegionLoc;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
- : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified),
+ : SharingMap(), AlignedMap(), LCVMap(), DefaultAttr(DSA_unspecified),
Directive(DKind), DirectiveName(std::move(Name)), CurScope(CurScope),
- ConstructLoc(Loc), OrderedRegion(false), NowaitRegion(false),
- CollapseNumber(1), InnerTeamsRegionLoc() {}
+ ConstructLoc(Loc), OrderedRegion(), NowaitRegion(false),
+ CancelRegion(false), AssociatedLoops(1), InnerTeamsRegionLoc() {}
SharingMapTy()
- : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified),
+ : SharingMap(), AlignedMap(), LCVMap(), DefaultAttr(DSA_unspecified),
Directive(OMPD_unknown), DirectiveName(), CurScope(nullptr),
- ConstructLoc(), OrderedRegion(false), NowaitRegion(false),
- CollapseNumber(1), InnerTeamsRegionLoc() {}
+ ConstructLoc(), OrderedRegion(), NowaitRegion(false),
+ CancelRegion(false), AssociatedLoops(1), InnerTeamsRegionLoc() {}
};
typedef SmallVector<SharingMapTy, 64> StackTy;
@@ -122,6 +135,7 @@ private:
OpenMPClauseKind ClauseKindMode;
Sema &SemaRef;
bool ForceCapturing;
+ CriticalsWithHintsTy Criticals;
typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
@@ -152,6 +166,16 @@ public:
Stack.pop_back();
}
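+ /// \brief Record a 'critical' directive and the value of its 'hint'
+ /// clause so later 'critical' directives with the same name can be
+ /// checked for consistency.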
+ void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) {
+ Criticals[D->getDirectiveName().getAsString()] = std::make_pair(D, Hint);
+ }
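+ /// \brief Return a previously seen 'critical' directive with the given
+ /// name together with its hint, or (nullptr, 0) if there is none.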
+ const std::pair<OMPCriticalDirective *, llvm::APSInt>
+ getCriticalWithHint(const DeclarationNameInfo &Name) const {
+ auto I = Criticals.find(Name.getAsString());
+ if (I != Criticals.end())
+ return I->second;
+ return std::make_pair(nullptr, llvm::APSInt());
+ }
/// \brief If an 'aligned' declaration for the given variable \a D has not
/// been seen yet, add it and return null; otherwise return the previous
/// occurrence's expression for diagnostics.
@@ -161,7 +185,17 @@ public:
void addLoopControlVariable(VarDecl *D);
/// \brief Check if the specified variable is a loop control variable for
/// the current region.
- bool isLoopControlVariable(VarDecl *D);
+ /// \return The index of the loop control variable in the list of associated
+ /// for-loops (from outer to inner).
+ unsigned isLoopControlVariable(VarDecl *D);
+ /// \brief Check if the specified variable is a loop control variable for
+ /// the parent region.
+ /// \return The index of the loop control variable in the list of associated
+ /// for-loops (from outer to inner).
+ unsigned isParentLoopControlVariable(VarDecl *D);
+ /// \brief Get the loop control variable for the I-th loop (or nullptr) in
+ /// the parent directive.
+ VarDecl *getParentLoopControlVariable(unsigned I);
/// \brief Adds explicit data sharing attribute to the specified declaration.
void addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A);
@@ -190,6 +224,13 @@ public:
bool hasExplicitDSA(VarDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
unsigned Level);
+
+ /// \brief Returns true if the directive at level \a Level matches the
+ /// specified \a DPred predicate.
+ bool hasExplicitDirective(
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
+ unsigned Level);
+
/// \brief Finds a directive which matches specified \a DPred predicate.
template <class NamedDirectivesPredicate>
bool hasDirective(NamedDirectivesPredicate DPred, bool FromParent);
@@ -204,6 +245,8 @@ public:
return Stack[Stack.size() - 2].Directive;
return OMPD_unknown;
}
+ /// \brief Return the directive associated with the provided scope.
+ OpenMPDirectiveKind getDirectiveForScope(const Scope *S) const;
/// \brief Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
@@ -230,16 +273,23 @@ public:
}
/// \brief Marks current region as ordered (it has an 'ordered' clause).
- void setOrderedRegion(bool IsOrdered = true) {
- Stack.back().OrderedRegion = IsOrdered;
+ void setOrderedRegion(bool IsOrdered, Expr *Param) {
+ Stack.back().OrderedRegion.setInt(IsOrdered);
+ Stack.back().OrderedRegion.setPointer(Param);
}
/// \brief Returns true if the parent region is ordered (has an associated
/// 'ordered' clause), false otherwise.
bool isParentOrderedRegion() const {
if (Stack.size() > 2)
- return Stack[Stack.size() - 2].OrderedRegion;
+ return Stack[Stack.size() - 2].OrderedRegion.getInt();
return false;
}
+ /// \brief Returns the optional parameter of the parent ordered region.
+ Expr *getParentOrderedRegionParam() const {
+ if (Stack.size() > 2)
+ return Stack[Stack.size() - 2].OrderedRegion.getPointer();
+ return nullptr;
+ }
/// \brief Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
Stack.back().NowaitRegion = IsNowait;
@@ -251,13 +301,21 @@ public:
return Stack[Stack.size() - 2].NowaitRegion;
return false;
}
+ /// \brief Marks parent region as cancel region.
+ void setParentCancelRegion(bool Cancel = true) {
+ if (Stack.size() > 2)
+ Stack[Stack.size() - 2].CancelRegion =
+ Stack[Stack.size() - 2].CancelRegion || Cancel;
+ }
+ /// \brief Return true if the current region has an inner cancel construct.
+ bool isCancelRegion() const {
+ return Stack.back().CancelRegion;
+ }
/// \brief Set collapse value for the region.
- void setCollapseNumber(unsigned Val) { Stack.back().CollapseNumber = Val; }
+ void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; }
/// \brief Return collapse value for region.
- unsigned getCollapseNumber() const {
- return Stack.back().CollapseNumber;
- }
+ unsigned getAssociatedLoops() const { return Stack.back().AssociatedLoops; }
/// \brief Marks current target region as one with closely nested teams
/// region.
@@ -279,10 +337,37 @@ public:
Scope *getCurScope() const { return Stack.back().CurScope; }
Scope *getCurScope() { return Stack.back().CurScope; }
SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
+
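+ /// \brief Find the map info for \a VD in the innermost enclosing region
+ /// that maps it; the returned RefExpr is null if the variable is not
+ /// mapped anywhere.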
+ MapInfo getMapInfoForVar(VarDecl *VD) {
+ MapInfo VarMI = {nullptr};
+ for (auto Cnt = Stack.size() - 1; Cnt > 0; --Cnt) {
+ if (Stack[Cnt].MappedDecls.count(VD)) {
+ VarMI = Stack[Cnt].MappedDecls[VD];
+ break;
+ }
+ }
+ return VarMI;
+ }
+
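+ /// \brief Record map info for \a VD in the current region.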
+ void addMapInfoForVar(VarDecl *VD, MapInfo MI) {
+ if (Stack.size() > 1) {
+ Stack.back().MappedDecls[VD] = MI;
+ }
+ }
+
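+ /// \brief Return the map info for \a VD if it is mapped in the current
+ /// region; the returned RefExpr is null otherwise.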
+ MapInfo IsMappedInCurrentRegion(VarDecl *VD) {
+ assert(Stack.size() > 1 && "Target level is 0");
+ MapInfo VarMI = {nullptr};
+ if (Stack.size() > 1 && Stack.back().MappedDecls.count(VD)) {
+ VarMI = Stack.back().MappedDecls[VD];
+ }
+ return VarMI;
+ }
};
bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || DKind == OMPD_task ||
- isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
+ isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown ||
+ isOpenMPTaskLoopDirective(DKind);
}
} // namespace
@@ -409,13 +494,32 @@ DeclRefExpr *DSAStackTy::addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE) {
void DSAStackTy::addLoopControlVariable(VarDecl *D) {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
D = D->getCanonicalDecl();
- Stack.back().LCVSet.insert(D);
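+ // Loop control variables are numbered from 1; 0 means "not a loop
+ // control variable".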
+ Stack.back().LCVMap.insert(std::make_pair(D, Stack.back().LCVMap.size() + 1));
}
-bool DSAStackTy::isLoopControlVariable(VarDecl *D) {
+unsigned DSAStackTy::isLoopControlVariable(VarDecl *D) {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
D = D->getCanonicalDecl();
- return Stack.back().LCVSet.count(D) > 0;
+ return Stack.back().LCVMap.count(D) > 0 ? Stack.back().LCVMap[D] : 0;
+}
+
+unsigned DSAStackTy::isParentLoopControlVariable(VarDecl *D) {
+ assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
+ D = D->getCanonicalDecl();
+ return Stack[Stack.size() - 2].LCVMap.count(D) > 0
+ ? Stack[Stack.size() - 2].LCVMap[D]
+ : 0;
+}
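+// For illustration (assumed example): with '#pragma omp for collapse(2)' over
+// loops 'i' and 'j', LCVMap holds {i -> 1, j -> 2}; the lookups above return
+// that 1-based index, and 0 means "not a loop control variable".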
+
+VarDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) {
+ assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
+ if (Stack[Stack.size() - 2].LCVMap.size() < I)
+ return nullptr;
+ for (auto &Pair : Stack[Stack.size() - 2].LCVMap) {
+ if (Pair.second == I)
+ return Pair.first;
+ }
+ return nullptr;
}
void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) {
@@ -452,12 +556,17 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
/// \brief Build a variable declaration for an OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
- StringRef Name) {
+ StringRef Name, const AttrVec *Attrs = nullptr) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
VarDecl *Decl =
VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
+ if (Attrs) {
+ for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
+ I != E; ++I)
+ Decl->addAttr(*I);
+ }
Decl->setImplicit();
return Decl;
}
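// For illustration (assumed example): if 'int a __attribute__((aligned(32)));'
// appears in a lastprivate clause, the private helper copy built here inherits
// the AlignedAttr, so the copy keeps the requested alignment.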
@@ -496,41 +605,20 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct, C/C++, predetermined, p.1]
- // Variables with automatic storage duration that are declared in a scope
- // inside the construct are private.
- OpenMPDirectiveKind Kind =
- FromParent ? getParentDirective() : getCurrentDirective();
- auto StartI = std::next(Stack.rbegin());
- auto EndI = std::prev(Stack.rend());
- if (FromParent && StartI != EndI) {
- StartI = std::next(StartI);
- }
- if (!isParallelOrTaskRegion(Kind)) {
- if (isOpenMPLocal(D, StartI) &&
- ((D->isLocalVarDecl() && (D->getStorageClass() == SC_Auto ||
- D->getStorageClass() == SC_None)) ||
- isa<ParmVarDecl>(D))) {
- DVar.CKind = OMPC_private;
+ // in a Construct, C/C++, predetermined, p.4]
+ // Static data members are shared.
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.7]
+ // Variables with static storage duration that are declared in a scope
+ // inside the construct are shared.
+ if (D->isStaticDataMember()) {
+ DSAVarData DVarTemp =
+ hasDSA(D, isOpenMPPrivate, MatchesAlways(), FromParent);
+ if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
return DVar;
- }
- // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct, C/C++, predetermined, p.4]
- // Static data members are shared.
- // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct, C/C++, predetermined, p.7]
- // Variables with static storage duration that are declared in a scope
- // inside the construct are shared.
- if (D->isStaticDataMember() || D->isStaticLocal()) {
- DSAVarData DVarTemp =
- hasDSA(D, isOpenMPPrivate, MatchesAlways(), FromParent);
- if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
- return DVar;
-
- DVar.CKind = OMPC_shared;
- return DVar;
- }
+ DVar.CKind = OMPC_shared;
+ return DVar;
}
QualType Type = D->getType().getNonReferenceType().getCanonicalType();
@@ -542,6 +630,9 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
// shared.
CXXRecordDecl *RD =
SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
+ if (auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
+ if (auto *CTD = CTSD->getSpecializedTemplate())
+ RD = CTD->getTemplatedDecl();
if (IsConstant &&
!(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasMutableFields())) {
// Variables with const-qualified type having no mutable member may be
@@ -557,6 +648,11 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) {
// Explicitly specified attributes and local variables with predetermined
// attributes.
+ auto StartI = std::next(Stack.rbegin());
+ auto EndI = std::prev(Stack.rend());
+ if (FromParent && StartI != EndI) {
+ StartI = std::next(StartI);
+ }
auto I = std::prev(StartI);
if (I->SharingMap.count(D)) {
DVar.RefExpr = I->SharingMap[D].RefExpr;
@@ -635,6 +731,19 @@ bool DSAStackTy::hasExplicitDSA(
CPred(StartI->SharingMap[D].Attributes);
}
+bool DSAStackTy::hasExplicitDirective(
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
+ unsigned Level) {
+ if (isClauseParsingMode())
+ ++Level;
+ auto StartI = Stack.rbegin();
+ auto EndI = std::prev(Stack.rend());
+ if (std::distance(StartI, EndI) <= (int)Level)
+ return false;
+ std::advance(StartI, Level);
+ return DPred(StartI->Directive);
+}
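+// For illustration (assumed usage, sketching the Level semantics): while
+// analyzing a '#pragma omp parallel' nested in '#pragma omp target',
+// hasExplicitDirective(isOpenMPTargetDirective, /*Level=*/1) checks the
+// directive one level up and matches the 'target'.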
+
template <class NamedDirectivesPredicate>
bool DSAStackTy::hasDirective(NamedDirectivesPredicate DPred, bool FromParent) {
auto StartI = std::next(Stack.rbegin());
@@ -649,15 +758,134 @@ bool DSAStackTy::hasDirective(NamedDirectivesPredicate DPred, bool FromParent) {
return false;
}
+OpenMPDirectiveKind DSAStackTy::getDirectiveForScope(const Scope *S) const {
+ for (auto I = Stack.rbegin(), EE = Stack.rend(); I != EE; ++I)
+ if (I->CurScope == S)
+ return I->Directive;
+ return OMPD_unknown;
+}
+
void Sema::InitDataSharingAttributesStack() {
VarDataSharingAttributesStack = new DSAStackTy(*this);
}
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
+bool Sema::IsOpenMPCapturedByRef(VarDecl *VD,
+ const CapturedRegionScopeInfo *RSI) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+
+ auto &Ctx = getASTContext();
+ bool IsByRef = true;
+
+ // Find the directive that is associated with the provided scope.
+ auto DKind = DSAStack->getDirectiveForScope(RSI->TheScope);
+ auto Ty = VD->getType();
+
+ if (isOpenMPTargetDirective(DKind)) {
+ // This table summarizes how a given variable should be passed to the device
+ // given its type and the clauses where it appears. This table is based on
+ // the description in OpenMP 4.5 [2.10.4, target Construct] and
+ // OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
+ //
+ // =========================================================================
+ // | type | defaultmap | pvt | first | is_device_ptr | map | res. |
+ // | |(tofrom:scalar)| | pvt | | | |
+ // =========================================================================
+ // | scl | | | | - | | bycopy|
+ // | scl | | - | x | - | - | bycopy|
+ // | scl | | x | - | - | - | null |
+ // | scl | x | | | - | | byref |
+ // | scl | x | - | x | - | - | bycopy|
+ // | scl | x | x | - | - | - | null |
+ // | scl | | - | - | - | x | byref |
+ // | scl | x | - | - | - | x | byref |
+ //
+ // | agg | n.a. | | | - | | byref |
+ // | agg | n.a. | - | x | - | - | byref |
+ // | agg | n.a. | x | - | - | - | null |
+ // | agg | n.a. | - | - | - | x | byref |
+ // | agg | n.a. | - | - | - | x[] | byref |
+ //
+ // | ptr | n.a. | | | - | | bycopy|
+ // | ptr | n.a. | - | x | - | - | bycopy|
+ // | ptr | n.a. | x | - | - | - | null |
+ // | ptr | n.a. | - | - | - | x | byref |
+ // | ptr | n.a. | - | - | - | x[] | bycopy|
+ // | ptr | n.a. | - | - | x | | bycopy|
+ // | ptr | n.a. | - | - | x | x | bycopy|
+ // | ptr | n.a. | - | - | x | x[] | bycopy|
+ // =========================================================================
+ // Legend:
+ // scl - scalar
+ // ptr - pointer
+ // agg - aggregate
+ // x - applies
+ // - - invalid in this combination
+ // [] - mapped with an array section
+ // byref - should be mapped by reference
+ // bycopy - should be mapped by value
+ // null - initialize a local variable to null on the device
+ //
+ // Observations:
+ // - All scalar declarations that show up in a map clause have to be passed
+ // by reference, because they may have been mapped in the enclosing data
+ // environment.
+ // - If the scalar value does not fit the size of uintptr, it has to be
+ // passed by reference, regardless of the result in the table above.
+ // - For pointers mapped by value that have either an implicit map or an
+ // array section, the runtime library may pass the NULL value to the
+ // device instead of the value passed to it by the compiler.
+
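+ // For illustration (examples assumed from the rows above):
+ //
+ //   int s;
+ //   #pragma omp target map(tofrom: s)  // scalar in a map clause
+ //   { s += 1; }                        // -> passed byref
+ //
+ //   #pragma omp target firstprivate(s) // scalar, firstprivate
+ //   { s += 1; }                        // -> passed bycopy
+ //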
+ // FIXME: Right now, only implicit maps are implemented. Properly mapping
+ // values requires having Sema and parsing support for the map, private, and
+ // firstprivate clauses in place, which we don't have yet.
+
+ if (Ty->isReferenceType())
+ Ty = Ty->castAs<ReferenceType>()->getPointeeType();
+ IsByRef = !Ty->isScalarType();
+ }
+
+ // When passing data by value, we need to make sure it fits the uintptr size
+ // and alignment, because the runtime library only deals with uintptr types.
+ // If it does not fit the uintptr size, we need to pass the data by reference
+ // instead.
+ if (!IsByRef &&
+ (Ctx.getTypeSizeInChars(Ty) >
+ Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
+ Ctx.getDeclAlign(VD) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType())))
+ IsByRef = true;
+
+ return IsByRef;
+}
+
bool Sema::IsOpenMPCapturedVar(VarDecl *VD) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
VD = VD->getCanonicalDecl();
+
+ // If we are attempting to capture a global variable in a directive with
+ // 'target', we return true so that this global is also mapped to the device.
+ //
+ // FIXME: If the declaration is enclosed in a 'declare target' directive,
+ // then it should not be captured. Therefore, an extra check has to be
+ // inserted here once support for 'declare target' is added.
+ //
+ if (!VD->hasLocalStorage()) {
+ if (DSAStack->getCurrentDirective() == OMPD_target &&
+ !DSAStack->isClauseParsingMode()) {
+ return true;
+ }
+ if (DSAStack->getCurScope() &&
+ DSAStack->hasDirective(
+ [](OpenMPDirectiveKind K, const DeclarationNameInfo &DNI,
+ SourceLocation Loc) -> bool {
+ return isOpenMPTargetDirective(K);
+ },
+ false)) {
+ return true;
+ }
+ }
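+  // For illustration (assumed example): a global 'int G;' referenced inside
+  // '#pragma omp target' is reported as captured here so that it is also
+  // mapped to the device.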
+
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
DSAStack->getParentDirective() != OMPD_unknown)) {
@@ -682,6 +910,14 @@ bool Sema::isOpenMPPrivateVar(VarDecl *VD, unsigned Level) {
VD, [](OpenMPClauseKind K) -> bool { return K == OMPC_private; }, Level);
}
+bool Sema::isOpenMPTargetCapturedVar(VarDecl *VD, unsigned Level) {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ // Return true if the variable has no local storage and the directive at the
+ // given level is a 'target' directive, i.e. the variable must be captured
+ // so it can be mapped to the device.
+
+ return !VD->hasLocalStorage() &&
+ DSAStack->hasExplicitDirective(isOpenMPTargetDirective, Level);
+}
+
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -715,7 +951,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
continue;
}
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(DE)->getDecl());
- QualType Type = VD->getType();
+ QualType Type = VD->getType().getNonReferenceType();
auto DVar = DSAStack->getTopDSA(VD, false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate helper private variable and initialize it with the
@@ -723,9 +959,9 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// by the address of the new private variable in CodeGen. This new
// variable is not added to IdResolver, so the code in the OpenMP
// region uses original variable for proper diagnostics.
- auto *VDPrivate =
- buildVarDecl(*this, DE->getExprLoc(), Type.getUnqualifiedType(),
- VD->getName());
+ auto *VDPrivate = buildVarDecl(
+ *this, DE->getExprLoc(), Type.getUnqualifiedType(),
+ VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr);
ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
if (VDPrivate->isInvalidDecl())
continue;
@@ -1158,7 +1394,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
switch (DKind) {
case OMPD_parallel: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -1234,7 +1471,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_parallel_for: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -1246,7 +1484,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -1258,7 +1497,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_parallel_sections: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -1309,6 +1549,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_target_data:
case OMPD_target: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
@@ -1319,7 +1560,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_teams: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty);
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -1337,6 +1579,30 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_taskloop: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_taskloop_simd: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
+ case OMPD_distribute: {
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
case OMPD_threadprivate:
case OMPD_taskyield:
case OMPD_barrier:
@@ -1356,6 +1622,10 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ActOnCapturedRegionError();
return StmtError();
}
+
+ OMPOrderedClause *OC = nullptr;
+ OMPScheduleClause *SC = nullptr;
+ SmallVector<OMPLinearClause *, 4> LCs;
// This is required for proper codegen.
for (auto *Clause : Clauses) {
if (isOpenMPPrivate(Clause->getClauseKind()) ||
@@ -1377,10 +1647,42 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// Required for proper codegen of combined directives.
// TODO: add processing for other clauses.
if (auto *E = cast_or_null<Expr>(
- cast<OMPScheduleClause>(Clause)->getHelperChunkSize())) {
- MarkDeclarationsReferencedInExpr(E);
- }
+ cast<OMPScheduleClause>(Clause)->getHelperChunkSize()))
+ MarkDeclarationsReferencedInExpr(E);
}
+ if (Clause->getClauseKind() == OMPC_schedule)
+ SC = cast<OMPScheduleClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_ordered)
+ OC = cast<OMPOrderedClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_linear)
+ LCs.push_back(cast<OMPLinearClause>(Clause));
+ }
+ bool ErrorFound = false;
+ // OpenMP [2.7.1, Loop Construct, Restrictions]
+ // The nonmonotonic modifier cannot be specified if an ordered clause is
+ // specified.
+ if (SC &&
+ (SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ SC->getSecondScheduleModifier() ==
+ OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
+ OC) {
+ Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
+ ? SC->getFirstScheduleModifierLoc()
+ : SC->getSecondScheduleModifierLoc(),
+ diag::err_omp_schedule_nonmonotonic_ordered)
+ << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ ErrorFound = true;
+ }
+ if (!LCs.empty() && OC && OC->getNumForLoops()) {
+ for (auto *C : LCs) {
+ Diag(C->getLocStart(), diag::err_omp_linear_ordered)
+ << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ }
+ ErrorFound = true;
+ }
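+  // For illustration, both of the following are rejected by the checks above
+  // (assumed examples):
+  //   #pragma omp for schedule(nonmonotonic: dynamic) ordered
+  //   #pragma omp for linear(i) ordered(1)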
+ if (ErrorFound) {
+ ActOnCapturedRegionError();
+ return StmtError();
}
return ActOnCapturedRegionEnd(S.get());
}
@@ -1419,6 +1721,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel | cancellation | |
// | | point | ! |
// | parallel | cancel | ! |
+ // | parallel | taskloop | * |
+ // | parallel | taskloop simd | * |
+ // | parallel | distribute | |
// +------------------+-----------------+------------------------------------+
// | for | parallel | * |
// | for | for | + |
@@ -1445,6 +1750,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | for | cancellation | |
// | | point | ! |
// | for | cancel | ! |
+ // | for | taskloop | * |
+ // | for | taskloop simd | * |
+ // | for | distribute | |
// +------------------+-----------------+------------------------------------+
// | master | parallel | * |
// | master | for | + |
@@ -1471,6 +1779,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | master | cancellation | |
// | | point | |
// | master | cancel | |
+ // | master | taskloop | * |
+ // | master | taskloop simd | * |
+ // | master | distribute | |
// +------------------+-----------------+------------------------------------+
// | critical | parallel | * |
// | critical | for | + |
@@ -1496,6 +1807,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | critical | cancellation | |
// | | point | |
// | critical | cancel | |
+ // | critical | taskloop | * |
+ // | critical | taskloop simd | * |
+ // | critical | distribute | |
// +------------------+-----------------+------------------------------------+
// | simd | parallel | |
// | simd | for | |
@@ -1515,13 +1829,16 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | simd | taskwait | |
// | simd | taskgroup | |
// | simd | flush | |
- // | simd | ordered | |
+ // | simd | ordered | + (with simd clause) |
// | simd | atomic | |
// | simd | target | |
// | simd | teams | |
// | simd | cancellation | |
// | | point | |
// | simd | cancel | |
+ // | simd | taskloop | |
+ // | simd | taskloop simd | |
+ // | simd | distribute | |
// +------------------+-----------------+------------------------------------+
// | for simd | parallel | |
// | for simd | for | |
@@ -1541,13 +1858,16 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | for simd | taskwait | |
// | for simd | taskgroup | |
// | for simd | flush | |
- // | for simd | ordered | |
+ // | for simd | ordered | + (with simd clause) |
// | for simd | atomic | |
// | for simd | target | |
// | for simd | teams | |
// | for simd | cancellation | |
// | | point | |
// | for simd | cancel | |
+ // | for simd | taskloop | |
+ // | for simd | taskloop simd | |
+ // | for simd | distribute | |
// +------------------+-----------------+------------------------------------+
// | parallel for simd| parallel | |
// | parallel for simd| for | |
@@ -1567,13 +1887,16 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel for simd| taskwait | |
// | parallel for simd| taskgroup | |
// | parallel for simd| flush | |
- // | parallel for simd| ordered | |
+ // | parallel for simd| ordered | + (with simd clause) |
// | parallel for simd| atomic | |
// | parallel for simd| target | |
// | parallel for simd| teams | |
// | parallel for simd| cancellation | |
// | | point | |
// | parallel for simd| cancel | |
+ // | parallel for simd| taskloop | |
+ // | parallel for simd| taskloop simd | |
+ // | parallel for simd| distribute | |
// +------------------+-----------------+------------------------------------+
// | sections | parallel | * |
// | sections | for | + |
@@ -1600,6 +1923,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | sections | cancellation | |
// | | point | ! |
// | sections | cancel | ! |
+ // | sections | taskloop | * |
+ // | sections | taskloop simd | * |
+ // | sections | distribute | |
// +------------------+-----------------+------------------------------------+
// | section | parallel | * |
// | section | for | + |
@@ -1626,6 +1952,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | section | cancellation | |
// | | point | ! |
// | section | cancel | ! |
+ // | section | taskloop | * |
+ // | section | taskloop simd | * |
+ // | section | distribute | |
// +------------------+-----------------+------------------------------------+
// | single | parallel | * |
// | single | for | + |
@@ -1652,6 +1981,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | single | cancellation | |
// | | point | |
// | single | cancel | |
+ // | single | taskloop | * |
+ // | single | taskloop simd | * |
+ // | single | distribute | |
// +------------------+-----------------+------------------------------------+
// | parallel for | parallel | * |
// | parallel for | for | + |
@@ -1678,6 +2010,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel for | cancellation | |
// | | point | ! |
// | parallel for | cancel | ! |
+ // | parallel for | taskloop | * |
+ // | parallel for | taskloop simd | * |
+ // | parallel for | distribute | |
// +------------------+-----------------+------------------------------------+
// | parallel sections| parallel | * |
// | parallel sections| for | + |
@@ -1704,6 +2039,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | parallel sections| cancellation | |
// | | point | ! |
// | parallel sections| cancel | ! |
+ // | parallel sections| taskloop | * |
+ // | parallel sections| taskloop simd | * |
+ // | parallel sections| distribute | |
// +------------------+-----------------+------------------------------------+
// | task | parallel | * |
// | task | for | + |
@@ -1730,6 +2068,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | task | cancellation | |
// | | point | ! |
// | task | cancel | ! |
+ // | task | taskloop | * |
+ // | task | taskloop simd | * |
+ // | task | distribute | |
// +------------------+-----------------+------------------------------------+
// | ordered | parallel | * |
// | ordered | for | + |
@@ -1756,6 +2097,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | ordered | cancellation | |
// | | point | |
// | ordered | cancel | |
+ // | ordered | taskloop | * |
+ // | ordered | taskloop simd | * |
+ // | ordered | distribute | |
// +------------------+-----------------+------------------------------------+
// | atomic | parallel | |
// | atomic | for | |
@@ -1782,6 +2126,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | atomic | cancellation | |
// | | point | |
// | atomic | cancel | |
+ // | atomic | taskloop | |
+ // | atomic | taskloop simd | |
+ // | atomic | distribute | |
// +------------------+-----------------+------------------------------------+
// | target | parallel | * |
// | target | for | * |
@@ -1808,6 +2155,9 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | target | cancellation | |
// | | point | |
// | target | cancel | |
+ // | target | taskloop | * |
+ // | target | taskloop simd | * |
+ // | target | distribute | |
// +------------------+-----------------+------------------------------------+
// | teams | parallel | * |
// | teams | for | + |
@@ -1834,6 +2184,95 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// | teams | cancellation | |
// | | point | |
// | teams | cancel | |
+ // | teams | taskloop | + |
+ // | teams | taskloop simd | + |
+ // | teams | distribute | ! |
+ // +------------------+-----------------+------------------------------------+
+ // | taskloop | parallel | * |
+ // | taskloop | for | + |
+ // | taskloop | for simd | + |
+ // | taskloop | master | + |
+ // | taskloop | critical | * |
+ // | taskloop | simd | * |
+ // | taskloop | sections | + |
+ // | taskloop | section | + |
+ // | taskloop | single | + |
+ // | taskloop | parallel for | * |
+ // | taskloop |parallel for simd| * |
+ // | taskloop |parallel sections| * |
+ // | taskloop | task | * |
+ // | taskloop | taskyield | * |
+ // | taskloop | barrier | + |
+ // | taskloop | taskwait | * |
+ // | taskloop | taskgroup | * |
+ // | taskloop | flush | * |
+ // | taskloop | ordered | + |
+ // | taskloop | atomic | * |
+ // | taskloop | target | * |
+ // | taskloop | teams | + |
+ // | taskloop | cancellation | |
+ // | | point | |
+ // | taskloop | cancel | |
+ // | taskloop | taskloop | * |
+ // | taskloop | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | taskloop simd | parallel | |
+ // | taskloop simd | for | |
+ // | taskloop simd | for simd | |
+ // | taskloop simd | master | |
+ // | taskloop simd | critical | |
+ // | taskloop simd | simd | |
+ // | taskloop simd | sections | |
+ // | taskloop simd | section | |
+ // | taskloop simd | single | |
+ // | taskloop simd | parallel for | |
+ // | taskloop simd |parallel for simd| |
+ // | taskloop simd |parallel sections| |
+ // | taskloop simd | task | |
+ // | taskloop simd | taskyield | |
+ // | taskloop simd | barrier | |
+ // | taskloop simd | taskwait | |
+ // | taskloop simd | taskgroup | |
+ // | taskloop simd | flush | |
+ // | taskloop simd | ordered | + (with simd clause) |
+ // | taskloop simd | atomic | |
+ // | taskloop simd | target | |
+ // | taskloop simd | teams | |
+ // | taskloop simd | cancellation | |
+ // | | point | |
+ // | taskloop simd | cancel | |
+ // | taskloop simd | taskloop | |
+ // | taskloop simd | taskloop simd | |
+ // | taskloop simd | distribute | |
+ // +------------------+-----------------+------------------------------------+
+ // | distribute | parallel | * |
+ // | distribute | for | * |
+ // | distribute | for simd | * |
+ // | distribute | master | * |
+ // | distribute | critical | * |
+ // | distribute | simd | * |
+ // | distribute | sections | * |
+ // | distribute | section | * |
+ // | distribute | single | * |
+ // | distribute | parallel for | * |
+ // | distribute |parallel for simd| * |
+ // | distribute |parallel sections| * |
+ // | distribute | task | * |
+ // | distribute | taskyield | * |
+ // | distribute | barrier | * |
+ // | distribute | taskwait | * |
+ // | distribute | taskgroup | * |
+ // | distribute | flush | * |
+ // | distribute | ordered | + |
+ // | distribute | atomic | * |
+ // | distribute | target | |
+ // | distribute | teams | |
+ // | distribute | cancellation | + |
+ // | | point | |
+ // | distribute | cancel | + |
+ // | distribute | taskloop | * |
+ // | distribute | taskloop simd | * |
+ // | distribute | distribute | |
// +------------------+-----------------+------------------------------------+
if (Stack->getCurScope()) {
auto ParentRegion = Stack->getParentDirective();
@@ -1843,11 +2282,15 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
NoRecommend,
ShouldBeInParallelRegion,
ShouldBeInOrderedRegion,
- ShouldBeInTargetRegion
+ ShouldBeInTargetRegion,
+ ShouldBeInTeamsRegion
} Recommend = NoRecommend;
- if (isOpenMPSimdDirective(ParentRegion)) {
+ if (isOpenMPSimdDirective(ParentRegion) && CurrentRegion != OMPD_ordered) {
// OpenMP [2.16, Nesting of Regions]
// OpenMP constructs may not be nested inside a simd region.
+ // OpenMP [2.8.1, simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_simd);
return true;
}
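// For illustration (assumed example): inside '#pragma omp simd' only
//   #pragma omp ordered simd
// may appear; nesting e.g. '#pragma omp parallel' there is diagnosed.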
@@ -1890,16 +2333,19 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// construct-type-clause.
NestingProhibited =
!((CancelRegion == OMPD_parallel && ParentRegion == OMPD_parallel) ||
- (CancelRegion == OMPD_for && ParentRegion == OMPD_for) ||
+ (CancelRegion == OMPD_for &&
+ (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for)) ||
(CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
(CancelRegion == OMPD_sections &&
- (ParentRegion == OMPD_section || ParentRegion == OMPD_sections)));
+ (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
+ ParentRegion == OMPD_parallel_sections)));
} else if (CurrentRegion == OMPD_master) {
// OpenMP [2.16, Nesting of Regions]
// A master region may not be closely nested inside a worksharing,
// atomic, or explicit task region.
NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
- ParentRegion == OMPD_task;
+ ParentRegion == OMPD_task ||
+ isOpenMPTaskLoopDirective(ParentRegion);
} else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
// OpenMP [2.16, Nesting of Regions]
// A critical region may not be nested (closely or otherwise) inside a
@@ -1936,7 +2382,8 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
NestingProhibited =
isOpenMPWorksharingDirective(ParentRegion) ||
ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
- ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered ||
+ isOpenMPTaskLoopDirective(ParentRegion);
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
!isOpenMPParallelDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
@@ -1945,7 +2392,8 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
NestingProhibited =
isOpenMPWorksharingDirective(ParentRegion) ||
ParentRegion == OMPD_task || ParentRegion == OMPD_master ||
- ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered ||
+ isOpenMPTaskLoopDirective(ParentRegion);
Recommend = ShouldBeInParallelRegion;
} else if (CurrentRegion == OMPD_ordered) {
// OpenMP [2.16, Nesting of Regions]
@@ -1953,9 +2401,14 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// atomic, or explicit task region.
// An ordered region must be closely nested inside a loop region (or
// parallel loop region) with an ordered clause.
+ // OpenMP [2.8.1, simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
NestingProhibited = ParentRegion == OMPD_critical ||
ParentRegion == OMPD_task ||
- !Stack->isParentOrderedRegion();
+ isOpenMPTaskLoopDirective(ParentRegion) ||
+ !(isOpenMPSimdDirective(ParentRegion) ||
+ Stack->isParentOrderedRegion());
Recommend = ShouldBeInOrderedRegion;
} else if (isOpenMPTeamsDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
@@ -1970,10 +2423,17 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// distribute, parallel, parallel sections, parallel workshare, and the
// parallel loop and parallel loop SIMD constructs are the only OpenMP
// constructs that can be closely nested in the teams region.
- // TODO: add distribute directive.
- NestingProhibited = !isOpenMPParallelDirective(CurrentRegion);
+ NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPDistributeDirective(CurrentRegion);
Recommend = ShouldBeInParallelRegion;
}
+ if (!NestingProhibited && isOpenMPDistributeDirective(CurrentRegion)) {
+ // OpenMP 4.5 [2.17, Nesting of Regions]
+ // The region associated with the distribute construct must be strictly
+ // nested inside a teams region.
+ NestingProhibited = !isOpenMPTeamsDirective(ParentRegion);
+ Recommend = ShouldBeInTeamsRegion;
+ }
if (NestingProhibited) {
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
<< CloseNesting << getOpenMPDirectiveName(ParentRegion) << Recommend
@@ -1984,6 +2444,88 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
return false;
}
+static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
+ ArrayRef<OMPClause *> Clauses,
+ ArrayRef<OpenMPDirectiveKind> AllowedNameModifiers) {
+ bool ErrorFound = false;
+ unsigned NamedModifiersNumber = 0;
+ SmallVector<const OMPIfClause *, OMPD_unknown + 1> FoundNameModifiers(
+ OMPD_unknown + 1);
+ SmallVector<SourceLocation, 4> NameModifierLoc;
+ for (const auto *C : Clauses) {
+ if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
+ // At most one if clause without a directive-name-modifier can appear on
+ // the directive.
+ OpenMPDirectiveKind CurNM = IC->getNameModifier();
+ if (FoundNameModifiers[CurNM]) {
+ S.Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
+ << (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
+ ErrorFound = true;
+ } else if (CurNM != OMPD_unknown) {
+ NameModifierLoc.push_back(IC->getNameModifierLoc());
+ ++NamedModifiersNumber;
+ }
+ FoundNameModifiers[CurNM] = IC;
+ if (CurNM == OMPD_unknown)
+ continue;
+ // Check if the specified name modifier is allowed for the current
+ // directive.
+ // At most one if clause with the particular directive-name-modifier can
+ // appear on the directive.
+ bool MatchFound = false;
+ for (auto NM : AllowedNameModifiers) {
+ if (CurNM == NM) {
+ MatchFound = true;
+ break;
+ }
+ }
+ if (!MatchFound) {
+ S.Diag(IC->getNameModifierLoc(),
+ diag::err_omp_wrong_if_directive_name_modifier)
+ << getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
+ ErrorFound = true;
+ }
+ }
+ }
+ // If any if clause on the directive includes a directive-name-modifier then
+ // all if clauses on the directive must include a directive-name-modifier.
+ if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
+ if (NamedModifiersNumber == AllowedNameModifiers.size()) {
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getLocStart(),
+ diag::err_omp_no_more_if_clause);
+ } else {
+ std::string Values;
+ std::string Sep(", ");
+ unsigned AllowedCnt = 0;
+ unsigned TotalAllowedNum =
+ AllowedNameModifiers.size() - NamedModifiersNumber;
+ for (unsigned Cnt = 0, End = AllowedNameModifiers.size(); Cnt < End;
+ ++Cnt) {
+ OpenMPDirectiveKind NM = AllowedNameModifiers[Cnt];
+ if (!FoundNameModifiers[NM]) {
+ Values += "'";
+ Values += getOpenMPDirectiveName(NM);
+ Values += "'";
+ if (AllowedCnt + 2 == TotalAllowedNum)
+ Values += " or ";
+ else if (AllowedCnt + 1 != TotalAllowedNum)
+ Values += Sep;
+ ++AllowedCnt;
+ }
+ }
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getLocStart(),
+ diag::err_omp_unnamed_if_clause)
+ << (TotalAllowedNum > 1) << Values;
+ }
+ for (auto Loc : NameModifierLoc) {
+ S.Diag(Loc, diag::note_omp_previous_named_if_clause);
+ }
+ ErrorFound = true;
+ }
+ return ErrorFound;
+}
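+// For illustration (assumed example): on '#pragma omp parallel for', where
+// only the 'parallel' name modifier is allowed, 'if(parallel: c1) if(c2)' is
+// rejected: once every allowed modifier is named, a plain 'if' may not appear.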
+
StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
@@ -2020,10 +2562,12 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
}
+ llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
switch (Kind) {
case OMPD_parallel:
Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_simd:
Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
@@ -2056,25 +2600,28 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
break;
case OMPD_critical:
- assert(ClausesWithImplicit.empty() &&
- "No clauses are allowed for 'omp critical' directive");
- Res = ActOnOpenMPCriticalDirective(DirName, AStmt, StartLoc, EndLoc);
+ Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
+ StartLoc, EndLoc);
break;
case OMPD_parallel_for:
Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_for_simd:
Res = ActOnOpenMPParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_sections:
Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_task:
Res =
ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_task);
break;
case OMPD_taskyield:
assert(ClausesWithImplicit.empty() &&
@@ -2108,9 +2655,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
break;
case OMPD_ordered:
- assert(ClausesWithImplicit.empty() &&
- "No clauses are allowed for 'omp ordered' directive");
- Res = ActOnOpenMPOrderedDirective(AStmt, StartLoc, EndLoc);
+ Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
break;
case OMPD_atomic:
Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc,
@@ -2123,6 +2669,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_target:
Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target);
break;
case OMPD_cancellation_point:
assert(ClausesWithImplicit.empty() &&
@@ -2132,11 +2679,30 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion);
break;
case OMPD_cancel:
- assert(ClausesWithImplicit.empty() &&
- "No clauses are allowed for 'omp cancel' directive");
assert(AStmt == nullptr &&
"No associated statement allowed for 'omp cancel' directive");
- Res = ActOnOpenMPCancelDirective(StartLoc, EndLoc, CancelRegion);
+ Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
+ CancelRegion);
+ AllowedNameModifiers.push_back(OMPD_cancel);
+ break;
+ case OMPD_target_data:
+ Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target_data);
+ break;
+ case OMPD_taskloop:
+ Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
+ case OMPD_taskloop_simd:
+ Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
+ case OMPD_distribute:
+ Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
break;
case OMPD_threadprivate:
llvm_unreachable("OpenMP Directive is not allowed");
@@ -2148,8 +2714,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
}
- if (!VarsWithInheritedDSA.empty())
- return StmtError();
+ ErrorFound = !VarsWithInheritedDSA.empty() || ErrorFound;
+
+ if (!AllowedNameModifiers.empty())
+ ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
+ ErrorFound;
if (ErrorFound)
return StmtError();
@@ -2160,7 +2729,9 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
CapturedStmt *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -2171,8 +2742,8 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
getCurFunction()->setHasBranchProtectedScope();
- return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
}
namespace {
@@ -2247,6 +2818,9 @@ public:
Expr *BuildPreCond(Scope *S, Expr *Cond) const;
/// \brief Build a reference expression to the counter to be used for codegen.
Expr *BuildCounterVar() const;
+ /// \brief Build a reference expression to the private counter to be used
+ /// for codegen.
+ Expr *BuildPrivateCounterVar() const;
/// \brief Build the initialization of the counter to be used for codegen.
Expr *BuildCounterInit() const;
/// \brief Build the step of the counter to be used for codegen.
@@ -2261,8 +2835,8 @@ private:
/// \brief Helper to set loop counter variable and its initializer.
bool SetVarAndLB(VarDecl *NewVar, DeclRefExpr *NewVarRefExpr, Expr *NewLB);
/// \brief Helper to set upper bound.
- bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, const SourceRange &SR,
- const SourceLocation &SL);
+ bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
+ SourceLocation SL);
/// \brief Helper to set loop increment.
bool SetStep(Expr *NewStep, bool Subtract);
};
@@ -2313,8 +2887,7 @@ bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar,
}
bool OpenMPIterationSpaceChecker::SetUB(Expr *NewUB, bool LessOp, bool StrictOp,
- const SourceRange &SR,
- const SourceLocation &SL) {
+ SourceRange SR, SourceLocation SL) {
// State consistency checking to ensure correct usage.
assert(Var != nullptr && LB != nullptr && UB == nullptr && Step == nullptr &&
!TestIsLessOp && !TestIsStrictOp);
@@ -2410,7 +2983,7 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
} else if (auto DS = dyn_cast<DeclStmt>(S)) {
if (DS->isSingleDecl()) {
if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
- if (Var->hasInit()) {
+ if (Var->hasInit() && !Var->getType()->isReferenceType()) {
// Accept non-canonical init form here but emit ext. warning.
if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
SemaRef.Diag(S->getLocStart(),
@@ -2630,6 +3203,8 @@ public:
NewVD->setPreviousDeclInSameBlockScope(
VD->isPreviousDeclInSameBlockScope());
VD->getDeclContext()->addHiddenDecl(NewVD);
+ if (VD->hasAttrs())
+ NewVD->setAttrs(VD->getAttrs());
transformedLocalDecl(VD, NewVD);
return NewVD;
}
@@ -2802,7 +3377,21 @@ Expr *OpenMPIterationSpaceChecker::BuildPreCond(Scope *S, Expr *Cond) const {
/// \brief Build a reference expression to the counter to be used for codegen.
Expr *OpenMPIterationSpaceChecker::BuildCounterVar() const {
- return buildDeclRefExpr(SemaRef, Var, Var->getType(), DefaultLoc);
+ return buildDeclRefExpr(SemaRef, Var, Var->getType().getNonReferenceType(),
+ DefaultLoc);
+}
+
+Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
+ if (Var && !Var->isInvalidDecl()) {
+ auto Type = Var->getType().getNonReferenceType();
+ auto *PrivateVar =
+ buildVarDecl(SemaRef, DefaultLoc, Type, Var->getName(),
+ Var->hasAttrs() ? &Var->getAttrs() : nullptr);
+ if (PrivateVar->isInvalidDecl())
+ return nullptr;
+ return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
+ }
+ return nullptr;
}
/// \brief Build the initialization of the counter to be used for codegen.
@@ -2820,6 +3409,8 @@ struct LoopIterationSpace {
Expr *NumIterations;
/// \brief The loop counter variable.
Expr *CounterVar;
+ /// \brief Private loop counter variable.
+ Expr *PrivateCounterVar;
/// \brief This is initializer for the initial value of #CounterVar.
Expr *CounterInit;
/// \brief This is step for the #CounterVar used to generate its update:
@@ -2840,14 +3431,13 @@ struct LoopIterationSpace {
void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
assert(getLangOpts().OpenMP && "OpenMP is not active.");
assert(Init && "Expected loop in canonical form.");
- unsigned CollapseIteration = DSAStack->getCollapseNumber();
- if (CollapseIteration > 0 &&
+ unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
+ if (AssociatedLoops > 0 &&
isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
OpenMPIterationSpaceChecker ISC(*this, ForLoc);
- if (!ISC.CheckInit(Init, /*EmitDiags=*/false)) {
+ if (!ISC.CheckInit(Init, /*EmitDiags=*/false))
DSAStack->addLoopControlVariable(ISC.GetLoopVar());
- }
- DSAStack->setCollapseNumber(CollapseIteration - 1);
+ DSAStack->setAssociatedLoops(AssociatedLoops - 1);
}
}
@@ -2856,7 +3446,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
static bool CheckOpenMPIterationSpace(
OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
- Expr *NestedLoopCountExpr,
+ Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
LoopIterationSpace &ResultIterSpace) {
// OpenMP [2.6, Canonical Loop Form]
@@ -2864,13 +3454,24 @@ static bool CheckOpenMPIterationSpace(
auto For = dyn_cast_or_null<ForStmt>(S);
if (!For) {
SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
- << (NestedLoopCountExpr != nullptr) << getOpenMPDirectiveName(DKind)
- << NestedLoopCount << (CurrentNestedLoopCount > 0)
- << CurrentNestedLoopCount;
- if (NestedLoopCount > 1)
- SemaRef.Diag(NestedLoopCountExpr->getExprLoc(),
- diag::note_omp_collapse_expr)
- << NestedLoopCountExpr->getSourceRange();
+ << (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
+ << getOpenMPDirectiveName(DKind) << NestedLoopCount
+ << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
+ if (NestedLoopCount > 1) {
+ if (CollapseLoopCountExpr && OrderedLoopCountExpr)
+ SemaRef.Diag(DSA.getConstructLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 2 << CollapseLoopCountExpr->getSourceRange()
+ << OrderedLoopCountExpr->getSourceRange();
+ else if (CollapseLoopCountExpr)
+ SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 0 << CollapseLoopCountExpr->getSourceRange();
+ else
+ SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
+ diag::note_omp_collapse_ordered_expr)
+ << 1 << OrderedLoopCountExpr->getSourceRange();
+ }
return true;
}
assert(For->getBody());
@@ -2893,7 +3494,7 @@ static bool CheckOpenMPIterationSpace(
// A variable of signed or unsigned integer type.
// For C++, a variable of a random access iterator type.
// For C, a variable of a pointer type.
- auto VarType = Var->getType();
+ auto VarType = Var->getType().getNonReferenceType();
if (!VarType->isDependentType() && !VarType->isIntegerType() &&
!VarType->isPointerType() &&
!(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
@@ -2929,12 +3530,12 @@ static bool CheckOpenMPIterationSpace(
? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
: OMPC_private;
if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != OMPC_threadprivate && DVar.CKind != PredeterminedCKind) ||
- (isOpenMPWorksharingDirective(DKind) && !isOpenMPSimdDirective(DKind) &&
- DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private &&
- DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_threadprivate)) &&
- ((DVar.CKind != OMPC_private && DVar.CKind != OMPC_threadprivate) ||
- DVar.RefExpr != nullptr)) {
+ DVar.CKind != PredeterminedCKind) ||
+ ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
+ isOpenMPDistributeDirective(DKind)) &&
+ !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
+ (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa)
<< getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(PredeterminedCKind);
@@ -2945,7 +3546,8 @@ static bool CheckOpenMPIterationSpace(
} else if (LoopVarRefExpr != nullptr) {
// Make the loop iteration variable private (for worksharing constructs),
// linear (for simd directives with the only one associated loop) or
- // lastprivate (for simd directives with several collapsed loops).
+ // lastprivate (for simd directives with several collapsed or ordered
+ // loops).
if (DVar.CKind == OMPC_unknown)
DVar = DSA.hasDSA(Var, isOpenMPPrivate, MatchesAlways(),
/*FromParent=*/false);
@@ -2966,8 +3568,11 @@ static bool CheckOpenMPIterationSpace(
// Build the loop's iteration space representation.
ResultIterSpace.PreCond = ISC.BuildPreCond(DSA.getCurScope(), For->getCond());
ResultIterSpace.NumIterations = ISC.BuildNumIterations(
- DSA.getCurScope(), /* LimitedType */ isOpenMPWorksharingDirective(DKind));
+ DSA.getCurScope(), (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)));
ResultIterSpace.CounterVar = ISC.BuildCounterVar();
+ ResultIterSpace.PrivateCounterVar = ISC.BuildPrivateCounterVar();
ResultIterSpace.CounterInit = ISC.BuildCounterInit();
ResultIterSpace.CounterStep = ISC.BuildCounterStep();
ResultIterSpace.InitSrcRange = ISC.GetInitSrcRange();
@@ -2978,6 +3583,7 @@ static bool CheckOpenMPIterationSpace(
HasErrors |= (ResultIterSpace.PreCond == nullptr ||
ResultIterSpace.NumIterations == nullptr ||
ResultIterSpace.CounterVar == nullptr ||
+ ResultIterSpace.PrivateCounterVar == nullptr ||
ResultIterSpace.CounterInit == nullptr ||
ResultIterSpace.CounterStep == nullptr);
@@ -3091,17 +3697,33 @@ static bool FitsInto(unsigned Bits, bool Signed, Expr *E, Sema &SemaRef) {
/// \return Returns 0 if one of the collapsed statements is not a canonical
/// for loop; otherwise returns the number of collapsed loops.
static unsigned
-CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
- Stmt *AStmt, Sema &SemaRef, DSAStackTy &DSA,
+CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
+ Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
+ DSAStackTy &DSA,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA,
OMPLoopDirective::HelperExprs &Built) {
unsigned NestedLoopCount = 1;
- if (NestedLoopCountExpr) {
+ if (CollapseLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
llvm::APSInt Result;
- if (NestedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
+ if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
NestedLoopCount = Result.getLimitedValue();
}
+ if (OrderedLoopCountExpr) {
+ // Found an 'ordered' clause with a parameter - calculate the loop count.
+ llvm::APSInt Result;
+ if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
+ if (Result.getLimitedValue() < NestedLoopCount) {
+ SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
+ diag::err_omp_wrong_ordered_loop_count)
+ << OrderedLoopCountExpr->getSourceRange();
+ SemaRef.Diag(CollapseLoopCountExpr->getExprLoc(),
+ diag::note_collapse_loop_count)
+ << CollapseLoopCountExpr->getSourceRange();
+ }
+ NestedLoopCount = Result.getLimitedValue();
+ }
+ }
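+  // For illustration (assumed example): '#pragma omp for ordered(1) collapse(2)'
+  // is diagnosed here because the 'ordered' parameter is smaller than the
+  // 'collapse' parameter.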
// This is a helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
SmallVector<LoopIterationSpace, 4> IterSpaces;
@@ -3109,8 +3731,9 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
if (CheckOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
- NestedLoopCount, NestedLoopCountExpr,
- VarsWithImplicitDSA, IterSpaces[Cnt]))
+ NestedLoopCount, CollapseLoopCountExpr,
+ OrderedLoopCountExpr, VarsWithImplicitDSA,
+ IterSpaces[Cnt]))
return 0;
// Move on to the next nested for loop, or to the loop body.
// OpenMP [2.8.1, simd construct, Restrictions]
@@ -3127,11 +3750,12 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
// An example of what is generated for the following code:
//
- // #pragma omp simd collapse(2)
+ // #pragma omp simd collapse(2) ordered(2)
// for (i = 0; i < NI; ++i)
- // for (j = J0; j < NJ; j+=2) {
- // <loop body>
- // }
+ // for (k = 0; k < NK; ++k)
+ // for (j = J0; j < NJ; j+=2) {
+ // <loop body>
+ // }
//
// We generate the code below.
// Note: the loop body may be outlined in CodeGen.
@@ -3254,7 +3878,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
QualType VType = LastIteration.get()->getType();
// Build variables passed into the runtime, necessary for worksharing directives.
ExprResult LB, UB, IL, ST, EUB;
- if (isOpenMPWorksharingDirective(DKind)) {
+ if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)) {
// Lower bound variable, initialized with zero.
VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
@@ -3302,7 +3927,9 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
{
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, VType, InitLoc);
- Expr *RHS = isOpenMPWorksharingDirective(DKind)
+ Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind))
? LB.get()
: SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
@@ -3312,7 +3939,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
// Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
SourceLocation CondLoc;
ExprResult Cond =
- isOpenMPWorksharingDirective(DKind)
+ (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
: SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
NumIterations.get());
@@ -3332,7 +3960,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
// Increments for worksharing loops (LB = LB + ST; UB = UB + ST).
// Used for directives with static scheduling.
ExprResult NextLB, NextUB;
- if (isOpenMPWorksharingDirective(DKind)) {
+ if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)) {
// LB + ST
NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
if (!NextLB.isUsable())
@@ -3437,6 +4066,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
}
// Save results
Built.Counters[Cnt] = IS.CounterVar;
+ Built.PrivateCounters[Cnt] = IS.PrivateCounterVar;
Built.Inits[Cnt] = Init.get();
Built.Updates[Cnt] = Update.get();
Built.Finals[Cnt] = Final.get();
@@ -3467,26 +4097,60 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr,
return NestedLoopCount;
}
-static Expr *GetCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {
- auto &&CollapseFilter = [](const OMPClause *C) -> bool {
- return C->getClauseKind() == OMPC_collapse;
- };
- OMPExecutableDirective::filtered_clause_iterator<decltype(CollapseFilter)> I(
- Clauses, std::move(CollapseFilter));
- if (I)
- return cast<OMPCollapseClause>(*I)->getNumForLoops();
+static Expr *getCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) {
+ auto CollapseClauses =
+ OMPExecutableDirective::getClausesOfKind<OMPCollapseClause>(Clauses);
+ if (CollapseClauses.begin() != CollapseClauses.end())
+ return (*CollapseClauses.begin())->getNumForLoops();
+ return nullptr;
+}
+
+static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {
+ auto OrderedClauses =
+ OMPExecutableDirective::getClausesOfKind<OMPOrderedClause>(Clauses);
+ if (OrderedClauses.begin() != OrderedClauses.end())
+ return (*OrderedClauses.begin())->getNumForLoops();
return nullptr;
}
+static bool checkSimdlenSafelenValues(Sema &S, const Expr *Simdlen,
+ const Expr *Safelen) {
+ llvm::APSInt SimdlenRes, SafelenRes;
+ if (Simdlen->isValueDependent() || Simdlen->isTypeDependent() ||
+ Simdlen->isInstantiationDependent() ||
+ Simdlen->containsUnexpandedParameterPack())
+ return false;
+ if (Safelen->isValueDependent() || Safelen->isTypeDependent() ||
+ Safelen->isInstantiationDependent() ||
+ Safelen->containsUnexpandedParameterPack())
+ return false;
+ Simdlen->EvaluateAsInt(SimdlenRes, S.Context);
+ Safelen->EvaluateAsInt(SafelenRes, S.Context);
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ if (SimdlenRes > SafelenRes) {
+ S.Diag(Simdlen->getExprLoc(), diag::err_omp_wrong_simdlen_safelen_values)
+ << Simdlen->getSourceRange() << Safelen->getSourceRange();
+ return true;
+ }
+ return false;
+}
+
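A minimal source-level sketch of the restriction the new checker enforces (function and variable names here are illustrative, not from the patch):

    // simdlen must not exceed safelen when both are given.
    void scale(float *a, int n) {
    #pragma omp simd simdlen(8) safelen(4) // error: simdlen > safelen
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    #pragma omp simd simdlen(4) safelen(8) // OK
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }
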
StmtResult Sema::ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopDirective::HelperExprs B;
- // In presence of clause 'collapse', it will define the nested loops number.
- unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_simd, GetCollapseNumberExpr(Clauses), AStmt, *this,
- *DSAStack, VarsWithImplicitDSA, B);
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
@@ -3503,6 +4167,24 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
}
}
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
getCurFunction()->setHasBranchProtectedScope();
return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
Clauses, AStmt, B);
@@ -3512,31 +4194,52 @@ StmtResult Sema::ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopDirective::HelperExprs B;
- // In presence of clause 'collapse', it will define the nested loops number.
- unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_for, GetCollapseNumberExpr(Clauses), AStmt, *this,
- *DSAStack, VarsWithImplicitDSA, B);
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
getCurFunction()->setHasBranchProtectedScope();
return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt, B);
+ Clauses, AStmt, B, DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopDirective::HelperExprs B;
- // In presence of clause 'collapse', it will define the nested loops number.
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_for_simd, GetCollapseNumberExpr(Clauses), AStmt,
- *this, *DSAStack, VarsWithImplicitDSA, B);
+ CheckOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
@@ -3553,6 +4256,24 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
}
}
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
getCurFunction()->setHasBranchProtectedScope();
return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
Clauses, AStmt, B);
@@ -3562,23 +4283,28 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
- if (!S)
+ if (S.begin() == S.end())
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : ++S) {
+ for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getLocStart(),
diag::err_omp_sections_substmt_not_section);
return StmtError();
}
+ cast<OMPSectionDirective>(SectionStmt)
+ ->setHasCancel(DSAStack->isCancelRegion());
}
} else {
Diag(AStmt->getLocStart(), diag::err_omp_sections_not_compound_stmt);
@@ -3587,25 +4313,33 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
getCurFunction()->setHasBranchProtectedScope();
- return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
}
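
The emptiness test and skip-first-child loop above reflect Stmt::children() now returning an iterator range rather than a pointer-like iterator. A stand-alone sketch of the same pattern over a plain STL container (the names are stand-ins; the real code walks Stmt children):

    #include <iterator>
    #include <vector>

    // Returns true if every element after the first satisfies a predicate,
    // mirroring the "all but the first must be a section" walk above.
    bool allTailElementsValid(const std::vector<int> &Children) {
      if (Children.begin() == Children.end()) // empty-range check
        return false;
      for (auto It = std::next(Children.begin()), E = Children.end(); It != E;
           ++It)
        if (*It < 0) // stand-in for !isa<OMPSectionDirective>(*It)
          return false;
      return true;
    }
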
StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
getCurFunction()->setHasBranchProtectedScope();
+ DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
- return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt);
+ return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
getCurFunction()->setHasBranchProtectedScope();
@@ -3632,30 +4366,81 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
getCurFunction()->setHasBranchProtectedScope();
return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
}
-StmtResult
-Sema::ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+StmtResult Sema::ActOnOpenMPCriticalDirective(
+ const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ bool ErrorFound = false;
+ llvm::APSInt Hint;
+ SourceLocation HintLoc;
+ bool DependentHint = false;
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_hint) {
+ if (!DirName.getName()) {
+ Diag(C->getLocStart(), diag::err_omp_hint_clause_no_name);
+ ErrorFound = true;
+ }
+ Expr *E = cast<OMPHintClause>(C)->getHint();
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ E->isInstantiationDependent())
+ DependentHint = true;
+ else {
+ Hint = E->EvaluateKnownConstInt(Context);
+ HintLoc = C->getLocStart();
+ }
+ }
+ }
+ if (ErrorFound)
+ return StmtError();
+ auto Pair = DSAStack->getCriticalWithHint(DirName);
+ if (Pair.first && DirName.getName() && !DependentHint) {
+ if (llvm::APSInt::compareValues(Hint, Pair.second) != 0) {
+ Diag(StartLoc, diag::err_omp_critical_with_hint);
+ if (HintLoc.isValid()) {
+ Diag(HintLoc, diag::note_omp_critical_hint_here)
+ << 0 << Hint.toString(/*Radix=*/10, /*Signed=*/false);
+ } else
+ Diag(StartLoc, diag::note_omp_critical_no_hint) << 0;
+ if (auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
+ Diag(C->getLocStart(), diag::note_omp_critical_hint_here)
+ << 1
+ << C->getHint()->EvaluateKnownConstInt(Context).toString(
+ /*Radix=*/10, /*Signed=*/false);
+ } else
+ Diag(Pair.first->getLocStart(), diag::note_omp_critical_no_hint) << 1;
+ }
+ }
getCurFunction()->setHasBranchProtectedScope();
- return OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc,
- AStmt);
+ auto *Dir = OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc,
+ Clauses, AStmt);
+ if (!Pair.first && DirName.getName() && !DependentHint)
+ DSAStack->addCriticalWithHint(Dir, Hint);
+ return Dir;
}
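
A sketch of what the new hint bookkeeping diagnoses at the source level (the critical name and hint constants are illustrative):

    void locked() {
    #pragma omp critical (A) hint(2)  // first 'critical (A)' registers hint 2
      { }
    #pragma omp critical (A) hint(4)  // error: same name, different hint
      { }
    #pragma omp critical hint(2)      // error: 'hint' requires a named critical
      { }
    }
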
StmtResult Sema::ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
CapturedStmt *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -3665,26 +4450,41 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
CS->getCapturedDecl()->setNothrow();
OMPLoopDirective::HelperExprs B;
- // In presence of clause 'collapse', it will define the nested loops number.
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_parallel_for, GetCollapseNumberExpr(Clauses), AStmt,
- *this, *DSAStack, VarsWithImplicitDSA, B);
+ CheckOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp parallel for loop exprs were not built");
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope))
+ return StmtError();
+ }
+ }
+
getCurFunction()->setHasBranchProtectedScope();
return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
CapturedStmt *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -3694,10 +4494,12 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
CS->getCapturedDecl()->setNothrow();
OMPLoopDirective::HelperExprs B;
- // In presence of clause 'collapse', it will define the nested loops number.
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_parallel_for_simd, GetCollapseNumberExpr(Clauses),
- AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ CheckOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
@@ -3711,6 +4513,24 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
}
}
+ // OpenMP 4.1 [2.8.1, simd Construct, Restrictions]
+ // If both simdlen and safelen clauses are specified, the value of the simdlen
+ // parameter must be less than or equal to the value of the safelen parameter.
+ OMPSafelenClause *Safelen = nullptr;
+ OMPSimdlenClause *Simdlen = nullptr;
+ for (auto *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_safelen)
+ Safelen = cast<OMPSafelenClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_simdlen)
+ Simdlen = cast<OMPSimdlenClause>(Clause);
+ if (Safelen && Simdlen)
+ break;
+ }
+ if (Simdlen && Safelen &&
+ checkSimdlenSafelenValues(*this, Simdlen->getSimdlen(),
+ Safelen->getSafelen()))
+ return StmtError();
+
getCurFunction()->setHasBranchProtectedScope();
return OMPParallelForSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
@@ -3720,23 +4540,28 @@ StmtResult
Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
- if (!S)
+ if (S.begin() == S.end())
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : ++S) {
+ for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getLocStart(),
diag::err_omp_parallel_sections_substmt_not_section);
return StmtError();
}
+ cast<OMPSectionDirective>(SectionStmt)
+ ->setHasCancel(DSAStack->isCancelRegion());
}
} else {
Diag(AStmt->getLocStart(),
@@ -3746,14 +4571,16 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
getCurFunction()->setHasBranchProtectedScope();
- return OMPParallelSectionsDirective::Create(Context, StartLoc, EndLoc,
- Clauses, AStmt);
+ return OMPParallelSectionsDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt, DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
CapturedStmt *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -3764,7 +4591,8 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
getCurFunction()->setHasBranchProtectedScope();
- return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
@@ -3785,7 +4613,10 @@ StmtResult Sema::ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
StmtResult Sema::ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
getCurFunction()->setHasBranchProtectedScope();
@@ -3799,14 +4630,79 @@ StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
}
-StmtResult Sema::ActOnOpenMPOrderedDirective(Stmt *AStmt,
+StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPClause *DependFound = nullptr;
+ OMPClause *DependSourceClause = nullptr;
+ OMPClause *DependSinkClause = nullptr;
+ bool ErrorFound = false;
+ OMPThreadsClause *TC = nullptr;
+ OMPSIMDClause *SC = nullptr;
+ for (auto *C : Clauses) {
+ if (auto *DC = dyn_cast<OMPDependClause>(C)) {
+ DependFound = C;
+ if (DC->getDependencyKind() == OMPC_DEPEND_source) {
+ if (DependSourceClause) {
+ Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(OMPD_ordered)
+ << getOpenMPClauseName(OMPC_depend) << 2;
+ ErrorFound = true;
+ } else
+ DependSourceClause = C;
+ if (DependSinkClause) {
+ Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ << 0;
+ ErrorFound = true;
+ }
+ } else if (DC->getDependencyKind() == OMPC_DEPEND_sink) {
+ if (DependSourceClause) {
+ Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ << 1;
+ ErrorFound = true;
+ }
+ DependSinkClause = C;
+ }
+ } else if (C->getClauseKind() == OMPC_threads)
+ TC = cast<OMPThreadsClause>(C);
+ else if (C->getClauseKind() == OMPC_simd)
+ SC = cast<OMPSIMDClause>(C);
+ }
+ if (!ErrorFound && !SC &&
+ isOpenMPSimdDirective(DSAStack->getParentDirective())) {
+ // OpenMP [2.8.1, simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
+ Diag(StartLoc, diag::err_omp_prohibited_region_simd);
+ ErrorFound = true;
+ } else if (DependFound && (TC || SC)) {
+ Diag(DependFound->getLocStart(), diag::err_omp_depend_clause_thread_simd)
+ << getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
+ ErrorFound = true;
+ } else if (DependFound && !DSAStack->getParentOrderedRegionParam()) {
+ Diag(DependFound->getLocStart(),
+ diag::err_omp_ordered_directive_without_param);
+ ErrorFound = true;
+ } else if (TC || Clauses.empty()) {
+ if (auto *Param = DSAStack->getParentOrderedRegionParam()) {
+ SourceLocation ErrLoc = TC ? TC->getLocStart() : StartLoc;
+ Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
+ << (TC != nullptr);
+ Diag(Param->getLocStart(), diag::note_omp_ordered_param);
+ ErrorFound = true;
+ }
+ }
+ if ((!AStmt && !DependFound) || ErrorFound)
+ return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ if (AStmt) {
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, AStmt);
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+
+ return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}
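
A doacross sketch of the clause combinations validated above (names are illustrative): 'depend(sink:)'/'depend(source)' require an 'ordered(n)' parameter on the enclosing loop directive, and may not be mixed with 'threads' or 'simd':

    void doacross(int *a, int n) {
    #pragma omp for ordered(1)              // parameterized 'ordered' enables doacross
      for (int i = 1; i < n; ++i) {
    #pragma omp ordered depend(sink: i - 1) // wait for iteration i-1
        a[i] += a[i - 1];
    #pragma omp ordered depend(source)      // signal completion of iteration i
      }
    }
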
namespace {
@@ -4006,7 +4902,7 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
NoteLoc = AtomicUnaryOp->getOperatorLoc();
NoteRange = SourceRange(NoteLoc, NoteLoc);
}
- } else {
+ } else if (!AtomicBody->isInstantiationDependent()) {
ErrorFound = NotABinaryOrUnaryExpression;
NoteLoc = ErrorLoc = AtomicBody->getExprLoc();
NoteRange = ErrorRange = AtomicBody->getSourceRange();
@@ -4053,7 +4949,9 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
auto CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -4150,7 +5048,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
NoteLoc = NotScalarExpr->getExprLoc();
NoteRange = NotScalarExpr->getSourceRange();
}
- } else {
+ } else if (!AtomicBody->isInstantiationDependent()) {
ErrorFound = NotAnAssignmentOp;
ErrorLoc = AtomicBody->getExprLoc();
ErrorRange = AtomicBody->getSourceRange();
@@ -4211,7 +5109,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
NoteLoc = NotScalarExpr->getExprLoc();
NoteRange = NotScalarExpr->getSourceRange();
}
- } else {
+ } else if (!AtomicBody->isInstantiationDependent()) {
ErrorFound = NotAnAssignmentOp;
ErrorLoc = AtomicBody->getExprLoc();
ErrorRange = AtomicBody->getSourceRange();
@@ -4289,7 +5187,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
UE = Checker.getUpdateExpr();
IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
IsPostfixUpdate = Checker.isPostfixUpdate();
- } else {
+ } else if (!AtomicBody->isInstantiationDependent()) {
ErrorLoc = AtomicBody->getExprLoc();
ErrorRange = AtomicBody->getSourceRange();
NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
@@ -4396,46 +5294,54 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
if (!IsUpdateExprFound) {
// { v = x; x = expr; }
- auto *FirstBinOp = dyn_cast<BinaryOperator>(First);
- if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) {
- ErrorFound = NotAnAssignmentOp;
- NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc()
- : First->getLocStart();
- NoteRange = ErrorRange = FirstBinOp
- ? FirstBinOp->getSourceRange()
- : SourceRange(ErrorLoc, ErrorLoc);
- } else {
- auto *SecondBinOp = dyn_cast<BinaryOperator>(Second);
- if (!SecondBinOp || SecondBinOp->getOpcode() != BO_Assign) {
+ auto *FirstExpr = dyn_cast<Expr>(First);
+ auto *SecondExpr = dyn_cast<Expr>(Second);
+ if (!FirstExpr || !SecondExpr ||
+ !(FirstExpr->isInstantiationDependent() ||
+ SecondExpr->isInstantiationDependent())) {
+ auto *FirstBinOp = dyn_cast<BinaryOperator>(First);
+ if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) {
ErrorFound = NotAnAssignmentOp;
- NoteLoc = ErrorLoc = SecondBinOp ? SecondBinOp->getOperatorLoc()
- : Second->getLocStart();
- NoteRange = ErrorRange = SecondBinOp
- ? SecondBinOp->getSourceRange()
+ NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc()
+ : First->getLocStart();
+ NoteRange = ErrorRange = FirstBinOp
+ ? FirstBinOp->getSourceRange()
: SourceRange(ErrorLoc, ErrorLoc);
} else {
- auto *PossibleXRHSInFirst =
- FirstBinOp->getRHS()->IgnoreParenImpCasts();
- auto *PossibleXLHSInSecond =
- SecondBinOp->getLHS()->IgnoreParenImpCasts();
- llvm::FoldingSetNodeID X1Id, X2Id;
- PossibleXRHSInFirst->Profile(X1Id, Context, /*Canonical=*/true);
- PossibleXLHSInSecond->Profile(X2Id, Context,
- /*Canonical=*/true);
- IsUpdateExprFound = X1Id == X2Id;
- if (IsUpdateExprFound) {
- V = FirstBinOp->getLHS();
- X = SecondBinOp->getLHS();
- E = SecondBinOp->getRHS();
- UE = nullptr;
- IsXLHSInRHSPart = false;
- IsPostfixUpdate = true;
+ auto *SecondBinOp = dyn_cast<BinaryOperator>(Second);
+ if (!SecondBinOp || SecondBinOp->getOpcode() != BO_Assign) {
+ ErrorFound = NotAnAssignmentOp;
+ NoteLoc = ErrorLoc = SecondBinOp
+ ? SecondBinOp->getOperatorLoc()
+ : Second->getLocStart();
+ NoteRange = ErrorRange =
+ SecondBinOp ? SecondBinOp->getSourceRange()
+ : SourceRange(ErrorLoc, ErrorLoc);
} else {
- ErrorFound = NotASpecificExpression;
- ErrorLoc = FirstBinOp->getExprLoc();
- ErrorRange = FirstBinOp->getSourceRange();
- NoteLoc = SecondBinOp->getLHS()->getExprLoc();
- NoteRange = SecondBinOp->getRHS()->getSourceRange();
+ auto *PossibleXRHSInFirst =
+ FirstBinOp->getRHS()->IgnoreParenImpCasts();
+ auto *PossibleXLHSInSecond =
+ SecondBinOp->getLHS()->IgnoreParenImpCasts();
+ llvm::FoldingSetNodeID X1Id, X2Id;
+ PossibleXRHSInFirst->Profile(X1Id, Context,
+ /*Canonical=*/true);
+ PossibleXLHSInSecond->Profile(X2Id, Context,
+ /*Canonical=*/true);
+ IsUpdateExprFound = X1Id == X2Id;
+ if (IsUpdateExprFound) {
+ V = FirstBinOp->getLHS();
+ X = SecondBinOp->getLHS();
+ E = SecondBinOp->getRHS();
+ UE = nullptr;
+ IsXLHSInRHSPart = false;
+ IsPostfixUpdate = true;
+ } else {
+ ErrorFound = NotASpecificExpression;
+ ErrorLoc = FirstBinOp->getExprLoc();
+ ErrorRange = FirstBinOp->getSourceRange();
+ NoteLoc = SecondBinOp->getLHS()->getExprLoc();
+ NoteRange = SecondBinOp->getRHS()->getSourceRange();
+ }
}
}
}
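
The reindented block still recognizes the two-statement capture form; it now merely defers the diagnostic when either statement is instantiation-dependent (e.g. inside a template). A sketch of the accepted form, with illustrative names:

    void capture(int &x, int &v, int e) {
    #pragma omp atomic capture
      { v = x; x = e; } // postfix form: v receives the old value of x
    }
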
@@ -4474,7 +5380,16 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
// OpenMP [2.16, Nesting of Regions]
// If specified, a teams construct must be contained within a target
@@ -4511,10 +5426,27 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}
+StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ AStmt);
+}
+
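A usage sketch for the newly handled directive (array-section bounds are illustrative):

    void offload(int *p, int n) {
    #pragma omp target data map(tofrom: p[0:n]) // p stays mapped for the region
      {
    #pragma omp target
        for (int i = 0; i < n; ++i)
          p[i] += 1;
      }
    }
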
StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ if (!AStmt)
+ return StmtError();
+
CapturedStmt *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -4550,7 +5482,8 @@ Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
CancelRegion);
}
-StmtResult Sema::ActOnOpenMPCancelDirective(SourceLocation StartLoc,
+StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion) {
if (CancelRegion != OMPD_parallel && CancelRegion != OMPD_for &&
@@ -4567,7 +5500,123 @@ StmtResult Sema::ActOnOpenMPCancelDirective(SourceLocation StartLoc,
Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 1;
return StmtError();
}
- return OMPCancelDirective::Create(Context, StartLoc, EndLoc, CancelRegion);
+ DSAStack->setParentCancelRegion(/*Cancel=*/true);
+ return OMPCancelDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ CancelRegion);
+}
+
+static bool checkGrainsizeNumTasksClauses(Sema &S,
+ ArrayRef<OMPClause *> Clauses) {
+ OMPClause *PrevClause = nullptr;
+ bool ErrorFound = false;
+ for (auto *C : Clauses) {
+ if (C->getClauseKind() == OMPC_grainsize ||
+ C->getClauseKind() == OMPC_num_tasks) {
+ if (!PrevClause)
+ PrevClause = C;
+ else if (PrevClause->getClauseKind() != C->getClauseKind()) {
+ S.Diag(C->getLocStart(),
+ diag::err_omp_grainsize_num_tasks_mutually_exclusive)
+ << getOpenMPClauseName(C->getClauseKind())
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ S.Diag(PrevClause->getLocStart(),
+ diag::note_omp_previous_grainsize_num_tasks)
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ ErrorFound = true;
+ }
+ }
+ }
+ return ErrorFound;
+}
+
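A sketch of the mutual-exclusion restriction the helper enforces (loop bounds are illustrative):

    void chunked(int n) {
    #pragma omp taskloop grainsize(4) num_tasks(2) // error: mutually exclusive
      for (int i = 0; i < n; ++i)
        ;
    }
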
+StmtResult Sema::ActOnOpenMPTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+ // If a 'collapse' clause (or an 'ordered' clause with a loop count) is
+ // present, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP [2.9.2, taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTaskLoopSimdDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopDirective::HelperExprs B;
+ // If a 'collapse' clause with a loop count is present, it defines the
+ // number of nested loops.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt,
+ *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPDistributeDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
}
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
@@ -4576,9 +5625,6 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation EndLoc) {
OMPClause *Res = nullptr;
switch (Kind) {
- case OMPC_if:
- Res = ActOnOpenMPIfClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
case OMPC_final:
Res = ActOnOpenMPFinalClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -4588,9 +5634,37 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_safelen:
Res = ActOnOpenMPSafelenClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_simdlen:
+ Res = ActOnOpenMPSimdlenClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_collapse:
Res = ActOnOpenMPCollapseClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_ordered:
+ Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Expr);
+ break;
+ case OMPC_device:
+ Res = ActOnOpenMPDeviceClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_num_teams:
+ Res = ActOnOpenMPNumTeamsClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_thread_limit:
+ Res = ActOnOpenMPThreadLimitClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_priority:
+ Res = ActOnOpenMPPriorityClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_grainsize:
+ Res = ActOnOpenMPGrainsizeClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_num_tasks:
+ Res = ActOnOpenMPNumTasksClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_hint:
+ Res = ActOnOpenMPHintClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_if:
case OMPC_default:
case OMPC_proc_bind:
case OMPC_schedule:
@@ -4603,7 +5677,6 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
- case OMPC_ordered:
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
@@ -4615,14 +5688,21 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_capture:
case OMPC_seq_cst:
case OMPC_depend:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_nogroup:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
return Res;
}
-OMPClause *Sema::ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
+OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
+ Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc,
SourceLocation EndLoc) {
Expr *ValExpr = Condition;
if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
@@ -4636,7 +5716,8 @@ OMPClause *Sema::ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
ValExpr = Val.get();
}
- return new (Context) OMPIfClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+ return new (Context) OMPIfClause(NameModifier, ValExpr, StartLoc, LParenLoc,
+ NameModifierLoc, ColonLoc, EndLoc);
}
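
A sketch of the directive-name-modifier syntax the extended signature supports (the condition is illustrative):

    void run(int n, bool tiny) {
    #pragma omp parallel for if(parallel: !tiny) // modifier names the directive
      for (int i = 0; i < n; ++i)
        ;
    }
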
OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition,
@@ -4701,38 +5782,52 @@ ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
}
+static bool IsNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef,
+ OpenMPClauseKind CKind,
+ bool StrictlyPositive) {
+ if (!ValExpr->isTypeDependent() && !ValExpr->isValueDependent() &&
+ !ValExpr->isInstantiationDependent()) {
+ SourceLocation Loc = ValExpr->getExprLoc();
+ ExprResult Value =
+ SemaRef.PerformOpenMPImplicitIntegerConversion(Loc, ValExpr);
+ if (Value.isInvalid())
+ return false;
+
+ ValExpr = Value.get();
+ // The expression must evaluate to a non-negative integer value.
+ llvm::APSInt Result;
+ if (ValExpr->isIntegerConstantExpr(Result, SemaRef.Context) &&
+ Result.isSigned() &&
+ !((!StrictlyPositive && Result.isNonNegative()) ||
+ (StrictlyPositive && Result.isStrictlyPositive()))) {
+ SemaRef.Diag(Loc, diag::err_omp_negative_expression_in_clause)
+ << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
+ << ValExpr->getSourceRange();
+ return false;
+ }
+ }
+ return true;
+}
+
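A sketch of what the shared helper rejects for a strictly positive clause such as num_threads (values are illustrative):

    void spawn() {
    #pragma omp parallel num_threads(0) // error: must be strictly positive
      { }
    #pragma omp parallel num_threads(4) // OK
      { }
    }
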
OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
Expr *ValExpr = NumThreads;
- if (!NumThreads->isValueDependent() && !NumThreads->isTypeDependent() &&
- !NumThreads->containsUnexpandedParameterPack()) {
- SourceLocation NumThreadsLoc = NumThreads->getLocStart();
- ExprResult Val =
- PerformOpenMPImplicitIntegerConversion(NumThreadsLoc, NumThreads);
- if (Val.isInvalid())
- return nullptr;
- ValExpr = Val.get();
-
- // OpenMP [2.5, Restrictions]
- // The num_threads expression must evaluate to a positive integer value.
- llvm::APSInt Result;
- if (ValExpr->isIntegerConstantExpr(Result, Context) && Result.isSigned() &&
- !Result.isStrictlyPositive()) {
- Diag(NumThreadsLoc, diag::err_omp_negative_expression_in_clause)
- << "num_threads" << NumThreads->getSourceRange();
- return nullptr;
- }
- }
+ // OpenMP [2.5, Restrictions]
+ // The num_threads expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads,
+ /*StrictlyPositive=*/true))
+ return nullptr;
return new (Context)
OMPNumThreadsClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}
ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
- OpenMPClauseKind CKind) {
+ OpenMPClauseKind CKind,
+ bool StrictlyPositive) {
if (!E)
return ExprError();
if (E->isValueDependent() || E->isTypeDependent() ||
@@ -4742,9 +5837,11 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
ExprResult ICE = VerifyIntegerConstantExpression(E, &Result);
if (ICE.isInvalid())
return ExprError();
- if (!Result.isStrictlyPositive()) {
+ if ((StrictlyPositive && !Result.isStrictlyPositive()) ||
+ (!StrictlyPositive && !Result.isNonNegative())) {
Diag(E->getExprLoc(), diag::err_omp_negative_expression_in_clause)
- << getOpenMPClauseName(CKind) << E->getSourceRange();
+ << getOpenMPClauseName(CKind) << (StrictlyPositive ? 1 : 0)
+ << E->getSourceRange();
return ExprError();
}
if (CKind == OMPC_aligned && !Result.isPowerOf2()) {
@@ -4752,9 +5849,10 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
<< E->getSourceRange();
return ExprError();
}
- if (CKind == OMPC_collapse) {
- DSAStack->setCollapseNumber(Result.getExtValue());
- }
+ if (CKind == OMPC_collapse && DSAStack->getAssociatedLoops() == 1)
+ DSAStack->setAssociatedLoops(Result.getExtValue());
+ else if (CKind == OMPC_ordered)
+ DSAStack->setAssociatedLoops(Result.getExtValue());
return ICE;
}
@@ -4771,6 +5869,19 @@ OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc,
OMPSafelenClause(Safelen.get(), StartLoc, LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.8.1, simd construct, Description]
+ // The parameter of the simdlen clause must be a constant
+ // positive integer expression.
+ ExprResult Simdlen = VerifyPositiveIntegerConstantInClause(Len, OMPC_simdlen);
+ if (Simdlen.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPSimdlenClause(Simdlen.get(), StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -4788,6 +5899,28 @@ OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
OMPCollapseClause(NumForLoopsResult.get(), StartLoc, LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ SourceLocation LParenLoc,
+ Expr *NumForLoops) {
+ // OpenMP [2.7.1, loop construct, Description]
+ // OpenMP [2.8.1, simd construct, Description]
+ // OpenMP [2.9.6, distribute construct, Description]
+ // The parameter of the ordered clause, if present, must be a constant
+ // positive integer expression.
+ if (NumForLoops && LParenLoc.isValid()) {
+ ExprResult NumForLoopsResult =
+ VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_ordered);
+ if (NumForLoopsResult.isInvalid())
+ return nullptr;
+ NumForLoops = NumForLoopsResult.get();
+ } else
+ NumForLoops = nullptr;
+ DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops);
+ return new (Context)
+ OMPOrderedClause(NumForLoops, StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSimpleClause(
OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -4807,6 +5940,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_schedule:
case OMPC_private:
@@ -4830,39 +5964,58 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_capture:
case OMPC_seq_cst:
case OMPC_depend:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
return Res;
}
+static std::string
+getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
+ ArrayRef<unsigned> Exclude = llvm::None) {
+ std::string Values;
+ unsigned Bound = Last >= 2 ? Last - 2 : 0;
+ unsigned Skipped = Exclude.size();
+ auto S = Exclude.begin(), E = Exclude.end();
+ for (unsigned i = First; i < Last; ++i) {
+ if (std::find(S, E, i) != E) {
+ --Skipped;
+ continue;
+ }
+ Values += "'";
+ Values += getOpenMPSimpleClauseTypeName(K, i);
+ Values += "'";
+ if (i == Bound - Skipped)
+ Values += " or ";
+ else if (i != Bound + 1 - Skipped)
+ Values += ", ";
+ }
+ return Values;
+}
+
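A stand-alone re-implementation of the formatting logic above, useful for seeing the strings it produces; typeName() is a hypothetical stand-in for getOpenMPSimpleClauseTypeName():

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    static const char *typeName(unsigned I) {
      static const char *Names[] = {"static", "dynamic", "guided"};
      return Names[I];
    }

    static std::string listOfValues(unsigned First, unsigned Last,
                                    std::vector<unsigned> Exclude = {}) {
      std::string Values;
      unsigned Bound = Last >= 2 ? Last - 2 : 0;
      unsigned Skipped = Exclude.size();
      for (unsigned I = First; I < Last; ++I) {
        if (std::find(Exclude.begin(), Exclude.end(), I) != Exclude.end()) {
          --Skipped;
          continue;
        }
        Values += "'";
        Values += typeName(I);
        Values += "'";
        if (I == Bound - Skipped)          // penultimate kept item
          Values += " or ";
        else if (I != Bound + 1 - Skipped) // any kept item before that
          Values += ", ";
      }
      return Values;
    }

    int main() {
      std::cout << listOfValues(0, 3) << "\n";      // 'static', 'dynamic' or 'guided'
      std::cout << listOfValues(0, 3, {1}) << "\n"; // 'static' or 'guided'
    }
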
OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindKwLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
if (Kind == OMPC_DEFAULT_unknown) {
- std::string Values;
static_assert(OMPC_DEFAULT_unknown > 0,
"OMPC_DEFAULT_unknown not greater than 0");
- std::string Sep(", ");
- for (unsigned i = 0; i < OMPC_DEFAULT_unknown; ++i) {
- Values += "'";
- Values += getOpenMPSimpleClauseTypeName(OMPC_default, i);
- Values += "'";
- switch (i) {
- case OMPC_DEFAULT_unknown - 2:
- Values += " or ";
- break;
- case OMPC_DEFAULT_unknown - 1:
- break;
- default:
- Values += Sep;
- break;
- }
- }
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
- << Values << getOpenMPClauseName(OMPC_default);
+ << getListOfPossibleValues(OMPC_default, /*First=*/0,
+ /*Last=*/OMPC_DEFAULT_unknown)
+ << getOpenMPClauseName(OMPC_default);
return nullptr;
}
switch (Kind) {
@@ -4886,25 +6039,10 @@ OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
if (Kind == OMPC_PROC_BIND_unknown) {
- std::string Values;
- std::string Sep(", ");
- for (unsigned i = 0; i < OMPC_PROC_BIND_unknown; ++i) {
- Values += "'";
- Values += getOpenMPSimpleClauseTypeName(OMPC_proc_bind, i);
- Values += "'";
- switch (i) {
- case OMPC_PROC_BIND_unknown - 2:
- Values += " or ";
- break;
- case OMPC_PROC_BIND_unknown - 1:
- break;
- default:
- Values += Sep;
- break;
- }
- }
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
- << Values << getOpenMPClauseName(OMPC_proc_bind);
+ << getListOfPossibleValues(OMPC_proc_bind, /*First=*/0,
+ /*Last=*/OMPC_PROC_BIND_unknown)
+ << getOpenMPClauseName(OMPC_proc_bind);
return nullptr;
}
return new (Context)
@@ -4912,21 +6050,33 @@ OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
}
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
- OpenMPClauseKind Kind, unsigned Argument, Expr *Expr,
+ OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation ArgumentLoc, SourceLocation CommaLoc,
+ ArrayRef<SourceLocation> ArgumentLoc, SourceLocation DelimLoc,
SourceLocation EndLoc) {
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_schedule:
+ enum { Modifier1, Modifier2, ScheduleKind, NumberOfElements };
+ assert(Argument.size() == NumberOfElements &&
+ ArgumentLoc.size() == NumberOfElements);
Res = ActOnOpenMPScheduleClause(
- static_cast<OpenMPScheduleClauseKind>(Argument), Expr, StartLoc,
- LParenLoc, ArgumentLoc, CommaLoc, EndLoc);
+ static_cast<OpenMPScheduleClauseModifier>(Argument[Modifier1]),
+ static_cast<OpenMPScheduleClauseModifier>(Argument[Modifier2]),
+ static_cast<OpenMPScheduleClauseKind>(Argument[ScheduleKind]), Expr,
+ StartLoc, LParenLoc, ArgumentLoc[Modifier1], ArgumentLoc[Modifier2],
+ ArgumentLoc[ScheduleKind], DelimLoc, EndLoc);
break;
case OMPC_if:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1);
+ Res = ActOnOpenMPIfClause(static_cast<OpenMPDirectiveKind>(Argument.back()),
+ Expr, StartLoc, LParenLoc, ArgumentLoc.back(),
+ DelimLoc, EndLoc);
+ break;
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_default:
case OMPC_proc_bind:
@@ -4951,38 +6101,91 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_capture:
case OMPC_seq_cst:
case OMPC_depend:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
return Res;
}
+static bool checkScheduleModifiers(Sema &S, OpenMPScheduleClauseModifier M1,
+ OpenMPScheduleClauseModifier M2,
+ SourceLocation M1Loc, SourceLocation M2Loc) {
+ if (M1 == OMPC_SCHEDULE_MODIFIER_unknown && M1Loc.isValid()) {
+ SmallVector<unsigned, 2> Excluded;
+ if (M2 != OMPC_SCHEDULE_MODIFIER_unknown)
+ Excluded.push_back(M2);
+ if (M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)
+ Excluded.push_back(OMPC_SCHEDULE_MODIFIER_monotonic);
+ if (M2 == OMPC_SCHEDULE_MODIFIER_monotonic)
+ Excluded.push_back(OMPC_SCHEDULE_MODIFIER_nonmonotonic);
+ S.Diag(M1Loc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_schedule,
+ /*First=*/OMPC_SCHEDULE_MODIFIER_unknown + 1,
+ /*Last=*/OMPC_SCHEDULE_MODIFIER_last,
+ Excluded)
+ << getOpenMPClauseName(OMPC_schedule);
+ return true;
+ }
+ return false;
+}
+
OMPClause *Sema::ActOnOpenMPScheduleClause(
+ OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
- SourceLocation EndLoc) {
+ SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
+ SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
+ if (checkScheduleModifiers(*this, M1, M2, M1Loc, M2Loc) ||
+ checkScheduleModifiers(*this, M2, M1, M2Loc, M1Loc))
+ return nullptr;
+ // OpenMP, 2.7.1, Loop Construct, Restrictions
+ // Either the monotonic modifier or the nonmonotonic modifier can be specified
+ // but not both.
+ if ((M1 == M2 && M1 != OMPC_SCHEDULE_MODIFIER_unknown) ||
+ (M1 == OMPC_SCHEDULE_MODIFIER_monotonic &&
+ M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) ||
+ (M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic &&
+ M2 == OMPC_SCHEDULE_MODIFIER_monotonic)) {
+ Diag(M2Loc, diag::err_omp_unexpected_schedule_modifier)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule, M2)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule, M1);
+ return nullptr;
+ }
if (Kind == OMPC_SCHEDULE_unknown) {
std::string Values;
- std::string Sep(", ");
- for (unsigned i = 0; i < OMPC_SCHEDULE_unknown; ++i) {
- Values += "'";
- Values += getOpenMPSimpleClauseTypeName(OMPC_schedule, i);
- Values += "'";
- switch (i) {
- case OMPC_SCHEDULE_unknown - 2:
- Values += " or ";
- break;
- case OMPC_SCHEDULE_unknown - 1:
- break;
- default:
- Values += Sep;
- break;
- }
+ if (M1Loc.isInvalid() && M2Loc.isInvalid()) {
+ unsigned Exclude[] = {OMPC_SCHEDULE_unknown};
+ Values = getListOfPossibleValues(OMPC_schedule, /*First=*/0,
+ /*Last=*/OMPC_SCHEDULE_MODIFIER_last,
+ Exclude);
+ } else {
+ Values = getListOfPossibleValues(OMPC_schedule, /*First=*/0,
+ /*Last=*/OMPC_SCHEDULE_unknown);
}
Diag(KindLoc, diag::err_omp_unexpected_clause_value)
<< Values << getOpenMPClauseName(OMPC_schedule);
return nullptr;
}
+ // OpenMP, 2.7.1, Loop Construct, Restrictions
+ // The nonmonotonic modifier can only be specified with schedule(dynamic) or
+ // schedule(guided).
+ if ((M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
+ Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) {
+ Diag(M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ? M1Loc : M2Loc,
+ diag::err_omp_schedule_nonmonotonic_static);
+ return nullptr;
+ }
Expr *ValExpr = ChunkSize;
Expr *HelperValExpr = nullptr;
if (ChunkSize) {
@@ -5004,7 +6207,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
if (ValExpr->isIntegerConstantExpr(Result, Context)) {
if (Result.isSigned() && !Result.isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
- << "schedule" << ChunkSize->getSourceRange();
+ << "schedule" << 1 << ChunkSize->getSourceRange();
return nullptr;
}
} else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective())) {
@@ -5018,8 +6221,9 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
}
}
- return new (Context) OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc,
- EndLoc, Kind, ValExpr, HelperValExpr);
+ return new (Context)
+ OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc, Kind,
+ ValExpr, HelperValExpr, M1, M1Loc, M2, M2Loc);
}
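
Source-level sketches of the modifier rules checked above (schedule kinds and chunk sizes are illustrative):

    void sched(int n) {
    #pragma omp for schedule(nonmonotonic: dynamic, 4) // OK
      for (int i = 0; i < n; ++i)
        ;
    #pragma omp for schedule(nonmonotonic: static) // error: needs dynamic/guided
      for (int i = 0; i < n; ++i)
        ;
    #pragma omp for schedule(monotonic, nonmonotonic: dynamic) // error: exclusive
      for (int i = 0; i < n; ++i)
        ;
    }
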
OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
@@ -5054,10 +6258,20 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_seq_cst:
Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
break;
+ case OMPC_threads:
+ Res = ActOnOpenMPThreadsClause(StartLoc, EndLoc);
+ break;
+ case OMPC_simd:
+ Res = ActOnOpenMPSIMDClause(StartLoc, EndLoc);
+ break;
+ case OMPC_nogroup:
+ Res = ActOnOpenMPNogroupClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_schedule:
case OMPC_private:
@@ -5074,18 +6288,20 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_threadprivate:
case OMPC_flush:
case OMPC_depend:
+ case OMPC_device:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
+ case OMPC_hint:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
return Res;
}
-OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- DSAStack->setOrderedRegion();
- return new (Context) OMPOrderedClause(StartLoc, EndLoc);
-}
-
OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
DSAStack->setNowaitRegion();
@@ -5127,12 +6343,28 @@ OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPThreadsClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPSIMDClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPNogroupClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc) {
+ OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
+ OpenMPMapClauseKind MapType, SourceLocation DepLinMapLoc) {
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_private:
@@ -5153,7 +6385,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
break;
case OMPC_linear:
Res = ActOnOpenMPLinearClause(VarList, TailExpr, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ LinKind, DepLinMapLoc, ColonLoc, EndLoc);
break;
case OMPC_aligned:
Res = ActOnOpenMPAlignedClause(VarList, TailExpr, StartLoc, LParenLoc,
@@ -5169,13 +6401,18 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_depend:
- Res = ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList, StartLoc,
- LParenLoc, EndLoc);
+ Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
+ StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_map:
+ Res = ActOnOpenMPMapClause(MapTypeModifier, MapType, DepLinMapLoc, ColonLoc,
+ VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
+ case OMPC_simdlen:
case OMPC_collapse:
case OMPC_default:
case OMPC_proc_bind:
@@ -5190,6 +6427,16 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
case OMPC_unknown:
llvm_unreachable("Clause is not allowed.");
}
@@ -5240,16 +6487,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
diag::err_omp_private_incomplete_type)) {
continue;
}
- if (Type->isReferenceType()) {
- Diag(ELoc, diag::err_omp_clause_ref_type_arg)
- << getOpenMPClauseName(OMPC_private) << Type;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- continue;
- }
+ Type = Type.getNonReferenceType();
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct]
@@ -5290,7 +6528,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// IdResolver, so the code in the OpenMP region uses original variable for
// proper diagnostics.
Type = Type.getUnqualifiedType();
- auto VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type, VD->getName());
+ auto VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
if (VDPrivate->isInvalidDecl())
continue;
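
With the reference-type rejection replaced by getNonReferenceType(), a list item of reference type is now privatized through its referenced type rather than diagnosed; a sketch (names are illustrative):

    void privatizeRef() {
      int x = 0;
      int &r = x;
    #pragma omp parallel private(r) // now accepted; each thread gets its own int
      { r = 1; }
    }
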
@@ -5384,23 +6623,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
diag::err_omp_firstprivate_incomplete_type)) {
continue;
}
- if (Type->isReferenceType()) {
- if (IsImplicitClause) {
- Diag(ImplicitClauseLoc,
- diag::err_omp_task_predetermined_firstprivate_ref_type_arg)
- << Type;
- Diag(RefExpr->getExprLoc(), diag::note_used_here);
- } else {
- Diag(ELoc, diag::err_omp_clause_ref_type_arg)
- << getOpenMPClauseName(OMPC_firstprivate) << Type;
- }
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- continue;
- }
+ Type = Type.getNonReferenceType();
// OpenMP [2.9.3.4, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a private
@@ -5492,6 +6715,49 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
continue;
}
}
+
+ // OpenMP 4.5 [2.15.3.4, Restrictions, p.3]
+ // A list item that is private within a teams region must not appear in a
+ // firstprivate clause on a distribute construct if any of the distribute
+ // regions arising from the distribute construct ever bind to any of the
+ // teams regions arising from the teams construct.
+ // OpenMP 4.5 [2.15.3.4, Restrictions, p.3]
+ // A list item that appears in a reduction clause of a teams construct
+ // must not appear in a firstprivate clause on a distribute construct if
+ // any of the distribute regions arising from the distribute construct
+ // ever bind to any of the teams regions arising from the teams construct.
+ // OpenMP 4.5 [2.10.8, Distribute Construct, p.3]
+ // A list item may appear in a firstprivate or lastprivate clause but not
+ // both.
+ if (CurrDir == OMPD_distribute) {
+ DVar = DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_private),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPTeamsDirective(K);
+ },
+ false);
+ if (DVar.CKind == OMPC_private && isOpenMPTeamsDirective(DVar.DKind)) {
+ Diag(ELoc, diag::err_omp_firstprivate_distribute_private_teams);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ DVar = DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
+ [](OpenMPDirectiveKind K) -> bool {
+ return isOpenMPTeamsDirective(K);
+ },
+ false);
+ if (DVar.CKind == OMPC_reduction &&
+ isOpenMPTeamsDirective(DVar.DKind)) {
+ Diag(ELoc, diag::err_omp_firstprivate_distribute_in_teams_reduction);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_lastprivate) {
+ Diag(ELoc, diag::err_omp_firstprivate_and_lastprivate_in_distribute);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
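For context, a minimal sketch of the kind of code these new distribute checks reject (illustrative only, not taken from the patch or its tests; assumes an OpenMP 4.5 compiler invoked with -fopenmp):

    void f(int n) {
      int x = 0;
      #pragma omp target
      #pragma omp teams private(x)
      // error: x is private in the binding teams region
      #pragma omp distribute firstprivate(x)
      for (int i = 0; i < n; ++i)
        x += i;
    }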
}
// Variably modified types are not supported for tasks.
@@ -5509,7 +6775,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
}
Type = Type.getUnqualifiedType();
- auto VDPrivate = buildVarDecl(*this, ELoc, Type, VD->getName());
+ auto VDPrivate = buildVarDecl(*this, ELoc, Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
// Generate helper private variable and initialize it with the value of the
// original variable. The address of the original variable is replaced by
// the address of the new private variable in the CodeGen. This new variable
@@ -5536,6 +6803,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
VDPrivate->setInvalidDecl();
else
VDPrivate->setInit(Result.getAs<Expr>());
+ // Remove temp variable declaration.
+ Context.Deallocate(VDInitTemp);
} else {
auto *VDInit =
buildVarDecl(*this, DE->getLocStart(), Type, ".firstprivate.temp");
@@ -5618,16 +6887,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
diag::err_omp_lastprivate_incomplete_type)) {
continue;
}
- if (Type->isReferenceType()) {
- Diag(ELoc, diag::err_omp_clause_ref_type_arg)
- << getOpenMPClauseName(OMPC_lastprivate) << Type;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
- continue;
- }
+ Type = Type.getNonReferenceType();
// OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct]
@@ -5674,11 +6934,13 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
// operator for the class type.
Type = Context.getBaseElementType(Type).getNonReferenceType();
auto *SrcVD = buildVarDecl(*this, DE->getLocStart(),
- Type.getUnqualifiedType(), ".lastprivate.src");
+ Type.getUnqualifiedType(), ".lastprivate.src",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoSrcExpr = buildDeclRefExpr(
*this, SrcVD, Type.getUnqualifiedType(), DE->getExprLoc());
auto *DstVD =
- buildVarDecl(*this, DE->getLocStart(), Type, ".lastprivate.dst");
+ buildVarDecl(*this, DE->getLocStart(), Type, ".lastprivate.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoDstExpr =
buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc());
// For arrays generate assignment operation for single element and replace
@@ -5692,6 +6954,18 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
if (AssignmentOp.isInvalid())
continue;
+ // OpenMP 4.5 [2.10.8, Distribute Construct, p.3]
+ // A list item may appear in a firstprivate or lastprivate clause but not
+ // both.
+ if (CurrDir == OMPD_distribute) {
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ if (DVar.CKind == OMPC_firstprivate) {
+ Diag(ELoc, diag::err_omp_firstprivate_and_lastprivate_in_distribute);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+ }
+
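The mirrored check above enforces the same firstprivate/lastprivate exclusivity from the lastprivate side; a sketch of what it diagnoses (illustrative only):

    void g(int n) {
      int x = 0;
      #pragma omp target teams
      // error: a list item may be firstprivate or lastprivate on distribute, not both
      #pragma omp distribute firstprivate(x) lastprivate(x)
      for (int i = 0; i < n; ++i)
        x = i;
    }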
if (TopDVar.CKind != OMPC_firstprivate)
DSAStack->addDSA(VD, DE, OMPC_lastprivate);
Vars.push_back(DE);
@@ -5879,6 +7153,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
case OO_Call:
case OO_Subscript:
case OO_Conditional:
+ case OO_Coawait:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Unexpected reduction identifier");
case OO_None:
@@ -5903,6 +7178,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
}
SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> Privates;
SmallVector<Expr *, 8> LHSs;
SmallVector<Expr *, 8> RHSs;
SmallVector<Expr *, 8> ReductionOps;
@@ -5911,6 +7187,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
LHSs.push_back(nullptr);
RHSs.push_back(nullptr);
ReductionOps.push_back(nullptr);
@@ -5922,6 +7199,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
RefExpr->containsUnexpandedParameterPack()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
LHSs.push_back(nullptr);
RHSs.push_back(nullptr);
ReductionOps.push_back(nullptr);
@@ -5937,14 +7215,53 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
// OpenMP [2.14.3.3, Restrictions, p.1]
// A variable that is part of another variable (as an array or
// structure element) cannot appear in a private clause.
- auto DE = dyn_cast<DeclRefExpr>(RefExpr);
- if (!DE || !isa<VarDecl>(DE->getDecl())) {
- Diag(ELoc, diag::err_omp_expected_var_name) << ERange;
+ auto *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr);
+ if (!ASE && !OASE && (!DE || !isa<VarDecl>(DE->getDecl()))) {
+ Diag(ELoc, diag::err_omp_expected_var_name_or_array_item) << ERange;
continue;
}
- auto D = DE->getDecl();
- auto VD = cast<VarDecl>(D);
- auto Type = VD->getType();
+ QualType Type;
+ VarDecl *VD = nullptr;
+ if (DE) {
+ auto D = DE->getDecl();
+ VD = cast<VarDecl>(D);
+ Type = VD->getType();
+ } else if (ASE) {
+ Type = ASE->getType();
+ auto *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ DE = dyn_cast<DeclRefExpr>(Base);
+ if (DE)
+ VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!VD) {
+ Diag(Base->getExprLoc(), diag::err_omp_expected_base_var_name)
+ << 0 << Base->getSourceRange();
+ continue;
+ }
+ } else if (OASE) {
+ auto BaseType = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ Type = ATy->getElementType();
+ else
+ Type = BaseType->getPointeeType();
+ auto *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ DE = dyn_cast<DeclRefExpr>(Base);
+ if (DE)
+ VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!VD) {
+ Diag(Base->getExprLoc(), diag::err_omp_expected_base_var_name)
+ << 1 << Base->getSourceRange();
+ continue;
+ }
+ }
+
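The net effect is that reduction list items may now be array elements or array sections rather than only whole variables; for example (illustrative only):

    void sum(int *a, int n) {
      #pragma omp parallel for reduction(+ : a[0:n])
      for (int i = 0; i < n; ++i)
        a[i] += i;
    }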
// OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
// A variable that appears in a private clause must not have an incomplete
// type or a reference type.
@@ -5955,36 +7272,42 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
// Arrays may not appear in a reduction clause.
if (Type.getNonReferenceType()->isArrayType()) {
Diag(ELoc, diag::err_omp_reduction_type_array) << Type << ERange;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
continue;
}
// OpenMP [2.14.3.6, reduction clause, Restrictions]
// A list item that appears in a reduction clause must not be
// const-qualified.
if (Type.getNonReferenceType().isConstant(Context)) {
- Diag(ELoc, diag::err_omp_const_variable)
+ Diag(ELoc, diag::err_omp_const_reduction_list_item)
<< getOpenMPClauseName(OMPC_reduction) << Type << ERange;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
continue;
}
// OpenMP [2.9.3.6, Restrictions, C/C++, p.4]
// If a list-item is a reference type then it must bind to the same object
// for all threads of the team.
- VarDecl *VDDef = VD->getDefinition();
- if (Type->isReferenceType() && VDDef) {
- DSARefChecker Check(DSAStack);
- if (Check.Visit(VDDef->getInit())) {
- Diag(ELoc, diag::err_omp_reduction_ref_type_arg) << ERange;
- Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef;
- continue;
+ if (!ASE && !OASE) {
+ VarDecl *VDDef = VD->getDefinition();
+ if (Type->isReferenceType() && VDDef) {
+ DSARefChecker Check(DSAStack);
+ if (Check.Visit(VDDef->getInit())) {
+ Diag(ELoc, diag::err_omp_reduction_ref_type_arg) << ERange;
+ Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef;
+ continue;
+ }
}
}
// OpenMP [2.14.3.6, reduction clause, Restrictions]
@@ -6000,21 +7323,25 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
(getLangOpts().CPlusPlus && Type->isArithmeticType()))) {
Diag(ELoc, diag::err_omp_clause_not_arithmetic_type_arg)
<< getLangOpts().CPlusPlus;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
continue;
}
if ((BOK == BO_OrAssign || BOK == BO_AndAssign || BOK == BO_XorAssign) &&
!getLangOpts().CPlusPlus && Type->isFloatingType()) {
Diag(ELoc, diag::err_omp_clause_floating_type_arg);
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if (!ASE && !OASE) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
continue;
}
// OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
@@ -6028,7 +7355,8 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
// Any number of reduction clauses can be specified on the directive,
// but a list item can appear only once in the reduction clauses for that
// directive.
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
+ DSAStackTy::DSAVarData DVar;
+ DVar = DSAStack->getTopDSA(VD, false);
if (DVar.CKind == OMPC_reduction) {
Diag(ELoc, diag::err_omp_once_referenced)
<< getOpenMPClauseName(OMPC_reduction);
@@ -6059,9 +7387,27 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
continue;
}
}
+
Type = Type.getNonLValueExprType(Context).getUnqualifiedType();
- auto *LHSVD = buildVarDecl(*this, ELoc, Type, ".reduction.lhs");
- auto *RHSVD = buildVarDecl(*this, ELoc, Type, VD->getName());
+ auto *LHSVD = buildVarDecl(*this, ELoc, Type, ".reduction.lhs",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *RHSVD = buildVarDecl(*this, ELoc, Type, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto PrivateTy = Type;
+ if (OASE) {
+      // For array sections only: create a pseudo array type for the private
+      // copy; the size of this array will be generated during codegen.
+      // For array subscripts or single variables, PrivateTy is the same as
+      // Type (the type of the variable or of a single array element).
+ PrivateTy = Context.getVariableArrayType(
+ Type, new (Context) OpaqueValueExpr(SourceLocation(),
+ Context.getSizeType(), VK_RValue),
+ ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange());
+ }
+ // Private copy.
+ auto *PrivateVD = buildVarDecl(*this, ELoc, PrivateTy, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
// Add initializer for private variable.
Expr *Init = nullptr;
switch (BOK) {
@@ -6170,21 +7516,27 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
if (Init) {
AddInitializerToDecl(RHSVD, Init, /*DirectInit=*/false,
/*TypeMayContainAuto=*/false);
- } else {
+ } else
ActOnUninitializedDecl(RHSVD, /*TypeMayContainAuto=*/false);
- }
if (!RHSVD->hasInit()) {
Diag(ELoc, diag::err_omp_reduction_id_not_compatible) << Type
<< ReductionIdRange;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if (VD) {
+ bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ Diag(VD->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << VD;
+ }
continue;
}
+ // Store initializer for single element in private copy. Will be used during
+ // codegen.
+ PrivateVD->setInit(RHSVD->getInit());
+ PrivateVD->setInitStyle(RHSVD->getInitStyle());
auto *LHSDRE = buildDeclRefExpr(*this, LHSVD, Type, ELoc);
auto *RHSDRE = buildDeclRefExpr(*this, RHSVD, Type, ELoc);
+ auto *PrivateDRE = buildDeclRefExpr(*this, PrivateVD, PrivateTy, ELoc);
ExprResult ReductionOp =
BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BOK,
LHSDRE, RHSDRE);
@@ -6201,15 +7553,14 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(),
BO_Assign, LHSDRE, ConditionalOp);
}
- if (ReductionOp.isUsable()) {
- ReductionOp = ActOnFinishFullExpr(ReductionOp.get());
- }
+ ReductionOp = ActOnFinishFullExpr(ReductionOp.get());
}
if (ReductionOp.isInvalid())
continue;
DSAStack->addDSA(VD, DE, OMPC_reduction);
- Vars.push_back(DE);
+ Vars.push_back(RefExpr);
+ Privates.push_back(PrivateDRE);
LHSs.push_back(LHSDRE);
RHSs.push_back(RHSDRE);
ReductionOps.push_back(ReductionOp.get());
@@ -6220,22 +7571,28 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
return OMPReductionClause::Create(
Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars,
- ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, LHSs,
- RHSs, ReductionOps);
+ ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, Privates,
+ LHSs, RHSs, ReductionOps);
}
-OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation ColonLoc,
- SourceLocation EndLoc) {
+OMPClause *Sema::ActOnOpenMPLinearClause(
+ ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
+ SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
+ SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
+ SmallVector<Expr *, 8> Privates;
SmallVector<Expr *, 8> Inits;
+ if ((!LangOpts.CPlusPlus && LinKind != OMPC_LINEAR_val) ||
+ LinKind == OMPC_LINEAR_unknown) {
+ Diag(LinLoc, diag::err_omp_wrong_linear_modifier) << LangOpts.CPlusPlus;
+ LinKind = OMPC_LINEAR_val;
+ }
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP linear clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
+ Privates.push_back(nullptr);
Inits.push_back(nullptr);
continue;
}
@@ -6278,6 +7635,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
if (QType->isDependentType() || QType->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
+ Privates.push_back(nullptr);
Inits.push_back(nullptr);
continue;
}
@@ -6287,16 +7645,13 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
diag::err_omp_linear_incomplete_type)) {
continue;
}
- if (QType->isReferenceType()) {
- Diag(ELoc, diag::err_omp_clause_ref_type_arg)
- << getOpenMPClauseName(OMPC_linear) << QType;
- bool IsDecl =
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(VD->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << VD;
+ if ((LinKind == OMPC_LINEAR_uval || LinKind == OMPC_LINEAR_ref) &&
+ !QType->isReferenceType()) {
+ Diag(ELoc, diag::err_omp_wrong_linear_modifier_non_reference)
+ << QType << getOpenMPSimpleClauseTypeName(OMPC_linear, LinKind);
continue;
}
+ QType = QType.getNonReferenceType();
// A list item must not be const-qualified.
if (QType.isConstant(Context)) {
@@ -6324,14 +7679,25 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
continue;
}
+ // Build private copy of original var.
+ auto *Private = buildVarDecl(*this, ELoc, QType, VD->getName(),
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ auto *PrivateRef = buildDeclRefExpr(
+ *this, Private, DE->getType().getUnqualifiedType(), DE->getExprLoc());
// Build var to save initial value.
VarDecl *Init = buildVarDecl(*this, ELoc, QType, ".linear.start");
- AddInitializerToDecl(Init, DefaultLvalueConversion(DE).get(),
+ Expr *InitExpr;
+ if (LinKind == OMPC_LINEAR_uval)
+ InitExpr = VD->getInit();
+ else
+ InitExpr = DE;
+ AddInitializerToDecl(Init, DefaultLvalueConversion(InitExpr).get(),
/*DirectInit*/ false, /*TypeMayContainAuto*/ false);
auto InitRef = buildDeclRefExpr(
*this, Init, DE->getType().getUnqualifiedType(), DE->getExprLoc());
DSAStack->addDSA(VD, DE, OMPC_linear);
Vars.push_back(DE);
+ Privates.push_back(PrivateRef);
Inits.push_back(InitRef);
}
@@ -6356,6 +7722,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc);
ExprResult CalcStep =
BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr);
+ CalcStep = ActOnFinishFullExpr(CalcStep.get());
// Warn about a zero linear step (it would probably be better specified as
// making the corresponding variables 'const').
@@ -6371,8 +7738,9 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
}
}
- return OMPLinearClause::Create(Context, StartLoc, LParenLoc, ColonLoc, EndLoc,
- Vars, Inits, StepExpr, CalcStepExpr);
+ return OMPLinearClause::Create(Context, StartLoc, LParenLoc, LinKind, LinLoc,
+ ColonLoc, EndLoc, Vars, Privates, Inits,
+ StepExpr, CalcStepExpr);
}
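A sketch of the new linear modifiers (illustrative only; as enforced above, uval and ref require a C++ reference, and only val is available in C):

    void h(int &r, int v) {
      #pragma omp simd linear(val(v) : 1) linear(uval(r) : 2)
      for (int i = 0; i < 8; ++i) {
        // v advances by 1 and r by 2 on each iteration
      }
    }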
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
@@ -6391,27 +7759,35 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Step = cast<BinaryOperator>(CalcStep)->getLHS();
bool HasErrors = false;
auto CurInit = Clause.inits().begin();
+ auto CurPrivate = Clause.privates().begin();
+ auto LinKind = Clause.getModifier();
for (auto &RefExpr : Clause.varlists()) {
Expr *InitExpr = *CurInit;
// Build privatized reference to the current linear var.
auto DE = cast<DeclRefExpr>(RefExpr);
- auto PrivateRef =
- buildDeclRefExpr(SemaRef, cast<VarDecl>(DE->getDecl()),
- DE->getType().getUnqualifiedType(), DE->getExprLoc(),
- /*RefersToCapture=*/true);
+ Expr *CapturedRef;
+ if (LinKind == OMPC_LINEAR_uval)
+ CapturedRef = cast<VarDecl>(DE->getDecl())->getInit();
+ else
+ CapturedRef =
+ buildDeclRefExpr(SemaRef, cast<VarDecl>(DE->getDecl()),
+ DE->getType().getUnqualifiedType(), DE->getExprLoc(),
+ /*RefersToCapture=*/true);
// Build update: Var = InitExpr + IV * Step
ExprResult Update =
- BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef,
+ BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), *CurPrivate,
InitExpr, IV, Step, /* Subtract */ false);
- Update = SemaRef.ActOnFinishFullExpr(Update.get());
+ Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getLocStart(),
+ /*DiscardedValue=*/true);
// Build final: Var = InitExpr + NumIterations * Step
ExprResult Final =
- BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef,
+ BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
InitExpr, NumIterations, Step, /* Subtract */ false);
- Final = SemaRef.ActOnFinishFullExpr(Final.get());
+ Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getLocStart(),
+ /*DiscardedValue=*/true);
if (!Update.isUsable() || !Final.isUsable()) {
Updates.push_back(nullptr);
Finals.push_back(nullptr);
@@ -6420,7 +7796,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Updates.push_back(Update.get());
Finals.push_back(Final.get());
}
- ++CurInit;
+ ++CurInit, ++CurPrivate;
}
Clause.setUpdates(Updates);
Clause.setFinals(Finals);
@@ -6557,12 +7933,14 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
// copyin clause requires an accessible, unambiguous copy assignment
// operator for the class type.
auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
- auto *SrcVD = buildVarDecl(*this, DE->getLocStart(),
- ElemType.getUnqualifiedType(), ".copyin.src");
+ auto *SrcVD =
+ buildVarDecl(*this, DE->getLocStart(), ElemType.getUnqualifiedType(),
+ ".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoSrcExpr = buildDeclRefExpr(
*this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
auto *DstVD =
- buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst");
+ buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoDstExpr =
buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc());
// For arrays generate assignment operation for single element and replace
@@ -6679,13 +8057,16 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a
// copyprivate clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- Type = Context.getBaseElementType(Type).getUnqualifiedType();
+ Type = Context.getBaseElementType(Type.getNonReferenceType())
+ .getUnqualifiedType();
auto *SrcVD =
- buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.src");
+ buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.src",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoSrcExpr =
buildDeclRefExpr(*this, SrcVD, Type, DE->getExprLoc());
auto *DstVD =
- buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.dst");
+ buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.dst",
+ VD->hasAttrs() ? &VD->getAttrs() : nullptr);
auto *PseudoDstExpr =
buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc());
auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
@@ -6727,61 +8108,430 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
- if (DepKind == OMPC_DEPEND_unknown) {
- std::string Values;
- std::string Sep(", ");
- for (unsigned i = 0; i < OMPC_DEPEND_unknown; ++i) {
- Values += "'";
- Values += getOpenMPSimpleClauseTypeName(OMPC_depend, i);
- Values += "'";
- switch (i) {
- case OMPC_DEPEND_unknown - 2:
- Values += " or ";
- break;
- case OMPC_DEPEND_unknown - 1:
- break;
- default:
- Values += Sep;
- break;
- }
- }
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DSAStack->getCurrentDirective() != OMPD_ordered &&
+ (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
+ DepKind == OMPC_DEPEND_sink)) {
+ unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink};
Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << Values << getOpenMPClauseName(OMPC_depend);
+ << getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown, Except)
+ << getOpenMPClauseName(OMPC_depend);
return nullptr;
}
SmallVector<Expr *, 8> Vars;
- for (auto &RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
- if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ llvm::APSInt DepCounter(/*BitWidth=*/32);
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+ if (DepKind == OMPC_DEPEND_sink) {
+ if (auto *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
+ TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
+ TotalDepCount.setIsUnsigned(/*Val=*/true);
+ }
+ }
+ if ((DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) ||
+ DSAStack->getParentOrderedRegionParam()) {
+ for (auto &RefExpr : VarList) {
+      assert(RefExpr && "NULL expr in OpenMP depend clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr) ||
+ (DepKind == OMPC_DEPEND_sink && CurContext->isDependentContext())) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ auto *SimpleExpr = RefExpr->IgnoreParenCasts();
+ if (DepKind == OMPC_DEPEND_sink) {
+ if (DepCounter >= TotalDepCount) {
+ Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
+ continue;
+ }
+ ++DepCounter;
+ // OpenMP [2.13.9, Summary]
+ // depend(dependence-type : vec), where dependence-type is:
+ // 'sink' and where vec is the iteration vector, which has the form:
+ // x1 [+- d1], x2 [+- d2 ], . . . , xn [+- dn]
+ // where n is the value specified by the ordered clause in the loop
+ // directive, xi denotes the loop iteration variable of the i-th nested
+ // loop associated with the loop directive, and di is a constant
+ // non-negative integer.
+ SimpleExpr = SimpleExpr->IgnoreImplicit();
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ if (!DE) {
+ OverloadedOperatorKind OOK = OO_None;
+ SourceLocation OOLoc;
+ Expr *LHS, *RHS;
+ if (auto *BO = dyn_cast<BinaryOperator>(SimpleExpr)) {
+ OOK = BinaryOperator::getOverloadedOperator(BO->getOpcode());
+ OOLoc = BO->getOperatorLoc();
+ LHS = BO->getLHS()->IgnoreParenImpCasts();
+ RHS = BO->getRHS()->IgnoreParenImpCasts();
+ } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(SimpleExpr)) {
+ OOK = OCE->getOperator();
+ OOLoc = OCE->getOperatorLoc();
+ LHS = OCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ RHS = OCE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
+ } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SimpleExpr)) {
+ OOK = MCE->getMethodDecl()
+ ->getNameInfo()
+ .getName()
+ .getCXXOverloadedOperator();
+ OOLoc = MCE->getCallee()->getExprLoc();
+ LHS = MCE->getImplicitObjectArgument()->IgnoreParenImpCasts();
+ RHS = MCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ } else {
+ Diag(ELoc, diag::err_omp_depend_sink_wrong_expr);
+ continue;
+ }
+ DE = dyn_cast<DeclRefExpr>(LHS);
+ if (!DE) {
+ Diag(LHS->getExprLoc(),
+ diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(
+ DepCounter.getZExtValue());
+ continue;
+ }
+ if (OOK != OO_Plus && OOK != OO_Minus) {
+ Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
+ continue;
+ }
+ ExprResult Res = VerifyPositiveIntegerConstantInClause(
+ RHS, OMPC_depend, /*StrictlyPositive=*/false);
+ if (Res.isInvalid())
+ continue;
+ }
+ auto *VD = dyn_cast<VarDecl>(DE->getDecl());
+ if (!CurContext->isDependentContext() &&
+ DSAStack->getParentOrderedRegionParam() &&
+ (!VD || DepCounter != DSAStack->isParentLoopControlVariable(VD))) {
+ Diag(DE->getExprLoc(),
+ diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(
+ DepCounter.getZExtValue());
+ continue;
+ }
+ } else {
+ // OpenMP [2.11.1.1, Restrictions, p.3]
+ // A variable that is part of another variable (such as a field of a
+ // structure) but is not an array element or an array section cannot
+ // appear in a depend clause.
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (!ASE && !DE && !OASE) || (DE && !isa<VarDecl>(DE->getDecl())) ||
+ (ASE && !ASE->getBase()->getType()->isAnyPointerType() &&
+ !ASE->getBase()->getType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_var_name_or_array_item)
+ << RefExpr->getSourceRange();
+ continue;
+ }
+ }
+
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ }
+
+ if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
+ TotalDepCount > VarList.size() &&
+ DSAStack->getParentOrderedRegionParam()) {
+ Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
+ }
+ if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
+ Vars.empty())
+ return nullptr;
+ }
+
+ return OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc, DepKind,
+ DepLoc, ColonLoc, Vars);
+}
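For the new sink/source forms, a doacross loop sketch (illustrative only): the vec entries must be the loop iteration variables, optionally plus or minus a constant, which is what the DepCounter/TotalDepCount bookkeeping above verifies.

    void scan(int *a, int n) {
      #pragma omp parallel
      #pragma omp for ordered(1)
      for (int i = 1; i < n; ++i) {
        #pragma omp ordered depend(sink : i - 1)
        a[i] += a[i - 1];
        #pragma omp ordered depend(source)
      }
    }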
+
+OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Device;
+
+ // OpenMP [2.9.1, Restrictions]
+ // The device expression must evaluate to a non-negative integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ /*StrictlyPositive=*/false))
+ return nullptr;
+
+ return new (Context) OMPDeviceClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
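Usage sketch (illustrative only); a negative or non-integral device expression is rejected by the check above:

    void offload() {
      #pragma omp target device(0)
      { }
    }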
+
+static bool IsCXXRecordForMappable(Sema &SemaRef, SourceLocation Loc,
+ DSAStackTy *Stack, CXXRecordDecl *RD) {
+ if (!RD || RD->isInvalidDecl())
+ return true;
+
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ if (auto *CTD = CTSD->getSpecializedTemplate())
+ RD = CTD->getTemplatedDecl();
+ auto QTy = SemaRef.Context.getRecordType(RD);
+ if (RD->isDynamicClass()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(RD->getLocation(), diag::note_omp_polymorphic_in_target);
+ return false;
+ }
+ auto *DC = RD;
+ bool IsCorrect = true;
+ for (auto *I : DC->decls()) {
+ if (I) {
+ if (auto *MD = dyn_cast<CXXMethodDecl>(I)) {
+ if (MD->isStatic()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(MD->getLocation(),
+ diag::note_omp_static_member_in_target);
+ IsCorrect = false;
+ }
+ } else if (auto *VD = dyn_cast<VarDecl>(I)) {
+ if (VD->isStaticDataMember()) {
+ SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
+ SemaRef.Diag(VD->getLocation(),
+ diag::note_omp_static_member_in_target);
+ IsCorrect = false;
+ }
+ }
+ }
+ }
+
+ for (auto &I : RD->bases()) {
+ if (!IsCXXRecordForMappable(SemaRef, I.getLocStart(), Stack,
+ I.getType()->getAsCXXRecordDecl()))
+ IsCorrect = false;
+ }
+ return IsCorrect;
+}
+
+static bool CheckTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
+ DSAStackTy *Stack, QualType QTy) {
+ NamedDecl *ND;
+ if (QTy->isIncompleteType(&ND)) {
+ SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
+ return false;
+ } else if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(ND)) {
+ if (!RD->isInvalidDecl() &&
+ !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
+ return false;
+ }
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPMapClause(
+ OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ SmallVector<Expr *, 4> Vars;
+
+ for (auto &RE : VarList) {
+ assert(RE && "Null expr in omp map");
+ if (isa<DependentScopeDeclRefExpr>(RE)) {
// It will be analyzed later.
- Vars.push_back(RefExpr);
+ Vars.push_back(RE);
continue;
}
+ SourceLocation ELoc = RE->getExprLoc();
- SourceLocation ELoc = RefExpr->getExprLoc();
- // OpenMP [2.11.1.1, Restrictions, p.3]
- // A variable that is part of another variable (such as a field of a
+ // OpenMP [2.14.5, Restrictions]
+  // A variable that is part of another variable (such as a field of a
// structure) but is not an array element or an array section cannot appear
- // in a depend clause.
- auto *SimpleExpr = RefExpr->IgnoreParenCasts();
- DeclRefExpr *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
- ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() || (!ASE && !DE) ||
+ // in a map clause.
+ auto *VE = RE->IgnoreParenLValueCasts();
+
+ if (VE->isValueDependent() || VE->isTypeDependent() ||
+ VE->isInstantiationDependent() ||
+ VE->containsUnexpandedParameterPack()) {
+ // It will be analyzed later.
+ Vars.push_back(RE);
+ continue;
+ }
+
+ auto *SimpleExpr = RE->IgnoreParenCasts();
+ auto *DE = dyn_cast<DeclRefExpr>(SimpleExpr);
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+
+ if (!RE->IgnoreParenImpCasts()->isLValue() ||
+ (!OASE && !ASE && !DE) ||
(DE && !isa<VarDecl>(DE->getDecl())) ||
(ASE && !ASE->getBase()->getType()->isAnyPointerType() &&
!ASE->getBase()->getType()->isArrayType())) {
Diag(ELoc, diag::err_omp_expected_var_name_or_array_item)
- << RefExpr->getSourceRange();
+ << RE->getSourceRange();
continue;
}
- Vars.push_back(RefExpr->IgnoreParenImpCasts());
- }
+ Decl *D = nullptr;
+ if (DE) {
+ D = DE->getDecl();
+ } else if (ASE) {
+ auto *B = ASE->getBase()->IgnoreParenCasts();
+ D = dyn_cast<DeclRefExpr>(B)->getDecl();
+ } else if (OASE) {
+ auto *B = OASE->getBase();
+ D = dyn_cast<DeclRefExpr>(B)->getDecl();
+ }
+ assert(D && "Null decl on map clause.");
+ auto *VD = cast<VarDecl>(D);
+
+ // OpenMP [2.14.5, Restrictions, p.8]
+ // threadprivate variables cannot appear in a map clause.
+ if (DSAStack->isThreadPrivate(VD)) {
+ auto DVar = DSAStack->getTopDSA(VD, false);
+ Diag(ELoc, diag::err_omp_threadprivate_in_map);
+ ReportOriginalDSA(*this, DSAStack, VD, DVar);
+ continue;
+ }
+
+ // OpenMP [2.14.5, Restrictions, p.2]
+ // At most one list item can be an array item derived from a given variable
+ // in map clauses of the same construct.
+ // OpenMP [2.14.5, Restrictions, p.3]
+ // List items of map clauses in the same construct must not share original
+ // storage.
+ // OpenMP [2.14.5, Restrictions, C/C++, p.2]
+ // A variable for which the type is pointer, reference to array, or
+ // reference to pointer and an array section derived from that variable
+ // must not appear as list items of map clauses of the same construct.
+ DSAStackTy::MapInfo MI = DSAStack->IsMappedInCurrentRegion(VD);
+ if (MI.RefExpr) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << ELoc;
+ Diag(MI.RefExpr->getExprLoc(), diag::note_used_here)
+ << MI.RefExpr->getSourceRange();
+ continue;
+ }
+
+ // OpenMP [2.14.5, Restrictions, C/C++, p.3,4]
+ // A variable for which the type is pointer, reference to array, or
+ // reference to pointer must not appear as a list item if the enclosing
+ // device data environment already contains an array section derived from
+ // that variable.
+ // An array section derived from a variable for which the type is pointer,
+ // reference to array, or reference to pointer must not appear as a list
+ // item if the enclosing device data environment already contains that
+ // variable.
+ QualType Type = VD->getType();
+ MI = DSAStack->getMapInfoForVar(VD);
+ if (MI.RefExpr && (isa<DeclRefExpr>(MI.RefExpr->IgnoreParenLValueCasts()) !=
+ isa<DeclRefExpr>(VE)) &&
+ (Type->isPointerType() || Type->isReferenceType())) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << ELoc;
+ Diag(MI.RefExpr->getExprLoc(), diag::note_used_here)
+ << MI.RefExpr->getSourceRange();
+ continue;
+ }
+ // OpenMP [2.14.5, Restrictions, C/C++, p.7]
+ // A list item must have a mappable type.
+ if (!CheckTypeMappable(VE->getExprLoc(), VE->getSourceRange(), *this,
+ DSAStack, Type))
+ continue;
+
+ Vars.push_back(RE);
+ MI.RefExpr = RE;
+ DSAStack->addMapInfoForVar(VD, MI);
+ }
if (Vars.empty())
return nullptr;
- return OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc, DepKind,
- DepLoc, ColonLoc, Vars);
+ return OMPMapClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
+ MapTypeModifier, MapType, MapLoc);
+}
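Map-clause sketches exercising the checks above, including the mappable-type rules from IsCXXRecordForMappable/CheckTypeMappable (illustrative only):

    int tp;
    #pragma omp threadprivate(tp)

    struct Poly { virtual ~Poly(); };

    void maps(int *a, int n, Poly &p) {
      #pragma omp target map(tofrom : a[0:n]) // OK: array section
      { a[0] = 1; }
      // map(tp) would be an error: threadprivate variables cannot appear in a map clause
      // map(p)  would be an error: polymorphic class types are not mappable
    }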
+
+OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = NumTeams;
+
+  // OpenMP [teams Construct, Restrictions]
+ // The num_teams expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPNumTeamsClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = ThreadLimit;
+
+  // OpenMP [teams Construct, Restrictions]
+ // The thread_limit expression must evaluate to a positive integer value.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc,
+ EndLoc);
+}
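Both teams clauses in one sketch (illustrative only); unlike device, these must be strictly positive, hence /*StrictlyPositive=*/true:

    void teams_example() {
      #pragma omp target
      #pragma omp teams num_teams(4) thread_limit(64)
      { }
    }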
+
+OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Priority;
+
+  // OpenMP [2.9.1, task Construct]
+ // The priority-value is a non-negative numerical scalar expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_priority,
+ /*StrictlyPositive=*/false))
+ return nullptr;
+
+ return new (Context) OMPPriorityClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPGrainsizeClause(Expr *Grainsize,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Grainsize;
+
+  // OpenMP [2.9.2, taskloop Construct]
+ // The parameter of the grainsize clause must be a positive integer
+ // expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPGrainsizeClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPNumTasksClause(Expr *NumTasks,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = NumTasks;
+
+  // OpenMP [2.9.2, taskloop Construct]
+ // The parameter of the num_tasks clause must be a positive integer
+ // expression.
+ if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_tasks,
+ /*StrictlyPositive=*/true))
+ return nullptr;
+
+ return new (Context) OMPNumTasksClause(ValExpr, StartLoc, LParenLoc, EndLoc);
+}
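A taskloop sketch covering the clauses above (illustrative only; note that OpenMP 4.5 forbids combining grainsize and num_tasks on the same directive):

    void tl(int n) {
      #pragma omp taskloop priority(1) grainsize(100)
      for (int i = 0; i < n; ++i)
        ;
    }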
+
+OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ // OpenMP [2.13.2, critical construct, Description]
+ // ... where hint-expression is an integer constant expression that evaluates
+ // to a valid lock hint.
+ ExprResult HintExpr = VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint);
+ if (HintExpr.isInvalid())
+ return nullptr;
+ return new (Context)
+ OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc);
}
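A hint sketch (illustrative only; the constant 2 corresponds to omp_lock_hint_contended in OpenMP 4.5, and a named critical is required when hint is present):

    void locked() {
      #pragma omp critical(my_lock) hint(2)
      { }
    }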
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 31f581dc15b6..1caa94c9a458 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -38,6 +38,11 @@
using namespace clang;
using namespace sema;
+static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
+ return std::any_of(FD->param_begin(), FD->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+}
+
/// A convenience routine for creating a decayed reference to a function.
static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
@@ -60,12 +65,8 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
DRE->setHadMultipleCandidates(true);
S.MarkDeclRefReferenced(DRE);
-
- ExprResult E = DRE;
- E = S.DefaultFunctionArrayConversion(E.get());
- if (E.isInvalid())
- return ExprError();
- return E;
+ return S.ImpCastExprToType(DRE, S.Context.getPointerType(DRE->getType()),
+ CK_FunctionToPointerDecay);
}
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
@@ -88,7 +89,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
static ImplicitConversionSequence::CompareKind
-CompareStandardConversionSequences(Sema &S,
+CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
@@ -98,7 +99,7 @@ CompareQualificationConversions(Sema &S,
const StandardConversionSequence& SCS2);
static ImplicitConversionSequence::CompareKind
-CompareDerivedToBaseConversions(Sema &S,
+CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
@@ -130,7 +131,11 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Complex_Real_Conversion,
ICR_Conversion,
ICR_Conversion,
- ICR_Writeback_Conversion
+ ICR_Writeback_Conversion,
+ ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Event_Conversion
+ ICR_C_Conversion
};
return Rank[(int)Kind];
}
@@ -162,7 +167,9 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Complex-real conversion",
"Block Pointer conversion",
"Transparent Union Conversion",
- "Writeback conversion"
+ "Writeback conversion",
+ "OpenCL Zero Event Conversion",
+ "C specific type conversion"
};
return Name[Kind];
}
@@ -896,6 +903,11 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl();
}
+ // A using-declaration does not conflict with another declaration
+ // if one of them is hidden.
+ if ((OldIsUsingDecl || NewIsUsingDecl) && !isVisible(*I))
+ continue;
+
// If either declaration was introduced by a using declaration,
// we'll need to use slightly different rules for matching.
// Essentially, these rules are the normal rules, except that
@@ -1051,6 +1063,14 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return true;
}
+ // Though pass_object_size is placed on parameters and takes an argument, we
+ // consider it to be a function-level modifier for the sake of function
+ // identity. Either the function has one or more parameters with
+ // pass_object_size or it doesn't.
+ if (functionHasPassObjectSizeParams(New) !=
+ functionHasPassObjectSizeParams(Old))
+ return true;
+
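A sketch of what this means in practice (illustrative only):

    void copyto(void *dst __attribute__((pass_object_size(0))), int n);
    void copyto(void *dst, int n); // no longer a redeclaration of the above

Whether such an overload set is ultimately accepted is diagnosed separately; this check only makes the two declarations distinct for identity purposes.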
// enable_if attributes are an order-sensitive part of the signature.
for (specific_attr_iterator<EnableIfAttr>
NewI = New->specific_attr_begin<EnableIfAttr>(),
@@ -1067,6 +1087,25 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return true;
}
+ if (getLangOpts().CUDA && getLangOpts().CUDATargetOverloads) {
+ CUDAFunctionTarget NewTarget = IdentifyCUDATarget(New),
+ OldTarget = IdentifyCUDATarget(Old);
+ if (NewTarget == CFT_InvalidTarget || NewTarget == CFT_Global)
+ return false;
+
+ assert((OldTarget != CFT_InvalidTarget) && "Unexpected invalid target.");
+
+ // Don't allow mixing of HD with other kinds. This guarantees that
+ // we have only one viable function with this signature on any
+  // side of CUDA compilation.
+ if ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice))
+ return false;
+
+ // Allow overloading of functions with same signature, but
+ // different CUDA target attributes.
+ return NewTarget != OldTarget;
+ }
+
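In CUDA terms (illustrative only, assuming the CUDATargetOverloads language option is on):

    __host__ void work();   // host overload
    __device__ void work(); // same signature, different target: allowed

    __host__ __device__ void mixed();
    __device__ void mixed(); // HD may not mix with other targets: treated as a redeclaration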
// The signatures match; this is not an overload.
return false;
}
@@ -1125,7 +1164,8 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
QualType ToCanon
= S.Context.getCanonicalType(ToType).getUnqualifiedType();
if (Constructor->isCopyConstructor() &&
- (FromCanon == ToCanon || S.IsDerivedFrom(FromCanon, ToCanon))) {
+ (FromCanon == ToCanon ||
+ S.IsDerivedFrom(From->getLocStart(), FromCanon, ToCanon))) {
// Turn this into a "standard" conversion sequence, so that it
// gets ranked with standard conversion sequences.
ICS.setStandard();
@@ -1215,7 +1255,7 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
QualType FromType = From->getType();
if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
(S.Context.hasSameUnqualifiedType(FromType, ToType) ||
- S.IsDerivedFrom(FromType, ToType))) {
+ S.IsDerivedFrom(From->getLocStart(), FromType, ToType))) {
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
ICS.Standard.setFromType(FromType);
@@ -1387,7 +1427,7 @@ static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle);
-
+
/// IsStandardConversion - Determines whether there is a standard
/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
/// expression From to the type ToType. Standard conversion sequences
@@ -1410,13 +1450,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.CopyConstructor = nullptr;
// There are no standard conversions for class types in C++, so
- // abort early. When overloading in C, however, we do permit
- if (FromType->isRecordType() || ToType->isRecordType()) {
- if (S.getLangOpts().CPlusPlus)
- return false;
-
- // When we're overloading in C, we allow, as standard conversions,
- }
+ // abort early. When overloading in C, however, we do permit them.
+ if (S.getLangOpts().CPlusPlus &&
+ (FromType->isRecordType() || ToType->isRecordType()))
+ return false;
// The first conversion can be an lvalue-to-rvalue conversion,
// array-to-pointer conversion, or function-to-pointer conversion
@@ -1521,6 +1558,11 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Function-to-pointer conversion (C++ 4.3).
SCS.First = ICK_Function_To_Pointer;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(From->IgnoreParenCasts()))
+ if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl()))
+ if (!S.checkAddressOfFunctionIsAvailable(FD))
+ return false;
+
// An lvalue of function type T can be converted to an rvalue of
// type "pointer to T." The result is a pointer to the
// function. (C++ 4.3p1).
@@ -1625,9 +1667,9 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// tryAtomicConversion has updated the standard conversion sequence
// appropriately.
return true;
- } else if (ToType->isEventT() &&
+ } else if (ToType->isEventT() &&
From->isIntegerConstantExpr(S.getASTContext()) &&
- (From->EvaluateKnownConstInt(S.getASTContext()) == 0)) {
+ From->EvaluateKnownConstInt(S.getASTContext()) == 0) {
SCS.Second = ICK_Zero_Event_Conversion;
FromType = ToType;
} else {
@@ -1666,11 +1708,28 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
}
SCS.setToType(2, FromType);
+ if (CanonFrom == CanonTo)
+ return true;
+
// If we have not converted the argument type to the parameter type,
- // this is a bad conversion sequence.
- if (CanonFrom != CanonTo)
+ // this is a bad conversion sequence, unless we're resolving an overload in C.
+ if (S.getLangOpts().CPlusPlus || !InOverloadResolution)
+ return false;
+
+ ExprResult ER = ExprResult{From};
+ auto Conv = S.CheckSingleAssignmentConstraints(ToType, ER,
+ /*Diagnose=*/false,
+ /*DiagnoseCFAudited=*/false,
+ /*ConvertRHS=*/false);
+ if (Conv != Sema::Compatible)
return false;
+ SCS.setAllToTypes(ToType);
+ // We need to set all three because we want this conversion to rank terribly,
+ // and we don't know what conversions it may overlap with.
+ SCS.First = ICK_C_Only_Conversion;
+ SCS.Second = ICK_C_Only_Conversion;
+ SCS.Third = ICK_C_Only_Conversion;
return true;
}
@@ -1763,7 +1822,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
- !RequireCompleteType(From->getLocStart(), FromType, 0))
+ isCompleteType(From->getLocStart(), FromType))
return Context.hasSameUnqualifiedType(
ToType, FromEnumType->getDecl()->getPromotionType());
}
@@ -2060,7 +2119,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
}
// MSVC allows implicit function to void* type conversion.
- if (getLangOpts().MicrosoftExt && FromPointeeType->isFunctionType() &&
+ if (getLangOpts().MSVCCompat && FromPointeeType->isFunctionType() &&
ToPointeeType->isVoidType()) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
@@ -2094,8 +2153,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
if (getLangOpts().CPlusPlus &&
FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
- !RequireCompleteType(From->getLocStart(), FromPointeeType, 0) &&
- IsDerivedFrom(FromPointeeType, ToPointeeType)) {
+ IsDerivedFrom(From->getLocStart(), FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
@@ -2467,6 +2525,18 @@ enum {
ft_qualifer_mismatch
};
+/// Attempts to get the FunctionProtoType from a Type. Handles
+/// MemberFunctionPointers properly.
+static const FunctionProtoType *tryGetFunctionProtoType(QualType FromType) {
+ if (auto *FPT = FromType->getAs<FunctionProtoType>())
+ return FPT;
+
+ if (auto *MPT = FromType->getAs<MemberPointerType>())
+ return MPT->getPointeeType()->getAs<FunctionProtoType>();
+
+ return nullptr;
+}
+
/// HandleFunctionTypeMismatch - Gives diagnostic information for differeing
/// function types. Catches different number of parameter, mismatch in
/// parameter types, and different return types.
@@ -2513,8 +2583,8 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
return;
}
- const FunctionProtoType *FromFunction = FromType->getAs<FunctionProtoType>(),
- *ToFunction = ToType->getAs<FunctionProtoType>();
+ const FunctionProtoType *FromFunction = tryGetFunctionProtoType(FromType),
+ *ToFunction = tryGetFunctionProtoType(ToType);
// Both types need to be function types.
if (!FromFunction || !ToFunction) {
@@ -2621,6 +2691,14 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
// The conversion was successful.
Kind = CK_DerivedToBase;
}
+
+ if (!IsCStyleOrFunctionalCast && FromPointeeType->isFunctionType() &&
+ ToPointeeType->isVoidType()) {
+ assert(getLangOpts().MSVCCompat &&
+ "this should only be possible with MSVCCompat!");
+ Diag(From->getExprLoc(), diag::ext_ms_impcast_fn_obj)
+ << From->getSourceRange();
+ }
}
} else if (const ObjCObjectPointerType *ToPtrType =
ToType->getAs<ObjCObjectPointerType>()) {
@@ -2681,8 +2759,7 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
QualType ToClass(ToTypePtr->getClass(), 0);
if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
- !RequireCompleteType(From->getLocStart(), ToClass, 0) &&
- IsDerivedFrom(ToClass, FromClass)) {
+ IsDerivedFrom(From->getLocStart(), ToClass, FromClass)) {
ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
ToClass.getTypePtr());
return true;
@@ -2725,7 +2802,8 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
- bool DerivationOkay = IsDerivedFrom(ToClass, FromClass, Paths);
+ bool DerivationOkay =
+ IsDerivedFrom(From->getLocStart(), ToClass, FromClass, Paths);
assert(DerivationOkay &&
"Should not have been called if derivation isn't OK.");
(void)DerivationOkay;
@@ -3004,14 +3082,10 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// the parentheses of the initializer.
if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
(From->getType()->getAs<RecordType>() &&
- S.IsDerivedFrom(From->getType(), ToType)))
+ S.IsDerivedFrom(From->getLocStart(), From->getType(), ToType)))
ConstructorsOnly = true;
- S.RequireCompleteType(From->getExprLoc(), ToType, 0);
- // RequireCompleteType may have returned true due to some invalid decl
- // during template instantiation, but ToType may be complete enough now
- // to try to recover.
- if (ToType->isIncompleteType()) {
+ if (!S.isCompleteType(From->getExprLoc(), ToType)) {
// We're not going to find any constructors.
} else if (CXXRecordDecl *ToRecordDecl
= dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
@@ -3085,7 +3159,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Enumerate conversion functions, if we're allowed to.
if (ConstructorsOnly || isa<InitListExpr>(From)) {
- } else if (S.RequireCompleteType(From->getLocStart(), From->getType(), 0)) {
+ } else if (!S.isCompleteType(From->getLocStart(), From->getType())) {
// No conversion functions from incomplete types.
} else if (const RecordType *FromRecordType
= From->getType()->getAs<RecordType>()) {
@@ -3212,7 +3286,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
diag::err_typecheck_nonviable_condition_incomplete,
From->getType(), From->getSourceRange()))
Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition)
- << From->getType() << From->getSourceRange() << ToType;
+ << false << From->getType() << From->getSourceRange() << ToType;
} else
return false;
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
@@ -3264,7 +3338,7 @@ static bool hasDeprecatedStringLiteralToCharPtrConversion(
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2).
static ImplicitConversionSequence::CompareKind
-CompareImplicitConversionSequences(Sema &S,
+CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
const ImplicitConversionSequence& ICS1,
const ImplicitConversionSequence& ICS2)
{
@@ -3344,7 +3418,7 @@ CompareImplicitConversionSequences(Sema &S,
if (ICS1.isStandard())
// Standard conversion sequence S1 is a better conversion sequence than
// standard conversion sequence S2 if [...]
- Result = CompareStandardConversionSequences(S,
+ Result = CompareStandardConversionSequences(S, Loc,
ICS1.Standard, ICS2.Standard);
else if (ICS1.isUserDefined()) {
// User-defined conversion sequence U1 is a better conversion
@@ -3355,7 +3429,7 @@ CompareImplicitConversionSequences(Sema &S,
// U2 (C++ 13.3.3.2p3).
if (ICS1.UserDefined.ConversionFunction ==
ICS2.UserDefined.ConversionFunction)
- Result = CompareStandardConversionSequences(S,
+ Result = CompareStandardConversionSequences(S, Loc,
ICS1.UserDefined.After,
ICS2.UserDefined.After);
else
@@ -3453,7 +3527,7 @@ isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2p3).
static ImplicitConversionSequence::CompareKind
-CompareStandardConversionSequences(Sema &S,
+CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2)
{
@@ -3509,7 +3583,7 @@ CompareStandardConversionSequences(Sema &S,
// Neither conversion sequence converts to a void pointer; compare
// their derived-to-base conversions.
if (ImplicitConversionSequence::CompareKind DerivedCK
- = CompareDerivedToBaseConversions(S, SCS1, SCS2))
+ = CompareDerivedToBaseConversions(S, Loc, SCS1, SCS2))
return DerivedCK;
} else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid &&
!S.Context.hasSameType(SCS1.getFromType(), SCS2.getFromType())) {
@@ -3529,9 +3603,9 @@ CompareStandardConversionSequences(Sema &S,
QualType FromPointee1 = FromType1->getPointeeType().getUnqualifiedType();
QualType FromPointee2 = FromType2->getPointeeType().getUnqualifiedType();
- if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Worse;
// Objective-C++: If one interface is more specific than the
@@ -3739,7 +3813,7 @@ CompareQualificationConversions(Sema &S,
/// [over.ics.rank]p4b3). As part of these checks, we also look at
/// conversions between Objective-C interface types.
static ImplicitConversionSequence::CompareKind
-CompareDerivedToBaseConversions(Sema &S,
+CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
QualType FromType1 = SCS1.getFromType();
@@ -3782,17 +3856,17 @@ CompareDerivedToBaseConversions(Sema &S,
// -- conversion of C* to B* is better than conversion of C* to A*,
if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
- if (S.IsDerivedFrom(ToPointee1, ToPointee2))
+ if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
+ else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
return ImplicitConversionSequence::Worse;
}
// -- conversion of B* to A* is better than conversion of C* to A*,
if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
- if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Worse;
}
} else if (SCS1.Second == ICK_Pointer_Conversion &&
@@ -3889,16 +3963,16 @@ CompareDerivedToBaseConversions(Sema &S,
QualType ToPointee2 = QualType(ToPointeeType2, 0).getUnqualifiedType();
// conversion of A::* to B::* is better than conversion of A::* to C::*,
if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
- if (S.IsDerivedFrom(ToPointee1, ToPointee2))
+ if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
return ImplicitConversionSequence::Worse;
- else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
+ else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
return ImplicitConversionSequence::Better;
}
// conversion of B::* to C::* is better than conversion of A::* to C::*
if (ToPointee1 == ToPointee2 && FromPointee1 != FromPointee2) {
- if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ else if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Worse;
}
}
@@ -3910,9 +3984,9 @@ CompareDerivedToBaseConversions(Sema &S,
// reference of type A&,
if (S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
!S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
- if (S.IsDerivedFrom(ToType1, ToType2))
+ if (S.IsDerivedFrom(Loc, ToType1, ToType2))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(ToType2, ToType1))
+ else if (S.IsDerivedFrom(Loc, ToType2, ToType1))
return ImplicitConversionSequence::Worse;
}
@@ -3922,9 +3996,9 @@ CompareDerivedToBaseConversions(Sema &S,
// reference of type A&,
if (!S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
- if (S.IsDerivedFrom(FromType2, FromType1))
+ if (S.IsDerivedFrom(Loc, FromType2, FromType1))
return ImplicitConversionSequence::Better;
- else if (S.IsDerivedFrom(FromType1, FromType2))
+ else if (S.IsDerivedFrom(Loc, FromType1, FromType2))
return ImplicitConversionSequence::Worse;
}
}
@@ -3973,9 +4047,9 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
ObjCLifetimeConversion = false;
if (UnqualT1 == UnqualT2) {
// Nothing to do.
- } else if (!RequireCompleteType(Loc, OrigT2, 0) &&
+ } else if (isCompleteType(Loc, OrigT2) &&
isTypeValid(UnqualT1) && isTypeValid(UnqualT2) &&
- IsDerivedFrom(UnqualT2, UnqualT1))
+ IsDerivedFrom(Loc, UnqualT2, UnqualT1))
DerivedToBase = true;
else if (UnqualT1->isObjCObjectOrInterfaceType() &&
UnqualT2->isObjCObjectOrInterfaceType() &&
@@ -4240,7 +4314,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// conversion functions (13.3.1.6) and choosing the best
// one through overload resolution (13.3)),
if (!SuppressUserConversions && T2->isRecordType() &&
- !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ S.isCompleteType(DeclLoc, T2) &&
RefRelationship == Sema::Ref_Incompatible) {
if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
Init, T2, /*AllowRvalues=*/false,
@@ -4303,7 +4377,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// in the second case (or, in either case, to an appropriate base
// class subobject).
if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
- T2->isRecordType() && !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ T2->isRecordType() && S.isCompleteType(DeclLoc, T2) &&
FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
Init, T2, /*AllowRvalues=*/true,
AllowExplicit)) {
@@ -4441,7 +4515,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// We need a complete type for what follows. Incomplete types can never be
// initialized from init lists.
- if (S.RequireCompleteType(From->getLocStart(), ToType, 0))
+ if (!S.isCompleteType(From->getLocStart(), ToType))
return Result;
// Per DR1467:
@@ -4458,7 +4532,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
if (ToType->isRecordType()) {
QualType InitType = From->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
- S.IsDerivedFrom(InitType, ToType))
+ S.IsDerivedFrom(From->getLocStart(), InitType, ToType))
return TryCopyInitialization(S, From->getInit(0), ToType,
SuppressUserConversions,
InOverloadResolution,
@@ -4515,7 +4589,8 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
}
// Otherwise, look for the worst conversion.
if (Result.isBad() ||
- CompareImplicitConversionSequences(S, ICS, Result) ==
+ CompareImplicitConversionSequences(S, From->getLocStart(), ICS,
+ Result) ==
ImplicitConversionSequence::Worse)
Result = ICS;
}
@@ -4722,7 +4797,7 @@ static bool TryCopyInitialization(const CanQualType FromQTy,
/// parameter of the given member function (@c Method) from the
/// expression @p From.
static ImplicitConversionSequence
-TryObjectArgumentInitialization(Sema &S, QualType FromType,
+TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
Expr::Classification FromClassification,
CXXMethodDecl *Method,
CXXRecordDecl *ActingContext) {
@@ -4782,7 +4857,7 @@ TryObjectArgumentInitialization(Sema &S, QualType FromType,
ImplicitConversionKind SecondKind;
if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) {
SecondKind = ICK_Identity;
- } else if (S.IsDerivedFrom(FromType, ClassType))
+ } else if (S.IsDerivedFrom(Loc, FromType, ClassType))
SecondKind = ICK_Derived_To_Base;
else {
ICS.setBad(BadConversionSequence::unrelated_class,
@@ -4857,7 +4932,8 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
// Note that we always use the true parent context when performing
// the actual argument initialization.
ImplicitConversionSequence ICS = TryObjectArgumentInitialization(
- *this, From->getType(), FromClassification, Method, Method->getParent());
+ *this, From->getLocStart(), From->getType(), FromClassification, Method,
+ Method->getParent());
if (ICS.isBad()) {
if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
Qualifiers FromQs = FromRecordType.getQualifiers();
@@ -4969,6 +5045,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_TransparentUnionConversion:
case ICK_Writeback_Conversion:
case ICK_Zero_Event_Conversion:
+ case ICK_C_Only_Conversion:
return false;
case ICK_Lvalue_To_Rvalue:
@@ -5372,14 +5449,15 @@ ExprResult Sema::PerformContextualImplicitConversion(
Expr *From;
TypeDiagnoserPartialDiag(ContextualImplicitConverter &Converter, Expr *From)
- : TypeDiagnoser(Converter.Suppress), Converter(Converter), From(From) {}
+ : Converter(Converter), From(From) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
Converter.diagnoseIncomplete(S, Loc, T) << From->getSourceRange();
}
} IncompleteDiagnoser(Converter, From);
- if (RequireCompleteType(Loc, T, IncompleteDiagnoser))
+ if (Converter.Suppress ? !isCompleteType(Loc, T)
+ : RequireCompleteType(Loc, T, IncompleteDiagnoser))
return From;
// Look for a conversion to an integral or enumeration type.
@@ -5637,10 +5715,10 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
- if (Args.size() == 1 &&
- Constructor->isSpecializationCopyingObject() &&
+ if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
- IsDerivedFrom(Args[0]->getType(), ClassType))) {
+ IsDerivedFrom(Args[0]->getLocStart(), Args[0]->getType(),
+ ClassType))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_illegal_constructor;
return;
@@ -5761,7 +5839,7 @@ ObjCMethodDecl *Sema::SelectBestMethod(Selector Sel, MultiExprArg Args,
Match = false;
break;
}
-
+
ImplicitConversionSequence ConversionState
= TryCopyInitialization(*this, argExpr, param->getType(),
/*SuppressUserConversions*/false,
@@ -5809,27 +5887,36 @@ ObjCMethodDecl *Sema::SelectBestMethod(Selector Sel, MultiExprArg Args,
return nullptr;
}
-static bool IsNotEnableIfAttr(Attr *A) { return !isa<EnableIfAttr>(A); }
+// specific_attr_iterator iterates over enable_if attributes in reverse, but
+// enable_if is order-sensitive, so we sometimes need to reverse the list back
+// into declaration order. The inline size of 4 elements is arbitrary.
+static SmallVector<EnableIfAttr *, 4>
+getOrderedEnableIfAttrs(const FunctionDecl *Function) {
+ SmallVector<EnableIfAttr *, 4> Result;
+ if (!Function->hasAttrs())
+ return Result;
+
+ const auto &FuncAttrs = Function->getAttrs();
+ for (Attr *Attr : FuncAttrs)
+ if (auto *EnableIf = dyn_cast<EnableIfAttr>(Attr))
+ Result.push_back(EnableIf);
+
+ std::reverse(Result.begin(), Result.end());
+ return Result;
+}
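// Editorial sketch, not part of the patch: enable_if is order-sensitive
// because the first failing condition in declaration order is the one that
// should be diagnosed, while the AST stores attributes in reverse. A
// hypothetical declaration where the ordering is observable:

void clamp(int n)
    __attribute__((enable_if(n >= 0, "n must be non-negative")))
    __attribute__((enable_if(n < 256, "n must fit in a byte")));

void useClamp() {
  clamp(-1); // disabled; the note cites the first condition, n >= 0
}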
EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis) {
- // FIXME: specific_attr_iterator<EnableIfAttr> iterates in reverse order, but
- // we need to find the first failing one.
- if (!Function->hasAttrs())
- return nullptr;
- AttrVec Attrs = Function->getAttrs();
- AttrVec::iterator E = std::remove_if(Attrs.begin(), Attrs.end(),
- IsNotEnableIfAttr);
- if (Attrs.begin() == E)
+ auto EnableIfAttrs = getOrderedEnableIfAttrs(Function);
+ if (EnableIfAttrs.empty())
return nullptr;
- std::reverse(Attrs.begin(), E);
SFINAETrap Trap(*this);
-
- // Convert the arguments.
SmallVector<Expr *, 16> ConvertedArgs;
bool InitializationFailed = false;
bool ContainsValueDependentExpr = false;
+
+ // Convert the arguments.
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
if (i == 0 && !MissingImplicitThis && isa<CXXMethodDecl>(Function) &&
!cast<CXXMethodDecl>(Function)->isStatic() &&
@@ -5861,11 +5948,32 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
}
if (InitializationFailed || Trap.hasErrorOccurred())
- return cast<EnableIfAttr>(Attrs[0]);
+ return EnableIfAttrs[0];
+
+ // Push default arguments if needed.
+ if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
+ for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
+ ParmVarDecl *P = Function->getParamDecl(i);
+ ExprResult R = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ Function->getParamDecl(i)),
+ SourceLocation(),
+ P->hasUninstantiatedDefaultArg() ? P->getUninstantiatedDefaultArg()
+ : P->getDefaultArg());
+ if (R.isInvalid()) {
+ InitializationFailed = true;
+ break;
+ }
+ ContainsValueDependentExpr |= R.get()->isValueDependent();
+ ConvertedArgs.push_back(R.get());
+ }
+
+ if (InitializationFailed || Trap.hasErrorOccurred())
+ return EnableIfAttrs[0];
+ }
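// Editorial sketch, not part of the patch: the block above also converts
// defaulted arguments the caller omitted, so an enable_if condition that
// names a defaulted parameter can still be evaluated. Hypothetical example:

void resize(int n, int align = 8)
    __attribute__((enable_if(align >= 1, "alignment must be positive")));

void useResize() {
  resize(32); // only one argument is written; 'align' is materialized as 8
              // and the condition is checked against it
}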
- for (AttrVec::iterator I = Attrs.begin(); I != E; ++I) {
+ for (auto *EIA : EnableIfAttrs) {
APValue Result;
- EnableIfAttr *EIA = cast<EnableIfAttr>(*I);
if (EIA->getCond()->isValueDependent()) {
// Don't even try now, we'll examine it after instantiation.
continue;
@@ -6027,9 +6135,9 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
else {
// Determine the implicit conversion sequence for the object
// parameter.
- Candidate.Conversions[0]
- = TryObjectArgumentInitialization(*this, ObjectType, ObjectClassification,
- Method, ActingContext);
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
+ Method, ActingContext);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -6286,10 +6394,9 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
CXXRecordDecl *ConversionContext
= cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl());
- Candidate.Conversions[0]
- = TryObjectArgumentInitialization(*this, From->getType(),
- From->Classify(Context),
- Conversion, ConversionContext);
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), From->getType(),
+ From->Classify(Context), Conversion, ConversionContext);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
@@ -6303,7 +6410,8 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
QualType FromCanon
= Context.getCanonicalType(From->getType().getUnqualifiedType());
QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType();
- if (FromCanon == ToCanon || IsDerivedFrom(FromCanon, ToCanon)) {
+ if (FromCanon == ToCanon ||
+ IsDerivedFrom(CandidateSet.getLocation(), FromCanon, ToCanon)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_trivial_conversion;
return;
@@ -6325,7 +6433,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
&ConversionRef, VK_RValue);
QualType ConversionType = Conversion->getConversionType();
- if (RequireCompleteType(From->getLocStart(), ConversionType, 0)) {
+ if (!isCompleteType(From->getLocStart(), ConversionType)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
@@ -6463,10 +6571,9 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// Determine the implicit conversion sequence for the implicit
// object parameter.
- ImplicitConversionSequence ObjectInit
- = TryObjectArgumentInitialization(*this, Object->getType(),
- Object->Classify(Context),
- Conversion, ActingContext);
+ ImplicitConversionSequence ObjectInit = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), Object->getType(),
+ Object->Classify(Context), Conversion, ActingContext);
if (ObjectInit.isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -6575,7 +6682,8 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
// the set of member candidates is empty.
if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
// Complete the type if it can be completed.
- RequireCompleteType(OpLoc, T1, 0);
+ if (!isCompleteType(OpLoc, T1) && !T1Rec->isBeingDefined())
+ return;
// If the type is neither complete nor being defined, bail out now.
if (!T1Rec->getDecl()->getDefinition())
return;
@@ -6924,7 +7032,7 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
HasNullPtrType = true;
} else if (AllowUserConversions && TyRec) {
// No conversion functions in incomplete types.
- if (SemaRef.RequireCompleteType(Loc, Ty, 0))
+ if (!SemaRef.isCompleteType(Loc, Ty))
return;
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
@@ -7527,7 +7635,7 @@ public:
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (int Arg = 0; Arg < 2; ++Arg) {
- QualType AsymetricParamTypes[2] = {
+ QualType AsymmetricParamTypes[2] = {
S.Context.getPointerDiffType(),
S.Context.getPointerDiffType(),
};
@@ -7539,11 +7647,11 @@ public:
if (!PointeeTy->isObjectType())
continue;
- AsymetricParamTypes[Arg] = *Ptr;
+ AsymmetricParamTypes[Arg] = *Ptr;
if (Arg == 0 || Op == OO_Plus) {
// operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
// T* operator+(ptrdiff_t, T*);
- S.AddBuiltinCandidate(*Ptr, AsymetricParamTypes, Args, CandidateSet);
+ S.AddBuiltinCandidate(*Ptr, AsymmetricParamTypes, Args, CandidateSet);
}
if (Op == OO_Minus) {
// ptrdiff_t operator-(T, T);
@@ -8013,7 +8121,7 @@ public:
const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
QualType C2 = QualType(mptr->getClass(), 0);
C2 = C2.getUnqualifiedType();
- if (C1 != C2 && !S.IsDerivedFrom(C1, C2))
+ if (C1 != C2 && !S.IsDerivedFrom(CandidateSet.getLocation(), C1, C2))
break;
QualType ParamTypes[2] = { *Ptr, *MemPtr };
// build CV12 T&
@@ -8157,9 +8265,11 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Comma:
case OO_Arrow:
+ case OO_Coawait:
// C++ [over.match.oper]p3:
- // -- For the operator ',', the unary operator '&', or the
- // operator '->', the built-in candidates set is empty.
+ // -- For the operator ',', the unary operator '&', the
+ // operator '->', or the operator 'co_await', the
+ // built-in candidates set is empty.
break;
case OO_Plus: // '+' is either unary or binary
@@ -8328,6 +8438,44 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
}
}
+// Determines whether Cand1 is "better" in terms of its enable_if attrs than
+// Cand2 for overloading. This function assumes that all of the enable_if attrs
+// on Cand1 and Cand2 have conditions that evaluate to true.
+//
+// Cand1's set of enable_if attributes is said to be "better" than Cand2's iff
+// Cand1's first N enable_if attributes have precisely the same conditions as
+// Cand2's first N enable_if attributes (where N = the number of enable_if
+// attributes on Cand2), and Cand1 has more than N enable_if attributes.
+static bool hasBetterEnableIfAttrs(Sema &S, const FunctionDecl *Cand1,
+ const FunctionDecl *Cand2) {
+
+  // FIXME: getOrderedEnableIfAttrs is just
+  // specific_attr_iterator<EnableIfAttr>, except walking in declaration order
+  // instead of the reverse order in which attributes are stored in the AST.
+ auto Cand1Attrs = getOrderedEnableIfAttrs(Cand1);
+ auto Cand2Attrs = getOrderedEnableIfAttrs(Cand2);
+
+ // Candidate 1 is better if it has strictly more attributes and
+ // the common sequence is identical.
+ if (Cand1Attrs.size() <= Cand2Attrs.size())
+ return false;
+
+ auto Cand1I = Cand1Attrs.begin();
+ llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+ for (auto &Cand2A : Cand2Attrs) {
+ Cand1ID.clear();
+ Cand2ID.clear();
+
+ auto &Cand1A = *Cand1I++;
+ Cand1A->getCond()->Profile(Cand1ID, S.getASTContext(), true);
+ Cand2A->getCond()->Profile(Cand2ID, S.getASTContext(), true);
+ if (Cand1ID != Cand2ID)
+ return false;
+ }
+
+ return true;
+}
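// Editorial sketch, not part of the patch: hypothetical overloads ranked by
// the rule above. Both condition sets hold for the call, and #2's attribute
// list extends #1's with an identical common prefix, so #2 wins.

void log(int n) __attribute__((enable_if(n >= 0, "")));  // #1
void log(int n) __attribute__((enable_if(n >= 0, "")))
                __attribute__((enable_if(n != 0, "")));  // #2

void useLog() {
  log(1); // calls #2
}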
+
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
@@ -8359,7 +8507,7 @@ bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
bool HasBetterConversion = false;
for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
- switch (CompareImplicitConversionSequences(S,
+ switch (CompareImplicitConversionSequences(S, Loc,
Cand1.Conversions[ArgIdx],
Cand2.Conversions[ArgIdx])) {
case ImplicitConversionSequence::Better:
@@ -8398,7 +8546,7 @@ bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
ImplicitConversionSequence::CompareKind Result =
compareConversionFunctions(S, Cand1.Function, Cand2.Function);
if (Result == ImplicitConversionSequence::Indistinguishable)
- Result = CompareStandardConversionSequences(S,
+ Result = CompareStandardConversionSequences(S, Loc,
Cand1.FinalConversion,
Cand2.FinalConversion);
@@ -8438,51 +8586,90 @@ bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
// Check for enable_if value-based overload resolution.
if (Cand1.Function && Cand2.Function &&
(Cand1.Function->hasAttr<EnableIfAttr>() ||
- Cand2.Function->hasAttr<EnableIfAttr>())) {
- // FIXME: The next several lines are just
- // specific_attr_iterator<EnableIfAttr> but going in declaration order,
- // instead of reverse order which is how they're stored in the AST.
- AttrVec Cand1Attrs;
- if (Cand1.Function->hasAttrs()) {
- Cand1Attrs = Cand1.Function->getAttrs();
- Cand1Attrs.erase(std::remove_if(Cand1Attrs.begin(), Cand1Attrs.end(),
- IsNotEnableIfAttr),
- Cand1Attrs.end());
- std::reverse(Cand1Attrs.begin(), Cand1Attrs.end());
- }
-
- AttrVec Cand2Attrs;
- if (Cand2.Function->hasAttrs()) {
- Cand2Attrs = Cand2.Function->getAttrs();
- Cand2Attrs.erase(std::remove_if(Cand2Attrs.begin(), Cand2Attrs.end(),
- IsNotEnableIfAttr),
- Cand2Attrs.end());
- std::reverse(Cand2Attrs.begin(), Cand2Attrs.end());
- }
-
- // Candidate 1 is better if it has strictly more attributes and
- // the common sequence is identical.
- if (Cand1Attrs.size() <= Cand2Attrs.size())
- return false;
+ Cand2.Function->hasAttr<EnableIfAttr>()))
+ return hasBetterEnableIfAttrs(S, Cand1.Function, Cand2.Function);
+
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ Cand1.Function && Cand2.Function) {
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
+ S.IdentifyCUDAPreference(Caller, Cand2.Function);
+ }
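// Editorial sketch, not part of the patch: under -fcuda-target-overloads,
// the candidate whose target attributes better match the calling function's
// is preferred. Hypothetical mixed-target overload set:

__host__ void copy(int *dst);
__device__ void copy(int *dst);

__host__ void fromHost(int *p) { copy(p); }     // prefers the __host__ copy
__device__ void fromDevice(int *p) { copy(p); } // prefers the __device__ copy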
+
+ bool HasPS1 = Cand1.Function != nullptr &&
+ functionHasPassObjectSizeParams(Cand1.Function);
+ bool HasPS2 = Cand2.Function != nullptr &&
+ functionHasPassObjectSizeParams(Cand2.Function);
+ return HasPS1 != HasPS2 && HasPS1;
+}
+
+/// Determine whether two declarations are "equivalent" for the purposes of
+/// name lookup and overload resolution. This applies when the same internal/no
+/// linkage entity is defined by two modules (probably by textually including
+/// the same header). In such a case, we don't consider the declarations to
+/// declare the same entity, but we also don't want lookups with both
+/// declarations visible to be ambiguous in some cases (this happens when using
+/// a modularized libstdc++).
+bool Sema::isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
+ const NamedDecl *B) {
+ auto *VA = dyn_cast_or_null<ValueDecl>(A);
+ auto *VB = dyn_cast_or_null<ValueDecl>(B);
+ if (!VA || !VB)
+ return false;
- auto Cand1I = Cand1Attrs.begin();
- for (auto &Cand2A : Cand2Attrs) {
- auto &Cand1A = *Cand1I++;
- llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- cast<EnableIfAttr>(Cand1A)->getCond()->Profile(Cand1ID,
- S.getASTContext(), true);
- cast<EnableIfAttr>(Cand2A)->getCond()->Profile(Cand2ID,
- S.getASTContext(), true);
- if (Cand1ID != Cand2ID)
- return false;
- }
+ // The declarations must be declaring the same name as an internal linkage
+ // entity in different modules.
+ if (!VA->getDeclContext()->getRedeclContext()->Equals(
+ VB->getDeclContext()->getRedeclContext()) ||
+ getOwningModule(const_cast<ValueDecl *>(VA)) ==
+ getOwningModule(const_cast<ValueDecl *>(VB)) ||
+ VA->isExternallyVisible() || VB->isExternallyVisible())
+ return false;
+ // Check that the declarations appear to be equivalent.
+ //
+ // FIXME: Checking the type isn't really enough to resolve the ambiguity.
+ // For constants and functions, we should check the initializer or body is
+ // the same. For non-constant variables, we shouldn't allow it at all.
+ if (Context.hasSameType(VA->getType(), VB->getType()))
return true;
+
+ // Enum constants within unnamed enumerations will have different types, but
+ // may still be similar enough to be interchangeable for our purposes.
+ if (auto *EA = dyn_cast<EnumConstantDecl>(VA)) {
+ if (auto *EB = dyn_cast<EnumConstantDecl>(VB)) {
+ // Only handle anonymous enums. If the enumerations were named and
+ // equivalent, they would have been merged to the same type.
+ auto *EnumA = cast<EnumDecl>(EA->getDeclContext());
+ auto *EnumB = cast<EnumDecl>(EB->getDeclContext());
+ if (EnumA->hasNameForLinkage() || EnumB->hasNameForLinkage() ||
+ !Context.hasSameType(EnumA->getIntegerType(),
+ EnumB->getIntegerType()))
+ return false;
+ // Allow this only if the value is the same for both enumerators.
+ return llvm::APSInt::isSameValue(EA->getInitVal(), EB->getInitVal());
+ }
}
+ // Nothing else is sufficiently similar.
return false;
}
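// Editorial sketch, not part of the patch: the situation the check above
// accepts. Two modules textually include the same header, so each owns its
// own copy of an internal linkage function with the same type:
//
//   // helpers.h, included by both module A and module B
//   static int identity(int x) { return x; }
//
// A lookup that sees both copies would otherwise be ambiguous; because the
// declarations are equivalent, one is used and the duplicates are reported
// via diagnoseEquivalentInternalLinkageDeclarations below.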
+void Sema::diagnoseEquivalentInternalLinkageDeclarations(
+ SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv) {
+ Diag(Loc, diag::ext_equivalent_internal_linkage_decl_in_modules) << D;
+
+ Module *M = getOwningModule(const_cast<NamedDecl*>(D));
+ Diag(D->getLocation(), diag::note_equivalent_internal_linkage_decl)
+ << !M << (M ? M->getFullModuleName() : "");
+
+ for (auto *E : Equiv) {
+ Module *M = getOwningModule(const_cast<NamedDecl*>(E));
+ Diag(E->getLocation(), diag::note_equivalent_internal_linkage_decl)
+ << !M << (M ? M->getFullModuleName() : "");
+ }
+}
+
/// \brief Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
@@ -8510,6 +8697,8 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
if (Best == end())
return OR_No_Viable_Function;
+ llvm::SmallVector<const NamedDecl *, 4> EquivalentCands;
+
// Make sure that this function is better than every other viable
// function. If not, we have an ambiguity.
for (iterator Cand = begin(); Cand != end(); ++Cand) {
@@ -8517,6 +8706,12 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
Cand != Best &&
!isBetterOverloadCandidate(S, *Best, *Cand, Loc,
UserDefinedConversion)) {
+ if (S.isEquivalentInternalLinkageDeclaration(Best->Function,
+ Cand->Function)) {
+ EquivalentCands.push_back(Cand->Function);
+ continue;
+ }
+
Best = end();
return OR_Ambiguous;
}
@@ -8528,6 +8723,10 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
S.isFunctionConsideredUnavailable(Best->Function)))
return OR_Deleted;
+ if (!EquivalentCands.empty())
+ S.diagnoseEquivalentInternalLinkageDeclarations(Loc, Best->Function,
+ EquivalentCands);
+
return OR_Success;
}
@@ -8608,12 +8807,85 @@ void MaybeEmitInheritedConstructorNote(Sema &S, Decl *Fn) {
} // end anonymous namespace
+static bool isFunctionAlwaysEnabled(const ASTContext &Ctx,
+ const FunctionDecl *FD) {
+ for (auto *EnableIf : FD->specific_attrs<EnableIfAttr>()) {
+ bool AlwaysTrue;
+ if (!EnableIf->getCond()->EvaluateAsBooleanCondition(AlwaysTrue, Ctx))
+ return false;
+ if (!AlwaysTrue)
+ return false;
+ }
+ return true;
+}
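// Editorial sketch, not part of the patch: "always enabled" requires every
// enable_if condition to constant-fold to true independent of the arguments.

void a() __attribute__((enable_if(true, "")));        // always enabled
void b(int n) __attribute__((enable_if(n > 0, "")));  // depends on n, so it
                                                      // is not always enabled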
+
+/// \brief Returns true if we can take the address of the function.
+///
+/// \param Complain - If true, we'll emit a diagnostic.
+/// \param InOverloadResolution - For the purposes of emitting a diagnostic, are
+/// we in overload resolution?
+/// \param Loc - The location of the statement we're complaining about. Ignored
+/// if we're not complaining, or if we're in overload resolution.
+static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
+ bool Complain,
+ bool InOverloadResolution,
+ SourceLocation Loc) {
+ if (!isFunctionAlwaysEnabled(S.Context, FD)) {
+ if (Complain) {
+ if (InOverloadResolution)
+ S.Diag(FD->getLocStart(),
+ diag::note_addrof_ovl_candidate_disabled_by_enable_if_attr);
+ else
+ S.Diag(Loc, diag::err_addrof_function_disabled_by_enable_if_attr) << FD;
+ }
+ return false;
+ }
+
+ auto I = std::find_if(FD->param_begin(), FD->param_end(),
+ std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+ if (I == FD->param_end())
+ return true;
+
+ if (Complain) {
+ // Add one to ParamNo because it's user-facing
+ unsigned ParamNo = std::distance(FD->param_begin(), I) + 1;
+ if (InOverloadResolution)
+ S.Diag(FD->getLocation(),
+ diag::note_ovl_candidate_has_pass_object_size_params)
+ << ParamNo;
+ else
+ S.Diag(Loc, diag::err_address_of_function_with_pass_object_size_params)
+ << FD << ParamNo;
+ }
+ return false;
+}
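// Editorial sketch, not part of the patch: the two failure modes checked
// above, with hypothetical declarations.

void gated(int n) __attribute__((enable_if(n > 0, "n must be positive")));
void sized(void *p __attribute__((pass_object_size(0))));

void takeAddresses() {
  void (*fp1)(int) = gated;    // error: the enable_if condition can fail
  void (*fp2)(void *) = sized; // error: parameter 1 has pass_object_size
}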
+
+static bool checkAddressOfCandidateIsAvailable(Sema &S,
+ const FunctionDecl *FD) {
+ return checkAddressOfFunctionIsAvailable(S, FD, /*Complain=*/true,
+ /*InOverloadResolution=*/true,
+ /*Loc=*/SourceLocation());
+}
+
+bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
+ bool Complain,
+ SourceLocation Loc) {
+ return ::checkAddressOfFunctionIsAvailable(*this, Function, Complain,
+ /*InOverloadResolution=*/false,
+ Loc);
+}
+
// Notes the location of an overload candidate.
-void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType) {
+void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType,
+ bool TakingAddress) {
+ if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
+ return;
+
std::string FnDesc;
OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
<< (unsigned) K << FnDesc;
+
HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
Diag(Fn->getLocation(), PD);
MaybeEmitInheritedConstructorNote(*this, Fn);
@@ -8621,7 +8893,8 @@ void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType) {
// Notes the location of all overload candidates designated through
// OverloadedExpr
-void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr, QualType DestType) {
+void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType,
+ bool TakingAddress) {
assert(OverloadedExpr->getType() == Context.OverloadTy);
OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
@@ -8632,10 +8905,11 @@ void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr, QualType DestType) {
I != IEnd; ++I) {
if (FunctionTemplateDecl *FunTmpl =
dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
- NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType);
+ NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType,
+ TakingAddress);
} else if (FunctionDecl *Fun
= dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
- NoteOverloadCandidate(Fun, DestType);
+ NoteOverloadCandidate(Fun, DestType, TakingAddress);
}
}
}
@@ -8666,7 +8940,7 @@ void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
}
static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
- unsigned I) {
+ unsigned I, bool TakingCandidateAddress) {
const ImplicitConversionSequence &Conv = Cand->Conversions[I];
assert(Conv.isBad());
assert(Cand->Function && "for now, candidate must be a function");
@@ -8808,7 +9082,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
FromPtrTy->getPointeeType()) &&
!FromPtrTy->getPointeeType()->isIncompleteType() &&
!ToPtrTy->getPointeeType()->isIncompleteType() &&
- S.IsDerivedFrom(ToPtrTy->getPointeeType(),
+ S.IsDerivedFrom(SourceLocation(), ToPtrTy->getPointeeType(),
FromPtrTy->getPointeeType()))
BaseToDerivedConversion = 1;
}
@@ -8826,7 +9100,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
!FromTy->isIncompleteType() &&
!ToRefTy->getPointeeType()->isIncompleteType() &&
- S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy)) {
+ S.IsDerivedFrom(SourceLocation(), ToRefTy->getPointeeType(), FromTy)) {
BaseToDerivedConversion = 3;
} else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
ToTy.getNonReferenceType().getCanonicalType() ==
@@ -8864,7 +9138,11 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
return;
}
}
-
+
+ if (TakingCandidateAddress &&
+ !checkAddressOfCandidateIsAvailable(S, Cand->Function))
+ return;
+
// Emit the generic diagnostic and, optionally, add the hints to it.
PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
FDiag << (unsigned) FnKind << FnDesc
@@ -8975,7 +9253,8 @@ static TemplateDecl *getDescribedTemplate(Decl *Templated) {
/// Diagnose a failed template-argument deduction.
static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
DeductionFailureInfo &DeductionFailure,
- unsigned NumArgs) {
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
TemplateParameter Param = DeductionFailure.getTemplateParameter();
NamedDecl *ParamD;
(ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
@@ -9143,6 +9422,11 @@ static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
}
}
}
+
+ if (TakingCandidateAddress && isa<FunctionDecl>(Templated) &&
+ !checkAddressOfCandidateIsAvailable(S, cast<FunctionDecl>(Templated)))
+ return;
+
// FIXME: For generic lambda parameters, check if the function is a lambda
// call operator, and if so, emit a prettier and more informative
// diagnostic that mentions 'auto' and lambda in addition to
@@ -9163,14 +9447,15 @@ static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
/// Diagnose a failed template-argument deduction, for function calls.
static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
- unsigned NumArgs) {
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
unsigned TDK = Cand->DeductionFailure.Result;
if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
if (CheckArityMismatch(S, Cand, NumArgs))
return;
}
DiagnoseBadDeduction(S, Cand->Function, // pattern
- Cand->DeductionFailure, NumArgs);
+ Cand->DeductionFailure, NumArgs, TakingCandidateAddress);
}
/// CUDA: diagnose an invalid call across targets.
@@ -9251,7 +9536,8 @@ static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
- unsigned NumArgs) {
+ unsigned NumArgs,
+ bool TakingCandidateAddress) {
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
@@ -9279,7 +9565,7 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
return DiagnoseArityMismatch(S, Cand, NumArgs);
case ovl_fail_bad_deduction:
- return DiagnoseBadDeduction(S, Cand, NumArgs);
+ return DiagnoseBadDeduction(S, Cand, NumArgs, TakingCandidateAddress);
case ovl_fail_illegal_constructor: {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_illegal_constructor)
@@ -9297,7 +9583,7 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
for (unsigned N = Cand->NumConversions; I != N; ++I)
if (Cand->Conversions[I].isBad())
- return DiagnoseBadConversion(S, Cand, I);
+ return DiagnoseBadConversion(S, Cand, I, TakingCandidateAddress);
// FIXME: this currently happens when we're called from SemaInit
// when user-conversion overload fails. Figure out how to handle
@@ -9421,9 +9707,10 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
namespace {
struct CompareOverloadCandidatesForDisplay {
Sema &S;
+ SourceLocation Loc;
size_t NumArgs;
-  CompareOverloadCandidatesForDisplay(Sema &S, size_t nArgs)
-    : S(S), NumArgs(nArgs) {}
+  CompareOverloadCandidatesForDisplay(Sema &S, SourceLocation Loc, size_t nArgs)
+    : S(S), Loc(Loc), NumArgs(nArgs) {}
bool operator()(const OverloadCandidate *L,
@@ -9494,7 +9781,7 @@ struct CompareOverloadCandidatesForDisplay {
int leftBetter = 0;
unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
for (unsigned E = L->NumConversions; I != E; ++I) {
- switch (CompareImplicitConversionSequences(S,
+ switch (CompareImplicitConversionSequences(S, Loc,
L->Conversions[I],
R->Conversions[I])) {
case ImplicitConversionSequence::Better:
@@ -9649,7 +9936,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
}
std::sort(Cands.begin(), Cands.end(),
- CompareOverloadCandidatesForDisplay(S, Args.size()));
+ CompareOverloadCandidatesForDisplay(S, OpLoc, Args.size()));
bool ReportedAmbiguousConversions = false;
@@ -9668,7 +9955,8 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
++CandsShown;
if (Cand->Function)
- NoteFunctionCandidate(S, Cand, Args.size());
+ NoteFunctionCandidate(S, Cand, Args.size(),
+ /*TakingCandidateAddress=*/false);
else if (Cand->IsSurrogate)
NoteSurrogateCandidate(S, Cand);
else {
@@ -9736,9 +10024,10 @@ struct CompareTemplateSpecCandidatesForDisplay {
/// Diagnose a template argument deduction failure.
/// We are treating these failures as overload failures due to bad
/// deductions.
-void TemplateSpecCandidate::NoteDeductionFailure(Sema &S) {
+void TemplateSpecCandidate::NoteDeductionFailure(Sema &S,
+ bool ForTakingAddress) {
DiagnoseBadDeduction(S, Specialization, // pattern
- DeductionFailure, /*NumArgs=*/0);
+ DeductionFailure, /*NumArgs=*/0, ForTakingAddress);
}
void TemplateSpecCandidateSet::destroyCandidates() {
@@ -9791,7 +10080,7 @@ void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
assert(Cand->Specialization &&
"Non-matching built-in candidates are not added to Cands.");
- Cand->NoteDeductionFailure(S);
+ Cand->NoteDeductionFailure(S, ForTakingAddress);
}
if (I != E)
@@ -9836,6 +10125,7 @@ class AddressOfFunctionResolver {
bool TargetTypeIsNonStaticMemberFunction;
bool FoundNonTemplateFunction;
bool StaticMemberFunctionFromBoundPointer;
+ bool HasComplained;
OverloadExpr::FindResult OvlExprInfo;
OverloadExpr *OvlExpr;
@@ -9852,9 +10142,10 @@ public:
!!TargetType->getAs<MemberPointerType>()),
FoundNonTemplateFunction(false),
StaticMemberFunctionFromBoundPointer(false),
+ HasComplained(false),
OvlExprInfo(OverloadExpr::find(SourceExpr)),
OvlExpr(OvlExprInfo.Expression),
- FailedCandidates(OvlExpr->getNameLoc()) {
+ FailedCandidates(OvlExpr->getNameLoc(), /*ForTakingAddress=*/true) {
ExtractUnqualifiedFunctionTypeFromTargetType();
if (TargetFunctionType->isFunctionType()) {
@@ -9885,21 +10176,57 @@ public:
}
if (OvlExpr->hasExplicitTemplateArgs())
- OvlExpr->getExplicitTemplateArgs().copyInto(OvlExplicitTemplateArgs);
+ OvlExpr->copyTemplateArgumentsInto(OvlExplicitTemplateArgs);
if (FindAllFunctionsThatMatchTargetTypeExactly()) {
// C++ [over.over]p4:
// If more than one function is selected, [...]
- if (Matches.size() > 1) {
+      if (Matches.size() > 1 && !eliminateSuboptimalOverloadCandidates()) {
if (FoundNonTemplateFunction)
EliminateAllTemplateMatches();
else
EliminateAllExceptMostSpecializedTemplate();
}
}
+
+ if (S.getLangOpts().CUDA && S.getLangOpts().CUDATargetOverloads &&
+ Matches.size() > 1)
+ EliminateSuboptimalCudaMatches();
}
-
+
+ bool hasComplained() const { return HasComplained; }
+
private:
+ // Is A considered a better overload candidate for the desired type than B?
+ bool isBetterCandidate(const FunctionDecl *A, const FunctionDecl *B) {
+ return hasBetterEnableIfAttrs(S, A, B);
+ }
+
+ // Returns true if we've eliminated any (read: all but one) candidates, false
+ // otherwise.
+  bool eliminateSuboptimalOverloadCandidates() {
+ // Same algorithm as overload resolution -- one pass to pick the "best",
+ // another pass to be sure that nothing is better than the best.
+ auto Best = Matches.begin();
+ for (auto I = Matches.begin()+1, E = Matches.end(); I != E; ++I)
+ if (isBetterCandidate(I->second, Best->second))
+ Best = I;
+
+ const FunctionDecl *BestFn = Best->second;
+ auto IsBestOrInferiorToBest = [this, BestFn](
+ const std::pair<DeclAccessPair, FunctionDecl *> &Pair) {
+ return BestFn == Pair.second || isBetterCandidate(BestFn, Pair.second);
+ };
+
+    // Note: We explicitly leave Matches unmodified if there isn't a clear
+    // best option, so we can potentially give the user a better error message.
+ if (!std::all_of(Matches.begin(), Matches.end(), IsBestOrInferiorToBest))
+ return false;
+ Matches[0] = *Best;
+ Matches.resize(1);
+ return true;
+ }
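// Editorial sketch, not part of the patch: this pass lets address-of apply
// the same enable_if ranking a call would. Hypothetical overloads that both
// match the target type, where #2 is strictly better:

void pick(int) __attribute__((enable_if(true, "")));              // #1
void pick(int) __attribute__((enable_if(true, "")))
               __attribute__((enable_if(sizeof(int) >= 2, "")));  // #2

void (*fp)(int) = &pick; // unambiguous: resolves to #2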
+
bool isTargetTypeAFunction() const {
return TargetFunctionType->isFunctionType();
}
@@ -9953,6 +10280,10 @@ private:
assert(S.isSameOrCompatibleFunctionType(
Context.getCanonicalType(Specialization->getType()),
Context.getCanonicalType(TargetFunctionType)));
+
+ if (!S.checkAddressOfFunctionIsAvailable(Specialization))
+ return false;
+
Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
return true;
}
@@ -9978,16 +10309,22 @@ private:
// now.
if (S.getLangOpts().CPlusPlus14 &&
FunDecl->getReturnType()->isUndeducedType() &&
- S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain))
+ S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain)) {
+ HasComplained |= Complain;
+ return false;
+ }
+
+ if (!S.checkAddressOfFunctionIsAvailable(FunDecl))
return false;
QualType ResultTy;
if (Context.hasSameUnqualifiedType(TargetFunctionType,
FunDecl->getType()) ||
S.IsNoReturnConversion(FunDecl->getType(), TargetFunctionType,
- ResultTy)) {
- Matches.push_back(std::make_pair(CurAccessFunPair,
- cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
+ ResultTy) ||
+ (!S.getLangOpts().CPlusPlus && TargetType->isVoidPointerType())) {
+ Matches.push_back(std::make_pair(
+ CurAccessFunPair, cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
FoundNonTemplateFunction = true;
return true;
}
@@ -10061,7 +10398,8 @@ private:
Matches[0].first = Matches[Result - MatchesCopy.begin()].first;
Matches[0].second = cast<FunctionDecl>(*Result);
Matches.resize(1);
- }
+ } else
+ HasComplained |= Complain;
}
void EliminateAllTemplateMatches() {
@@ -10072,11 +10410,15 @@ private:
++I;
else {
Matches[I] = Matches[--N];
- Matches.set_size(N);
+ Matches.resize(N);
}
}
}
+ void EliminateSuboptimalCudaMatches() {
+ S.EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(S.CurContext), Matches);
+ }
+
public:
void ComplainNoMatchesFound() const {
assert(Matches.empty());
@@ -10084,7 +10426,8 @@ public:
<< OvlExpr->getName() << TargetFunctionType
<< OvlExpr->getSourceRange();
if (FailedCandidates.empty())
- S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
+ /*TakingAddress=*/true);
else {
// We have some deduction failure messages. Use them to diagnose
// the function templates, and diagnose the non-template candidates
@@ -10094,7 +10437,9 @@ public:
I != IEnd; ++I)
if (FunctionDecl *Fun =
dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()))
- S.NoteOverloadCandidate(Fun, TargetFunctionType);
+ if (!functionHasPassObjectSizeParams(Fun))
+ S.NoteOverloadCandidate(Fun, TargetFunctionType,
+ /*TakingAddress=*/true);
FailedCandidates.NoteCandidates(S, OvlExpr->getLocStart());
}
}
@@ -10132,7 +10477,8 @@ public:
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
<< OvlExpr->getName()
<< OvlExpr->getSourceRange();
- S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
+ /*TakingAddress=*/true);
}
bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
@@ -10178,13 +10524,14 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
Complain);
int NumMatches = Resolver.getNumMatches();
FunctionDecl *Fn = nullptr;
- if (NumMatches == 0 && Complain) {
+ bool ShouldComplain = Complain && !Resolver.hasComplained();
+ if (NumMatches == 0 && ShouldComplain) {
if (Resolver.IsInvalidFormOfPointerToMemberFunction())
Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
else
Resolver.ComplainNoMatchesFound();
}
- else if (NumMatches > 1 && Complain)
+ else if (NumMatches > 1 && ShouldComplain)
Resolver.ComplainMultipleMatchesFound();
else if (NumMatches == 1) {
Fn = Resolver.getMatchingFunctionDecl();
@@ -10229,7 +10576,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
return nullptr;
TemplateArgumentListInfo ExplicitTemplateArgs;
- ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
+ ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc());
// Look through all of the overloaded functions, searching for one
@@ -10303,7 +10650,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool doFunctionPointerConverion,
- bool complain, const SourceRange& OpRangeForComplaining,
+ bool complain, SourceRange OpRangeForComplaining,
QualType DestTypeForComplaining,
unsigned DiagIDForComplaining) {
assert(SrcExpr.get()->getType() == Context.OverloadTy);
@@ -10678,8 +11025,8 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// casts and such from the call, we don't really care.
ExprResult NewFn = ExprError();
if ((*R.begin())->isCXXClassMember())
- NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
- R, ExplicitTemplateArgs);
+ NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
+ ExplicitTemplateArgs, S);
else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
ExplicitTemplateArgs);
@@ -10749,6 +11096,8 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
CallExpr *CE = new (Context) CallExpr(
Context, Fn, Args, Context.DependentTy, VK_RValue, RParenLoc);
CE->setTypeDependent(true);
+ CE->setValueDependent(true);
+ CE->setInstantiationDependent(true);
*Result = CE;
return true;
}
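// Editorial sketch, not part of the patch: the dependent CallExpr built just
// above arises for overloaded calls with dependent arguments, e.g.:

int f(int);
double f(double);

template <typename T> auto g(T t) -> decltype(f(t)) {
  return f(t); // type-, value-, and instantiation-dependent until g is
               // instantiated; the patch now marks all three bits
}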
@@ -10800,9 +11149,23 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
if (!Recovery.isInvalid())
return Recovery;
- SemaRef.Diag(Fn->getLocStart(),
- diag::err_ovl_no_viable_function_in_call)
- << ULE->getName() << Fn->getSourceRange();
+ // If the user passes in a function that we can't take the address of, we
+ // generally end up emitting really bad error messages. Here, we attempt to
+ // emit better ones.
+ for (const Expr *Arg : Args) {
+ if (!Arg->getType()->isFunctionType())
+ continue;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts())) {
+ auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (FD &&
+ !SemaRef.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ Arg->getExprLoc()))
+ return ExprError();
+ }
+ }
+
+ SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call)
+ << ULE->getName() << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
break;
}
@@ -10875,8 +11238,7 @@ static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
///
/// \param OpLoc The location of the operator itself (e.g., '*').
///
-/// \param OpcIn The UnaryOperator::Opcode that describes this
-/// operator.
+/// \param Opc The UnaryOperatorKind that describes this operator.
///
/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
@@ -10887,11 +11249,9 @@ static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
///
/// \param Input The input argument.
ExprResult
-Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
+Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *Input) {
- UnaryOperator::Opcode Opc = static_cast<UnaryOperator::Opcode>(OpcIn);
-
OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc);
assert(Op != OO_None && "Invalid opcode for overloaded unary operator");
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
@@ -11062,8 +11422,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
///
/// \param OpLoc The location of the operator itself (e.g., '+').
///
-/// \param OpcIn The BinaryOperator::Opcode that describes this
-/// operator.
+/// \param Opc The BinaryOperatorKind that describes this operator.
///
/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
@@ -11076,13 +11435,12 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
/// \param RHS Right-hand argument.
ExprResult
Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
- unsigned OpcIn,
+ BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS) {
Expr *Args[2] = { LHS, RHS };
LHS=RHS=nullptr; // Please use only Args instead of LHS/RHS couple
- BinaryOperator::Opcode Opc = static_cast<BinaryOperator::Opcode>(OpcIn);
OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
@@ -11565,10 +11923,6 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
<< (qualsString.find(' ') == std::string::npos ? 1 : 2);
}
- if (resultType->isMemberPointerType())
- if (Context.getTargetInfo().getCXXABI().isMicrosoft())
- RequireCompleteType(LParenLoc, resultType, 0);
-
CXXMemberCallExpr *call
= new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
resultType, valueKind, RParenLoc);
@@ -11767,18 +12121,39 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (CheckFunctionCall(Method, TheCall, Proto))
return ExprError();
+  // If the method to call was not selected by the overload resolution
+  // process, we still need to handle the enable_if attribute. Do that here,
+  // so it will not hide previous -- and more relevant -- errors.
+ if (isa<MemberExpr>(NakedMemExpr)) {
+ if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
+ Diag(MemExprE->getLocStart(),
+ diag::err_ovl_no_viable_member_function_in_call)
+ << Method << Method->getSourceRange();
+ Diag(Method->getLocation(),
+ diag::note_ovl_candidate_disabled_by_enable_if_attr)
+ << Attr->getCond()->getSourceRange() << Attr->getMessage();
+ return ExprError();
+ }
+ }
+
if ((isa<CXXConstructorDecl>(CurContext) ||
isa<CXXDestructorDecl>(CurContext)) &&
TheCall->getMethodDecl()->isPure()) {
const CXXMethodDecl *MD = TheCall->getMethodDecl();
- if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts())) {
- Diag(MemExpr->getLocStart(),
+ if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()) &&
+ MemExpr->performsVirtualDispatch(getLangOpts())) {
+ Diag(MemExpr->getLocStart(),
diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
<< MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
<< MD->getParent()->getDeclName();
Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
+ if (getLangOpts().AppleKext)
+ Diag(MemExpr->getLocStart(),
+ diag::note_pure_qualified_call_kext)
+ << MD->getParent()->getDeclName()
+ << MD->getDeclName();
}
}
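// Editorial sketch, not part of the patch: the performsVirtualDispatch check
// added above restricts the warning to calls that actually dispatch
// virtually.

struct Widget {
  Widget() {
    init();          // warned: pure virtual call during construction
    Widget::init();  // qualified, non-virtual dispatch: no warning (though a
                     // definition must still exist at link time)
  }
  virtual void init() = 0;
};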
return MaybeBindToTemporary(TheCall);
@@ -12267,13 +12642,14 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
/// otherwise CallExpr is set to ExprError() and some non-success value
/// is returned.
Sema::ForRangeStatus
-Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
- SourceLocation RangeLoc, VarDecl *Decl,
- BeginEndFunction BEF,
+Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
+ SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr) {
+ Scope *S = nullptr;
+
CandidateSet->clear();
if (!MemberLookup.empty()) {
ExprResult MemberRef =
@@ -12282,18 +12658,14 @@ Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
/*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
MemberLookup,
- /*TemplateArgs=*/nullptr);
+ /*TemplateArgs=*/nullptr, S);
if (MemberRef.isInvalid()) {
*CallExpr = ExprError();
- Diag(Range->getLocStart(), diag::note_in_for_range)
- << RangeLoc << BEF << Range->getType();
return FRS_DiagnosticIssued;
}
*CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
if (CallExpr->isInvalid()) {
*CallExpr = ExprError();
- Diag(Range->getLocStart(), diag::note_in_for_range)
- << RangeLoc << BEF << Range->getType();
return FRS_DiagnosticIssued;
}
} else {
@@ -12324,8 +12696,6 @@ Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
/*AllowTypoCorrection=*/false);
if (CallExpr->isInvalid() || OverloadResult != OR_Success) {
*CallExpr = ExprError();
- Diag(Range->getLocStart(), diag::note_in_for_range)
- << RangeLoc << BEF << Range->getType();
return FRS_DiagnosticIssued;
}
}
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index fec97488f531..e5d51f173caa 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -44,17 +44,76 @@ using namespace sema;
namespace {
// Basically just a very focused copy of TreeTransform.
- template <class T> struct Rebuilder {
+ struct Rebuilder {
Sema &S;
- Rebuilder(Sema &S) : S(S) {}
+ unsigned MSPropertySubscriptCount;
+ typedef llvm::function_ref<Expr *(Expr *, unsigned)> SpecificRebuilderRefTy;
+ const SpecificRebuilderRefTy &SpecificCallback;
+ Rebuilder(Sema &S, const SpecificRebuilderRefTy &SpecificCallback)
+ : S(S), MSPropertySubscriptCount(0),
+ SpecificCallback(SpecificCallback) {}
+
+ Expr *rebuildObjCPropertyRefExpr(ObjCPropertyRefExpr *refExpr) {
+ // Fortunately, the constraint that we're rebuilding something
+ // with a base limits the number of cases here.
+ if (refExpr->isClassReceiver() || refExpr->isSuperReceiver())
+ return refExpr;
+
+ if (refExpr->isExplicitProperty()) {
+ return new (S.Context) ObjCPropertyRefExpr(
+ refExpr->getExplicitProperty(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getLocation(), SpecificCallback(refExpr->getBase(), 0));
+ }
+ return new (S.Context) ObjCPropertyRefExpr(
+ refExpr->getImplicitPropertyGetter(),
+ refExpr->getImplicitPropertySetter(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getLocation(), SpecificCallback(refExpr->getBase(), 0));
+ }
+ Expr *rebuildObjCSubscriptRefExpr(ObjCSubscriptRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
+ assert(refExpr->getKeyExpr());
+
+ return new (S.Context) ObjCSubscriptRefExpr(
+ SpecificCallback(refExpr->getBaseExpr(), 0),
+ SpecificCallback(refExpr->getKeyExpr(), 1), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getAtIndexMethodDecl(), refExpr->setAtIndexMethodDecl(),
+ refExpr->getRBracket());
+ }
+ Expr *rebuildMSPropertyRefExpr(MSPropertyRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
- T &getDerived() { return static_cast<T&>(*this); }
+ return new (S.Context) MSPropertyRefExpr(
+ SpecificCallback(refExpr->getBaseExpr(), 0),
+ refExpr->getPropertyDecl(), refExpr->isArrow(), refExpr->getType(),
+ refExpr->getValueKind(), refExpr->getQualifierLoc(),
+ refExpr->getMemberLoc());
+ }
+ Expr *rebuildMSPropertySubscriptExpr(MSPropertySubscriptExpr *refExpr) {
+ assert(refExpr->getBase());
+ assert(refExpr->getIdx());
+
+ auto *NewBase = rebuild(refExpr->getBase());
+ ++MSPropertySubscriptCount;
+ return new (S.Context) MSPropertySubscriptExpr(
+ NewBase,
+ SpecificCallback(refExpr->getIdx(), MSPropertySubscriptCount),
+ refExpr->getType(), refExpr->getValueKind(), refExpr->getObjectKind(),
+ refExpr->getRBracketLoc());
+ }
Expr *rebuild(Expr *e) {
// Fast path: nothing to look through.
- if (typename T::specific_type *specific
- = dyn_cast<typename T::specific_type>(e))
- return getDerived().rebuildSpecific(specific);
+ if (auto *PRE = dyn_cast<ObjCPropertyRefExpr>(e))
+ return rebuildObjCPropertyRefExpr(PRE);
+ if (auto *SRE = dyn_cast<ObjCSubscriptRefExpr>(e))
+ return rebuildObjCSubscriptRefExpr(SRE);
+ if (auto *MSPRE = dyn_cast<MSPropertyRefExpr>(e))
+ return rebuildMSPropertyRefExpr(MSPRE);
+ if (auto *MSPSE = dyn_cast<MSPropertySubscriptExpr>(e))
+ return rebuildMSPropertySubscriptExpr(MSPSE);
// Otherwise, we should look through and rebuild anything that
// IgnoreParens would.
@@ -125,72 +184,6 @@ namespace {
}
};
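// Editorial sketch, not part of the patch: with the callback-driven Rebuilder
// above, callers pass the replacement operands through an llvm::function_ref
// instead of defining a CRTP subclass. Hypothetical call site, where NewBase
// and NewKeyExpr are previously captured operands:
//
//   Rebuilder rebuilder(S, [&](Expr *old, unsigned index) -> Expr * {
//     return index == 0 ? NewBase : NewKeyExpr;
//   });
//   Expr *syntacticForm = rebuilder.rebuild(refExpr);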
- struct ObjCPropertyRefRebuilder : Rebuilder<ObjCPropertyRefRebuilder> {
- Expr *NewBase;
- ObjCPropertyRefRebuilder(Sema &S, Expr *newBase)
- : Rebuilder<ObjCPropertyRefRebuilder>(S), NewBase(newBase) {}
-
- typedef ObjCPropertyRefExpr specific_type;
- Expr *rebuildSpecific(ObjCPropertyRefExpr *refExpr) {
- // Fortunately, the constraint that we're rebuilding something
- // with a base limits the number of cases here.
- assert(refExpr->isObjectReceiver());
-
- if (refExpr->isExplicitProperty()) {
- return new (S.Context)
- ObjCPropertyRefExpr(refExpr->getExplicitProperty(),
- refExpr->getType(), refExpr->getValueKind(),
- refExpr->getObjectKind(), refExpr->getLocation(),
- NewBase);
- }
- return new (S.Context)
- ObjCPropertyRefExpr(refExpr->getImplicitPropertyGetter(),
- refExpr->getImplicitPropertySetter(),
- refExpr->getType(), refExpr->getValueKind(),
- refExpr->getObjectKind(),refExpr->getLocation(),
- NewBase);
- }
- };
-
- struct ObjCSubscriptRefRebuilder : Rebuilder<ObjCSubscriptRefRebuilder> {
- Expr *NewBase;
- Expr *NewKeyExpr;
- ObjCSubscriptRefRebuilder(Sema &S, Expr *newBase, Expr *newKeyExpr)
- : Rebuilder<ObjCSubscriptRefRebuilder>(S),
- NewBase(newBase), NewKeyExpr(newKeyExpr) {}
-
- typedef ObjCSubscriptRefExpr specific_type;
- Expr *rebuildSpecific(ObjCSubscriptRefExpr *refExpr) {
- assert(refExpr->getBaseExpr());
- assert(refExpr->getKeyExpr());
-
- return new (S.Context)
- ObjCSubscriptRefExpr(NewBase,
- NewKeyExpr,
- refExpr->getType(), refExpr->getValueKind(),
- refExpr->getObjectKind(),refExpr->getAtIndexMethodDecl(),
- refExpr->setAtIndexMethodDecl(),
- refExpr->getRBracket());
- }
- };
-
- struct MSPropertyRefRebuilder : Rebuilder<MSPropertyRefRebuilder> {
- Expr *NewBase;
- MSPropertyRefRebuilder(Sema &S, Expr *newBase)
- : Rebuilder<MSPropertyRefRebuilder>(S), NewBase(newBase) {}
-
- typedef MSPropertyRefExpr specific_type;
- Expr *rebuildSpecific(MSPropertyRefExpr *refExpr) {
- assert(refExpr->getBaseExpr());
-
- return new (S.Context)
- MSPropertyRefExpr(NewBase, refExpr->getPropertyDecl(),
- refExpr->isArrow(), refExpr->getType(),
- refExpr->getValueKind(), refExpr->getQualifierLoc(),
- refExpr->getMemberLoc());
- }
- };
-
class PseudoOpBuilder {
public:
Sema &S;
@@ -236,7 +229,7 @@ namespace {
}
/// Return true if assignments have a non-void result.
- bool CanCaptureValue(Expr *exp) {
+ static bool CanCaptureValue(Expr *exp) {
if (exp->isGLValue())
return true;
QualType ty = exp->getType();
@@ -252,6 +245,20 @@ namespace {
virtual ExprResult buildGet() = 0;
virtual ExprResult buildSet(Expr *, SourceLocation,
bool captureSetValueAsResult) = 0;
+ /// \brief Should the result of an assignment be the formal result of the
+ /// setter call or the value that was passed to the setter?
+ ///
+ /// Different pseudo-object language features use different language rules
+ /// for this.
+ /// The default is to use the set value. Currently, this affects the
+ /// behavior of simple assignments, compound assignments, and prefix
+ /// increment and decrement.
+ /// Postfix increment and decrement always use the getter result as the
+ /// expression result.
+ ///
+ /// If this method returns true, and the set value isn't capturable for
+ /// some reason, the result of the expression will be void.
+ virtual bool captureSetValueAsResult() const { return true; }
};
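
To make the new hook concrete: with MSVC `__declspec(property)` the value of an assignment is the formal result of the setter call, while Objective-C properties yield the stored value. A minimal sketch, assuming ordinary get_/put_ accessors:

    struct S {
      int val;
      int get_x() const { return val; }
      int put_x(int v) { return val = v; }  // the setter's formal result
      __declspec(property(get = get_x, put = put_x)) int x;
    };
    // With captureSetValueAsResult() returning true (the default), the value
    // of (s.x = 5) would be the captured RHS, 5. MSPropertyOpBuilder below
    // overrides it to false, so the value is whatever put_x(5) returns.
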
/// A PseudoOpBuilder for Objective-C \@properties.
@@ -328,15 +335,25 @@ namespace {
class MSPropertyOpBuilder : public PseudoOpBuilder {
MSPropertyRefExpr *RefExpr;
+ OpaqueValueExpr *InstanceBase;
+ SmallVector<Expr *, 4> CallArgs;
+
+ MSPropertyRefExpr *getBaseMSProperty(MSPropertySubscriptExpr *E);
public:
MSPropertyOpBuilder(Sema &S, MSPropertyRefExpr *refExpr) :
PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
- RefExpr(refExpr) {}
+ RefExpr(refExpr), InstanceBase(nullptr) {}
+ MSPropertyOpBuilder(Sema &S, MSPropertySubscriptExpr *refExpr)
+ : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ InstanceBase(nullptr) {
+ RefExpr = getBaseMSProperty(refExpr);
+ }
Expr *rebuildAndCaptureObject(Expr *) override;
ExprResult buildGet() override;
ExprResult buildSet(Expr *op, SourceLocation, bool) override;
+ bool captureSetValueAsResult() const override { return false; }
};
}
@@ -406,19 +423,27 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
BinaryOperatorKind opcode,
Expr *LHS, Expr *RHS) {
assert(BinaryOperator::isAssignmentOp(opcode));
-
- // Recover from user error
- if (isa<UnresolvedLookupExpr>(RHS))
- return ExprError();
Expr *syntacticLHS = rebuildAndCaptureObject(LHS);
OpaqueValueExpr *capturedRHS = capture(RHS);
+ // In some very specific cases, semantic analysis of the RHS as an
+ // expression may require it to be rewritten. In these cases, we
+ // cannot safely keep the OVE around. Fortunately, we don't really
+ // need to: we don't use this particular OVE in multiple places, and
+ // no clients rely that closely on matching up expressions in the
+ // semantic expression with expressions from the syntactic form.
+ Expr *semanticRHS = capturedRHS;
+ if (RHS->hasPlaceholderType() || isa<InitListExpr>(RHS)) {
+ semanticRHS = RHS;
+ Semantics.pop_back();
+ }
+
Expr *syntactic;
ExprResult result;
if (opcode == BO_Assign) {
- result = capturedRHS;
+ result = semanticRHS;
syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
opcode, capturedRHS->getType(),
capturedRHS->getValueKind(),
@@ -430,8 +455,7 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
// Build an ordinary, non-compound operation.
BinaryOperatorKind nonCompound =
BinaryOperator::getOpForCompoundAssignment(opcode);
- result = S.BuildBinOp(Sc, opcLoc, nonCompound,
- opLHS.get(), capturedRHS);
+ result = S.BuildBinOp(Sc, opcLoc, nonCompound, opLHS.get(), semanticRHS);
if (result.isInvalid()) return ExprError();
syntactic =
@@ -446,9 +470,12 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
// The result of the assignment, if not void, is the value set into
// the l-value.
- result = buildSet(result.get(), opcLoc, /*captureSetValueAsResult*/ true);
+ result = buildSet(result.get(), opcLoc, captureSetValueAsResult());
if (result.isInvalid()) return ExprError();
addSemanticExpr(result.get());
+ if (!captureSetValueAsResult() && !result.get()->getType()->isVoidType() &&
+ (result.get()->isTypeDependent() || CanCaptureValue(result.get())))
+ setResultToLastSemantic();
return complete(syntactic);
}
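
As a reminder of the shape being built here, a rough sketch (not exact -ast-dump output) of the PseudoObjectExpr produced for a compound assignment like `obj.prop += 3`:

    PseudoObjectExpr
    |-syntactic: CompoundAssignOperator '+=' on (OVE_base . prop) and OVE_rhs
    `-semantic:  OVE_base <- obj       (captured receiver)
                 OVE_rhs  <- 3         (captured RHS, unless rewritten above)
                 getter call, e.g. [OVE_base prop]
                 setter call, e.g. [OVE_base setProp: getter + OVE_rhs]
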
@@ -490,9 +517,14 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
// Store that back into the result. The value stored is the result
// of a prefix operation.
- result = buildSet(result.get(), opcLoc, UnaryOperator::isPrefix(opcode));
+ result = buildSet(result.get(), opcLoc, UnaryOperator::isPrefix(opcode) &&
+ captureSetValueAsResult());
if (result.isInvalid()) return ExprError();
addSemanticExpr(result.get());
+ if (UnaryOperator::isPrefix(opcode) && !captureSetValueAsResult() &&
+ !result.get()->getType()->isVoidType() &&
+ (result.get()->isTypeDependent() || CanCaptureValue(result.get())))
+ setResultToLastSemantic();
UnaryOperator *syntactic =
new (S.Context) UnaryOperator(syntacticOp, opcode, resultType,
@@ -666,9 +698,9 @@ Expr *ObjCPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
// form to use the OVE as its base.
if (RefExpr->isObjectReceiver()) {
InstanceReceiver = capture(RefExpr->getBase());
-
- syntacticBase =
- ObjCPropertyRefRebuilder(S, InstanceReceiver).rebuild(syntacticBase);
+ syntacticBase = Rebuilder(S, [=](Expr *, unsigned) -> Expr * {
+ return InstanceReceiver;
+ }).rebuild(syntacticBase);
}
if (ObjCPropertyRefExpr *
@@ -745,16 +777,6 @@ ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
op = opResult.get();
assert(op && "successful assignment left argument invalid?");
}
- else if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(op)) {
- Expr *Initializer = OVE->getSourceExpr();
- // passing C++11 style initialized temporaries to objc++ properties
- // requires special treatment by removing OpaqueValueExpr so type
- // conversion takes place and adding the OpaqueValueExpr later on.
- if (isa<InitListExpr>(Initializer) &&
- Initializer->getType()->isVoidType()) {
- op = Initializer;
- }
- }
}
// Arguments.
@@ -996,11 +1018,19 @@ Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
// form to use the OVE as its base expression.
InstanceBase = capture(RefExpr->getBaseExpr());
InstanceKey = capture(RefExpr->getKeyExpr());
-
+
syntacticBase =
- ObjCSubscriptRefRebuilder(S, InstanceBase,
- InstanceKey).rebuild(syntacticBase);
-
+ Rebuilder(S, [=](Expr *, unsigned Idx) -> Expr * {
+ switch (Idx) {
+ case 0:
+ return InstanceBase;
+ case 1:
+ return InstanceKey;
+ default:
+ llvm_unreachable("Unexpected index for ObjCSubscriptExpr");
+ }
+ }).rebuild(syntacticBase);
+
return syntacticBase;
}
@@ -1402,11 +1432,30 @@ ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
// MSVC __declspec(property) references
//===----------------------------------------------------------------------===//
-Expr *MSPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
- Expr *NewBase = capture(RefExpr->getBaseExpr());
+MSPropertyRefExpr *
+MSPropertyOpBuilder::getBaseMSProperty(MSPropertySubscriptExpr *E) {
+ CallArgs.insert(CallArgs.begin(), E->getIdx());
+ Expr *Base = E->getBase()->IgnoreParens();
+ while (auto *MSPropSubscript = dyn_cast<MSPropertySubscriptExpr>(Base)) {
+ CallArgs.insert(CallArgs.begin(), MSPropSubscript->getIdx());
+ Base = MSPropSubscript->getBase()->IgnoreParens();
+ }
+ return cast<MSPropertyRefExpr>(Base);
+}
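
A hedged source-level sketch of the nesting this flattens, assuming hypothetical get_x/put_x accessors declared with MSVC's indexed-property extension:

    struct Grid {
      int get_x(int i, int j);
      void put_x(int i, int j, int v);
      __declspec(property(get = get_x, put = put_x)) int x[][];
    };
    // For `g.x[1][2] = 7`, the walk above collects CallArgs = {1, 2} and
    // returns the underlying MSPropertyRefExpr for `g.x`; buildSet then
    // emits g.put_x(1, 2, 7), and buildGet would emit g.get_x(1, 2).
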
- syntacticBase =
- MSPropertyRefRebuilder(S, NewBase).rebuild(syntacticBase);
+Expr *MSPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ InstanceBase = capture(RefExpr->getBaseExpr());
+ std::for_each(CallArgs.begin(), CallArgs.end(),
+ [this](Expr *&Arg) { Arg = capture(Arg); });
+ syntacticBase = Rebuilder(S, [=](Expr *, unsigned Idx) -> Expr * {
+ switch (Idx) {
+ case 0:
+ return InstanceBase;
+ default:
+ assert(Idx <= CallArgs.size());
+ return CallArgs[Idx - 1];
+ }
+ }).rebuild(syntacticBase);
return syntacticBase;
}
@@ -1423,10 +1472,10 @@ ExprResult MSPropertyOpBuilder::buildGet() {
GetterName.setIdentifier(II, RefExpr->getMemberLoc());
CXXScopeSpec SS;
SS.Adopt(RefExpr->getQualifierLoc());
- ExprResult GetterExpr = S.ActOnMemberAccessExpr(
- S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(),
- RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(),
- GetterName, nullptr);
+ ExprResult GetterExpr =
+ S.ActOnMemberAccessExpr(S.getCurScope(), InstanceBase, SourceLocation(),
+ RefExpr->isArrow() ? tok::arrow : tok::period, SS,
+ SourceLocation(), GetterName, nullptr);
if (GetterExpr.isInvalid()) {
S.Diag(RefExpr->getMemberLoc(),
diag::error_cannot_find_suitable_accessor) << 0 /* getter */
@@ -1434,9 +1483,8 @@ ExprResult MSPropertyOpBuilder::buildGet() {
return ExprError();
}
- MultiExprArg ArgExprs;
return S.ActOnCallExpr(S.getCurScope(), GetterExpr.get(),
- RefExpr->getSourceRange().getBegin(), ArgExprs,
+ RefExpr->getSourceRange().getBegin(), CallArgs,
RefExpr->getSourceRange().getEnd());
}
@@ -1453,10 +1501,10 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
SetterName.setIdentifier(II, RefExpr->getMemberLoc());
CXXScopeSpec SS;
SS.Adopt(RefExpr->getQualifierLoc());
- ExprResult SetterExpr = S.ActOnMemberAccessExpr(
- S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(),
- RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(),
- SetterName, nullptr);
+ ExprResult SetterExpr =
+ S.ActOnMemberAccessExpr(S.getCurScope(), InstanceBase, SourceLocation(),
+ RefExpr->isArrow() ? tok::arrow : tok::period, SS,
+ SourceLocation(), SetterName, nullptr);
if (SetterExpr.isInvalid()) {
S.Diag(RefExpr->getMemberLoc(),
diag::error_cannot_find_suitable_accessor) << 1 /* setter */
@@ -1464,7 +1512,8 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
return ExprError();
}
- SmallVector<Expr*, 1> ArgExprs;
+ SmallVector<Expr*, 4> ArgExprs;
+ ArgExprs.append(CallArgs.begin(), CallArgs.end());
ArgExprs.push_back(op);
return S.ActOnCallExpr(S.getCurScope(), SetterExpr.get(),
RefExpr->getSourceRange().getBegin(), ArgExprs,
@@ -1490,6 +1539,10 @@ ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
MSPropertyOpBuilder builder(*this, refExpr);
return builder.buildRValueOperation(E);
+ } else if (MSPropertySubscriptExpr *RefExpr =
+ dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildRValueOperation(E);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1516,6 +1569,10 @@ ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
MSPropertyOpBuilder builder(*this, refExpr);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else if (MSPropertySubscriptExpr *RefExpr
+ = dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1547,8 +1604,12 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr);
- return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (MSPropertySubscriptExpr *RefExpr
+ = dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
+ MSPropertyOpBuilder Builder(*this, RefExpr);
+ return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1558,29 +1619,11 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
/// values. Basically, undo the behavior of rebuildAndCaptureObject.
/// This should never operate in-place.
static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
- Expr *opaqueRef = E->IgnoreParens();
- if (ObjCPropertyRefExpr *refExpr
- = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- // Class and super property references don't have opaque values in them.
- if (refExpr->isClassReceiver() || refExpr->isSuperReceiver())
- return E;
-
- assert(refExpr->isObjectReceiver() && "Unknown receiver kind?");
- OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBase());
- return ObjCPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E);
- } else if (ObjCSubscriptRefExpr *refExpr
- = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr());
- OpaqueValueExpr *keyOVE = cast<OpaqueValueExpr>(refExpr->getKeyExpr());
- return ObjCSubscriptRefRebuilder(S, baseOVE->getSourceExpr(),
- keyOVE->getSourceExpr()).rebuild(E);
- } else if (MSPropertyRefExpr *refExpr
- = dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr());
- return MSPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E);
- } else {
- llvm_unreachable("unknown pseudo-object kind!");
- }
+ return Rebuilder(S,
+ [=](Expr *E, unsigned) -> Expr * {
+ return cast<OpaqueValueExpr>(E)->getSourceExpr();
+ })
+ .rebuild(E);
}
/// Given a pseudo-object expression, recreate what it looks like
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index c4f6fd8df1c4..e1b1a47e182b 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -195,7 +195,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (isUnevaluatedContext())
return;
- SourceLocation ExprLoc = E->IgnoreParens()->getExprLoc();
+ SourceLocation ExprLoc = E->IgnoreParenImpCasts()->getExprLoc();
// In most cases, we don't want to warn if the expression is written in a
// macro body, or if the macro comes from a system header. If the offending
// expression is a call to a function with the warn_unused_result attribute,
@@ -218,6 +218,15 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (isa<StmtExpr>(E) && Loc.isMacroID())
return;
+  // Check if this is the UNREFERENCED_PARAMETER macro from the Microsoft headers.
+ // That macro is frequently used to suppress "unused parameter" warnings,
+ // but its implementation makes clang's -Wunused-value fire. Prevent this.
+ if (isa<ParenExpr>(E->IgnoreImpCasts()) && Loc.isMacroID()) {
+ SourceLocation SpellLoc = Loc;
+ if (findMacroSpelling(SpellLoc, "UNREFERENCED_PARAMETER"))
+ return;
+ }
+
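
For context, the Microsoft macro being special-cased is defined along these lines (paraphrased from winnt.h):

    #define UNREFERENCED_PARAMETER(P) (P)
    // The expansion is a parenthesized, otherwise-unused read of P -- exactly
    // the pattern -Wunused-value flags, hence the spelling-based check above.
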
// Okay, we have an unused result. Depending on what the base expression is,
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
@@ -483,13 +492,6 @@ StmtResult
Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
Stmt *thenStmt, SourceLocation ElseLoc,
Stmt *elseStmt) {
- // If the condition was invalid, discard the if statement. We could recover
- // better by replacing it with a valid expr, but don't do that yet.
- if (!CondVal.get() && !CondVar) {
- getCurFunction()->setHasDroppedStmt();
- return StmtError();
- }
-
ExprResult CondResult(CondVal.release());
VarDecl *ConditionVar = nullptr;
@@ -497,22 +499,23 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
ConditionVar = cast<VarDecl>(CondVar);
CondResult = CheckConditionVariable(ConditionVar, IfLoc, true);
CondResult = ActOnFinishFullExpr(CondResult.get(), IfLoc);
- if (CondResult.isInvalid())
- return StmtError();
}
Expr *ConditionExpr = CondResult.getAs<Expr>();
- if (!ConditionExpr)
- return StmtError();
+ if (ConditionExpr) {
+ DiagnoseUnusedExprResult(thenStmt);
- DiagnoseUnusedExprResult(thenStmt);
+ if (!elseStmt) {
+ DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
+ diag::warn_empty_if_body);
+ }
- if (!elseStmt) {
- DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
- diag::warn_empty_if_body);
+ DiagnoseUnusedExprResult(elseStmt);
+ } else {
+    // Create a dummy Expr for the condition, for error recovery.
+ ConditionExpr = new (Context) OpaqueValueExpr(SourceLocation(),
+ Context.BoolTy, VK_RValue);
}
- DiagnoseUnusedExprResult(elseStmt);
-
return new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
thenStmt, ElseLoc, elseStmt);
}
@@ -698,8 +701,6 @@ static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
EnumValsTy::iterator &EI,
EnumValsTy::iterator &EIEnd,
const llvm::APSInt &Val) {
- bool FlagType = ED->hasAttr<FlagEnumAttr>();
-
if (const DeclRefExpr *DRE =
dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
@@ -711,7 +712,7 @@ static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
}
}
- if (FlagType) {
+ if (ED->hasAttr<FlagEnumAttr>()) {
return !S.IsValueInFlagEnum(ED, Val, false);
} else {
while (EI != EIEnd && EI->first < Val)
@@ -1349,7 +1350,7 @@ namespace {
}; // end class DeclExtractor
- // DeclMatcher checks to see if the decls are used in a non-evauluated
+ // DeclMatcher checks to see if the decls are used in a non-evaluated
// context.
class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
llvm::SmallPtrSetImpl<VarDecl*> &Decls;
@@ -1705,11 +1706,10 @@ Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
// If we have a forward-declared type, we can't do this check.
// Under ARC, it is an error not to have a forward-declared class.
if (iface &&
- RequireCompleteType(forLoc, QualType(objectType, 0),
- getLangOpts().ObjCAutoRefCount
- ? diag::err_arc_collection_forward
- : 0,
- collection)) {
+ (getLangOpts().ObjCAutoRefCount
+ ? RequireCompleteType(forLoc, QualType(objectType, 0),
+ diag::err_arc_collection_forward, collection)
+ : !isCompleteType(forLoc, QualType(objectType, 0)))) {
// Otherwise, if we have any useful type information, check that
// the type declares the appropriate method.
} else if (iface || !objectType->qual_empty()) {
@@ -1867,13 +1867,19 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
}
namespace {
+// An enum to distinguish whether a diagnostic concerns a call to begin() or a
+// call to end() in a range-based for loop.
+enum BeginEndFunction {
+ BEF_begin,
+ BEF_end
+};
/// Produce a note indicating which begin/end function was implicitly called
/// by a C++11 for-range statement. This is often not obvious from the code,
/// nor from the diagnostics produced when analysing the implicit expressions
/// required in a for-range statement.
void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
- Sema::BeginEndFunction BEF) {
+ BeginEndFunction BEF) {
CallExpr *CE = dyn_cast<CallExpr>(E);
if (!CE)
return;
@@ -1931,10 +1937,11 @@ static bool ObjCEnumerationCollection(Expr *Collection) {
///
/// The body of the loop is not available yet, since it cannot be analysed until
/// we have determined the type of the for-range-declaration.
-StmtResult
-Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
- Stmt *First, SourceLocation ColonLoc, Expr *Range,
- SourceLocation RParenLoc, BuildForRangeKind Kind) {
+StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
+ SourceLocation CoawaitLoc, Stmt *First,
+ SourceLocation ColonLoc, Expr *Range,
+ SourceLocation RParenLoc,
+ BuildForRangeKind Kind) {
if (!First)
return StmtError();
@@ -1956,6 +1963,13 @@ Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
return StmtError();
}
+ // Coroutines: 'for co_await' implicitly co_awaits its range.
+ if (CoawaitLoc.isValid()) {
+ ExprResult Coawait = ActOnCoawaitExpr(S, CoawaitLoc, Range);
+ if (Coawait.isInvalid()) return StmtError();
+ Range = Coawait.get();
+ }
+
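
A sketch of the surface syntax this enables, with `generator` and `task` standing in for whatever awaitable types the frontend is handed:

    task<void> consume(generator<int> g) {
      for co_await (int v : g)   // the range-init, and later each increment
        use(v);                  // of __begin, is implicitly co_await-ed
    }
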
// Build auto && __range = range-init
SourceLocation RangeLoc = Range->getLocStart();
VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
@@ -1977,7 +1991,7 @@ Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
return StmtError();
}
- return BuildCXXForRangeStmt(ForLoc, ColonLoc, RangeDecl.get(),
+ return BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc, RangeDecl.get(),
/*BeginEndDecl=*/nullptr, /*Cond=*/nullptr,
/*Inc=*/nullptr, DS, RParenLoc, Kind);
}
@@ -1991,7 +2005,7 @@ Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
/// BeginExpr and EndExpr are set and FRS_Success is returned on success;
/// CandidateSet and BEF are set and some non-success value is returned on
/// failure.
-static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
+static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef,
Expr *BeginRange, Expr *EndRange,
QualType RangeType,
VarDecl *BeginVar,
@@ -2000,7 +2014,7 @@ static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
OverloadCandidateSet *CandidateSet,
ExprResult *BeginExpr,
ExprResult *EndExpr,
- Sema::BeginEndFunction *BEF) {
+ BeginEndFunction *BEF) {
DeclarationNameInfo BeginNameInfo(
&SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc);
DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"),
@@ -2021,7 +2035,7 @@ static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
SourceLocation RangeLoc = BeginVar->getLocation();
- *BEF = BeginMemberLookup.empty() ? Sema::BEF_end : Sema::BEF_begin;
+ *BEF = BeginMemberLookup.empty() ? BEF_end : BEF_begin;
SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch)
<< RangeLoc << BeginRange->getType() << *BEF;
@@ -2035,29 +2049,35 @@ static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
}
- *BEF = Sema::BEF_begin;
+ *BEF = BEF_begin;
Sema::ForRangeStatus RangeStatus =
- SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, BeginVar,
- Sema::BEF_begin, BeginNameInfo,
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo,
BeginMemberLookup, CandidateSet,
BeginRange, BeginExpr);
- if (RangeStatus != Sema::FRS_Success)
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(BeginRange->getLocStart(), diag::note_in_for_range)
+ << ColonLoc << BEF_begin << BeginRange->getType();
return RangeStatus;
+ }
if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
diag::err_for_range_iter_deduction_failure)) {
NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
return Sema::FRS_DiagnosticIssued;
}
- *BEF = Sema::BEF_end;
+ *BEF = BEF_end;
RangeStatus =
- SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, EndVar,
- Sema::BEF_end, EndNameInfo,
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo,
EndMemberLookup, CandidateSet,
EndRange, EndExpr);
- if (RangeStatus != Sema::FRS_Success)
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(EndRange->getLocStart(), diag::note_in_for_range)
+ << ColonLoc << BEF_end << EndRange->getType();
return RangeStatus;
+ }
if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
diag::err_for_range_iter_deduction_failure)) {
NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
@@ -2071,6 +2091,7 @@ static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
/// and emit no diagnostics.
static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
SourceLocation ForLoc,
+ SourceLocation CoawaitLoc,
Stmt *LoopVarDecl,
SourceLocation ColonLoc,
Expr *Range,
@@ -2086,10 +2107,9 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
if (AdjustedRange.isInvalid())
return StmtResult();
- StmtResult SR =
- SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
- AdjustedRange.get(), RParenLoc,
- Sema::BFRK_Check);
+ StmtResult SR = SemaRef.ActOnCXXForRangeStmt(
+ S, ForLoc, CoawaitLoc, LoopVarDecl, ColonLoc, AdjustedRange.get(),
+ RParenLoc, Sema::BFRK_Check);
if (SR.isInvalid())
return StmtResult();
}
@@ -2099,8 +2119,8 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
// case there are any other (non-fatal) problems with it.
SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
<< Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
- return SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
- AdjustedRange.get(), RParenLoc,
+ return SemaRef.ActOnCXXForRangeStmt(S, ForLoc, CoawaitLoc, LoopVarDecl,
+ ColonLoc, AdjustedRange.get(), RParenLoc,
Sema::BFRK_Rebuild);
}
@@ -2122,10 +2142,20 @@ struct InvalidateOnErrorScope {
/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult
-Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
+Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
+ SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond,
Expr *Inc, Stmt *LoopVarDecl,
SourceLocation RParenLoc, BuildForRangeKind Kind) {
+ // FIXME: This should not be used during template instantiation. We should
+ // pick up the set of unqualified lookup results for the != and + operators
+ // in the initial parse.
+ //
+ // Testcase (accepts-invalid):
+ // template<typename T> void f() { for (auto x : T()) {} }
+ // namespace N { struct X { X begin(); X end(); int operator*(); }; }
+ // bool operator!=(N::X, N::X); void operator++(N::X);
+ // void g() { f<N::X>(); }
Scope *S = getCurScope();
DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
@@ -2225,9 +2255,9 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
} else {
OverloadCandidateSet CandidateSet(RangeLoc,
OverloadCandidateSet::CSK_Normal);
- Sema::BeginEndFunction BEFFailure;
+ BeginEndFunction BEFFailure;
ForRangeStatus RangeStatus =
- BuildNonArrayForRange(*this, S, BeginRangeRef.get(),
+ BuildNonArrayForRange(*this, BeginRangeRef.get(),
EndRangeRef.get(), RangeType,
BeginVar, EndVar, ColonLoc, &CandidateSet,
&BeginExpr, &EndExpr, &BEFFailure);
@@ -2252,6 +2282,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
// If building the range failed, try dereferencing the range expression
// unless a diagnostic was issued or the end function is problematic.
StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
+ CoawaitLoc,
LoopVarDecl, ColonLoc,
Range, RangeLoc,
RParenLoc);
@@ -2322,7 +2353,10 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
return StmtError();
IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
- IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
+ if (!IncrExpr.isInvalid() && CoawaitLoc.isValid())
+ IncrExpr = ActOnCoawaitExpr(S, CoawaitLoc, IncrExpr.get());
+ if (!IncrExpr.isInvalid())
+ IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
if (IncrExpr.isInvalid()) {
Diag(RangeLoc, diag::note_for_range_invalid_iterator)
<< RangeLoc << 2 << BeginRangeRef.get()->getType() ;
@@ -2361,7 +2395,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
return new (Context) CXXForRangeStmt(
RangeDS, cast_or_null<DeclStmt>(BeginEndDecl.get()), NotEqExpr.get(),
- IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, ColonLoc, RParenLoc);
+ IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, CoawaitLoc,
+ ColonLoc, RParenLoc);
}
 /// FinishObjCForCollectionStmt - Attach the body to an Objective-C foreach
@@ -2919,6 +2954,9 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (CurCap->HasImplicitReturnType || NRVOCandidate)
FunctionScopes.back()->Returns.push_back(Result);
+ if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
+ FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+
return Result;
}
@@ -2989,14 +3027,9 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
// statement with a non-type-dependent operand.
assert(AT->isDeduced() && "should have deduced to dependent type");
return false;
- } else if (RetExpr) {
- // If the deduction is for a return statement and the initializer is
- // a braced-init-list, the program is ill-formed.
- if (isa<InitListExpr>(RetExpr)) {
- Diag(RetExpr->getExprLoc(), diag::err_auto_fn_return_init_list);
- return true;
- }
+ }
+ if (RetExpr) {
// Otherwise, [...] deduce a value for U using the rules of template
// argument deduction.
DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced);
@@ -3035,8 +3068,11 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
// the program is ill-formed.
if (AT->isDeduced() && !FD->isInvalidDecl()) {
AutoType *NewAT = Deduced->getContainedAutoType();
- if (!FD->isDependentContext() &&
- !Context.hasSameType(AT->getDeducedType(), NewAT->getDeducedType())) {
+ CanQualType OldDeducedType = Context.getCanonicalFunctionResultType(
+ AT->getDeducedType());
+ CanQualType NewDeducedType = Context.getCanonicalFunctionResultType(
+ NewAT->getDeducedType());
+ if (!FD->isDependentContext() && OldDeducedType != NewDeducedType) {
const LambdaScopeInfo *LambdaSI = getCurLambda();
if (LambdaSI && LambdaSI->HasImplicitReturnType) {
Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
@@ -3179,7 +3215,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
// return (some void expression); is legal in C++.
else if (D != diag::ext_return_has_void_expr ||
- !getLangOpts().CPlusPlus) {
+ !getLangOpts().CPlusPlus) {
NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
int FunctionKind = 0;
@@ -3287,6 +3323,9 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (Result->getNRVOCandidate())
FunctionScopes.back()->Returns.push_back(Result);
+ if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
+ FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+
return Result;
}
@@ -3512,16 +3551,14 @@ public:
CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
CanQualType getFoundHandlerType() const { return FoundHandlerType; }
- static bool FindPublicBasesOfType(const CXXBaseSpecifier *S, CXXBasePath &,
- void *User) {
- auto &PBOT = *reinterpret_cast<CatchTypePublicBases *>(User);
+ bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) {
if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
- CatchHandlerType Check(S->getType(), PBOT.CheckAgainstPointer);
- auto M = PBOT.TypesToCheck;
+ CatchHandlerType Check(S->getType(), CheckAgainstPointer);
+ auto M = TypesToCheck;
auto I = M.find(Check);
if (I != M.end()) {
- PBOT.FoundHandler = I->second;
- PBOT.FoundHandlerType = PBOT.Ctx.getCanonicalType(S->getType());
+ FoundHandler = I->second;
+ FoundHandlerType = Ctx.getCanonicalType(S->getType());
return true;
}
}
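
This reflects CXXRecordDecl::lookupInBases moving from a function pointer plus `void *` cookie to an arbitrary callable; a sketch of the new call shape, assuming any callable with the matching signature (including lambdas) is accepted:

    //   bool matches(const CXXBaseSpecifier *S, CXXBasePath &P);
    RD->lookupInBases(
        [&](const CXXBaseSpecifier *S, CXXBasePath &P) { return check(S); },
        Paths);  // `check` is a hypothetical predicate
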
@@ -3589,8 +3626,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
CXXBasePaths Paths;
Paths.setOrigin(RD);
CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer());
- if (RD->lookupInBases(CatchTypePublicBases::FindPublicBasesOfType, &CTPB,
- Paths)) {
+ if (RD->lookupInBases(CTPB, Paths)) {
const CXXCatchStmt *Problem = CTPB.getFoundHandler();
if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) {
Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
@@ -3766,11 +3802,10 @@ static void buildCapturedStmtCaptureList(
continue;
}
- assert(Cap->isReferenceCapture() &&
- "non-reference capture not yet implemented");
-
Captures.push_back(CapturedStmt::Capture(Cap->getLocation(),
- CapturedStmt::VCK_ByRef,
+ Cap->isReferenceCapture()
+ ? CapturedStmt::VCK_ByRef
+ : CapturedStmt::VCK_ByCopy,
Cap->getVariable()));
CaptureInits.push_back(Cap->getInitExpr());
}
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 8e3e89f1e572..0d6e0f8e41b0 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -107,6 +107,37 @@ static bool CheckNakedParmReference(Expr *E, Sema &S) {
return false;
}
+/// \brief Returns true if the given expression is not compatible with inline
+/// assembly's memory constraint; false otherwise.
+static bool checkExprMemoryConstraintCompat(Sema &S, Expr *E,
+ TargetInfo::ConstraintInfo &Info,
+ bool is_input_expr) {
+ enum {
+ ExprBitfield = 0,
+ ExprVectorElt,
+ ExprGlobalRegVar,
+ ExprSafeType
+ } EType = ExprSafeType;
+
+ // Bitfields, vector elements and global register variables are not
+ // compatible.
+ if (E->refersToBitField())
+ EType = ExprBitfield;
+ else if (E->refersToVectorElement())
+ EType = ExprVectorElt;
+ else if (E->refersToGlobalRegisterVar())
+ EType = ExprGlobalRegVar;
+
+ if (EType != ExprSafeType) {
+ S.Diag(E->getLocStart(), diag::err_asm_non_addr_value_in_memory_constraint)
+ << EType << is_input_expr << Info.getConstraintStr()
+ << E->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
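
Hedged examples of the operands this rejects whenever the constraint allows memory (any "m"-class constraint):

    struct BF { int bf : 3; } s;
    typedef int v4si __attribute__((vector_size(16)));
    v4si vec;
    register int grv asm("ebx");  // global register variable (file scope)
    // Each of these would now produce
    // err_asm_non_addr_value_in_memory_constraint:
    //   asm("" :: "m"(s.bf));    // bit-field
    //   asm("" :: "m"(vec[0]));  // vector element
    //   asm("" :: "m"(grv));     // global register variable
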
StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
@@ -124,8 +155,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// The parser verifies that there is a string literal here.
assert(AsmString->isAscii());
- bool ValidateConstraints =
- DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl());
+  // If we're compiling a CUDA file and the function's attributes indicate that
+  // it's not for this side of the compilation, skip all the checks.
+ if (!DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl())) {
+ GCCAsmStmt *NS = new (Context) GCCAsmStmt(
+ Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs, Names,
+ Constraints, Exprs.data(), AsmString, NumClobbers, Clobbers, RParenLoc);
+ return NS;
+ }
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
@@ -136,8 +173,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
OutputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
- if (ValidateConstraints &&
- !Context.getTargetInfo().validateOutputConstraint(Info))
+ if (!Context.getTargetInfo().validateOutputConstraint(Info))
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_invalid_output_constraint)
<< Info.getConstraintStr());
@@ -154,13 +190,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (CheckNakedParmReference(OutputExpr, *this))
return StmtError();
- // Bitfield can't be referenced with a pointer.
- if (Info.allowsMemory() && OutputExpr->refersToBitField())
- return StmtError(Diag(OutputExpr->getLocStart(),
- diag::err_asm_bitfield_in_memory_constraint)
- << 1
- << Info.getConstraintStr()
- << OutputExpr->getSourceRange());
+ // Check that the output expression is compatible with memory constraint.
+ if (Info.allowsMemory() &&
+ checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
+ return StmtError();
OutputConstraintInfos.push_back(Info);
@@ -219,9 +252,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
InputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
- if (ValidateConstraints &&
- !Context.getTargetInfo().validateInputConstraint(
- OutputConstraintInfos.data(), NumOutputs, Info)) {
+ if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos,
+ Info)) {
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_invalid_input_constraint)
<< Info.getConstraintStr());
@@ -238,13 +270,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (CheckNakedParmReference(InputExpr, *this))
return StmtError();
- // Bitfield can't be referenced with a pointer.
- if (Info.allowsMemory() && InputExpr->refersToBitField())
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_asm_bitfield_in_memory_constraint)
- << 0
- << Info.getConstraintStr()
- << InputExpr->getSourceRange());
+ // Check that the input expression is compatible with memory constraint.
+ if (Info.allowsMemory() &&
+ checkExprMemoryConstraintCompat(*this, InputExpr, Info, true))
+ return StmtError();
// Only allow void types for memory constraints.
if (Info.allowsMemory() && !Info.allowsRegister()) {
@@ -260,8 +289,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
return StmtError(
Diag(InputExpr->getLocStart(), diag::err_asm_immediate_expected)
<< Info.getConstraintStr() << InputExpr->getSourceRange());
- if (Result.slt(Info.getImmConstantMin()) ||
- Result.sgt(Info.getImmConstantMax()))
+ if (!Info.isValidAsmImmediate(Result))
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_invalid_asm_value_for_constraint)
<< Result.toString(10) << Info.getConstraintStr()
@@ -392,6 +420,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
diag::err_asm_unexpected_constraint_alternatives)
<< NumAlternatives << AltCount);
}
+ SmallVector<size_t, 4> InputMatchedToOutput(OutputConstraintInfos.size(),
+ ~0U);
for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
StringRef ConstraintStr = Info.getConstraintStr();
@@ -413,6 +443,19 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
Expr *OutputExpr = Exprs[TiedTo];
Expr *InputExpr = Exprs[InputOpNo];
+ // Make sure no more than one input constraint matches each output.
+ assert(TiedTo < InputMatchedToOutput.size() && "TiedTo value out of range");
+ if (InputMatchedToOutput[TiedTo] != ~0U) {
+ Diag(NS->getInputExpr(i)->getLocStart(),
+ diag::err_asm_input_duplicate_match)
+ << TiedTo;
+ Diag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getLocStart(),
+ diag::note_asm_input_duplicate_first)
+ << TiedTo;
+ return StmtError();
+ }
+ InputMatchedToOutput[TiedTo] = i;
+
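
An illustration of the duplicate tie this now diagnoses, using the usual digit-matching constraints:

    int out, a, b;
    // "0" ties an input to output operand 0; two inputs tied to the same
    // output now trigger err_asm_input_duplicate_match:
    //   asm("" : "=r"(out) : "0"(a), "0"(b));
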
if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
continue;
@@ -504,6 +547,17 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
return NS;
}
+static void fillInlineAsmTypeInfo(const ASTContext &Context, QualType T,
+ llvm::InlineAsmIdentifierInfo &Info) {
+ // Compute the type size (and array length if applicable?).
+ Info.Type = Info.Size = Context.getTypeSizeInChars(T).getQuantity();
+ if (T->isArrayType()) {
+ const ArrayType *ATy = Context.getAsArrayType(T);
+ Info.Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
+ Info.Length = Info.Size / Info.Type;
+ }
+}
+
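
A quick worked example of the fields this helper fills in, assuming a 4-byte `int`:

    // int arr[8]:  Size = 8 * 4 = 32,  Type = 4 (element size),  Length = 8
    // short s:     Size = Type = 2;    Length is left at its default
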
ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
@@ -551,13 +605,7 @@ ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
return ExprError();
}
- // Compute the type size (and array length if applicable?).
- Info.Type = Info.Size = Context.getTypeSizeInChars(T).getQuantity();
- if (T->isArrayType()) {
- const ArrayType *ATy = Context.getAsArrayType(T);
- Info.Type = Context.getTypeSizeInChars(ATy->getElementType()).getQuantity();
- Info.Length = Info.Size / Info.Type;
- }
+ fillInlineAsmTypeInfo(Context, T, Info);
// We can work with the expression as long as it's not an r-value.
if (!Result.get()->isRValue())
@@ -569,47 +617,103 @@ ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc) {
Offset = 0;
+ SmallVector<StringRef, 2> Members;
+ Member.split(Members, ".");
+
LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
LookupOrdinaryName);
if (!LookupName(BaseResult, getCurScope()))
return true;
- if (!BaseResult.isSingleResult())
- return true;
+ LookupResult CurrBaseResult(BaseResult);
- const RecordType *RT = nullptr;
- NamedDecl *FoundDecl = BaseResult.getFoundDecl();
- if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
- RT = VD->getType()->getAs<RecordType>();
- else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
- MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
- RT = TD->getUnderlyingType()->getAs<RecordType>();
- } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
- RT = TD->getTypeForDecl()->getAs<RecordType>();
- if (!RT)
- return true;
+ for (StringRef NextMember : Members) {
- if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0))
- return true;
+ if (!CurrBaseResult.isSingleResult())
+ return true;
+
+ const RecordType *RT = nullptr;
+ NamedDecl *FoundDecl = CurrBaseResult.getFoundDecl();
+ if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
+ RT = VD->getType()->getAs<RecordType>();
+ else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
+ MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
+ RT = TD->getUnderlyingType()->getAs<RecordType>();
+ } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
+ RT = TD->getTypeForDecl()->getAs<RecordType>();
+ else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
+ RT = TD->getType()->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ if (RequireCompleteType(AsmLoc, QualType(RT, 0),
+ diag::err_asm_incomplete_type))
+ return true;
+
+ LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
+ SourceLocation(), LookupMemberName);
+
+ if (!LookupQualifiedName(FieldResult, RT->getDecl()))
+ return true;
+
+ // FIXME: Handle IndirectFieldDecl?
+ FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ return true;
+
+ CurrBaseResult = FieldResult;
+
+ const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
+ unsigned i = FD->getFieldIndex();
+ CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
+ Offset += (unsigned)Result.getQuantity();
+ }
+
+ return false;
+}
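
A hedged sketch of the nested-member form this now resolves, with offsets assuming 4-byte `int` and no padding:

    struct Inner { int a, b; };
    struct Outer { int pad; Inner in; };
    // LookupInlineAsmField("Outer", "in.b", Offset, Loc) splits "in.b" into
    // {"in", "b"} and accumulates the field offsets:
    //   offsetof(Outer, in) + offsetof(Inner, b) = 4 + 4 = 8
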
- LookupResult FieldResult(*this, &Context.Idents.get(Member), SourceLocation(),
+ExprResult
+Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member, unsigned &Offset,
+ llvm::InlineAsmIdentifierInfo &Info,
+ SourceLocation AsmLoc) {
+ Info.clear();
+
+ const RecordType *RT = E->getType()->getAs<RecordType>();
+ // FIXME: Diagnose this as field access into a scalar type.
+ if (!RT)
+ return ExprResult();
+
+ LookupResult FieldResult(*this, &Context.Idents.get(Member), AsmLoc,
LookupMemberName);
if (!LookupQualifiedName(FieldResult, RT->getDecl()))
- return true;
+ return ExprResult();
- // FIXME: Handle IndirectFieldDecl?
- FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
+ // Only normal and indirect field results will work.
+ ValueDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
if (!FD)
- return true;
+ FD = dyn_cast<IndirectFieldDecl>(FieldResult.getFoundDecl());
+ if (!FD)
+ return ExprResult();
- const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
- unsigned i = FD->getFieldIndex();
- CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
- Offset = (unsigned)Result.getQuantity();
+ Offset = (unsigned)Context.toCharUnitsFromBits(Context.getFieldOffset(FD))
+ .getQuantity();
- return false;
+ // Make an Expr to thread through OpDecl.
+ ExprResult Result = BuildMemberReferenceExpr(
+ E, E->getType(), AsmLoc, /*IsArrow=*/false, CXXScopeSpec(),
+ SourceLocation(), nullptr, FieldResult, nullptr, nullptr);
+ if (Result.isInvalid())
+ return Result;
+ Info.OpDecl = Result.get();
+
+ fillInlineAsmTypeInfo(Context, Result.get()->getType(), Info);
+
+ // Fields are "variables" as far as inline assembly is concerned.
+ Info.IsVarDecl = true;
+
+ return Result;
}
StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
@@ -646,7 +750,15 @@ LabelDecl *Sema::GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
// Create an internal name for the label. The name should not be a valid mangled
// name, and should be unique. We use a dot to make the name an invalid mangled
// name.
- OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__" << ExternalLabelName;
+ OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__";
+ for (auto it = ExternalLabelName.begin(); it != ExternalLabelName.end();
+ ++it) {
+ OS << *it;
+ if (*it == '$') {
+    if (*it == '$') {
+      // We escape '$' in asm strings by replacing it with "$$".
+ OS << '$';
+ }
+ }
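
A hypothetical example of the renaming, assuming a counter value of 0:

    // external label:  "foo$1"
    // internal name:   "__MSASMLABEL_.0__foo$$1"
    // The doubled '$' survives later asm-string printing, where "$$" emits
    // a literal '$'.
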
Label->setMSAsmLabel(OS.str());
}
if (AlwaysCreate) {
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index 5b71c11b5297..984bd078fa03 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -65,19 +65,32 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
return nullptr;
}
- LoopHintAttr::OptionType Option;
LoopHintAttr::Spelling Spelling;
- if (PragmaUnroll) {
- Option = ValueExpr ? LoopHintAttr::UnrollCount : LoopHintAttr::Unroll;
- Spelling = LoopHintAttr::Pragma_unroll;
- } else if (PragmaNoUnroll) {
- Option = LoopHintAttr::Unroll;
+ LoopHintAttr::OptionType Option;
+ LoopHintAttr::LoopHintState State;
+ if (PragmaNoUnroll) {
+ // #pragma nounroll
Spelling = LoopHintAttr::Pragma_nounroll;
+ Option = LoopHintAttr::Unroll;
+ State = LoopHintAttr::Disable;
+ } else if (PragmaUnroll) {
+ Spelling = LoopHintAttr::Pragma_unroll;
+ if (ValueExpr) {
+ // #pragma unroll N
+ Option = LoopHintAttr::UnrollCount;
+ State = LoopHintAttr::Numeric;
+ } else {
+ // #pragma unroll
+ Option = LoopHintAttr::Unroll;
+ State = LoopHintAttr::Enable;
+ }
} else {
+ // #pragma clang loop ...
+ Spelling = LoopHintAttr::Pragma_clang_loop;
assert(OptionLoc && OptionLoc->Ident &&
"Attribute must have valid option info.");
- IdentifierInfo *OptionInfo = OptionLoc->Ident;
- Option = llvm::StringSwitch<LoopHintAttr::OptionType>(OptionInfo->getName())
+ Option = llvm::StringSwitch<LoopHintAttr::OptionType>(
+ OptionLoc->Ident->getName())
.Case("vectorize", LoopHintAttr::Vectorize)
.Case("vectorize_width", LoopHintAttr::VectorizeWidth)
.Case("interleave", LoopHintAttr::Interleave)
@@ -85,31 +98,29 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
.Case("unroll", LoopHintAttr::Unroll)
.Case("unroll_count", LoopHintAttr::UnrollCount)
.Default(LoopHintAttr::Vectorize);
- Spelling = LoopHintAttr::Pragma_clang_loop;
- }
-
- LoopHintAttr::LoopHintState State = LoopHintAttr::Default;
- if (PragmaNoUnroll) {
- State = LoopHintAttr::Disable;
- } else if (Option == LoopHintAttr::VectorizeWidth ||
- Option == LoopHintAttr::InterleaveCount ||
- Option == LoopHintAttr::UnrollCount) {
- assert(ValueExpr && "Attribute must have a valid value expression.");
- if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart()))
- return nullptr;
- } else if (Option == LoopHintAttr::Vectorize ||
- Option == LoopHintAttr::Interleave ||
- Option == LoopHintAttr::Unroll) {
- // Default state is assumed if StateLoc is not specified, such as with
- // '#pragma unroll'.
- if (StateLoc && StateLoc->Ident) {
+ if (Option == LoopHintAttr::VectorizeWidth ||
+ Option == LoopHintAttr::InterleaveCount ||
+ Option == LoopHintAttr::UnrollCount) {
+ assert(ValueExpr && "Attribute must have a valid value expression.");
+ if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart()))
+ return nullptr;
+ State = LoopHintAttr::Numeric;
+ } else if (Option == LoopHintAttr::Vectorize ||
+ Option == LoopHintAttr::Interleave ||
+ Option == LoopHintAttr::Unroll) {
+ assert(StateLoc && StateLoc->Ident && "Loop hint must have an argument");
if (StateLoc->Ident->isStr("disable"))
State = LoopHintAttr::Disable;
else if (StateLoc->Ident->isStr("assume_safety"))
State = LoopHintAttr::AssumeSafety;
- else
+ else if (StateLoc->Ident->isStr("full"))
+ State = LoopHintAttr::Full;
+ else if (StateLoc->Ident->isStr("enable"))
State = LoopHintAttr::Enable;
- }
+ else
+ llvm_unreachable("bad loop hint argument");
+ } else
+ llvm_unreachable("bad loop hint");
}
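
Summarizing the (Spelling, Option, State) triples the rewritten logic produces, as a reference sketch derived from the branches above:

    // #pragma nounroll                      -> Pragma_nounroll,   Unroll,         Disable
    // #pragma unroll                        -> Pragma_unroll,     Unroll,         Enable
    // #pragma unroll 4                      -> Pragma_unroll,     UnrollCount,    Numeric
    // #pragma clang loop unroll(full)       -> Pragma_clang_loop, Unroll,         Full
    // #pragma clang loop vectorize_width(8) -> Pragma_clang_loop, VectorizeWidth, Numeric
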
return LoopHintAttr::CreateImplicit(S.Context, Spelling, Option, State,
@@ -139,9 +150,8 @@ CheckForIncompatibleAttributes(Sema &S,
if (!LH)
continue;
- int Option = LH->getOption();
- int Category;
- enum { Vectorize, Interleave, Unroll };
+ LoopHintAttr::OptionType Option = LH->getOption();
+ enum { Vectorize, Interleave, Unroll } Category;
switch (Option) {
case LoopHintAttr::Vectorize:
case LoopHintAttr::VectorizeWidth:
@@ -183,7 +193,8 @@ CheckForIncompatibleAttributes(Sema &S,
CategoryState.StateAttr->getState() == LoopHintAttr::Disable)) {
// Disable hints are not compatible with numeric hints of the same
// category. As a special case, numeric unroll hints are also not
- // compatible with "enable" form of the unroll pragma, unroll(full).
+    // compatible with the enable or full form of the unroll pragma, because
+    // those directives request full unrolling.
S.Diag(OptionLoc, diag::err_pragma_loop_compatibility)
<< /*Duplicate=*/false
<< CategoryState.StateAttr->getDiagnosticName(Policy)
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 035c37cfe6e9..6cc85883345d 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
@@ -208,7 +209,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
R.suppressDiagnostics();
} else {
assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
- isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD));
+ isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD) ||
+ isa<BuiltinTemplateDecl>(TD));
TemplateKind =
isa<VarTemplateDecl>(TD) ? TNK_Var_template : TNK_Type_template;
}
@@ -327,8 +329,8 @@ void Sema::LookupTemplateName(LookupResult &Found,
Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
Found.setLookupName(Corrected.getCorrection());
- if (Corrected.getCorrectionDecl())
- Found.addDecl(Corrected.getCorrectionDecl());
+ if (auto *ND = Corrected.getFoundDecl())
+ Found.addDecl(ND);
FilterAcceptableTemplateNames(Found);
if (!Found.empty()) {
if (LookupCtx) {
@@ -812,14 +814,15 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
- Decl **Params, unsigned NumParams,
+ ArrayRef<Decl *> Params,
SourceLocation RAngleLoc) {
if (ExportLoc.isValid())
Diag(ExportLoc, diag::warn_template_export_unsupported);
- return TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
- (NamedDecl**)Params, NumParams,
- RAngleLoc);
+ return TemplateParameterList::Create(
+ Context, TemplateLoc, LAngleLoc,
+ llvm::makeArrayRef((NamedDecl *const *)Params.data(), Params.size()),
+ RAngleLoc);
}
static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
@@ -1089,9 +1092,9 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
/*DelayTypeCreation=*/true);
SetNestedNameSpecifier(NewClass, SS);
if (NumOuterTemplateParamLists > 0)
- NewClass->setTemplateParameterListsInfo(Context,
- NumOuterTemplateParamLists,
- OuterTemplateParamLists);
+ NewClass->setTemplateParameterListsInfo(
+ Context, llvm::makeArrayRef(OuterTemplateParamLists,
+ NumOuterTemplateParamLists));
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
@@ -1936,7 +1939,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Fabricate an empty template parameter list for the invented header.
return TemplateParameterList::Create(Context, SourceLocation(),
- SourceLocation(), nullptr, 0,
+ SourceLocation(), None,
SourceLocation());
}
@@ -2017,6 +2020,58 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
}
}
+static QualType
+checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
+ const SmallVectorImpl<TemplateArgument> &Converted,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ ASTContext &Context = SemaRef.getASTContext();
+ switch (BTD->getBuiltinTemplateKind()) {
+ case BTK__make_integer_seq:
+ // Specializations of __make_integer_seq<S, T, N> are treated like
+ // S<T, 0, ..., N-1>.
+
+    // C++14 [intseq.intseq]p1:
+ // T shall be an integer type.
+ if (!Converted[1].getAsType()->isIntegralType(Context)) {
+ SemaRef.Diag(TemplateArgs[1].getLocation(),
+ diag::err_integer_sequence_integral_element_type);
+ return QualType();
+ }
+
+    // C++14 [intseq.make]p1:
+ // If N is negative the program is ill-formed.
+ TemplateArgument NumArgsArg = Converted[2];
+ llvm::APSInt NumArgs = NumArgsArg.getAsIntegral();
+ if (NumArgs < 0) {
+ SemaRef.Diag(TemplateArgs[2].getLocation(),
+ diag::err_integer_sequence_negative_length);
+ return QualType();
+ }
+
+ QualType ArgTy = NumArgsArg.getIntegralType();
+ TemplateArgumentListInfo SyntheticTemplateArgs;
+ // The type argument gets reused as the first template argument in the
+ // synthetic template argument list.
+ SyntheticTemplateArgs.addArgument(TemplateArgs[1]);
+ // Expand N into 0 ... N-1.
+ for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
+ I < NumArgs; ++I) {
+ TemplateArgument TA(Context, I, ArgTy);
+ Expr *E = SemaRef.BuildExpressionFromIntegralTemplateArgument(
+ TA, TemplateArgs[2].getLocation())
+ .getAs<Expr>();
+ SyntheticTemplateArgs.addArgument(
+ TemplateArgumentLoc(TemplateArgument(E), E));
+ }
+ // The first template argument will be reused as the template decl that
+ // our synthetic template arguments will be applied to.
+ return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
+ TemplateLoc, SyntheticTemplateArgs);
+ }
+ llvm_unreachable("unexpected BuiltinTemplateDecl!");
+}
+
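
What the builtin computes, as a library such as libc++ would use it:

    template <class T, T... Is> struct integer_seq {};
    // __make_integer_seq<integer_seq, int, 3> is treated exactly like the
    // written-out specialization integer_seq<int, 0, 1, 2>; N < 0 is
    // ill-formed, and a non-integral T is rejected per the checks above.
    using three = __make_integer_seq<integer_seq, int, 3>;
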
QualType Sema::CheckTemplateIdType(TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
@@ -2171,6 +2226,9 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
CanonType = Context.getTypeDeclType(Decl);
assert(isa<RecordType>(CanonType) &&
"type of non-dependent specialization is not a RecordType");
+ } else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
+ CanonType = checkBuiltinTemplateIdType(*this, BTD, Converted, TemplateLoc,
+ TemplateArgs);
}
// Build the fully-sugared type for this class template
@@ -2469,25 +2527,6 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
false, Converted))
return true;
- // Check that the type of this variable template specialization
- // matches the expected type.
- TypeSourceInfo *ExpectedDI;
- {
- // Do substitution on the type of the declaration
- TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack,
- Converted.data(), Converted.size());
- InstantiatingTemplate Inst(*this, TemplateKWLoc, VarTemplate);
- if (Inst.isInvalid())
- return true;
- VarDecl *Templated = VarTemplate->getTemplatedDecl();
- ExpectedDI =
- SubstType(Templated->getTypeSourceInfo(),
- MultiLevelTemplateArgumentList(TemplateArgList),
- Templated->getTypeSpecStartLoc(), Templated->getDeclName());
- }
- if (!ExpectedDI)
- return true;
-
// Find the variable template (partial) specialization declaration that
// corresponds to these arguments.
if (IsPartialSpecialization) {
@@ -2710,7 +2749,8 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
typedef PartialSpecMatchResult MatchResult;
SmallVector<MatchResult, 4> Matched;
SourceLocation PointOfInstantiation = TemplateNameLoc;
- TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation);
+ TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation,
+ /*ForTakingAddress=*/false);
// 1. Attempt to find the closest partial specialization that this
// specializes, if any.
@@ -3242,7 +3282,8 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
TemplateArgLists.addOuterTemplateArguments(None);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext ConstantEvaluated(SemaRef,
+ Sema::ConstantEvaluated);
return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
}
@@ -3733,9 +3774,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// We're done with this parameter pack. Pack up its arguments and add
// them to the list.
Converted.push_back(
- TemplateArgument::CreatePackCopy(Context,
- ArgumentPack.data(),
- ArgumentPack.size()));
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
ArgumentPack.clear();
// This argument is assigned to the next parameter.
@@ -3816,10 +3855,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// If we're checking a partial template argument list, we're done.
if (PartialTemplateArgs) {
if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
- Converted.push_back(TemplateArgument::CreatePackCopy(Context,
- ArgumentPack.data(),
- ArgumentPack.size()));
-
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
+
return false;
}
@@ -3835,9 +3873,8 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
if (Param + 1 != ParamEnd)
return true;
- Converted.push_back(TemplateArgument::CreatePackCopy(Context,
- ArgumentPack.data(),
- ArgumentPack.size()));
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context, ArgumentPack));
ArgumentPack.clear();
++Param;
@@ -3946,7 +3983,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// No problems found with the new argument list, propagate changes back
// to caller.
- TemplateArgs = NewArgs;
+ TemplateArgs = std::move(NewArgs);
return false;
}
@@ -4245,7 +4282,11 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg) {
if (Arg->isValueDependent() || Arg->isTypeDependent())
return NPV_NotNullPointer;
-
+
+ if (!S.isCompleteType(Arg->getExprLoc(), ParamType))
+ llvm_unreachable(
+ "Incomplete parameter type in isNullPointerValueTemplateArgument!");
+
if (!S.getLangOpts().CPlusPlus11)
return NPV_NotNullPointer;
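A sketch of the kind of argument this function classifies, under the completeness requirement asserted above (illustrative only):

  struct X { int n; };
  template <int X::*P> struct M {};
  M<nullptr> m;  // C++11: a null pointer value as a pointer-to-member argument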
@@ -4693,8 +4734,6 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
/*isNullPtr*/true);
- if (S.Context.getTargetInfo().getCXXABI().isMicrosoft())
- S.RequireCompleteType(Arg->getExprLoc(), ParamType, 0);
return false;
case NPV_NotNullPointer:
break;
@@ -6311,9 +6350,8 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
PrevPartial);
SetNestedNameSpecifier(Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
- Partial->setTemplateParameterListsInfo(Context,
- TemplateParameterLists.size() - 1,
- TemplateParameterLists.data());
+ Partial->setTemplateParameterListsInfo(
+ Context, TemplateParameterLists.drop_back(1));
}
if (!PrevPartial)
@@ -6367,14 +6405,23 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
SetNestedNameSpecifier(Specialization, SS);
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
- TemplateParameterLists.size(),
- TemplateParameterLists.data());
+ TemplateParameterLists);
}
if (!PrevDecl)
ClassTemplate->AddSpecialization(Specialization, InsertPos);
- CanonType = Context.getTypeDeclType(Specialization);
+ if (CurContext->isDependentContext()) {
+ // -fms-extensions permits specialization of nested classes without
+ // fully specializing the outer class(es).
+ assert(getLangOpts().MicrosoftExt &&
+ "Only possible with -fms-extensions!");
+ TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(
+ CanonTemplate, Converted.data(), Converted.size());
+ } else {
+ CanonType = Context.getTypeDeclType(Specialization);
+ }
}
// C++ [temp.expl.spec]p6:
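A hypothetical example of the dependent-context case handled above; it is only reachable under -fms-extensions:

  template <typename T> struct Outer {
    template <typename U> struct Inner {};
    template <> struct Inner<int> {};  // nested specialization without
                                       // specializing Outer (MS extension)
  };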
@@ -6497,24 +6544,6 @@ Decl *Sema::ActOnTemplateDeclarator(Scope *S,
return NewDecl;
}
-Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
- MultiTemplateParamsArg TemplateParameterLists,
- Declarator &D) {
- assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
-
- if (FTI.hasPrototype) {
- // FIXME: Diagnose arguments without names in C.
- }
-
- Scope *ParentScope = FnBodyScope->getParent();
-
- D.setFunctionDefinitionKind(FDK_Definition);
- Decl *DP = HandleDeclarator(ParentScope, D,
- TemplateParameterLists);
- return ActOnStartOfFunctionDef(FnBodyScope, DP);
-}
-
/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
@@ -6795,7 +6824,11 @@ bool Sema::CheckFunctionTemplateSpecialization(
// The set of function template specializations that could match this
// explicit function template specialization.
UnresolvedSet<8> Candidates;
- TemplateSpecCandidateSet FailedCandidates(FD->getLocation());
+ TemplateSpecCandidateSet FailedCandidates(FD->getLocation(),
+ /*ForTakingAddress=*/false);
+
+ llvm::SmallDenseMap<FunctionDecl *, TemplateArgumentListInfo, 8>
+ ConvertedTemplateArgs;
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
@@ -6826,6 +6859,10 @@ bool Sema::CheckFunctionTemplateSpecialization(
}
}
+ TemplateArgumentListInfo Args;
+ if (ExplicitTemplateArgs)
+ Args = *ExplicitTemplateArgs;
+
// C++ [temp.expl.spec]p11:
// A trailing template-argument can be left unspecified in the
// template-id naming an explicit function template specialization
@@ -6837,7 +6874,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
FunctionDecl *Specialization = nullptr;
if (TemplateDeductionResult TDK = DeduceTemplateArguments(
cast<FunctionTemplateDecl>(FunTmpl->getFirstDecl()),
- ExplicitTemplateArgs, FT, Specialization, Info)) {
+ ExplicitTemplateArgs ? &Args : nullptr, FT, Specialization, Info)) {
// Template argument deduction failed; record why it failed, so
// that we can provide nifty diagnostics.
FailedCandidates.addCandidate()
@@ -6848,6 +6885,8 @@ bool Sema::CheckFunctionTemplateSpecialization(
}
// Record this candidate.
+ if (ExplicitTemplateArgs)
+ ConvertedTemplateArgs[Specialization] = std::move(Args);
Candidates.addDecl(Specialization, I.getAccess());
}
}
@@ -6926,10 +6965,10 @@ bool Sema::CheckFunctionTemplateSpecialization(
// Take copies of (semantic and syntactic) template argument lists.
const TemplateArgumentList* TemplArgs = new (Context)
TemplateArgumentList(Specialization->getTemplateSpecializationArgs());
- FD->setFunctionTemplateSpecialization(Specialization->getPrimaryTemplate(),
- TemplArgs, /*InsertPos=*/nullptr,
- SpecInfo->getTemplateSpecializationKind(),
- ExplicitTemplateArgs);
+ FD->setFunctionTemplateSpecialization(
+ Specialization->getPrimaryTemplate(), TemplArgs, /*InsertPos=*/nullptr,
+ SpecInfo->getTemplateSpecializationKind(),
+ ExplicitTemplateArgs ? &ConvertedTemplateArgs[Specialization] : nullptr);
// The "previous declaration" for this function template specialization is
// the prior function template specialization.
@@ -7429,11 +7468,16 @@ Sema::ActOnExplicitInstantiation(Scope *S,
}
}
+ // Set the template specialization kind. Make sure it is set before
+ // instantiating the members, which will trigger ASTConsumer callbacks.
+ Specialization->setTemplateSpecializationKind(TSK);
InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK);
+ } else {
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
}
- // Set the template specialization kind.
- Specialization->setTemplateSpecializationKind(TSK);
return Specialization;
}
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index ae8157e70a60..de04c8a7f042 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -679,7 +679,7 @@ public:
new (S.Context) TemplateArgument[Pack.New.size()];
std::copy(Pack.New.begin(), Pack.New.end(), ArgumentPack);
NewPack = DeducedTemplateArgument(
- TemplateArgument(ArgumentPack, Pack.New.size()),
+ TemplateArgument(llvm::makeArrayRef(ArgumentPack, Pack.New.size())),
Pack.New[0].wasDeducedFromArrayBound());
}
@@ -1440,7 +1440,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// We cannot inspect base classes as part of deduction when the type
// is incomplete, so either instantiate any templates necessary to
// complete the type, or skip over it if it cannot be completed.
- if (S.RequireCompleteType(Info.getLocation(), Arg, 0))
+ if (!S.isCompleteType(Info.getLocation(), Arg))
return Result;
// Use data recursion to crawl through the list of base classes.
@@ -1517,10 +1517,19 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (!MemPtrArg)
return Sema::TDK_NonDeducedMismatch;
+ QualType ParamPointeeType = MemPtrParam->getPointeeType();
+ if (ParamPointeeType->isFunctionType())
+ S.adjustMemberFunctionCC(ParamPointeeType, /*IsStatic=*/true,
+ /*IsCtorOrDtor=*/false, Info.getLocation());
+ QualType ArgPointeeType = MemPtrArg->getPointeeType();
+ if (ArgPointeeType->isFunctionType())
+ S.adjustMemberFunctionCC(ArgPointeeType, /*IsStatic=*/true,
+ /*IsCtorOrDtor=*/false, Info.getLocation());
+
if (Sema::TemplateDeductionResult Result
= DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- MemPtrParam->getPointeeType(),
- MemPtrArg->getPointeeType(),
+ ParamPointeeType,
+ ArgPointeeType,
Info, Deduced,
TDF & TDF_IgnoreQualifiers))
return Result;
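A sketch of the member-pointer deduction this unblocks on x86 Microsoft targets (hypothetical example):

  struct A { void f(); };                      // implicitly __thiscall
  template <typename C> void g(void (C::*)()); // pointee written with default CC
  void h() { g(&A::f); }  // both pointee types get the same CC adjustment
                          // before deduction compares them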
@@ -2075,9 +2084,8 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
}
// Create the resulting argument pack.
- Output.push_back(TemplateArgument::CreatePackCopy(S.Context,
- PackedArgsBuilder.data(),
- PackedArgsBuilder.size()));
+ Output.push_back(
+ TemplateArgument::CreatePackCopy(S.Context, PackedArgsBuilder));
return false;
}
@@ -2730,7 +2738,7 @@ CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
return false;
if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
- S.IsDerivedFrom(A, DeducedA))
+ S.IsDerivedFrom(SourceLocation(), A, DeducedA))
return false;
return true;
@@ -2850,7 +2858,8 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
&NumExplicitArgs)
== Param) {
- Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs));
+ Builder.push_back(TemplateArgument(
+ llvm::makeArrayRef(ExplicitArgs, NumExplicitArgs)));
// Forget the partially-substituted pack; its substitution is now
// complete.
@@ -3028,7 +3037,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// Gather the explicit template arguments, if any.
TemplateArgumentListInfo ExplicitTemplateArgs;
if (Ovl->hasExplicitTemplateArgs())
- Ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
+ Ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
QualType Match;
for (UnresolvedSetIterator I = Ovl->decls_begin(),
E = Ovl->decls_end(); I != E; ++I) {
@@ -3123,8 +3132,10 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
if (ParamRefType) {
// If the argument has incomplete array type, try to complete its type.
- if (ArgType->isIncompleteArrayType() && !S.RequireCompleteExprType(Arg, 0))
+ if (ArgType->isIncompleteArrayType()) {
+ S.completeExprArrayBound(Arg);
ArgType = Arg->getType();
+ }
// C++0x [temp.deduct.call]p3:
// If P is an rvalue reference to a cv-unqualified template
@@ -3203,24 +3214,63 @@ DeduceFromInitializerList(Sema &S, TemplateParameterList *TemplateParams,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned TDF, Sema::TemplateDeductionResult &Result) {
- // If the argument is an initializer list then the parameter is an undeduced
- // context, unless the parameter type is (reference to cv)
- // std::initializer_list<P'>, in which case deduction is done for each element
- // of the initializer list as-if it were an argument in a function call, and
- // the result is the deduced type if it's the same for all elements.
- QualType X;
- if (!S.isStdInitializerList(AdjustedParamType, &X))
+
+ // C++ [temp.deduct.call]p1 (post CWG-1591):
+ // If removing references and cv-qualifiers from P gives
+ // std::initializer_list<P0> or P0[N] for some P0 and N and the argument is a
+ // non-empty initializer list (8.5.4), then deduction is performed instead for
+ // each element of the initializer list, taking P0 as a function template
+ // parameter type and the initializer element as its argument, and in the
+ // P0[N] case, if N is a non-type template parameter, N is deduced from the
+ // length of the initializer list. Otherwise, an initializer list argument
+ // causes the parameter to be considered a non-deduced context.
+
+ const bool IsConstSizedArray = AdjustedParamType->isConstantArrayType();
+
+ const bool IsDependentSizedArray =
+ !IsConstSizedArray && AdjustedParamType->isDependentSizedArrayType();
+
+ QualType ElTy; // The element type of the std::initializer_list or the array.
+
+ const bool IsSTDList = !IsConstSizedArray && !IsDependentSizedArray &&
+ S.isStdInitializerList(AdjustedParamType, &ElTy);
+
+ if (!IsConstSizedArray && !IsDependentSizedArray && !IsSTDList)
return false;
Result = Sema::TDK_Success;
-
- // Recurse down into the init list.
- for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
- if ((Result = DeduceTemplateArgumentByListElement(
- S, TemplateParams, X, ILE->getInit(i), Info, Deduced, TDF)))
- return true;
+ // If we are not deducing against the 'T' in a std::initializer_list<T>, then
+ // deduce against the 'T' in T[N].
+ if (ElTy.isNull()) {
+ assert(!IsSTDList);
+ ElTy = S.Context.getAsArrayType(AdjustedParamType)->getElementType();
+ }
+ // Deduction only needs to be done for dependent types.
+ if (ElTy->isDependentType()) {
+ for (Expr *E : ILE->inits()) {
+ if ((Result = DeduceTemplateArgumentByListElement(S, TemplateParams, ElTy,
+ E, Info, Deduced, TDF)))
+ return true;
+ }
}
+ if (IsDependentSizedArray) {
+ const DependentSizedArrayType *ArrTy =
+ S.Context.getAsDependentSizedArrayType(AdjustedParamType);
+ // Determine whether the array bound is something we can deduce.
+ if (NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(ArrTy->getSizeExpr())) {
+ // We can perform template argument deduction for the given non-type
+ // template parameter.
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument at depth > 0");
+ llvm::APInt Size(S.Context.getIntWidth(NTTP->getType()),
+ ILE->getNumInits());
+ Result = DeduceNonTypeTemplateArgument(
+ S, NTTP, llvm::APSInt(Size), NTTP->getType(),
+ /*ArrayBound=*/true, Info, Deduced);
+ }
+ }
return true;
}
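An illustrative call exercising the post-CWG1591 paths above (example only, not part of the patch):

  template <typename T, unsigned N> void f(const T (&)[N]);
  void use() { f({1, 2, 3}); }  // deduces T = int, N = 3 from the list;
                                // an empty list stays a non-deduced context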
@@ -3908,7 +3958,7 @@ namespace {
!Replacement.isNull() && Replacement->isDependentType();
QualType Result =
SemaRef.Context.getAutoType(Dependent ? QualType() : Replacement,
- TL.getTypePtr()->isDecltypeAuto(),
+ TL.getTypePtr()->getKeyword(),
Dependent);
AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
@@ -3976,6 +4026,11 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
if (Result.isNull())
return DAR_FailedAlreadyDiagnosed;
return DAR_Succeeded;
+ } else if (!getLangOpts().CPlusPlus) {
+ if (isa<InitListExpr>(Init)) {
+ Diag(Init->getLocStart(), diag::err_auto_init_list_from_c);
+ return DAR_FailedAlreadyDiagnosed;
+ }
}
}
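What the new C-mode check rejects (illustrative GNU C):

  __auto_type ok = 42;          /* fine: deduced as int */
  /* __auto_type bad = {1, 2};     error (err_auto_init_list_from_c) */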
@@ -3989,8 +4044,8 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
nullptr, false, false);
QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
NamedDecl *TemplParamPtr = TemplParam;
- FixedSizeTemplateParameterList<1> TemplateParams(Loc, Loc, &TemplParamPtr,
- Loc);
+ FixedSizeTemplateParameterListStorage<1> TemplateParamsSt(
+ Loc, Loc, TemplParamPtr, Loc);
QualType FuncParam = SubstituteAutoTransform(*this, TemplArg).Apply(Type);
assert(!FuncParam.isNull() &&
@@ -4007,20 +4062,24 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
if (InitList) {
for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
- if (DeduceTemplateArgumentByListElement(*this, &TemplateParams,
- TemplArg,
- InitList->getInit(i),
+ if (DeduceTemplateArgumentByListElement(*this, TemplateParamsSt.get(),
+ TemplArg, InitList->getInit(i),
Info, Deduced, TDF))
return DAR_Failed;
}
} else {
- if (AdjustFunctionParmAndArgTypesForDeduction(*this, &TemplateParams,
- FuncParam, InitType, Init,
- TDF))
+ if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
+ Diag(Loc, diag::err_auto_bitfield);
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
+ if (AdjustFunctionParmAndArgTypesForDeduction(
+ *this, TemplateParamsSt.get(), FuncParam, InitType, Init, TDF))
return DAR_Failed;
- if (DeduceTemplateArgumentsByTypeMatch(*this, &TemplateParams, FuncParam,
- InitType, Info, Deduced, TDF))
+ if (DeduceTemplateArgumentsByTypeMatch(*this, TemplateParamsSt.get(),
+ FuncParam, InitType, Info, Deduced,
+ TDF))
return DAR_Failed;
}
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index c1961e516ddf..fb7fc109d2e9 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -817,14 +817,6 @@ namespace {
QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
SubstTemplateTypeParmPackTypeLoc TL);
- ExprResult TransformCallExpr(CallExpr *CE) {
- getSema().CallsUndergoingInstantiation.push_back(CE);
- ExprResult Result =
- TreeTransform<TemplateInstantiator>::TransformCallExpr(CE);
- getSema().CallsUndergoingInstantiation.pop_back();
- return Result;
- }
-
ExprResult TransformLambdaExpr(LambdaExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
@@ -1231,7 +1223,7 @@ TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
// Transform each of the parameter expansions into the corresponding
// parameters in the instantiation of the function decl.
- SmallVector<Decl *, 8> Parms;
+ SmallVector<ParmVarDecl *, 8> Parms;
Parms.reserve(E->getNumExpansions());
for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
I != End; ++I) {
@@ -1682,15 +1674,17 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg()) {
FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
- CXXRecordDecl *ClassD = dyn_cast<CXXRecordDecl>(OwningFunc->getDeclContext());
- if (ClassD && ClassD->isLocalClass() && !ClassD->isLambda()) {
- // If this is a method of a local class, as per DR1484 its default
- // arguments must be instantiated.
- Sema::ContextRAII SavedContext(*this, ClassD);
+ if (OwningFunc->isLexicallyWithinFunctionOrMethod()) {
+ // Instantiate default arguments for methods of local classes (DR1484)
+ // and non-defining declarations.
+ Sema::ContextRAII SavedContext(*this, OwningFunc);
LocalInstantiationScope Local(*this);
ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
- if (NewArg.isUsable())
- NewParm->setDefaultArg(NewArg.get());
+ if (NewArg.isUsable()) {
+ // It would be nice if we still had the location of the '='; approximate
+ // it with the start of the instantiated default argument.
+ SourceLocation EqualLoc = NewArg.get()->getLocStart();
+ SetParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ }
} else {
// FIXME: if we non-lazily instantiated non-dependent default args for
// non-dependent parameter types we could remove a bunch of duplicate
@@ -1843,9 +1837,7 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
Invalid = true;
}
- if (!Invalid &&
- AttachBaseSpecifiers(Instantiation, InstantiatedBases.data(),
- InstantiatedBases.size()))
+ if (!Invalid && AttachBaseSpecifiers(Instantiation, InstantiatedBases))
Invalid = true;
return Invalid;
@@ -2050,7 +2042,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
// Default arguments are parsed, if not instantiated. We can go instantiate
// default arg exprs for default constructors if necessary now.
- ActOnFinishCXXMemberDefaultArgs(Instantiation);
+ ActOnFinishCXXNonNestedClass(Instantiation);
// Instantiate late parsed attributes, and attach them to their decls.
// See Sema::InstantiateAttrs
@@ -2672,16 +2664,17 @@ ExprResult Sema::SubstInitializer(Expr *Init,
return Instantiator.TransformInitializer(Init, CXXDirectInit);
}
-bool Sema::SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
+bool Sema::SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs) {
- if (NumExprs == 0)
+ if (Exprs.empty())
return false;
TemplateInstantiator Instantiator(*this, TemplateArgs,
SourceLocation(),
DeclarationName());
- return Instantiator.TransformExprs(Exprs, NumExprs, IsCall, Outputs);
+ return Instantiator.TransformExprs(Exprs.data(), Exprs.size(),
+ IsCall, Outputs);
}
NestedNameSpecifierLoc
@@ -2806,14 +2799,14 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
#endif
Stored = Inst;
} else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>()) {
- Pack->push_back(Inst);
+ Pack->push_back(cast<ParmVarDecl>(Inst));
} else {
assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
}
}
-void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
- Decl *Inst) {
+void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
+ ParmVarDecl *Inst) {
D = getCanonicalParmVarDecl(D);
DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
Pack->push_back(Inst);
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index aff2d1c96676..7a452af77839 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -813,6 +813,14 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
Enum->setAccess(D->getAccess());
// Forward the mangling number from the template to the instantiated decl.
SemaRef.Context.setManglingNumber(Enum, SemaRef.Context.getManglingNumber(D));
+ // See if the old tag was defined along with a declarator.
+ // If so, mark the new tag as being associated with that declarator.
+ if (DeclaratorDecl *DD = SemaRef.Context.getDeclaratorForUnnamedTagDecl(D))
+ SemaRef.Context.addDeclaratorForUnnamedTagDecl(Enum, DD);
+ // See if the old tag was defined along with a typedef.
+ // If so, mark the new tag as being associated with that typedef.
+ if (TypedefNameDecl *TND = SemaRef.Context.getTypedefNameForUnnamedTagDecl(D))
+ SemaRef.Context.addTypedefNameForUnnamedTagDecl(Enum, TND);
if (SubstQualifier(D, Enum)) return nullptr;
Owner->addDecl(Enum);
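A sketch of the association these hunks preserve across instantiation (hypothetical):

  template <typename T> struct Holder {
    typedef enum { Red, Green } Color;  // unnamed tag named via its typedef
  };
  Holder<int>::Color c = Holder<int>::Red;  // instantiated tag keeps the link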
@@ -827,7 +835,8 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
SemaRef.SubstType(TI->getType(), TemplateArgs,
UnderlyingLoc, DeclarationName());
SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(),
- DefnUnderlying, Enum);
+ DefnUnderlying,
+ /*EnumUnderlyingIsImplicit=*/false, Enum);
}
}
@@ -913,6 +922,11 @@ Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
llvm_unreachable("EnumConstantDecls can only occur within EnumDecls.");
}
+Decl *
+TemplateDeclInstantiator::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
+ llvm_unreachable("BuiltinTemplateDecls cannot be instantiated.");
+}
+
Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
bool isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
@@ -1143,6 +1157,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateDecl(VarTemplateDecl *D) {
VarDecl *VarInst =
cast_or_null<VarDecl>(VisitVarDecl(Pattern,
/*InstantiatingVarTemplate=*/true));
+ if (!VarInst) return nullptr;
DeclContext *DC = Owner;
@@ -1297,6 +1312,16 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
SemaRef.Context.setManglingNumber(Record,
SemaRef.Context.getManglingNumber(D));
+ // See if the old tag was defined along with a declarator.
+ // If so, mark the new tag as being associated with that declarator.
+ if (DeclaratorDecl *DD = SemaRef.Context.getDeclaratorForUnnamedTagDecl(D))
+ SemaRef.Context.addDeclaratorForUnnamedTagDecl(Record, DD);
+
+ // See if the old tag was defined along with a typedef.
+ // If so, mark the new tag as being associated with that typedef.
+ if (TypedefNameDecl *TND = SemaRef.Context.getTypedefNameForUnnamedTagDecl(D))
+ SemaRef.Context.addTypedefNameForUnnamedTagDecl(Record, TND);
+
Owner->addDecl(Record);
// DR1484 clarifies that the members of a local class are instantiated as part
@@ -1657,7 +1682,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
SmallVector<TemplateParameterList *, 4> TempParamLists;
unsigned NumTempParamLists = 0;
if (isFriend && (NumTempParamLists = D->getNumTemplateParameterLists())) {
- TempParamLists.set_size(NumTempParamLists);
+ TempParamLists.resize(NumTempParamLists);
for (unsigned I = 0; I != NumTempParamLists; ++I) {
TemplateParameterList *TempParams = D->getTemplateParameterList(I);
TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
@@ -1809,9 +1834,9 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
// context (which will be a namespace scope) as the template.
if (isFriend) {
if (NumTempParamLists)
- Method->setTemplateParameterListsInfo(SemaRef.Context,
- NumTempParamLists,
- TempParamLists.data());
+ Method->setTemplateParameterListsInfo(
+ SemaRef.Context,
+ llvm::makeArrayRef(TempParamLists.data(), NumTempParamLists));
Method->setLexicalDeclContext(Owner);
Method->setObjectOfFriendDecl();
@@ -2742,7 +2767,7 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
TemplateParameterList *InstL
= TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
- L->getLAngleLoc(), &Params.front(), N,
+ L->getLAngleLoc(), Params,
L->getRAngleLoc());
return InstL;
}
@@ -3246,16 +3271,11 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
// exception specification.
// DR1484: Local classes and their members are instantiated along with the
// containing function.
- bool RequireInstantiation = false;
- if (CXXRecordDecl *Cls = dyn_cast<CXXRecordDecl>(Tmpl->getDeclContext())) {
- if (Cls->isLocalClass())
- RequireInstantiation = true;
- }
if (SemaRef.getLangOpts().CPlusPlus11 &&
EPI.ExceptionSpec.Type != EST_None &&
EPI.ExceptionSpec.Type != EST_DynamicNone &&
EPI.ExceptionSpec.Type != EST_BasicNoexcept &&
- !RequireInstantiation) {
+ !Tmpl->isLexicallyWithinFunctionOrMethod()) {
FunctionDecl *ExceptionSpecTemplate = Tmpl;
if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;
@@ -3619,19 +3639,6 @@ void Sema::BuildVariableInstantiation(
NewVar->setReferenced(OldVar->isReferenced());
}
- // See if the old variable had a type-specifier that defined an anonymous tag.
- // If it did, mark the new variable as being the declarator for the new
- // anonymous tag.
- if (const TagType *OldTagType = OldVar->getType()->getAs<TagType>()) {
- TagDecl *OldTag = OldTagType->getDecl();
- if (OldTag->getDeclaratorForAnonDecl() == OldVar) {
- TagDecl *NewTag = NewVar->getType()->castAs<TagType>()->getDecl();
- assert(!NewTag->hasNameForLinkage() &&
- !NewTag->hasDeclaratorForAnonDecl());
- NewTag->setDeclaratorForAnonDecl(NewVar);
- }
- }
-
InstantiateAttrs(TemplateArgs, OldVar, NewVar, LateAttrs, StartingScope);
LookupResult Previous(
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index fd3ba3549b4c..61052f06c834 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -737,6 +737,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_interface:
case TST_class:
case TST_auto:
+ case TST_auto_type:
case TST_decltype_auto:
case TST_unknown_anytype:
case TST_error:
@@ -867,8 +868,8 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
MarkAnyDeclReferenced(OpLoc, ParameterPack, true);
- return new (Context) SizeOfPackExpr(Context.getSizeType(), OpLoc,
- ParameterPack, NameLoc, RParenLoc);
+ return SizeOfPackExpr::Create(Context, OpLoc, ParameterPack, NameLoc,
+ RParenLoc);
}
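The expression the new Create call builds, for reference:

  template <typename ...Ts> constexpr unsigned arity() { return sizeof...(Ts); }
  static_assert(arity<int, char, bool>() == 3, "sizeof... of a parameter pack");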
TemplateArgumentLoc
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 02a31ef8d79c..c70568c23b57 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -26,7 +26,6 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
@@ -211,10 +210,8 @@ namespace {
/// Diagnose all the ignored type attributes, given that the
/// declarator worked out to the given type.
void diagnoseIgnoredTypeAttrs(QualType type) const {
- for (SmallVectorImpl<AttributeList*>::const_iterator
- i = ignoredTypeAttrs.begin(), e = ignoredTypeAttrs.end();
- i != e; ++i)
- diagnoseBadTypeAttribute(getSema(), **i, type);
+ for (auto *Attr : ignoredTypeAttrs)
+ diagnoseBadTypeAttribute(getSema(), *Attr, type);
}
~TypeProcessingState() {
@@ -702,7 +699,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*VolatileQualifierLoc=*/NoLoc,
/*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
- /*ESpecLoc=*/NoLoc,
+ /*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
@@ -792,18 +789,33 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
TypeSourceInfo *typeArgInfo = typeArgs[i];
QualType typeArg = typeArgInfo->getType();
- // Type arguments cannot explicitly specify nullability.
- if (auto nullability = AttributedType::stripOuterNullability(typeArg)) {
- SourceLocation nullabilityLoc
- = typeArgInfo->getTypeLoc().findNullabilityLoc();
- SourceLocation diagLoc = nullabilityLoc.isValid()? nullabilityLoc
- : typeArgInfo->getTypeLoc().getLocStart();
- S.Diag(diagLoc,
- diag::err_type_arg_explicit_nullability)
- << typeArg
- << FixItHint::CreateRemoval(nullabilityLoc);
+ // Type arguments cannot have explicit qualifiers or nullability.
+ // We ignore indirect sources of these, e.g. behind typedefs or
+ // template arguments.
+ if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) {
+ bool diagnosed = false;
+ SourceRange rangeToRemove;
+ if (auto attr = qual.getAs<AttributedTypeLoc>()) {
+ rangeToRemove = attr.getLocalSourceRange();
+ if (attr.getTypePtr()->getImmediateNullability()) {
+ typeArg = attr.getTypePtr()->getModifiedType();
+ S.Diag(attr.getLocStart(),
+ diag::err_objc_type_arg_explicit_nullability)
+ << typeArg << FixItHint::CreateRemoval(rangeToRemove);
+ diagnosed = true;
+ }
+ }
+
+ if (!diagnosed) {
+ S.Diag(qual.getLocStart(), diag::err_objc_type_arg_qualified)
+ << typeArg << typeArg.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
+ }
}
+ // Remove qualifiers even if they're non-local.
+ typeArg = typeArg.getUnqualifiedType();
+
finalTypeArgs.push_back(typeArg);
if (typeArg->getAs<PackExpansionType>())
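Declarations the strengthened check diagnoses (illustrative Objective-C; NSArray stands in for any parameterized class):

  NSArray<_Nonnull id> *a;  // error: type argument may not specify nullability
  NSArray<const id> *b;     // error: type argument may not be qualified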
@@ -1377,11 +1389,13 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (Result.isNull()) {
declarator.setInvalidType(true);
} else if (S.getLangOpts().OpenCL) {
- if (const AtomicType *AT = Result->getAs<AtomicType>()) {
- const BuiltinType *BT = AT->getValueType()->getAs<BuiltinType>();
- bool NoExtTypes = BT && (BT->getKind() == BuiltinType::Int ||
- BT->getKind() == BuiltinType::UInt ||
- BT->getKind() == BuiltinType::Float);
+ if (Result->getAs<AtomicType>()) {
+ StringRef TypeName = Result.getBaseTypeIdentifier()->getName();
+ bool NoExtTypes =
+ llvm::StringSwitch<bool>(TypeName)
+ .Cases("atomic_int", "atomic_uint", "atomic_float",
+ "atomic_flag", true)
+ .Default(false);
if (!S.getOpenCLOptions().cl_khr_int64_base_atomics && !NoExtTypes) {
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
<< Result << "cl_khr_int64_base_atomics";
@@ -1393,12 +1407,19 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
<< Result << "cl_khr_int64_extended_atomics";
declarator.setInvalidType(true);
}
- if (!S.getOpenCLOptions().cl_khr_fp64 && BT &&
- BT->getKind() == BuiltinType::Double) {
+ if (!S.getOpenCLOptions().cl_khr_fp64 &&
+ !TypeName.compare("atomic_double")) {
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
<< Result << "cl_khr_fp64";
declarator.setInvalidType(true);
}
+ } else if (!S.getOpenCLOptions().cl_khr_gl_msaa_sharing &&
+ (Result->isImage2dMSAAT() || Result->isImage2dArrayMSAAT() ||
+ Result->isImage2dArrayMSAATDepth() ||
+ Result->isImage2dMSAATDepth())) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
+ << Result << "cl_khr_gl_msaa_sharing";
+ declarator.setInvalidType(true);
}
}
@@ -1482,14 +1503,17 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// template type parameter.
Result = QualType(CorrespondingTemplateParam->getTypeForDecl(), 0);
} else {
- Result = Context.getAutoType(QualType(), /*decltype(auto)*/false, false);
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false);
}
break;
+ case DeclSpec::TST_auto_type:
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::GNUAutoType, false);
+ break;
+
case DeclSpec::TST_decltype_auto:
- Result = Context.getAutoType(QualType(),
- /*decltype(auto)*/true,
- /*IsDependent*/ false);
+ Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto,
+ /*IsDependent*/ false);
break;
case DeclSpec::TST_unknown_anytype:
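The GNU extension being wired in, for reference (illustrative C):

  __auto_type x = 3.5;  /* deduced as double   */
  __auto_type p = &x;   /* deduced as double * */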
@@ -1540,8 +1564,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// Apply any type attributes from the decl spec. This may cause the
// list of type attributes to be temporarily saved while the type
// attributes are pushed around.
- if (AttributeList *attrs = DS.getAttributes().getList())
- processTypeAttrs(state, Result, TAL_DeclSpec, attrs);
+ processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes().getList());
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
@@ -1975,7 +1998,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
if (!MPTy->getClass()->isDependentType())
- RequireCompleteType(Loc, T, 0);
+ (void)isCompleteType(Loc, T);
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
@@ -2103,12 +2126,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (T->isVariableArrayType()) {
// Prohibit the use of non-POD types in VLAs.
QualType BaseT = Context.getBaseElementType(T);
- if (!T->isDependentType() &&
- !RequireCompleteType(Loc, BaseT, 0) &&
- !BaseT.isPODType(Context) &&
- !BaseT->isObjCLifetimeType()) {
- Diag(Loc, diag::err_vla_non_pod)
- << BaseT;
+ if (!T->isDependentType() && isCompleteType(Loc, BaseT) &&
+ !BaseT.isPODType(Context) && !BaseT->isObjCLifetimeType()) {
+ Diag(Loc, diag::err_vla_non_pod) << BaseT;
return QualType();
}
// Prohibit the use of VLAs during template argument deduction.
@@ -2122,7 +2142,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else if (ASM != ArrayType::Normal || Quals != 0)
Diag(Loc,
getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
- : diag::ext_c99_array_usage) << ASM;
+ : diag::ext_c99_array_usage) << ASM;
}
if (T->isVariableArrayType()) {
@@ -2271,8 +2291,11 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
// Adjust the default free function calling convention to the default method
// calling convention.
+ bool IsCtorOrDtor =
+ (Entity.getNameKind() == DeclarationName::CXXConstructorName) ||
+ (Entity.getNameKind() == DeclarationName::CXXDestructorName);
if (T->isFunctionType())
- adjustMemberFunctionCC(T, /*IsStatic=*/false);
+ adjustMemberFunctionCC(T, /*IsStatic=*/false, IsCtorOrDtor, Loc);
return Context.getMemberPointerType(T, Class.getTypePtr());
}
@@ -2432,14 +2455,14 @@ void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
return;
struct Qual {
- unsigned Mask;
const char *Name;
+ unsigned Mask;
SourceLocation Loc;
} const QualKinds[4] = {
- { DeclSpec::TQ_const, "const", ConstQualLoc },
- { DeclSpec::TQ_volatile, "volatile", VolatileQualLoc },
- { DeclSpec::TQ_restrict, "restrict", RestrictQualLoc },
- { DeclSpec::TQ_atomic, "_Atomic", AtomicQualLoc }
+ { "const", DeclSpec::TQ_const, ConstQualLoc },
+ { "volatile", DeclSpec::TQ_volatile, VolatileQualLoc },
+ { "restrict", DeclSpec::TQ_restrict, RestrictQualLoc },
+ { "_Atomic", DeclSpec::TQ_atomic, AtomicQualLoc }
};
SmallString<32> QualStr;
@@ -2455,7 +2478,7 @@ void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
// If we have a location for the qualifier, offer a fixit.
SourceLocation QualLoc = QualKinds[I].Loc;
- if (!QualLoc.isInvalid()) {
+ if (QualLoc.isValid()) {
FixIts[NumQuals] = FixItHint::CreateRemoval(QualLoc);
if (Loc.isInvalid() ||
getSourceManager().isBeforeInTranslationUnit(QualLoc, Loc))
@@ -2548,8 +2571,6 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// The TagDecl owned by the DeclSpec.
TagDecl *OwnedTagDecl = nullptr;
- bool ContainsPlaceholderType = false;
-
switch (D.getName().getKind()) {
case UnqualifiedId::IK_ImplicitSelfParam:
case UnqualifiedId::IK_OperatorFunctionId:
@@ -2557,7 +2578,6 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case UnqualifiedId::IK_LiteralOperatorId:
case UnqualifiedId::IK_TemplateId:
T = ConvertDeclSpecToType(state);
- ContainsPlaceholderType = D.getDeclSpec().containsPlaceholderType();
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
@@ -2572,8 +2592,8 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// Constructors and destructors don't have return types. Use
// "void" instead.
T = SemaRef.Context.VoidTy;
- if (AttributeList *attrs = D.getDeclSpec().getAttributes().getList())
- processTypeAttrs(state, T, TAL_DeclSpec, attrs);
+ processTypeAttrs(state, T, TAL_DeclSpec,
+ D.getDeclSpec().getAttributes().getList());
break;
case UnqualifiedId::IK_ConversionFunctionId:
@@ -2581,7 +2601,6 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// converts to.
T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
&ReturnTypeInfo);
- ContainsPlaceholderType = T->getContainedAutoType();
break;
}
@@ -2589,17 +2608,10 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
distributeTypeAttrsFromDeclarator(state, T);
// C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
- // In C++11, a function declarator using 'auto' must have a trailing return
- // type (this is checked later) and we can skip this. In other languages
- // using auto, we need to check regardless.
- // C++14 In generic lambdas allow 'auto' in their parameters.
- if (ContainsPlaceholderType &&
- (!SemaRef.getLangOpts().CPlusPlus11 || !D.isFunctionDeclarator())) {
+ if (D.getDeclSpec().containsPlaceholderType()) {
int Error = -1;
switch (D.getContext()) {
- case Declarator::KNRTypeListContext:
- llvm_unreachable("K&R type lists aren't allowed in C++");
case Declarator::LambdaExprContext:
llvm_unreachable("Can't specify a type specifier in lambda grammar");
case Declarator::ObjCParameterContext:
@@ -2608,69 +2620,88 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 0;
break;
case Declarator::LambdaExprParameterContext:
+ // In C++14, generic lambdas allow 'auto' in their parameters.
if (!(SemaRef.getLangOpts().CPlusPlus14
&& D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto))
- Error = 14;
+ Error = 16;
break;
- case Declarator::MemberContext:
- if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)
+ case Declarator::MemberContext: {
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ D.isFunctionDeclarator())
break;
+ bool Cxx = SemaRef.getLangOpts().CPlusPlus;
switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
case TTK_Enum: llvm_unreachable("unhandled tag kind");
- case TTK_Struct: Error = 1; /* Struct member */ break;
- case TTK_Union: Error = 2; /* Union member */ break;
- case TTK_Class: Error = 3; /* Class member */ break;
- case TTK_Interface: Error = 4; /* Interface member */ break;
+ case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
+ case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
+ case TTK_Class: Error = 5; /* Class member */ break;
+ case TTK_Interface: Error = 6; /* Interface member */ break;
}
break;
+ }
case Declarator::CXXCatchContext:
case Declarator::ObjCCatchContext:
- Error = 5; // Exception declaration
+ Error = 7; // Exception declaration
break;
case Declarator::TemplateParamContext:
- Error = 6; // Template parameter
+ Error = 8; // Template parameter
break;
case Declarator::BlockLiteralContext:
- Error = 7; // Block literal
+ Error = 9; // Block literal
break;
case Declarator::TemplateTypeArgContext:
- Error = 8; // Template type argument
+ Error = 10; // Template type argument
break;
case Declarator::AliasDeclContext:
case Declarator::AliasTemplateContext:
- Error = 10; // Type alias
+ Error = 12; // Type alias
break;
case Declarator::TrailingReturnContext:
- if (!SemaRef.getLangOpts().CPlusPlus14)
- Error = 11; // Function return type
+ if (!SemaRef.getLangOpts().CPlusPlus14 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 13; // Function return type
break;
case Declarator::ConversionIdContext:
- if (!SemaRef.getLangOpts().CPlusPlus14)
- Error = 12; // conversion-type-id
+ if (!SemaRef.getLangOpts().CPlusPlus14 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 14; // conversion-type-id
break;
case Declarator::TypeNameContext:
- Error = 13; // Generic
+ Error = 15; // Generic
break;
case Declarator::FileContext:
case Declarator::BlockContext:
case Declarator::ForContext:
case Declarator::ConditionContext:
+ break;
case Declarator::CXXNewContext:
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type)
+ Error = 17; // 'new' type
+ break;
+ case Declarator::KNRTypeListContext:
+ Error = 18; // K&R function parameter
break;
}
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
- Error = 9;
-
- // In Objective-C it is an error to use 'auto' on a function declarator.
- if (D.isFunctionDeclarator())
Error = 11;
+ // In Objective-C it is an error to use 'auto' on a function declarator
+ // (and everywhere for '__auto_type').
+ if (D.isFunctionDeclarator() &&
+ (!SemaRef.getLangOpts().CPlusPlus11 ||
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto_type))
+ Error = 13;
+
+ bool HaveTrailing = false;
+
// C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
// contains a trailing return type. That is only legal at the outermost
// level. Check all declarator chunks (outermost first) anyway, to give
// better diagnostics.
- if (SemaRef.getLangOpts().CPlusPlus11 && Error != -1) {
+ // We don't support '__auto_type' with trailing return types.
+ if (SemaRef.getLangOpts().CPlusPlus11 &&
+ D.getDeclSpec().getTypeSpecType() != DeclSpec::TST_auto_type) {
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
@@ -2678,6 +2709,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (DeclType.Kind == DeclaratorChunk::Function) {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
if (FTI.hasTrailingReturnType()) {
+ HaveTrailing = true;
Error = -1;
break;
}
@@ -2690,22 +2722,31 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
AutoRange = D.getName().getSourceRange();
if (Error != -1) {
- const bool IsDeclTypeAuto =
- D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_decltype_auto;
+ unsigned Keyword;
+ switch (D.getDeclSpec().getTypeSpecType()) {
+ case DeclSpec::TST_auto: Keyword = 0; break;
+ case DeclSpec::TST_decltype_auto: Keyword = 1; break;
+ case DeclSpec::TST_auto_type: Keyword = 2; break;
+ default: llvm_unreachable("unknown auto TypeSpecType");
+ }
SemaRef.Diag(AutoRange.getBegin(), diag::err_auto_not_allowed)
- << IsDeclTypeAuto << Error << AutoRange;
+ << Keyword << Error << AutoRange;
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
- } else
+ } else if (!HaveTrailing) {
+ // If there was a trailing return type, we already got
+ // warn_cxx98_compat_trailing_return_type in the parser.
SemaRef.Diag(AutoRange.getBegin(),
diag::warn_cxx98_compat_auto_type_specifier)
<< AutoRange;
+ }
}
if (SemaRef.getLangOpts().CPlusPlus &&
OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) {
// Check the contexts where C++ forbids the declaration of a new class
// or enumeration in a type-specifier-seq.
+ unsigned DiagID = 0;
switch (D.getContext()) {
case Declarator::TrailingReturnContext:
// Class and enumeration definitions are syntactically not allowed in
@@ -2725,10 +2766,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case Declarator::AliasDeclContext:
break;
case Declarator::AliasTemplateContext:
- SemaRef.Diag(OwnedTagDecl->getLocation(),
- diag::err_type_defined_in_alias_template)
- << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
- D.setInvalidType(true);
+ DiagID = diag::err_type_defined_in_alias_template;
break;
case Declarator::TypeNameContext:
case Declarator::ConversionIdContext:
@@ -2737,10 +2775,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case Declarator::CXXCatchContext:
case Declarator::ObjCCatchContext:
case Declarator::TemplateTypeArgContext:
- SemaRef.Diag(OwnedTagDecl->getLocation(),
- diag::err_type_defined_in_type_specifier)
- << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
- D.setInvalidType(true);
+ DiagID = diag::err_type_defined_in_type_specifier;
break;
case Declarator::PrototypeContext:
case Declarator::LambdaExprParameterContext:
@@ -2749,20 +2784,21 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case Declarator::KNRTypeListContext:
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
- SemaRef.Diag(OwnedTagDecl->getLocation(),
- diag::err_type_defined_in_param_type)
- << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
- D.setInvalidType(true);
+ DiagID = diag::err_type_defined_in_param_type;
break;
case Declarator::ConditionContext:
// C++ 6.4p2:
// The type-specifier-seq shall not contain typedef and shall not declare
// a new class or enumeration.
- SemaRef.Diag(OwnedTagDecl->getLocation(),
- diag::err_type_defined_in_condition);
- D.setInvalidType(true);
+ DiagID = diag::err_type_defined_in_condition;
break;
}
+
+ if (DiagID != 0) {
+ SemaRef.Diag(OwnedTagDecl->getLocation(), DiagID)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ D.setInvalidType(true);
+ }
}
assert(!T.isNull() && "This function should not return a null type");
@@ -3286,14 +3322,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Are we in an assume-nonnull region?
bool inAssumeNonNullRegion = false;
- if (S.PP.getPragmaAssumeNonNullLoc().isValid() &&
- !state.getDeclarator().isObjCWeakProperty() &&
- !S.deduceWeakPropertyFromType(T)) {
+ if (S.PP.getPragmaAssumeNonNullLoc().isValid()) {
inAssumeNonNullRegion = true;
// Determine which file we saw the assume-nonnull region in.
FileID file = getNullabilityCompletenessCheckFileID(
S, S.PP.getPragmaAssumeNonNullLoc());
- if (!file.isInvalid()) {
+ if (file.isValid()) {
FileNullability &fileNullability = S.NullabilityMap[file];
// If we haven't seen any type nullability before, now we have.
@@ -3367,6 +3401,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
complainAboutMissingNullability = CAMN_No;
break;
}
+
+ // Weak properties are inferred to be nullable.
+ if (state.getDeclarator().isObjCWeakProperty() && inAssumeNonNullRegion) {
+ inferNullability = NullabilityKind::Nullable;
+ break;
+ }
+
// fallthrough
case Declarator::FileContext:
@@ -3699,7 +3740,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
D.setInvalidType(true);
} else if (D.getContext() != Declarator::LambdaExprContext &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
- cast<AutoType>(T)->isDecltypeAuto())) {
+ cast<AutoType>(T)->getKeyword() != AutoTypeKeyword::Auto)) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
@@ -3835,9 +3876,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Exception specs are not allowed in typedefs. Complain, but add it
// anyway.
if (IsTypedefName && FTI.getExceptionSpecType())
- S.Diag(FTI.getExceptionSpecLoc(), diag::err_exception_spec_in_typedef)
- << (D.getContext() == Declarator::AliasDeclContext ||
- D.getContext() == Declarator::AliasTemplateContext);
+ S.Diag(FTI.getExceptionSpecLocBeg(),
+ diag::err_exception_spec_in_typedef)
+ << (D.getContext() == Declarator::AliasDeclContext ||
+ D.getContext() == Declarator::AliasTemplateContext);
// If we see "T var();" or "T var(T());" at block scope, it is probably
// an attempt to initialize a variable, not a function declaration.
@@ -4062,8 +4104,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// See if there are any attributes on this declarator chunk.
- if (AttributeList *attrs = const_cast<AttributeList*>(DeclType.getAttrs()))
- processTypeAttrs(state, T, TAL_DeclChunk, attrs);
+ processTypeAttrs(state, T, TAL_DeclChunk,
+ const_cast<AttributeList *>(DeclType.getAttrs()));
}
assert(!T.isNull() && "T must not be null after this point");
@@ -4156,8 +4198,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// Apply any undistributed attributes from the declarator.
- if (AttributeList *attrs = D.getAttributes())
- processTypeAttrs(state, T, TAL_DeclName, attrs);
+ processTypeAttrs(state, T, TAL_DeclName, D.getAttributes());
// Diagnose any ignored type attributes.
state.diagnoseIgnoredTypeAttrs(T);
@@ -4377,7 +4418,7 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
- if (getLangOpts().ObjCAutoRefCount) {
+ if (getLangOpts().ObjC1) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
if (ownership != Qualifiers::OCL_None)
transferARCOwnership(state, declSpecTy, ownership);
@@ -4402,6 +4443,7 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
case AttributedType::attr_objc_gc:
return AttributeList::AT_ObjCGC;
case AttributedType::attr_objc_ownership:
+ case AttributedType::attr_objc_inert_unsafe_unretained:
return AttributeList::AT_ObjCOwnership;
case AttributedType::attr_noreturn:
return AttributeList::AT_NoReturn;
@@ -5061,11 +5103,6 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
return true;
}
- // Consume lifetime attributes without further comment outside of
- // ARC mode.
- if (!S.getLangOpts().ObjCAutoRefCount)
- return true;
-
IdentifierInfo *II = attr.getArgAsIdent(0)->Ident;
Qualifiers::ObjCLifetime lifetime;
if (II->isStr("none"))
@@ -5083,6 +5120,14 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
return true;
}
+ // Just ignore lifetime attributes other than __weak and __unsafe_unretained
+ // outside of ARC mode.
+ if (!S.getLangOpts().ObjCAutoRefCount &&
+ lifetime != Qualifiers::OCL_Weak &&
+ lifetime != Qualifiers::OCL_ExplicitNone) {
+ return true;
+ }
+
SplitQualType underlyingType = type.split();
// Check for redundant/conflicting ownership qualifiers.
@@ -5123,6 +5168,25 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
<< TDS_ObjCObjOrBlock << type;
}
+ // Don't actually add the __unsafe_unretained qualifier in non-ARC files,
+ // because having both 'T' and '__unsafe_unretained T' exist in the type
+ // system causes unfortunate widespread consistency problems. (For example,
+ // they're not considered compatible types, and we mangle them identically
+ // as template arguments.) These problems are all individually fixable,
+ // but it's easier to just not add the qualifier and instead sniff it out
+ // in specific places using isObjCInertUnsafeUnretainedType().
+ //
+ // Doing this means we miss some trivial consistency checks that
+ // would've triggered in ARC, but that's better than trying to solve all
+ // the coexistence problems with __unsafe_unretained.
+ if (!S.getLangOpts().ObjCAutoRefCount &&
+ lifetime == Qualifiers::OCL_ExplicitNone) {
+ type = S.Context.getAttributedType(
+ AttributedType::attr_objc_inert_unsafe_unretained,
+ type, type);
+ return true;
+ }
+
QualType origType = type;
if (!NonObjCPointer)
type = S.Context.getQualifiedType(underlyingType);
@@ -5133,19 +5197,29 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
type = S.Context.getAttributedType(AttributedType::attr_objc_ownership,
origType, type);
- // Forbid __weak if the runtime doesn't support it.
- if (lifetime == Qualifiers::OCL_Weak &&
- !S.getLangOpts().ObjCARCWeak && !NonObjCPointer) {
-
- // Actually, delay this until we know what we're parsing.
+ auto diagnoseOrDelay = [](Sema &S, SourceLocation loc,
+ unsigned diagnostic, QualType type) {
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
S.DelayedDiagnostics.add(
sema::DelayedDiagnostic::makeForbiddenType(
- S.getSourceManager().getExpansionLoc(AttrLoc),
- diag::err_arc_weak_no_runtime, type, /*ignored*/ 0));
+ S.getSourceManager().getExpansionLoc(loc),
+ diagnostic, type, /*ignored*/ 0));
} else {
- S.Diag(AttrLoc, diag::err_arc_weak_no_runtime);
+ S.Diag(loc, diagnostic);
}
+ };
+
+ // Sometimes, __weak isn't allowed.
+ if (lifetime == Qualifiers::OCL_Weak &&
+ !S.getLangOpts().ObjCWeak && !NonObjCPointer) {
+
+ // Use a specialized diagnostic if the runtime just doesn't support weak
+ // references.
+ unsigned diagnostic =
+ (S.getLangOpts().ObjCWeakRuntime ? diag::err_arc_weak_disabled
+ : diag::err_arc_weak_no_runtime);
+
+ // In any case, delay the diagnostic until we know what we're parsing.
+ diagnoseOrDelay(S, AttrLoc, diagnostic, type);
attr.setInvalid();
return true;
@@ -5158,9 +5232,9 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
type->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) {
if (Class->isArcWeakrefUnavailable()) {
- S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
- S.Diag(ObjT->getInterfaceDecl()->getLocation(),
- diag::note_class_declared);
+ S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ diag::note_class_declared);
}
}
}
@@ -5402,9 +5476,12 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
// Pointer type qualifiers can only operate on pointer types, but not
// pointer-to-member types.
if (!isa<PointerType>(Desugared)) {
- S.Diag(Attr.getLoc(), Type->isMemberPointerType() ?
- diag::err_attribute_no_member_pointers :
- diag::err_attribute_pointers_only) << Attr.getName();
+ if (Type->isMemberPointerType())
+ S.Diag(Attr.getLoc(), diag::err_attribute_no_member_pointers)
+ << Attr.getName();
+ else
+ S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
+ << Attr.getName() << 0;
return true;
}
@@ -5843,25 +5920,41 @@ bool Sema::hasExplicitCallingConv(QualType &T) {
return false;
}
-void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic) {
+void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
+ SourceLocation Loc) {
FunctionTypeUnwrapper Unwrapped(*this, T);
const FunctionType *FT = Unwrapped.get();
bool IsVariadic = (isa<FunctionProtoType>(FT) &&
cast<FunctionProtoType>(FT)->isVariadic());
-
- // Only adjust types with the default convention. For example, on Windows we
- // should adjust a __cdecl type to __thiscall for instance methods, and a
- // __thiscall type to __cdecl for static methods.
CallingConv CurCC = FT->getCallConv();
- CallingConv FromCC =
- Context.getDefaultCallingConvention(IsVariadic, IsStatic);
CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
- if (CurCC != FromCC || FromCC == ToCC)
- return;
- if (hasExplicitCallingConv(T))
+ if (CurCC == ToCC)
return;
+ // MS compiler ignores explicit calling convention attributes on structors. We
+ // should do the same.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() && IsCtorOrDtor) {
+ // Issue a warning on the ignored calling convention -- except for
+ // __stdcall; again, this is what the MS compiler does.
+ if (CurCC != CC_X86StdCall)
+ Diag(Loc, diag::warn_cconv_structors)
+ << FunctionType::getNameForCallConv(CurCC);
+ // Default adjustment.
+ } else {
+ // Only adjust types with the default convention. For example, on Windows
+ // we should adjust a __cdecl type to __thiscall for instance methods, and a
+ // __thiscall type to __cdecl for static methods.
+ CallingConv DefaultCC =
+ Context.getDefaultCallingConvention(IsVariadic, IsStatic);
+
+ if (CurCC != DefaultCC || DefaultCC == ToCC)
+ return;
+
+ if (hasExplicitCallingConv(T))
+ return;
+ }
+
FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC));
QualType Wrapped = Unwrapped.wrap(*this, FT);
T = Context.getAdjustedType(T, Wrapped);
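The structor behavior implemented above, sketched for the x86 Microsoft ABI (hypothetical declarations):

  struct S {
    __cdecl S();     // convention ignored with warn_cconv_structors, then
                     // adjusted to the default (__thiscall)
    __stdcall ~S();  // ignored silently, matching MSVC
  };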
@@ -6077,10 +6170,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// type, but others can be present in the type specifiers even though they
// apply to the decl. Here we apply type attributes and ignore the rest.
- AttributeList *next;
- do {
+ bool hasOpenCLAddressSpace = false;
+ while (attrs) {
AttributeList &attr = *attrs;
- next = attr.getNext();
+ attrs = attr.getNext(); // advance now; some cases below 'continue' early
// Skip attributes that were marked to be invalid.
if (attr.isInvalid())
@@ -6139,6 +6233,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case AttributeList::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
+ hasOpenCLAddressSpace = true;
break;
OBJC_POINTER_TYPE_ATTRS_CASELIST:
if (!handleObjCPointerTypeAttr(state, attr, type))
@@ -6233,7 +6328,83 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
distributeFunctionTypeAttr(state, attr, type);
break;
}
- } while ((attrs = next));
+ }
+
+ // If address space is not set, OpenCL 2.0 defines non-private default
+ // address spaces for some cases:
+ // OpenCL 2.0, section 6.5:
+ // The address space for a variable at program scope or a static variable
+ // inside a function can either be __global or __constant, but defaults to
+ // __global if not specified.
+ // (...)
+ // Pointers that are declared without pointing to a named address space point
+ // to the generic address space.
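+  // Illustrative OpenCL 2.0 snippet for the defaults applied below (assumed
+  // example):
+  //   int g;                 // program scope: defaults to __global
+  //   void f(int *p);        // p points to the generic address space
+  //   kernel void k() { static int s; }  // static local: defaults to __global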
+ if (state.getSema().getLangOpts().OpenCLVersion >= 200 &&
+ !hasOpenCLAddressSpace && type.getAddressSpace() == 0 &&
+ (TAL == TAL_DeclSpec || TAL == TAL_DeclChunk)) {
+ Declarator &D = state.getDeclarator();
+ if (state.getCurrentChunkIndex() > 0 &&
+ D.getTypeObject(state.getCurrentChunkIndex() - 1).Kind ==
+ DeclaratorChunk::Pointer) {
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_generic);
+ } else if (state.getCurrentChunkIndex() == 0 &&
+ D.getContext() == Declarator::FileContext &&
+ !D.isFunctionDeclarator() && !D.isFunctionDefinition() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ !type->isSamplerT())
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_global);
+ else if (state.getCurrentChunkIndex() == 0 &&
+ D.getContext() == Declarator::BlockContext &&
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)
+ type = state.getSema().Context.getAddrSpaceQualType(
+ type, LangAS::opencl_global);
+ }
+}
+
+void Sema::completeExprArrayBound(Expr *E) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) {
+ SourceLocation PointOfInstantiation = E->getExprLoc();
+
+ if (MemberSpecializationInfo *MSInfo =
+ Var->getMemberSpecializationInfo()) {
+ // If we don't already have a point of instantiation, this is it.
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+
+ // This is a modification of an existing AST node. Notify
+ // listeners.
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->StaticDataMemberInstantiated(Var);
+ }
+ } else {
+ VarTemplateSpecializationDecl *VarSpec =
+ cast<VarTemplateSpecializationDecl>(Var);
+ if (VarSpec->getPointOfInstantiation().isInvalid())
+ VarSpec->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiateVariableDefinition(PointOfInstantiation, Var);
+
+ // Update the type to the newly instantiated definition's type both
+ // here and within the expression.
+ if (VarDecl *Def = Var->getDefinition()) {
+ DRE->setDecl(Def);
+ QualType T = Def->getType();
+ DRE->setType(T);
+ // FIXME: Update the type on all intervening expressions.
+ E->setType(T);
+ }
+
+ // We still go on to try to complete the type independently, as it
+ // may also require instantiations or diagnostics if it remains
+ // incomplete.
+ }
+ }
+ }
}
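+// Sketch of the case the new helper handles (assumed example): naming a
+// static member array of a class template instantiates its definition, whose
+// initializer then completes the array bound:
+//   template<typename T> struct X { static const int arr[]; };
+//   template<typename T> const int X<T>::arr[] = {1, 2, 3};
+//   int n = sizeof(X<int>::arr);   // instantiation completes arr as int[3]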
/// \brief Ensure that the type of the given expression is complete.
@@ -6250,87 +6421,26 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
///
/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
/// otherwise.
-bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser){
+bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
QualType T = E->getType();
- // Fast path the case where the type is already complete.
- if (!T->isIncompleteType())
- // FIXME: The definition might not be visible.
- return false;
-
// Incomplete array types may be completed by the initializer attached to
// their definitions. For static data members of class templates and for
// variable templates, we need to instantiate the definition to get this
// initializer and complete the type.
if (T->isIncompleteArrayType()) {
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) {
- SourceLocation PointOfInstantiation = E->getExprLoc();
-
- if (MemberSpecializationInfo *MSInfo =
- Var->getMemberSpecializationInfo()) {
- // If we don't already have a point of instantiation, this is it.
- if (MSInfo->getPointOfInstantiation().isInvalid()) {
- MSInfo->setPointOfInstantiation(PointOfInstantiation);
-
- // This is a modification of an existing AST node. Notify
- // listeners.
- if (ASTMutationListener *L = getASTMutationListener())
- L->StaticDataMemberInstantiated(Var);
- }
- } else {
- VarTemplateSpecializationDecl *VarSpec =
- cast<VarTemplateSpecializationDecl>(Var);
- if (VarSpec->getPointOfInstantiation().isInvalid())
- VarSpec->setPointOfInstantiation(PointOfInstantiation);
- }
-
- InstantiateVariableDefinition(PointOfInstantiation, Var);
-
- // Update the type to the newly instantiated definition's type both
- // here and within the expression.
- if (VarDecl *Def = Var->getDefinition()) {
- DRE->setDecl(Def);
- T = Def->getType();
- DRE->setType(T);
- E->setType(T);
- }
-
- // We still go on to try to complete the type independently, as it
- // may also require instantiations or diagnostics if it remains
- // incomplete.
- }
- }
- }
+ completeExprArrayBound(E);
+ T = E->getType();
}
// FIXME: Are there other cases which require instantiating something other
// than the type to complete the type of an expression?
- // Look through reference types and complete the referred type.
- if (const ReferenceType *Ref = T->getAs<ReferenceType>())
- T = Ref->getPointeeType();
-
return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
}
-namespace {
- struct TypeDiagnoserDiag : Sema::TypeDiagnoser {
- unsigned DiagID;
-
- TypeDiagnoserDiag(unsigned DiagID)
- : Sema::TypeDiagnoser(DiagID == 0), DiagID(DiagID) {}
-
- void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
- if (Suppressed) return;
- S.Diag(Loc, DiagID) << T;
- }
- };
-}
-
bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
- TypeDiagnoserDiag Diagnoser(DiagID);
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireCompleteExprType(E, Diagnoser);
}
@@ -6353,7 +6463,7 @@ bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
- if (RequireCompleteTypeImpl(Loc, T, Diagnoser))
+ if (RequireCompleteTypeImpl(Loc, T, &Diagnoser))
return true;
if (const TagType *Tag = T->getAs<TagType>()) {
if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
@@ -6457,7 +6567,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
/// \brief The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
- TypeDiagnoser &Diagnoser) {
+ TypeDiagnoser *Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
// assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
// FIXME: Add this assertion to help us flush out problems with
@@ -6466,24 +6576,31 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// assert(!T->isDependentType() &&
// "Can't ask whether a dependent type is complete");
+ // We lock in the inheritance model once somebody has asked us to ensure
+ // that a pointer-to-member type is complete.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+ if (!MPTy->getClass()->isDependentType()) {
+ (void)isCompleteType(Loc, QualType(MPTy->getClass(), 0));
+ assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl());
+ }
+ }
+ }
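+  // e.g. (illustrative, MS ABI): given 'struct A;', asking whether 'int A::*'
+  // is complete first tries to complete A, then locks in A's inheritance
+  // model, because the member-pointer representation depends on it.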
+
// If we have a complete type, we're done.
NamedDecl *Def = nullptr;
if (!T->isIncompleteType(&Def)) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
- if (!Diagnoser.Suppressed && Def &&
- !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true))
- diagnoseMissingImport(Loc, SuggestedDef, /*NeedDefinition*/true);
-
- // We lock in the inheritance model once somebody has asked us to ensure
- // that a pointer-to-member type is complete.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
- if (!MPTy->getClass()->isDependentType()) {
- RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), 0);
- assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl());
- }
- }
+ if (Def &&
+ !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) {
+ // If the user is going to see an error here, recover by making the
+ // definition visible.
+ bool TreatAsComplete = Diagnoser && !isSFINAEContext();
+ if (Diagnoser)
+ diagnoseMissingImport(Loc, SuggestedDef, /*NeedDefinition*/true,
+ /*Recover*/TreatAsComplete);
+ return !TreatAsComplete;
}
return false;
@@ -6500,6 +6617,9 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// chain for a declaration that can be accessed through a mechanism other
// than name lookup (eg, referenced in a template, or a variable whose type
// could be completed by the module)?
+ //
+ // FIXME: Should we map through to the base array element type before
+ // checking for a tag type?
if (Tag || IFace) {
NamedDecl *D =
Tag ? static_cast<NamedDecl *>(Tag->getDecl()) : IFace->getDecl();
@@ -6530,12 +6650,16 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
= Context.getAsConstantArrayType(MaybeTemplate))
MaybeTemplate = Array->getElementType();
if (const RecordType *Record = MaybeTemplate->getAs<RecordType>()) {
+ bool Instantiated = false;
+ bool Diagnosed = false;
if (ClassTemplateSpecializationDecl *ClassTemplateSpec
= dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
- if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared)
- return InstantiateClassTemplateSpecialization(Loc, ClassTemplateSpec,
- TSK_ImplicitInstantiation,
- /*Complain=*/!Diagnoser.Suppressed);
+ if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
+ Diagnosed = InstantiateClassTemplateSpecialization(
+ Loc, ClassTemplateSpec, TSK_ImplicitInstantiation,
+ /*Complain=*/Diagnoser);
+ Instantiated = true;
+ }
} else if (CXXRecordDecl *Rec
= dyn_cast<CXXRecordDecl>(Record->getDecl())) {
CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
@@ -6543,16 +6667,31 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo();
assert(MSI && "Missing member specialization information?");
// This record was instantiated from a class within a template.
- if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
- return InstantiateClass(Loc, Rec, Pattern,
- getTemplateInstantiationArgs(Rec),
- TSK_ImplicitInstantiation,
- /*Complain=*/!Diagnoser.Suppressed);
+ if (MSI->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization) {
+ Diagnosed = InstantiateClass(Loc, Rec, Pattern,
+ getTemplateInstantiationArgs(Rec),
+ TSK_ImplicitInstantiation,
+ /*Complain=*/Diagnoser);
+ Instantiated = true;
+ }
}
}
+
+ if (Instantiated) {
+ // Instantiate* might have already complained that the template is not
+ // defined, if we asked it to.
+ if (Diagnoser && Diagnosed)
+ return true;
+ // If we instantiated a definition, check that it's usable, even if
+ // instantiation produced an error, so that repeated calls to this
+ // function give consistent answers.
+ if (!T->isIncompleteType())
+ return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ }
}
- if (Diagnoser.Suppressed)
+ if (!Diagnoser)
return true;
// We have an incomplete type. Produce a diagnostic.
@@ -6562,7 +6701,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
return true;
}
- Diagnoser.diagnose(*this, Loc, T);
+ Diagnoser->diagnose(*this, Loc, T);
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
@@ -6586,7 +6725,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID) {
- TypeDiagnoserDiag Diagnoser(DiagID);
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireCompleteType(Loc, T, Diagnoser);
}
@@ -6627,14 +6766,10 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
assert(!T->isDependentType() && "type should not be dependent");
QualType ElemType = Context.getBaseElementType(T);
- RequireCompleteType(Loc, ElemType, 0);
-
- if (T->isLiteralType(Context))
+ if ((isCompleteType(Loc, ElemType) || ElemType->isVoidType()) &&
+ T->isLiteralType(Context))
return false;
- if (Diagnoser.Suppressed)
- return true;
-
Diagnoser.diagnose(*this, Loc, T);
if (T->isVariableArrayType())
@@ -6649,10 +6784,8 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
// A partially-defined class type can't be a literal type, because a literal
// class type must have a trivial destructor (which can't be checked until
// the class definition is complete).
- if (!RD->isCompleteDefinition()) {
- RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T);
+ if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
return true;
- }
// If the class has virtual base classes, then it's not an aggregate, and
// cannot have any constexpr constructors or a trivial default constructor,
@@ -6704,7 +6837,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
}
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
- TypeDiagnoserDiag Diagnoser(DiagID);
+ BoundTypeDiagnoser<> Diagnoser(DiagID);
return RequireLiteralType(Loc, T, Diagnoser);
}
@@ -6730,6 +6863,9 @@ QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
if (ER.isInvalid()) return QualType();
E = ER.get();
+ if (!getLangOpts().CPlusPlus && E->refersToBitField())
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2;
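+  // e.g. (illustrative, C only):
+  //   struct S { int b : 3; } s;
+  //   __typeof__(s.b) x;   // error: typeof applied to a bit-field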
+
if (!E->isTypeDependent()) {
QualType T = E->getType();
if (const TagType *TT = T->getAs<TagType>())
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 6e193a3529c9..e0a9653eb93b 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -390,7 +391,7 @@ public:
/// due to transformation.
///
/// \returns true if an error occurred, false otherwise.
- bool TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall,
+ bool TransformExprs(Expr *const *Inputs, unsigned NumInputs, bool IsCall,
SmallVectorImpl<Expr *> &Outputs,
bool *ArgChanged = nullptr);
@@ -503,7 +504,8 @@ public:
///
/// Returns true if there was an error.
bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
- TemplateArgumentLoc &Output);
+ TemplateArgumentLoc &Output,
+ bool Uneval = false);
/// \brief Transform the given set of template arguments.
///
@@ -525,8 +527,10 @@ public:
/// Returns true if an error occurred.
bool TransformTemplateArguments(const TemplateArgumentLoc *Inputs,
unsigned NumInputs,
- TemplateArgumentListInfo &Outputs) {
- return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs);
+ TemplateArgumentListInfo &Outputs,
+ bool Uneval = false) {
+ return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs,
+ Uneval);
}
/// \brief Transform the given set of template arguments.
@@ -546,7 +550,8 @@ public:
template<typename InputIterator>
bool TransformTemplateArguments(InputIterator First,
InputIterator Last,
- TemplateArgumentListInfo &Outputs);
+ TemplateArgumentListInfo &Outputs,
+ bool Uneval = false);
/// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
void InventTemplateArgumentLoc(const TemplateArgument &Arg,
@@ -843,11 +848,11 @@ public:
/// \brief Build a new C++11 auto type.
///
/// By default, builds a new AutoType with the given deduced type.
- QualType RebuildAutoType(QualType Deduced, bool IsDecltypeAuto) {
+ QualType RebuildAutoType(QualType Deduced, AutoTypeKeyword Keyword) {
// Note, IsDependent is always false here: we implicitly convert an 'auto'
// which has been deduced to a dependent type into an undeduced 'auto', so
// that we'll retry deduction after the transformation.
- return SemaRef.Context.getAutoType(Deduced, IsDecltypeAuto,
+ return SemaRef.Context.getAutoType(Deduced, Keyword,
/*IsDependent*/ false);
}
@@ -1282,6 +1287,30 @@ public:
Constraints, Clobbers, Exprs, EndLoc);
}
+ /// \brief Build a new co_return statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCoreturnStmt(SourceLocation CoreturnLoc, Expr *Result) {
+ return getSema().BuildCoreturnStmt(CoreturnLoc, Result);
+ }
+
+ /// \brief Build a new co_await expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Result) {
+ return getSema().BuildCoawaitExpr(CoawaitLoc, Result);
+ }
+
+ /// \brief Build a new co_yield expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCoyieldExpr(SourceLocation CoyieldLoc, Expr *Result) {
+ return getSema().BuildCoyieldExpr(CoyieldLoc, Result);
+ }
+
/// \brief Build a new Objective-C \@try statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -1354,12 +1383,15 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPIfClause(Expr *Condition,
- SourceLocation StartLoc,
+ OMPClause *RebuildOMPIfClause(OpenMPDirectiveKind NameModifier,
+ Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPIfClause(Condition, StartLoc,
- LParenLoc, EndLoc);
+ return getSema().ActOnOpenMPIfClause(NameModifier, Condition, StartLoc,
+ LParenLoc, NameModifierLoc, ColonLoc,
+ EndLoc);
}
/// \brief Build a new OpenMP 'final' clause.
@@ -1395,6 +1427,16 @@ public:
return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
}
+ /// \brief Build a new OpenMP 'simdlen' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
+ }
+
/// \brief Build a new OpenMP 'collapse' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -1436,15 +1478,24 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPScheduleClause(OpenMPScheduleClauseKind Kind,
- Expr *ChunkSize,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation KindLoc,
- SourceLocation CommaLoc,
- SourceLocation EndLoc) {
+ OMPClause *RebuildOMPScheduleClause(
+ OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
+ OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
+ SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
return getSema().ActOnOpenMPScheduleClause(
- Kind, ChunkSize, StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc);
+ M1, M2, Kind, ChunkSize, StartLoc, LParenLoc, M1Loc, M2Loc, KindLoc,
+ CommaLoc, EndLoc);
+ }
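+  // e.g. (illustrative) '#pragma omp for schedule(monotonic: dynamic, n)':
+  // M1 carries the 'monotonic' modifier, Kind is 'dynamic', and ChunkSize is
+  // the (possibly template-dependent) expression 'n'.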
+
+ /// \brief Build a new OpenMP 'ordered' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPOrderedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ SourceLocation LParenLoc, Expr *Num) {
+ return getSema().ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Num);
}
/// \brief Build a new OpenMP 'private' clause.
@@ -1518,10 +1569,13 @@ public:
OMPClause *RebuildOMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
+ OpenMPLinearClauseKind Modifier,
+ SourceLocation ModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc) {
return getSema().ActOnOpenMPLinearClause(VarList, Step, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ Modifier, ModifierLoc, ColonLoc,
+ EndLoc);
}
/// \brief Build a new OpenMP 'aligned' clause.
@@ -1586,6 +1640,97 @@ public:
StartLoc, LParenLoc, EndLoc);
}
+ /// \brief Build a new OpenMP 'device' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDeviceClause(Device, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'map' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPMapClause(
+ OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPMapClause(MapTypeModifier, MapType, MapLoc,
+                                          ColonLoc, VarList, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'num_teams' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNumTeamsClause(NumTeams, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'thread_limit' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPThreadLimitClause(Expr *ThreadLimit,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPThreadLimitClause(ThreadLimit, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'priority' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPPriorityClause(Priority, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'grainsize' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPGrainsizeClause(Expr *Grainsize, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPGrainsizeClause(Grainsize, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'num_tasks' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNumTasksClause(NumTasks, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// \brief Build a new OpenMP 'hint' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPHintClause(Expr *Hint, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
+ }
+
/// \brief Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -1673,6 +1818,7 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *Range, Stmt *BeginEnd,
Expr *Cond, Expr *Inc,
@@ -1695,7 +1841,8 @@ public:
}
}
- return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd,
+ return getSema().BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc,
+ Range, BeginEnd,
Cond, Inc, LoopVar, RParenLoc,
Sema::BFRK_Rebuild);
}
@@ -1808,12 +1955,11 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildOffsetOfExpr(SourceLocation OperatorLoc,
- TypeSourceInfo *Type,
- Sema::OffsetOfComponent *Components,
- unsigned NumComponents,
- SourceLocation RParenLoc) {
+ TypeSourceInfo *Type,
+ ArrayRef<Sema::OffsetOfComponent> Components,
+ SourceLocation RParenLoc) {
return getSema().BuildBuiltinOffsetOf(OperatorLoc, Type, Components,
- NumComponents, RParenLoc);
+ RParenLoc);
}
/// \brief Build a new sizeof, alignof or vec_step expression with a
@@ -1857,6 +2003,18 @@ public:
RBracketLoc);
}
+ /// \brief Build a new array section expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPArraySectionExpr(Expr *Base, SourceLocation LBracketLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLoc, Expr *Length,
+ SourceLocation RBracketLoc) {
+ return getSema().ActOnOMPArraySectionExpr(Base, LBracketLoc, LowerBound,
+ ColonLoc, Length, RBracketLoc);
+ }
+
/// \brief Build a new call expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -1921,7 +2079,8 @@ public:
return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
SS, TemplateKWLoc,
FirstQualifierInScope,
- R, ExplicitTemplateArgs);
+ R, ExplicitTemplateArgs,
+ /*S*/nullptr);
}
/// \brief Build a new binary operator expression.
@@ -1987,7 +2146,8 @@ public:
SS, SourceLocation(),
/*FirstQualifierInScope*/ nullptr,
NameInfo,
- /* TemplateArgs */ nullptr);
+ /* TemplateArgs */ nullptr,
+ /*S*/ nullptr);
}
/// \brief Build a new initializer list expression.
@@ -2435,7 +2595,7 @@ public:
TemplateArgs);
return getSema().BuildQualifiedDeclarationNameExpr(
- SS, NameInfo, IsAddressOfOperand, RecoveryTSI);
+ SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
}
/// \brief Build a new template-id expression.
@@ -2529,7 +2689,7 @@ public:
SS, TemplateKWLoc,
FirstQualifierInScope,
MemberNameInfo,
- TemplateArgs);
+ TemplateArgs, /*S*/nullptr);
}
/// \brief Build a new member reference expression.
@@ -2551,7 +2711,7 @@ public:
OperatorLoc, IsArrow,
SS, TemplateKWLoc,
FirstQualifierInScope,
- R, TemplateArgs);
+ R, TemplateArgs, /*S*/nullptr);
}
/// \brief Build a new noexcept expression.
@@ -2563,18 +2723,14 @@ public:
}
/// \brief Build a new expression to compute the length of a parameter pack.
- ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc,
+ NamedDecl *Pack,
SourceLocation PackLoc,
SourceLocation RParenLoc,
- Optional<unsigned> Length) {
- if (Length)
- return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
- OperatorLoc, Pack, PackLoc,
- RParenLoc, *Length);
-
- return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
- OperatorLoc, Pack, PackLoc,
- RParenLoc);
+ Optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs) {
+ return SizeOfPackExpr::Create(SemaRef.Context, OperatorLoc, Pack, PackLoc,
+ RParenLoc, Length, PartialArgs);
}
/// \brief Build a new Objective-C boxed expression.
@@ -2608,9 +2764,8 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCDictionaryLiteral(SourceRange Range,
- ObjCDictionaryElement *Elements,
- unsigned NumElements) {
- return getSema().BuildObjCDictionaryLiteral(Range, Elements, NumElements);
+ MutableArrayRef<ObjCDictionaryElement> Elements) {
+ return getSema().BuildObjCDictionaryLiteral(Range, Elements);
}
/// \brief Build a new Objective-C \@encode expression.
@@ -2657,20 +2812,18 @@ public:
ExprResult RebuildObjCMessageExpr(SourceLocation SuperLoc,
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
+ QualType SuperType,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
- ObjCInterfaceDecl *Class = Method->getClassInterface();
- QualType ReceiverTy = SemaRef.Context.getObjCInterfaceType(Class);
-
return Method->isInstanceMethod() ? SemaRef.BuildInstanceMessage(nullptr,
- ReceiverTy,
+ SuperType,
SuperLoc,
Sel, Method, LBracLoc, SelectorLocs,
RBracLoc, Args)
: SemaRef.BuildClassMessage(nullptr,
- ReceiverTy,
+ SuperType,
SuperLoc,
Sel, Method, LBracLoc, SelectorLocs,
RBracLoc, Args);
@@ -2693,7 +2846,8 @@ public:
SS, SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
NameInfo,
- /*TemplateArgs=*/nullptr);
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
}
/// \brief Build a new Objective-C property reference expression.
@@ -2711,7 +2865,8 @@ public:
SS, SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
NameInfo,
- /*TemplateArgs=*/nullptr);
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
}
/// \brief Build a new Objective-C property reference expression.
@@ -2743,7 +2898,8 @@ public:
SS, SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
NameInfo,
- /*TemplateArgs=*/nullptr);
+ /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
}
/// \brief Build a new shuffle vector expression.
@@ -3043,7 +3199,7 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
}
template<typename Derived>
-bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
+bool TreeTransform<Derived>::TransformExprs(Expr *const *Inputs,
unsigned NumInputs,
bool IsCall,
SmallVectorImpl<Expr *> &Outputs,
@@ -3458,7 +3614,7 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
template<typename Derived>
bool TreeTransform<Derived>::TransformTemplateArgument(
const TemplateArgumentLoc &Input,
- TemplateArgumentLoc &Output) {
+ TemplateArgumentLoc &Output, bool Uneval) {
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -3506,8 +3662,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
case TemplateArgument::Expression: {
// Template argument expressions are constant expressions.
- EnterExpressionEvaluationContext Unevaluated(getSema(),
- Sema::ConstantEvaluated);
+ EnterExpressionEvaluationContext Unevaluated(
+ getSema(), Uneval ? Sema::Unevaluated : Sema::ConstantEvaluated);
Expr *InputExpr = Input.getSourceExpression();
if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
@@ -3585,9 +3741,9 @@ public:
template<typename Derived>
template<typename InputIterator>
-bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
- InputIterator Last,
- TemplateArgumentListInfo &Outputs) {
+bool TreeTransform<Derived>::TransformTemplateArguments(
+ InputIterator First, InputIterator Last, TemplateArgumentListInfo &Outputs,
+ bool Uneval) {
for (; First != Last; ++First) {
TemplateArgumentLoc Out;
TemplateArgumentLoc In = *First;
@@ -3605,7 +3761,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
In.getArgument().pack_begin()),
PackLocIterator(*this,
In.getArgument().pack_end()),
- Outputs))
+ Outputs, Uneval))
return true;
continue;
@@ -3643,7 +3799,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
// expansion.
TemplateArgumentLoc OutPattern;
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
- if (getDerived().TransformTemplateArgument(Pattern, OutPattern))
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern, Uneval))
return true;
Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
@@ -3660,7 +3816,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
- if (getDerived().TransformTemplateArgument(Pattern, Out))
+ if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
return true;
if (Out.getArgument().containsUnexpandedParameterPack()) {
@@ -3678,7 +3834,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
if (RetainExpansion) {
ForgetPartiallySubstitutedPackRAII Forget(getDerived());
- if (getDerived().TransformTemplateArgument(Pattern, Out))
+ if (getDerived().TransformTemplateArgument(Pattern, Out, Uneval))
return true;
Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
@@ -3693,7 +3849,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
}
// The simple case:
- if (getDerived().TransformTemplateArgument(In, Out))
+ if (getDerived().TransformTemplateArgument(In, Out, Uneval))
return true;
Outputs.addArgument(Out);
@@ -3810,7 +3966,7 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
Qs.removeObjCLifetime();
Deduced = SemaRef.Context.getQualifiedType(Deduced.getUnqualifiedType(),
Qs);
- Result = SemaRef.Context.getAutoType(Deduced, AutoTy->isDecltypeAuto(),
+ Result = SemaRef.Context.getAutoType(Deduced, AutoTy->getKeyword(),
AutoTy->isDependentType());
TLB.TypeWasModifiedSafely(Result);
} else {
@@ -4700,9 +4856,7 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
- T->getNumParams() != ParamTypes.size() ||
- !std::equal(T->param_type_begin(), T->param_type_end(),
- ParamTypes.begin()) || EPIChanged) {
+ T->getParamTypes() != llvm::makeArrayRef(ParamTypes) || EPIChanged) {
Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes, EPI);
if (Result.isNull())
return QualType();
@@ -5015,7 +5169,7 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced ||
T->isDependentType()) {
- Result = getDerived().RebuildAutoType(NewDeduced, T->isDecltypeAuto());
+ Result = getDerived().RebuildAutoType(NewDeduced, T->getKeyword());
if (Result.isNull())
return QualType();
}
@@ -6119,6 +6273,11 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
if (Init.isInvalid())
return StmtError();
+  // In an OpenMP loop region, the loop control variable must be captured and
+  // be private. Perform the analysis of the first part (if any).
+ if (getSema().getLangOpts().OpenMP && Init.isUsable())
+ getSema().ActOnOpenMPLoopInitialization(S->getForLoc(), Init.get());
+
// Transform the condition
ExprResult Cond;
VarDecl *ConditionVar = nullptr;
@@ -6351,6 +6510,56 @@ TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
TransformedExprs, S->getEndLoc());
}
+// C++ Coroutines TS
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // The coroutine body should be re-formed by the caller if necessary.
+ return getDerived().TransformStmt(S->getBody());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCoreturnStmt(CoreturnStmt *S) {
+ ExprResult Result = getDerived().TransformInitializer(S->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return StmtError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoreturnStmt(S->getKeywordLoc(), Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
+ ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoawaitExpr(E->getKeywordLoc(), Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCoyieldExpr(CoyieldExpr *E) {
+ ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/false);
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Always rebuild; we don't know if this needs to be injected into a new
+ // context or if the promise type has changed.
+ return getDerived().RebuildCoyieldExpr(E->getKeywordLoc(), Result.get());
+}
+
+// Objective-C Statements.
+
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
@@ -6640,6 +6849,7 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
Inc.get() != S->getInc() ||
LoopVar.get() != S->getLoopVarStmt()) {
NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getCoawaitLoc(),
S->getColonLoc(), Range.get(),
BeginEnd.get(), Cond.get(),
Inc.get(), LoopVar.get(),
@@ -6656,6 +6866,7 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
// it now so we have a new statement to attach the body to.
if (Body.get() != S->getBody() && NewStmt.get() == S) {
NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getCoawaitLoc(),
S->getColonLoc(), Range.get(),
BeginEnd.get(), Cond.get(),
Inc.get(), LoopVar.get(),
@@ -6766,6 +6977,25 @@ TreeTransform<Derived>::TransformMSPropertyRefExpr(MSPropertyRefExpr *E) {
}
template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformMSPropertySubscriptExpr(
+ MSPropertySubscriptExpr *E) {
+ auto BaseRes = getDerived().TransformExpr(E->getBase());
+ if (BaseRes.isInvalid())
+ return ExprError();
+ auto IdxRes = getDerived().TransformExpr(E->getIdx());
+ if (IdxRes.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ BaseRes.get() == E->getBase() &&
+ IdxRes.get() == E->getIdx())
+ return E;
+
+ return getDerived().RebuildArraySubscriptExpr(
+ BaseRes.get(), SourceLocation(), IdxRes.get(), E->getRBracketLoc());
+}
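+// e.g. (illustrative MS extension): given
+//   struct S { __declspec(property(get=GetX)) int X[]; int GetX(int i); };
+// 's.X[i]' is an MSPropertySubscriptExpr, rebuilt above as a plain array
+// subscript so that property lookup is redone on the transformed operands.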
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) {
StmtResult TryBlock = getDerived().TransformCompoundStmt(S->getTryBlock());
if (TryBlock.isInvalid())
@@ -6844,10 +7074,7 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
}
}
StmtResult AssociatedStmt;
- if (D->hasAssociatedStmt()) {
- if (!D->getAssociatedStmt()) {
- return StmtError();
- }
+ if (D->hasAssociatedStmt() && D->getAssociatedStmt()) {
getDerived().getSema().ActOnOpenMPRegionStart(D->getDirectiveKind(),
/*CurScope=*/nullptr);
StmtResult Body;
@@ -7115,6 +7342,17 @@ TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) {
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetDataDirective(
+ OMPTargetDataDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_data, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) {
DeclarationNameInfo DirName;
@@ -7147,6 +7385,39 @@ TreeTransform<Derived>::TransformOMPCancelDirective(OMPCancelDirective *D) {
return Res;
}
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTaskLoopSimdDirective(
+ OMPTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop_simd, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
+ OMPDistributeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
@@ -7155,8 +7426,9 @@ OMPClause *TreeTransform<Derived>::TransformOMPIfClause(OMPIfClause *C) {
ExprResult Cond = getDerived().TransformExpr(C->getCondition());
if (Cond.isInvalid())
return nullptr;
- return getDerived().RebuildOMPIfClause(Cond.get(), C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPIfClause(
+ C->getNameModifier(), Cond.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getNameModifierLoc(), C->getColonLoc(), C->getLocEnd());
}
template <typename Derived>
@@ -7190,10 +7462,20 @@ TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPSimdlenClause(OMPSimdlenClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getSimdlen());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPSimdlenClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPCollapseClause(OMPCollapseClause *C) {
ExprResult E = getDerived().TransformExpr(C->getNumForLoops());
if (E.isInvalid())
- return 0;
+ return nullptr;
return getDerived().RebuildOMPCollapseClause(
E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
}
@@ -7221,15 +7503,23 @@ TreeTransform<Derived>::TransformOMPScheduleClause(OMPScheduleClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPScheduleClause(
+ C->getFirstScheduleModifier(), C->getSecondScheduleModifier(),
C->getScheduleKind(), E.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getFirstScheduleModifierLoc(), C->getSecondScheduleModifierLoc(),
C->getScheduleKindLoc(), C->getCommaLoc(), C->getLocEnd());
}
template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) {
- // No need to rebuild this clause, no template-dependent parameters.
- return C;
+ ExprResult E;
+ if (auto *Num = C->getNumForLoops()) {
+ E = getDerived().TransformExpr(Num);
+ if (E.isInvalid())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPOrderedClause(C->getLocStart(), C->getLocEnd(),
+ C->getLParenLoc(), E.get());
}
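+// e.g. (illustrative) '#pragma omp for ordered(2)': the parenthesized loop
+// count is an expression that may be template-dependent, which is why the
+// clause is now transformed instead of being reused verbatim.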
template <typename Derived>
@@ -7288,6 +7578,26 @@ TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPThreadsClause(OMPThreadsClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPSIMDClause(OMPSIMDClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
Vars.reserve(C->varlist_size());
@@ -7385,9 +7695,9 @@ TreeTransform<Derived>::TransformOMPLinearClause(OMPLinearClause *C) {
ExprResult Step = getDerived().TransformExpr(C->getStep());
if (Step.isInvalid())
return nullptr;
- return getDerived().RebuildOMPLinearClause(Vars, Step.get(), C->getLocStart(),
- C->getLParenLoc(),
- C->getColonLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPLinearClause(
+ Vars, Step.get(), C->getLocStart(), C->getLParenLoc(), C->getModifier(),
+ C->getModifierLoc(), C->getColonLoc(), C->getLocEnd());
}
template <typename Derived>
@@ -7469,6 +7779,91 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
}
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getDevice());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPDeviceClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPMapClause(
+ C->getMapTypeModifier(), C->getMapType(), C->getMapLoc(),
+ C->getColonLoc(), Vars, C->getLocStart(), C->getLParenLoc(),
+ C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getNumTeams());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNumTeamsClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getThreadLimit());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPThreadLimitClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPPriorityClause(OMPPriorityClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getPriority());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPPriorityClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getGrainsize());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPGrainsizeClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNumTasksClause(OMPNumTasksClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getNumTasks());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNumTasksClause(
+ E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPHintClause(OMPHintClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getHint());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPHintClause(E.get(), C->getLocStart(),
+ C->getLParenLoc(), C->getLocEnd());
+}
+
//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
@@ -7668,16 +8063,15 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
// template code that we don't care.
bool ExprChanged = false;
typedef Sema::OffsetOfComponent Component;
- typedef OffsetOfExpr::OffsetOfNode Node;
SmallVector<Component, 4> Components;
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
- const Node &ON = E->getComponent(I);
+ const OffsetOfNode &ON = E->getComponent(I);
Component Comp;
Comp.isBrackets = true;
Comp.LocStart = ON.getSourceRange().getBegin();
Comp.LocEnd = ON.getSourceRange().getEnd();
switch (ON.getKind()) {
- case Node::Array: {
+ case OffsetOfNode::Array: {
Expr *FromIndex = E->getIndexExpr(ON.getArrayExprIndex());
ExprResult Index = getDerived().TransformExpr(FromIndex);
if (Index.isInvalid())
@@ -7689,8 +8083,8 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
break;
}
- case Node::Field:
- case Node::Identifier:
+ case OffsetOfNode::Field:
+ case OffsetOfNode::Identifier:
Comp.isBrackets = false;
Comp.U.IdentInfo = ON.getFieldName();
if (!Comp.U.IdentInfo)
@@ -7698,7 +8092,7 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
break;
- case Node::Base:
+ case OffsetOfNode::Base:
// Will be recomputed during the rebuild.
continue;
}
@@ -7714,14 +8108,13 @@ TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
// Build a new offsetof expression.
return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type,
- Components.data(), Components.size(),
- E->getRParenLoc());
+ Components, E->getRParenLoc());
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
- assert(getDerived().AlreadyTransformed(E->getType()) &&
+ assert((!E->getSourceExpr() || getDerived().AlreadyTransformed(E->getType())) &&
"opaque value expression requires transformation");
return E;
}
@@ -7829,6 +8222,36 @@ TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
E->getRBracketLoc());
}
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ExprResult LowerBound;
+ if (E->getLowerBound()) {
+ LowerBound = getDerived().TransformExpr(E->getLowerBound());
+ if (LowerBound.isInvalid())
+ return ExprError();
+ }
+
+ ExprResult Length;
+ if (E->getLength()) {
+ Length = getDerived().TransformExpr(E->getLength());
+ if (Length.isInvalid())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
+ LowerBound.get() == E->getLowerBound() && Length.get() == E->getLength())
+ return E;
+
+ return getDerived().RebuildOMPArraySectionExpr(
+ Base.get(), E->getBase()->getLocEnd(), LowerBound.get(), E->getColonLoc(),
+ Length.get(), E->getRBracketLoc());
+}
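+// Illustrative OpenMP array section that this transform handles (assumed
+// usage): '#pragma omp target map(a[lb:len])' -- base 'a', lower bound 'lb',
+// length 'len'; either bound may be omitted or template-dependent.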
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
@@ -8995,9 +9418,20 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
// If we have neither explicit template arguments, nor the template keyword,
- // it's a normal declaration name.
- if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid())
+ // it's a normal declaration name or member reference.
+ if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid()) {
+ NamedDecl *D = R.getAsSingle<NamedDecl>();
+ // In a C++11 unevaluated context, an UnresolvedLookupExpr might refer to an
+ // instance member. In other contexts, BuildPossibleImplicitMemberExpr will
+ // give a good diagnostic.
+ if (D && D->isCXXInstanceMember()) {
+ return SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
+ /*TemplateArgs=*/nullptr,
+ /*Scope=*/nullptr);
+ }
+
return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
+ }
// If we have template arguments, rebuild them, then rebuild the
// templateid expression.
@@ -9404,9 +9838,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
VarDecl *OldVD = C->getCapturedVar();
QualType NewInitCaptureType =
- getSema().performLambdaInitCaptureInitialization(C->getLocation(),
- OldVD->getType()->isReferenceType(), OldVD->getIdentifier(),
- NewExprInit);
+ getSema().buildLambdaInitCaptureInitialization(
+ C->getLocation(), OldVD->getType()->isReferenceType(),
+ OldVD->getIdentifier(),
+ C->getCapturedVar()->getInitStyle() != VarDecl::CInit, NewExprInit);
NewExprInitResult = NewExprInit;
InitCaptureExprsAndTypes[C - E->capture_begin()] =
std::make_pair(NewExprInitResult, NewInitCaptureType);
@@ -9513,8 +9948,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
VarDecl *OldVD = C->getCapturedVar();
VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
- OldVD->getLocation(), InitExprTypePair.second,
- OldVD->getIdentifier(), Init.get());
+ OldVD->getLocation(), InitExprTypePair.second, OldVD->getIdentifier(),
+ OldVD->getInitStyle(), Init.get());
if (!NewVD)
Invalid = true;
else {
@@ -9886,36 +10321,86 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
if (!E->isValueDependent())
return E;
- // Note: None of the implementations of TryExpandParameterPacks can ever
- // produce a diagnostic when given only a single unexpanded parameter pack,
- // so
- UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
- bool ShouldExpand = false;
- bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
- if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
- Unexpanded,
- ShouldExpand, RetainExpansion,
- NumExpansions))
- return ExprError();
+ EnterExpressionEvaluationContext Unevaluated(getSema(), Sema::Unevaluated);
- if (RetainExpansion)
- return E;
+ ArrayRef<TemplateArgument> PackArgs;
+ TemplateArgument ArgStorage;
- NamedDecl *Pack = E->getPack();
- if (!ShouldExpand) {
- Pack = cast_or_null<NamedDecl>(getDerived().TransformDecl(E->getPackLoc(),
- Pack));
+ // Find the argument list to transform.
+ if (E->isPartiallySubstituted()) {
+ PackArgs = E->getPartialArguments();
+ } else if (E->isValueDependent()) {
+ UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ // If we need to expand the pack, build a template argument from it and
+ // expand that.
+ if (ShouldExpand) {
+ auto *Pack = E->getPack();
+ if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Pack)) {
+ ArgStorage = getSema().Context.getPackExpansionType(
+ getSema().Context.getTypeDeclType(TTPD), None);
+ } else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Pack)) {
+ ArgStorage = TemplateArgument(TemplateName(TTPD), None);
+ } else {
+ auto *VD = cast<ValueDecl>(Pack);
+ ExprResult DRE = getSema().BuildDeclRefExpr(VD, VD->getType(),
+ VK_RValue, E->getPackLoc());
+ if (DRE.isInvalid())
+ return ExprError();
+ ArgStorage = new (getSema().Context) PackExpansionExpr(
+ getSema().Context.DependentTy, DRE.get(), E->getPackLoc(), None);
+ }
+ PackArgs = ArgStorage;
+ }
+ }
+
+ // If we're not expanding the pack, just transform the decl.
+ if (!PackArgs.size()) {
+ auto *Pack = cast_or_null<NamedDecl>(
+ getDerived().TransformDecl(E->getPackLoc(), E->getPack()));
if (!Pack)
return ExprError();
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
+ E->getPackLoc(),
+ E->getRParenLoc(), None, None);
}
+ TemplateArgumentListInfo TransformedPackArgs(E->getPackLoc(),
+ E->getPackLoc());
+ {
+ TemporaryBase Rebase(*this, E->getPackLoc(), getBaseEntity());
+ typedef TemplateArgumentLocInventIterator<
+ Derived, const TemplateArgument*> PackLocIterator;
+ if (TransformTemplateArguments(PackLocIterator(*this, PackArgs.begin()),
+ PackLocIterator(*this, PackArgs.end()),
+ TransformedPackArgs, /*Uneval*/true))
+ return ExprError();
+ }
- // We now know the length of the parameter pack, so build a new expression
- // that stores that length.
- return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
+ SmallVector<TemplateArgument, 8> Args;
+ bool PartialSubstitution = false;
+ for (auto &Loc : TransformedPackArgs.arguments()) {
+ Args.push_back(Loc.getArgument());
+ if (Loc.getArgument().isPackExpansion())
+ PartialSubstitution = true;
+ }
+
+ if (PartialSubstitution)
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
+ E->getPackLoc(),
+ E->getRParenLoc(), None, Args);
+
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
E->getPackLoc(), E->getRParenLoc(),
- NumExpansions);
+ Args.size(), None);
}
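
The rewritten TransformSizeOfPackExpr above distinguishes three outcomes: a fully expanded pack folds to a constant (the Args.size() path), a still-unexpanded pack is rebuilt as a dependent expression, and a partially substituted pack carries its known arguments through the Args list. A minimal illustration of the folded and still-dependent cases (hypothetical user code, not part of the patch):

    #include <cstddef>

    template <typename... Ts>
    struct Outer {
      // Once Outer<int, char> is instantiated, Ts is fully known, so
      // sizeof...(Ts) folds to the constant 2.
      static constexpr std::size_t N = sizeof...(Ts);

      template <typename... Us>
      static constexpr std::size_t both() {
        // While instantiating Outer<int, char>, Us is still an unexpanded
        // pack; sizeof...(Us) must be rebuilt as a dependent expression
        // rather than folded.
        return sizeof...(Ts) + sizeof...(Us);
      }
    };

    static_assert(Outer<int, char>::N == 2, "folded at instantiation");
    static_assert(Outer<int, char>::both<float, double, long>() == 5, "");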
template<typename Derived>
@@ -10242,8 +10727,7 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
return SemaRef.MaybeBindToTemporary(E);
return getDerived().RebuildObjCDictionaryLiteral(E->getSourceRange(),
- Elements.data(),
- Elements.size());
+ Elements);
}
template<typename Derived>
@@ -10337,6 +10821,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
return getDerived().RebuildObjCMessageExpr(E->getSuperLoc(),
E->getSelector(),
SelLocs,
+ E->getReceiverType(),
E->getMethodDecl(),
E->getLeftLoc(),
Args,
@@ -11075,7 +11560,8 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
SS, TemplateKWLoc,
/*FIXME: FirstQualifier*/ nullptr,
NameInfo,
- /*TemplateArgs*/ nullptr);
+ /*TemplateArgs*/ nullptr,
+ /*S*/nullptr);
}
template<typename Derived>
@@ -11114,4 +11600,4 @@ TreeTransform<Derived>::TransformCapturedStmt(CapturedStmt *S) {
} // end namespace clang
-#endif
+#endif // LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index b1bf4a6bff8b..2b78d745864a 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -27,52 +27,166 @@ serialization::TypeIdx
serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
unsigned ID = 0;
switch (BT->getKind()) {
- case BuiltinType::Void: ID = PREDEF_TYPE_VOID_ID; break;
- case BuiltinType::Bool: ID = PREDEF_TYPE_BOOL_ID; break;
- case BuiltinType::Char_U: ID = PREDEF_TYPE_CHAR_U_ID; break;
- case BuiltinType::UChar: ID = PREDEF_TYPE_UCHAR_ID; break;
- case BuiltinType::UShort: ID = PREDEF_TYPE_USHORT_ID; break;
- case BuiltinType::UInt: ID = PREDEF_TYPE_UINT_ID; break;
- case BuiltinType::ULong: ID = PREDEF_TYPE_ULONG_ID; break;
- case BuiltinType::ULongLong: ID = PREDEF_TYPE_ULONGLONG_ID; break;
- case BuiltinType::UInt128: ID = PREDEF_TYPE_UINT128_ID; break;
- case BuiltinType::Char_S: ID = PREDEF_TYPE_CHAR_S_ID; break;
- case BuiltinType::SChar: ID = PREDEF_TYPE_SCHAR_ID; break;
+ case BuiltinType::Void:
+ ID = PREDEF_TYPE_VOID_ID;
+ break;
+ case BuiltinType::Bool:
+ ID = PREDEF_TYPE_BOOL_ID;
+ break;
+ case BuiltinType::Char_U:
+ ID = PREDEF_TYPE_CHAR_U_ID;
+ break;
+ case BuiltinType::UChar:
+ ID = PREDEF_TYPE_UCHAR_ID;
+ break;
+ case BuiltinType::UShort:
+ ID = PREDEF_TYPE_USHORT_ID;
+ break;
+ case BuiltinType::UInt:
+ ID = PREDEF_TYPE_UINT_ID;
+ break;
+ case BuiltinType::ULong:
+ ID = PREDEF_TYPE_ULONG_ID;
+ break;
+ case BuiltinType::ULongLong:
+ ID = PREDEF_TYPE_ULONGLONG_ID;
+ break;
+ case BuiltinType::UInt128:
+ ID = PREDEF_TYPE_UINT128_ID;
+ break;
+ case BuiltinType::Char_S:
+ ID = PREDEF_TYPE_CHAR_S_ID;
+ break;
+ case BuiltinType::SChar:
+ ID = PREDEF_TYPE_SCHAR_ID;
+ break;
case BuiltinType::WChar_S:
- case BuiltinType::WChar_U: ID = PREDEF_TYPE_WCHAR_ID; break;
- case BuiltinType::Short: ID = PREDEF_TYPE_SHORT_ID; break;
- case BuiltinType::Int: ID = PREDEF_TYPE_INT_ID; break;
- case BuiltinType::Long: ID = PREDEF_TYPE_LONG_ID; break;
- case BuiltinType::LongLong: ID = PREDEF_TYPE_LONGLONG_ID; break;
- case BuiltinType::Int128: ID = PREDEF_TYPE_INT128_ID; break;
- case BuiltinType::Half: ID = PREDEF_TYPE_HALF_ID; break;
- case BuiltinType::Float: ID = PREDEF_TYPE_FLOAT_ID; break;
- case BuiltinType::Double: ID = PREDEF_TYPE_DOUBLE_ID; break;
- case BuiltinType::LongDouble: ID = PREDEF_TYPE_LONGDOUBLE_ID; break;
- case BuiltinType::NullPtr: ID = PREDEF_TYPE_NULLPTR_ID; break;
- case BuiltinType::Char16: ID = PREDEF_TYPE_CHAR16_ID; break;
- case BuiltinType::Char32: ID = PREDEF_TYPE_CHAR32_ID; break;
- case BuiltinType::Overload: ID = PREDEF_TYPE_OVERLOAD_ID; break;
- case BuiltinType::BoundMember:ID = PREDEF_TYPE_BOUND_MEMBER; break;
- case BuiltinType::PseudoObject:ID = PREDEF_TYPE_PSEUDO_OBJECT;break;
- case BuiltinType::Dependent: ID = PREDEF_TYPE_DEPENDENT_ID; break;
- case BuiltinType::UnknownAny: ID = PREDEF_TYPE_UNKNOWN_ANY; break;
+ case BuiltinType::WChar_U:
+ ID = PREDEF_TYPE_WCHAR_ID;
+ break;
+ case BuiltinType::Short:
+ ID = PREDEF_TYPE_SHORT_ID;
+ break;
+ case BuiltinType::Int:
+ ID = PREDEF_TYPE_INT_ID;
+ break;
+ case BuiltinType::Long:
+ ID = PREDEF_TYPE_LONG_ID;
+ break;
+ case BuiltinType::LongLong:
+ ID = PREDEF_TYPE_LONGLONG_ID;
+ break;
+ case BuiltinType::Int128:
+ ID = PREDEF_TYPE_INT128_ID;
+ break;
+ case BuiltinType::Half:
+ ID = PREDEF_TYPE_HALF_ID;
+ break;
+ case BuiltinType::Float:
+ ID = PREDEF_TYPE_FLOAT_ID;
+ break;
+ case BuiltinType::Double:
+ ID = PREDEF_TYPE_DOUBLE_ID;
+ break;
+ case BuiltinType::LongDouble:
+ ID = PREDEF_TYPE_LONGDOUBLE_ID;
+ break;
+ case BuiltinType::NullPtr:
+ ID = PREDEF_TYPE_NULLPTR_ID;
+ break;
+ case BuiltinType::Char16:
+ ID = PREDEF_TYPE_CHAR16_ID;
+ break;
+ case BuiltinType::Char32:
+ ID = PREDEF_TYPE_CHAR32_ID;
+ break;
+ case BuiltinType::Overload:
+ ID = PREDEF_TYPE_OVERLOAD_ID;
+ break;
+ case BuiltinType::BoundMember:
+ ID = PREDEF_TYPE_BOUND_MEMBER;
+ break;
+ case BuiltinType::PseudoObject:
+ ID = PREDEF_TYPE_PSEUDO_OBJECT;
+ break;
+ case BuiltinType::Dependent:
+ ID = PREDEF_TYPE_DEPENDENT_ID;
+ break;
+ case BuiltinType::UnknownAny:
+ ID = PREDEF_TYPE_UNKNOWN_ANY;
+ break;
case BuiltinType::ARCUnbridgedCast:
- ID = PREDEF_TYPE_ARC_UNBRIDGED_CAST; break;
- case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break;
- case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break;
- case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break;
- case BuiltinType::OCLImage1d: ID = PREDEF_TYPE_IMAGE1D_ID; break;
- case BuiltinType::OCLImage1dArray: ID = PREDEF_TYPE_IMAGE1D_ARR_ID; break;
- case BuiltinType::OCLImage1dBuffer: ID = PREDEF_TYPE_IMAGE1D_BUFF_ID; break;
- case BuiltinType::OCLImage2d: ID = PREDEF_TYPE_IMAGE2D_ID; break;
- case BuiltinType::OCLImage2dArray: ID = PREDEF_TYPE_IMAGE2D_ARR_ID; break;
- case BuiltinType::OCLImage3d: ID = PREDEF_TYPE_IMAGE3D_ID; break;
- case BuiltinType::OCLSampler: ID = PREDEF_TYPE_SAMPLER_ID; break;
- case BuiltinType::OCLEvent: ID = PREDEF_TYPE_EVENT_ID; break;
+ ID = PREDEF_TYPE_ARC_UNBRIDGED_CAST;
+ break;
+ case BuiltinType::ObjCId:
+ ID = PREDEF_TYPE_OBJC_ID;
+ break;
+ case BuiltinType::ObjCClass:
+ ID = PREDEF_TYPE_OBJC_CLASS;
+ break;
+ case BuiltinType::ObjCSel:
+ ID = PREDEF_TYPE_OBJC_SEL;
+ break;
+ case BuiltinType::OCLImage1d:
+ ID = PREDEF_TYPE_IMAGE1D_ID;
+ break;
+ case BuiltinType::OCLImage1dArray:
+ ID = PREDEF_TYPE_IMAGE1D_ARR_ID;
+ break;
+ case BuiltinType::OCLImage1dBuffer:
+ ID = PREDEF_TYPE_IMAGE1D_BUFF_ID;
+ break;
+ case BuiltinType::OCLImage2d:
+ ID = PREDEF_TYPE_IMAGE2D_ID;
+ break;
+ case BuiltinType::OCLImage2dArray:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_ID;
+ break;
+ case BuiltinType::OCLImage2dDepth:
+ ID = PREDEF_TYPE_IMAGE2D_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayDepth:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dMSAA:
+ ID = PREDEF_TYPE_IMAGE2D_MSAA_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayMSAA:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_MSAA_ID;
+ break;
+ case BuiltinType::OCLImage2dMSAADepth:
+ ID = PREDEF_TYPE_IMAGE2D_MSAA_DEP_ID;
+ break;
+ case BuiltinType::OCLImage2dArrayMSAADepth:
+ ID = PREDEF_TYPE_IMAGE2D_ARR_MSAA_DEPTH_ID;
+ break;
+ case BuiltinType::OCLImage3d:
+ ID = PREDEF_TYPE_IMAGE3D_ID;
+ break;
+ case BuiltinType::OCLSampler:
+ ID = PREDEF_TYPE_SAMPLER_ID;
+ break;
+ case BuiltinType::OCLEvent:
+ ID = PREDEF_TYPE_EVENT_ID;
+ break;
+ case BuiltinType::OCLClkEvent:
+ ID = PREDEF_TYPE_CLK_EVENT_ID;
+ break;
+ case BuiltinType::OCLQueue:
+ ID = PREDEF_TYPE_QUEUE_ID;
+ break;
+ case BuiltinType::OCLNDRange:
+ ID = PREDEF_TYPE_NDRANGE_ID;
+ break;
+ case BuiltinType::OCLReserveID:
+ ID = PREDEF_TYPE_RESERVE_ID_ID;
+ break;
case BuiltinType::BuiltinFn:
- ID = PREDEF_TYPE_BUILTIN_FN; break;
-
+ ID = PREDEF_TYPE_BUILTIN_FN;
+ break;
+ case BuiltinType::OMPArraySection:
+ ID = PREDEF_TYPE_OMP_ARRAY_SECTION;
+ break;
}
return TypeIdx(ID);
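
Note that TypeIdxFromBuiltin's switch has no default case, so a newly added BuiltinType kind warns under -Wswitch (and so fails -Werror builds) until it is given a predefined type ID; that is what obliges the new OpenCL kinds and OMPArraySection to be handled in this same patch. The pattern in miniature:

    enum class Kind { A, B };
    enum PredefID { PREDEF_A = 1, PREDEF_B = 2 };

    unsigned idxFor(Kind K) {
      unsigned ID = 0;
      switch (K) { // no default: -Wswitch flags any newly added Kind
      case Kind::A: ID = PREDEF_A; break;
      case Kind::B: ID = PREDEF_B; break;
      }
      return ID;
    }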
@@ -215,6 +329,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::ClassScopeFunctionSpecialization:
case Decl::Import:
case Decl::OMPThreadPrivate:
+ case Decl::BuiltinTemplate:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index f21e8a7ea030..e59bc891f9b9 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -62,8 +62,6 @@ TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
if (T == Context.AutoRRefDeductTy)
return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
- if (T == Context.VaListTagTy)
- return TypeIdx(PREDEF_TYPE_VA_LIST_TAG).asTypeID(FastQuals);
return IdxForType(T).asTypeID(FastQuals);
}
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 9fbf55bf15d1..7d88a31f44a7 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
@@ -138,23 +139,33 @@ bool ChainedASTReaderListener::needsSystemInputFileVisitation() {
return First->needsSystemInputFileVisitation() ||
Second->needsSystemInputFileVisitation();
}
-void ChainedASTReaderListener::visitModuleFile(StringRef Filename) {
- First->visitModuleFile(Filename);
- Second->visitModuleFile(Filename);
+void ChainedASTReaderListener::visitModuleFile(StringRef Filename,
+ ModuleKind Kind) {
+ First->visitModuleFile(Filename, Kind);
+ Second->visitModuleFile(Filename, Kind);
}
bool ChainedASTReaderListener::visitInputFile(StringRef Filename,
bool isSystem,
- bool isOverridden) {
+ bool isOverridden,
+ bool isExplicitModule) {
bool Continue = false;
if (First->needsInputFileVisitation() &&
(!isSystem || First->needsSystemInputFileVisitation()))
- Continue |= First->visitInputFile(Filename, isSystem, isOverridden);
+ Continue |= First->visitInputFile(Filename, isSystem, isOverridden,
+ isExplicitModule);
if (Second->needsInputFileVisitation() &&
(!isSystem || Second->needsSystemInputFileVisitation()))
- Continue |= Second->visitInputFile(Filename, isSystem, isOverridden);
+ Continue |= Second->visitInputFile(Filename, isSystem, isOverridden,
+ isExplicitModule);
return Continue;
}
+void ChainedASTReaderListener::readModuleFileExtension(
+ const ModuleFileExtensionMetadata &Metadata) {
+ First->readModuleFileExtension(Metadata);
+ Second->readModuleFileExtension(Metadata);
+}
+
//===----------------------------------------------------------------------===//
// PCH validator implementation
//===----------------------------------------------------------------------===//
@@ -735,13 +746,26 @@ ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
}
/// \brief Whether the given identifier is "interesting".
-static bool isInterestingIdentifier(IdentifierInfo &II) {
- return II.isPoisoned() ||
- II.isExtensionToken() ||
- II.getObjCOrBuiltinID() ||
+static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
+ bool IsModule) {
+ return II.hadMacroDefinition() ||
+ II.isPoisoned() ||
+ (IsModule ? II.hasRevertedBuiltin() : II.getObjCOrBuiltinID()) ||
II.hasRevertedTokenIDToIdentifier() ||
- II.hadMacroDefinition() ||
- II.getFETokenInfo<void>();
+ (!(IsModule && Reader.getContext().getLangOpts().CPlusPlus) &&
+ II.getFETokenInfo<void>());
+}
+
+static bool readBit(unsigned &Bits) {
+ bool Value = Bits & 0x1;
+ Bits >>= 1;
+ return Value;
+}
+
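
The readBit helper just above replaces the repeated mask-and-shift pairs in ReadData; flags come out LSB-first, so a writer must push them in the reverse order. A standalone round trip (flag order copied from ReadData below; the encode side is a sketch, not actual ASTWriter code):

    #include <cassert>

    static bool readBit(unsigned &Bits) {
      bool Value = Bits & 0x1;
      Bits >>= 1;
      return Value;
    }

    int main() {
      // Writer side: the first flag pushed ends up in the highest bit.
      unsigned Bits = 0;
      Bits = (Bits << 1) | 1; // HadMacroDefinition
      Bits = (Bits << 1) | 0; // ExtensionToken
      Bits = (Bits << 1) | 1; // Poisoned
      Bits = (Bits << 1) | 0; // HasRevertedBuiltin
      Bits = (Bits << 1) | 0; // HasRevertedTokenIDToIdentifier
      Bits = (Bits << 1) | 1; // CPlusPlusOperatorKeyword

      // Reader side, in the same order ReadData uses.
      assert(readBit(Bits) == true);  // CPlusPlusOperatorKeyword
      assert(readBit(Bits) == false); // HasRevertedTokenIDToIdentifier
      assert(readBit(Bits) == false); // HasRevertedBuiltin
      assert(readBit(Bits) == true);  // Poisoned
      assert(readBit(Bits) == false); // ExtensionToken
      assert(readBit(Bits) == true);  // HadMacroDefinition
      assert(Bits == 0 && "Extra bits in the identifier?");
    }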
+IdentID ASTIdentifierLookupTrait::ReadIdentifierID(const unsigned char *d) {
+ using namespace llvm::support;
+ unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ return Reader.getGlobalIdentifierID(F, RawID >> 1);
}
IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
@@ -754,62 +778,52 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// Wipe out the "is interesting" bit.
RawID = RawID >> 1;
+ // Build the IdentifierInfo and link the identifier ID with it.
+ IdentifierInfo *II = KnownII;
+ if (!II) {
+ II = &Reader.getIdentifierTable().getOwn(k);
+ KnownII = II;
+ }
+ if (!II->isFromAST()) {
+ II->setIsFromAST();
+ bool IsModule = Reader.PP.getCurrentModule() != nullptr;
+ if (isInterestingIdentifier(Reader, *II, IsModule))
+ II->setChangedSinceDeserialization();
+ }
+ Reader.markIdentifierUpToDate(II);
+
IdentID ID = Reader.getGlobalIdentifierID(F, RawID);
if (!IsInteresting) {
- // For uninteresting identifiers, just build the IdentifierInfo
- // and associate it with the persistent ID.
- IdentifierInfo *II = KnownII;
- if (!II) {
- II = &Reader.getIdentifierTable().getOwn(k);
- KnownII = II;
- }
+ // For uninteresting identifiers, there's nothing else to do. Just notify
+ // the reader that we've finished loading this identifier.
Reader.SetIdentifierInfo(ID, II);
- if (!II->isFromAST()) {
- bool WasInteresting = isInterestingIdentifier(*II);
- II->setIsFromAST();
- if (WasInteresting)
- II->setChangedSinceDeserialization();
- }
- Reader.markIdentifierUpToDate(II);
return II;
}
unsigned ObjCOrBuiltinID = endian::readNext<uint16_t, little, unaligned>(d);
unsigned Bits = endian::readNext<uint16_t, little, unaligned>(d);
- bool CPlusPlusOperatorKeyword = Bits & 0x01;
- Bits >>= 1;
- bool HasRevertedTokenIDToIdentifier = Bits & 0x01;
- Bits >>= 1;
- bool Poisoned = Bits & 0x01;
- Bits >>= 1;
- bool ExtensionToken = Bits & 0x01;
- Bits >>= 1;
- bool hadMacroDefinition = Bits & 0x01;
- Bits >>= 1;
+ bool CPlusPlusOperatorKeyword = readBit(Bits);
+ bool HasRevertedTokenIDToIdentifier = readBit(Bits);
+ bool HasRevertedBuiltin = readBit(Bits);
+ bool Poisoned = readBit(Bits);
+ bool ExtensionToken = readBit(Bits);
+ bool HadMacroDefinition = readBit(Bits);
assert(Bits == 0 && "Extra bits in the identifier?");
DataLen -= 8;
- // Build the IdentifierInfo itself and link the identifier ID with
- // the new IdentifierInfo.
- IdentifierInfo *II = KnownII;
- if (!II) {
- II = &Reader.getIdentifierTable().getOwn(StringRef(k));
- KnownII = II;
- }
- Reader.markIdentifierUpToDate(II);
- if (!II->isFromAST()) {
- bool WasInteresting = isInterestingIdentifier(*II);
- II->setIsFromAST();
- if (WasInteresting)
- II->setChangedSinceDeserialization();
- }
-
// Set or check the various bits in the IdentifierInfo structure.
// Token IDs are read-only.
if (HasRevertedTokenIDToIdentifier && II->getTokenID() != tok::identifier)
- II->RevertTokenIDToIdentifier();
- II->setObjCOrBuiltinID(ObjCOrBuiltinID);
+ II->revertTokenIDToIdentifier();
+ if (!F.isModule())
+ II->setObjCOrBuiltinID(ObjCOrBuiltinID);
+ else if (HasRevertedBuiltin && II->getBuiltinID()) {
+ II->revertBuiltin();
+ assert((II->hasRevertedBuiltin() ||
+ II->getObjCOrBuiltinID() == ObjCOrBuiltinID) &&
+ "Incorrect ObjC keyword or builtin ID");
+ }
assert(II->isExtensionToken() == ExtensionToken &&
"Incorrect extension token flag");
(void)ExtensionToken;
@@ -821,7 +835,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// If this identifier is a macro, deserialize the macro
// definition.
- if (hadMacroDefinition) {
+ if (HadMacroDefinition) {
uint32_t MacroDirectivesOffset =
endian::readNext<uint32_t, little, unaligned>(d);
DataLen -= 4;
@@ -844,168 +858,187 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
return II;
}
-unsigned
-ASTDeclContextNameLookupTrait::ComputeHash(const DeclNameKey &Key) {
- llvm::FoldingSetNodeID ID;
- ID.AddInteger(Key.Kind);
-
- switch (Key.Kind) {
+DeclarationNameKey::DeclarationNameKey(DeclarationName Name)
+ : Kind(Name.getNameKind()) {
+ switch (Kind) {
case DeclarationName::Identifier:
- case DeclarationName::CXXLiteralOperatorName:
- ID.AddString(((IdentifierInfo*)Key.Data)->getName());
+ Data = (uint64_t)Name.getAsIdentifierInfo();
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- ID.AddInteger(serialization::ComputeHash(Selector(Key.Data)));
+ Data = (uint64_t)Name.getObjCSelector().getAsOpaquePtr();
break;
case DeclarationName::CXXOperatorName:
- ID.AddInteger((OverloadedOperatorKind)Key.Data);
+ Data = Name.getCXXOverloadedOperator();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Data = (uint64_t)Name.getCXXLiteralIdentifier();
break;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXUsingDirective:
+ Data = 0;
break;
}
-
- return ID.ComputeHash();
}
-ASTDeclContextNameLookupTrait::internal_key_type
-ASTDeclContextNameLookupTrait::GetInternalKey(
- const external_key_type& Name) {
- DeclNameKey Key;
- Key.Kind = Name.getNameKind();
- switch (Name.getNameKind()) {
+unsigned DeclarationNameKey::getHash() const {
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+
+ switch (Kind) {
case DeclarationName::Identifier:
- Key.Data = (uint64_t)Name.getAsIdentifierInfo();
+ case DeclarationName::CXXLiteralOperatorName:
+ ID.AddString(((IdentifierInfo*)Data)->getName());
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- Key.Data = (uint64_t)Name.getObjCSelector().getAsOpaquePtr();
+ ID.AddInteger(serialization::ComputeHash(Selector(Data)));
break;
case DeclarationName::CXXOperatorName:
- Key.Data = Name.getCXXOverloadedOperator();
- break;
- case DeclarationName::CXXLiteralOperatorName:
- Key.Data = (uint64_t)Name.getCXXLiteralIdentifier();
+ ID.AddInteger((OverloadedOperatorKind)Data);
break;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXUsingDirective:
- Key.Data = 0;
break;
}
- return Key;
+ return ID.ComputeHash();
+}
+
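
DeclarationNameKey boils every declaration name down to a (kind, opaque 64-bit payload) pair; note that getHash hashes identifier text via AddString rather than the IdentifierInfo pointer, so the hash is stable across processes and safe to persist. A reduced model of the key shape (illustrative types only, not the real class):

    #include <cstdint>

    enum class NameKind : uint8_t { Identifier, Selector, OperatorName, Special };

    struct NameKey {
      NameKind Kind;
      uint64_t Data; // IdentifierInfo*, selector bits, operator kind, or 0

      // Equality may compare raw payloads (identifiers are uniqued in
      // memory), but an on-disk hash must not depend on pointer values --
      // hence AddString above for the identifier-like kinds.
      bool operator==(const NameKey &RHS) const {
        return Kind == RHS.Kind && Data == RHS.Data;
      }
    };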
+ModuleFile *
+ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) {
+ using namespace llvm::support;
+ uint32_t ModuleFileID = endian::readNext<uint32_t, little, unaligned>(d);
+ return Reader.getLocalModuleFile(F, ModuleFileID);
}
std::pair<unsigned, unsigned>
-ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
+ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char *&d) {
using namespace llvm::support;
unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
return std::make_pair(KeyLen, DataLen);
}
-ASTDeclContextNameLookupTrait::internal_key_type
-ASTDeclContextNameLookupTrait::ReadKey(const unsigned char* d, unsigned) {
+ASTDeclContextNameLookupTrait::internal_key_type
+ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) {
using namespace llvm::support;
- DeclNameKey Key;
- Key.Kind = (DeclarationName::NameKind)*d++;
- switch (Key.Kind) {
+ auto Kind = (DeclarationName::NameKind)*d++;
+ uint64_t Data;
+ switch (Kind) {
case DeclarationName::Identifier:
- Key.Data = (uint64_t)Reader.getLocalIdentifier(
+ Data = (uint64_t)Reader.getLocalIdentifier(
F, endian::readNext<uint32_t, little, unaligned>(d));
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- Key.Data =
+ Data =
(uint64_t)Reader.getLocalSelector(
F, endian::readNext<uint32_t, little, unaligned>(
d)).getAsOpaquePtr();
break;
case DeclarationName::CXXOperatorName:
- Key.Data = *d++; // OverloadedOperatorKind
+ Data = *d++; // OverloadedOperatorKind
break;
case DeclarationName::CXXLiteralOperatorName:
- Key.Data = (uint64_t)Reader.getLocalIdentifier(
+ Data = (uint64_t)Reader.getLocalIdentifier(
F, endian::readNext<uint32_t, little, unaligned>(d));
break;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXUsingDirective:
- Key.Data = 0;
+ Data = 0;
break;
}
- return Key;
+ return DeclarationNameKey(Kind, Data);
}
-ASTDeclContextNameLookupTrait::data_type
-ASTDeclContextNameLookupTrait::ReadData(internal_key_type,
- const unsigned char* d,
- unsigned DataLen) {
+void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type,
+ const unsigned char *d,
+ unsigned DataLen,
+ data_type_builder &Val) {
using namespace llvm::support;
- unsigned NumDecls = endian::readNext<uint16_t, little, unaligned>(d);
- LE32DeclID *Start = reinterpret_cast<LE32DeclID *>(
- const_cast<unsigned char *>(d));
- return std::make_pair(Start, Start + NumDecls);
+ for (unsigned NumDecls = DataLen / 4; NumDecls; --NumDecls) {
+ uint32_t LocalID = endian::readNext<uint32_t, little, unaligned>(d);
+ Val.insert(Reader.getGlobalDeclID(F, LocalID));
+ }
}
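
ReadDataInto no longer reads a leading element count: the record's data length alone determines how many little-endian uint32 DeclIDs follow (DataLen / 4). A self-contained sketch of that convention, where readLE32 stands in for llvm::support::endian::readNext and, for brevity, assumes a little-endian host:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    static uint32_t readLE32(const unsigned char *&d) {
      uint32_t v;
      std::memcpy(&v, d, sizeof(v)); // unaligned-safe; little-endian host assumed
      d += sizeof(v);
      return v;
    }

    static std::vector<uint32_t> readIDs(const unsigned char *d,
                                         unsigned DataLen) {
      std::vector<uint32_t> IDs;
      IDs.reserve(DataLen / 4);
      // No count prefix: the record length implies the element count.
      for (unsigned N = DataLen / 4; N; --N)
        IDs.push_back(readLE32(d));
      return IDs;
    }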
-bool ASTReader::ReadDeclContextStorage(ModuleFile &M,
- BitstreamCursor &Cursor,
- const std::pair<uint64_t, uint64_t> &Offsets,
- DeclContextInfo &Info) {
- SavedStreamPosition SavedPosition(Cursor);
- // First the lexical decls.
- if (Offsets.first != 0) {
- Cursor.JumpToBit(Offsets.first);
+bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
+ BitstreamCursor &Cursor,
+ uint64_t Offset,
+ DeclContext *DC) {
+ assert(Offset != 0);
- RecordData Record;
- StringRef Blob;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
- if (RecCode != DECL_CONTEXT_LEXICAL) {
- Error("Expected lexical block");
- return true;
- }
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
- Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair*>(Blob.data());
- Info.NumLexicalDecls = Blob.size() / sizeof(KindDeclIDPair);
+ RecordData Record;
+ StringRef Blob;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (RecCode != DECL_CONTEXT_LEXICAL) {
+ Error("Expected lexical block");
+ return true;
}
- // Now the lookup table.
- if (Offsets.second != 0) {
- Cursor.JumpToBit(Offsets.second);
+ assert(!isa<TranslationUnitDecl>(DC) &&
+ "expected a TU_UPDATE_LEXICAL record for TU");
+ // If we are handling a C++ class template instantiation, we can see multiple
+ // lexical updates for the same record. It's important that we select only one
+ // of them, so that field numbering works properly. Just pick the first one we
+ // see.
+ auto &Lex = LexicalDecls[DC];
+ if (!Lex.first) {
+ Lex = std::make_pair(
+ &M, llvm::makeArrayRef(
+ reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
+ Blob.data()),
+ Blob.size() / 4));
+ }
+ DC->setHasExternalLexicalStorage(true);
+ return false;
+}
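
As the comment above says, a class template instantiation can receive lexical contents from several module files, and exactly one must win so that field numbering stays consistent; the guard is plain first-writer-wins on the map slot. The idiom in isolation (generic types, not the ASTReader's):

    #include <map>
    #include <utility>
    #include <vector>

    struct Contents {
      const void *File; // which module file supplied the decls
      std::vector<unsigned> Decls;
    };

    std::map<const void *, Contents> LexicalDecls; // keyed by DeclContext*

    void registerLexical(const void *DC, const void *File,
                         std::vector<unsigned> Decls) {
      Contents &Slot = LexicalDecls[DC]; // value-initialized on first access
      if (!Slot.File) // later suppliers find the slot taken and do nothing
        Slot = Contents{File, std::move(Decls)};
    }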
- RecordData Record;
- StringRef Blob;
- unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
- if (RecCode != DECL_CONTEXT_VISIBLE) {
- Error("Expected visible lookup table block");
- return true;
- }
- Info.NameLookupTableData = ASTDeclContextNameLookupTable::Create(
- (const unsigned char *)Blob.data() + Record[0],
- (const unsigned char *)Blob.data() + sizeof(uint32_t),
- (const unsigned char *)Blob.data(),
- ASTDeclContextNameLookupTrait(*this, M));
+bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M,
+ BitstreamCursor &Cursor,
+ uint64_t Offset,
+ DeclID ID) {
+ assert(Offset != 0);
+
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+
+ RecordData Record;
+ StringRef Blob;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record, &Blob);
+ if (RecCode != DECL_CONTEXT_VISIBLE) {
+ Error("Expected visible lookup table block");
+ return true;
}
+ // We can't safely determine the primary context yet, so delay attaching the
+ // lookup table until we're done with recursive deserialization.
+ auto *Data = (const unsigned char*)Blob.data();
+ PendingVisibleUpdates[ID].push_back(PendingVisibleUpdate{&M, Data});
return false;
}
void ASTReader::Error(StringRef Msg) {
Error(diag::err_fe_pch_malformed, Msg);
- if (Context.getLangOpts().Modules && !Diags.isDiagnosticInFlight()) {
+ if (Context.getLangOpts().Modules && !Diags.isDiagnosticInFlight() &&
+ !PP.getHeaderSearchInfo().getModuleCachePath().empty()) {
Diag(diag::note_module_cache_path)
<< PP.getHeaderSearchInfo().getModuleCachePath();
}
@@ -1032,11 +1065,12 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
// Parse the file names
std::map<int, int> FileIDs;
- for (int I = 0, N = Record[Idx++]; I != N; ++I) {
+ for (unsigned I = 0; Record[Idx]; ++I) {
// Extract the file name
auto Filename = ReadPath(F, Record, Idx);
FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
}
+ ++Idx;
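
The file-name list is no longer count-prefixed: the loop runs until Record[Idx] is zero, then the ++Idx skips the terminator. Reading this together with ReadPath suggests the sentinel is a zero string length, which works because real paths are never empty (an inference from the loop condition, not stated in the patch). A standalone decoder of that layout:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Each path is stored as a length followed by one character per record
    // element; a zero length terminates the list.
    static std::vector<std::string>
    readPaths(const std::vector<uint64_t> &Record, unsigned &Idx) {
      std::vector<std::string> Paths;
      while (Record[Idx]) {
        unsigned Len = unsigned(Record[Idx++]);
        std::string S;
        for (unsigned I = 0; I != Len; ++I)
          S += char(Record[Idx++]);
        Paths.push_back(S);
      }
      ++Idx; // consume the 0 terminator
      return Paths;
    }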
// Parse the line entries
std::vector<LineEntry> Entries;
@@ -1048,7 +1082,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
// Extract the line entries
unsigned NumEntries = Record[Idx++];
- assert(NumEntries && "Numentries is 00000");
+ assert(NumEntries && "no line entries for file ID");
Entries.clear();
Entries.reserve(NumEntries);
for (unsigned I = 0; I != NumEntries; ++I) {
@@ -1225,7 +1259,8 @@ bool ASTReader::ReadSLocEntry(int ID) {
= SourceMgr.getOrCreateContentCache(File,
/*isSystemFile=*/FileCharacter != SrcMgr::C_User);
if (OverriddenBuffer && !ContentCache->BufferOverridden &&
- ContentCache->ContentsEntry == ContentCache->OrigEntry) {
+ ContentCache->ContentsEntry == ContentCache->OrigEntry &&
+ !ContentCache->getRawBuffer()) {
unsigned Code = SLocEntryCursor.ReadCode();
Record.clear();
unsigned RecCode = SLocEntryCursor.readRecord(Code, Record, &Blob);
@@ -1313,7 +1348,7 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
// location of its includer.
if (F->ImportedBy.empty() || !F->ImportedBy[0]) {
// Main file is the importer.
- assert(!SourceMgr.getMainFileID().isInvalid() && "missing main file");
+ assert(SourceMgr.getMainFileID().isValid() && "missing main file");
return SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
}
return F->ImportedBy[0]->FirstLoc;
@@ -1323,10 +1358,8 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
/// specified cursor. Read the abbreviations that are at the top of the block
/// and then leave the cursor pointing into the block.
bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID) {
- if (Cursor.EnterSubBlock(BlockID)) {
- Error("malformed block record in AST file");
- return Failure;
- }
+ if (Cursor.EnterSubBlock(BlockID))
+ return true;
while (true) {
uint64_t Offset = Cursor.GetCurrentBitNo();
@@ -1425,8 +1458,7 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
if (isC99VarArgs) MI->setIsC99Varargs();
if (isGNUVarArgs) MI->setIsGNUVarargs();
if (hasCommaPasting) MI->setHasCommaPasting();
- MI->setArgumentList(MacroArgs.data(), MacroArgs.size(),
- PP.getPreprocessorAllocator());
+ MI->setArgumentList(MacroArgs, PP.getPreprocessorAllocator());
}
// Remember that we saw this macro last so that we add the tokens that
@@ -1481,13 +1513,14 @@ unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
HeaderFileInfoTrait::internal_key_type
HeaderFileInfoTrait::GetInternalKey(const FileEntry *FE) {
- internal_key_type ikey = { FE->getSize(), FE->getModificationTime(),
- FE->getName(), /*Imported*/false };
+ internal_key_type ikey = {FE->getSize(),
+ M.HasTimestamps ? FE->getModificationTime() : 0,
+ FE->getName(), /*Imported*/ false};
return ikey;
}
bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
- if (a.Size != b.Size || a.ModTime != b.ModTime)
+ if (a.Size != b.Size || (a.ModTime && b.ModTime && a.ModTime != b.ModTime))
return false;
if (llvm::sys::path::is_absolute(a.Filename) &&
@@ -1536,14 +1569,15 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
using namespace llvm::support;
HeaderFileInfo HFI;
unsigned Flags = *d++;
- HFI.HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>
- ((Flags >> 6) & 0x03);
- HFI.isImport = (Flags >> 5) & 0x01;
- HFI.isPragmaOnce = (Flags >> 4) & 0x01;
- HFI.DirInfo = (Flags >> 2) & 0x03;
- HFI.Resolved = (Flags >> 1) & 0x01;
+ // FIXME: Refactor with mergeHeaderFileInfo in HeaderSearch.cpp.
+ HFI.isImport |= (Flags >> 4) & 0x01;
+ HFI.isPragmaOnce |= (Flags >> 3) & 0x01;
+ HFI.DirInfo = (Flags >> 1) & 0x03;
HFI.IndexHeaderMapHeader = Flags & 0x01;
- HFI.NumIncludes = endian::readNext<uint16_t, little, unaligned>(d);
+ // FIXME: Find a better way to handle this. Maybe just store a
+ // "has been included" flag?
+ HFI.NumIncludes = std::max(endian::readNext<uint16_t, little, unaligned>(d),
+ HFI.NumIncludes);
HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
M, endian::readNext<uint32_t, little, unaligned>(d));
if (unsigned FrameworkOffset =
@@ -1553,34 +1587,36 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
HFI.Framework = HS->getUniqueFrameworkName(FrameworkName);
}
-
- if (d != End) {
+
+ assert((End - d) % 4 == 0 &&
+ "Wrong data length in HeaderFileInfo deserialization");
+ while (d != End) {
uint32_t LocalSMID = endian::readNext<uint32_t, little, unaligned>(d);
- if (LocalSMID) {
- // This header is part of a module. Associate it with the module to enable
- // implicit module import.
- SubmoduleID GlobalSMID = Reader.getGlobalSubmoduleID(M, LocalSMID);
- Module *Mod = Reader.getSubmodule(GlobalSMID);
- HFI.isModuleHeader = true;
- FileManager &FileMgr = Reader.getFileManager();
- ModuleMap &ModMap =
- Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
- // FIXME: This information should be propagated through the
- // SUBMODULE_HEADER etc records rather than from here.
- // FIXME: We don't ever mark excluded headers.
- std::string Filename = key.Filename;
- if (key.Imported)
- Reader.ResolveImportedPath(M, Filename);
- Module::Header H = { key.Filename, FileMgr.getFile(Filename) };
- ModMap.addHeader(Mod, H, HFI.getHeaderRole());
- }
- }
-
- assert(End == d && "Wrong data length in HeaderFileInfo deserialization");
- (void)End;
-
+ auto HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>(LocalSMID & 3);
+ LocalSMID >>= 2;
+
+ // This header is part of a module. Associate it with the module to enable
+ // implicit module import.
+ SubmoduleID GlobalSMID = Reader.getGlobalSubmoduleID(M, LocalSMID);
+ Module *Mod = Reader.getSubmodule(GlobalSMID);
+ FileManager &FileMgr = Reader.getFileManager();
+ ModuleMap &ModMap =
+ Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
+ std::string Filename = key.Filename;
+ if (key.Imported)
+ Reader.ResolveImportedPath(M, Filename);
+ // FIXME: This is not always the right filename-as-written, but we're not
+ // going to use this information to rebuild the module, so it doesn't make
+ // a lot of difference.
+ Module::Header H = { key.Filename, FileMgr.getFile(Filename) };
+ ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
+ HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
+ }
+
// This HeaderFileInfo was externally loaded.
HFI.External = true;
+ HFI.IsValid = true;
return HFI;
}
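
Each trailing uint32 in the header-info record now packs a submodule ID together with its ModuleMap::ModuleHeaderRole: role in the low two bits, ID in the remainder. A round-trip sketch (the enum values are assumptions mirroring ModuleMap's roles, not copied from it):

    #include <cassert>
    #include <cstdint>

    enum ModuleHeaderRole : unsigned {
      NormalHeader  = 0x0,
      PrivateHeader = 0x1,
      TextualHeader = 0x2, // assumed values; only the two-bit width matters here
    };

    static uint32_t packHeader(uint32_t SubmoduleID, ModuleHeaderRole Role) {
      return (SubmoduleID << 2) | uint32_t(Role);
    }

    static void unpackHeader(uint32_t V, uint32_t &SubmoduleID,
                             ModuleHeaderRole &Role) {
      Role = ModuleHeaderRole(V & 3);
      SubmoduleID = V >> 2;
    }

    int main() {
      uint32_t ID;
      ModuleHeaderRole Role;
      unpackHeader(packHeader(42, TextualHeader), ID, Role);
      assert(ID == 42 && Role == TextualHeader);
    }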
@@ -1595,16 +1631,15 @@ void ASTReader::ReadDefinedMacros() {
// Note that we are loading defined macros.
Deserializing Macros(this);
- for (ModuleReverseIterator I = ModuleMgr.rbegin(),
- E = ModuleMgr.rend(); I != E; ++I) {
- BitstreamCursor &MacroCursor = (*I)->MacroCursor;
+ for (auto &I : llvm::reverse(ModuleMgr)) {
+ BitstreamCursor &MacroCursor = I->MacroCursor;
// If there was no preprocessor block, skip this file.
if (!MacroCursor.getBitStreamReader())
continue;
BitstreamCursor Cursor = MacroCursor;
- Cursor.JumpToBit((*I)->MacroStartOffset);
+ Cursor.JumpToBit(I->MacroStartOffset);
RecordData Record;
while (true) {
@@ -1626,7 +1661,7 @@ void ASTReader::ReadDefinedMacros() {
case PP_MACRO_OBJECT_LIKE:
case PP_MACRO_FUNCTION_LIKE:
- getLocalIdentifier(**I, Record[0]);
+ getLocalIdentifier(*I, Record[0]);
break;
case PP_TOKEN:
@@ -1661,33 +1696,30 @@ namespace {
Found()
{
}
-
- static bool visit(ModuleFile &M, void *UserData) {
- IdentifierLookupVisitor *This
- = static_cast<IdentifierLookupVisitor *>(UserData);
-
+
+ bool operator()(ModuleFile &M) {
// If we've already searched this module file, skip it now.
- if (M.Generation <= This->PriorGeneration)
+ if (M.Generation <= PriorGeneration)
return true;
ASTIdentifierLookupTable *IdTable
= (ASTIdentifierLookupTable *)M.IdentifierLookupTable;
if (!IdTable)
return false;
-
- ASTIdentifierLookupTrait Trait(IdTable->getInfoObj().getReader(),
- M, This->Found);
- ++This->NumIdentifierLookups;
+
+ ASTIdentifierLookupTrait Trait(IdTable->getInfoObj().getReader(), M,
+ Found);
+ ++NumIdentifierLookups;
ASTIdentifierLookupTable::iterator Pos =
- IdTable->find_hashed(This->Name, This->NameHash, &Trait);
+ IdTable->find_hashed(Name, NameHash, &Trait);
if (Pos == IdTable->end())
return false;
// Dereferencing the iterator has the effect of building the
// IdentifierInfo node and populating it with the various
// declarations it needs.
- ++This->NumIdentifierLookupHits;
- This->Found = *Pos;
+ ++NumIdentifierLookupHits;
+ Found = *Pos;
return true;
}
@@ -1718,7 +1750,7 @@ void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
IdentifierLookupVisitor Visitor(II.getName(), PriorGeneration,
NumIdentifierLookups,
NumIdentifierLookupHits);
- ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor, HitsPtr);
+ ModuleMgr.visit(Visitor, HitsPtr);
markIdentifierUpToDate(&II);
}
@@ -1859,26 +1891,17 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
"invalid record type for input file");
(void)Result;
- std::string Filename;
- off_t StoredSize;
- time_t StoredTime;
- bool Overridden;
-
assert(Record[0] == ID && "Bogus stored ID or offset");
- StoredSize = static_cast<off_t>(Record[1]);
- StoredTime = static_cast<time_t>(Record[2]);
- Overridden = static_cast<bool>(Record[3]);
- Filename = Blob;
- ResolveImportedPath(F, Filename);
-
- InputFileInfo R = { std::move(Filename), StoredSize, StoredTime, Overridden };
+ InputFileInfo R;
+ R.StoredSize = static_cast<off_t>(Record[1]);
+ R.StoredTime = static_cast<time_t>(Record[2]);
+ R.Overridden = static_cast<bool>(Record[3]);
+ R.Transient = static_cast<bool>(Record[4]);
+ R.Filename = Blob;
+ ResolveImportedPath(F, R.Filename);
return R;
}
-std::string ASTReader::getInputFileName(ModuleFile &F, unsigned int ID) {
- return readInputFileInfo(F, ID).Filename;
-}
-
InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// If this ID is bogus, just return an empty input file.
if (ID == 0 || ID > F.InputFilesLoaded.size())
@@ -1900,11 +1923,10 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
off_t StoredSize = FI.StoredSize;
time_t StoredTime = FI.StoredTime;
bool Overridden = FI.Overridden;
+ bool Transient = FI.Transient;
StringRef Filename = FI.Filename;
- const FileEntry *File
- = Overridden? FileMgr.getVirtualFile(Filename, StoredSize, StoredTime)
- : FileMgr.getFile(Filename, /*OpenFile=*/false);
+ const FileEntry *File = FileMgr.getFile(Filename, /*OpenFile=*/false);
// If we didn't find the file, resolve it relative to the
// original directory from which this AST file was created.
@@ -1919,15 +1941,16 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// For an overridden file, create a virtual file with the stored
// size/timestamp.
- if (Overridden && File == nullptr) {
+ if ((Overridden || Transient) && File == nullptr)
File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime);
- }
if (File == nullptr) {
if (Complain) {
std::string ErrorStr = "could not find file '";
ErrorStr += Filename;
- ErrorStr += "' referenced by AST file";
+ ErrorStr += "' referenced by AST file '";
+ ErrorStr += F.FileName;
+ ErrorStr += "'";
Error(ErrorStr.c_str());
}
// Record that we didn't find the file.
@@ -1940,11 +1963,17 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// can lead to problems when lexing using the source locations from the
// PCH.
SourceManager &SM = getSourceManager();
- if (!Overridden && SM.isFileOverridden(File)) {
+ // FIXME: Reject if the overrides are different.
+ if ((!Overridden && !Transient) && SM.isFileOverridden(File)) {
if (Complain)
Error(diag::err_fe_pch_file_overridden, Filename);
// After emitting the diagnostic, recover by disabling the override so
// that the original file will be used.
+ //
+ // FIXME: This recovery is just as broken as the original state; there may
+ // be another precompiled module that's using the overridden contents, or
+ // we might be halfway through parsing it. Instead, we should treat the
+ // overridden contents as belonging to a separate FileEntry.
SM.disableFileContentsOverride(File);
// The FileEntry is a virtual file entry with the size of the contents
// that would override the original contents. Set it to the original's
@@ -1965,14 +1994,9 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// have inconsistent modification times that sometimes
// erroneously trigger this error-handling path.
//
- // This also happens in networked file systems, so disable this
- // check if validation is disabled or if we have an explicitly
- // built PCM file.
- //
- // FIXME: Should we also do this for PCH files? They could also
- // reasonably get shared across a network during a distributed build.
- (StoredTime != File->getModificationTime() && !DisableValidation &&
- F.Kind != MK_ExplicitModule)
+ // FIXME: This probably also breaks HeaderFileInfo lookups on Windows.
+ (StoredTime && StoredTime != File->getModificationTime() &&
+ !DisableValidation)
#endif
)) {
if (Complain) {
@@ -2000,8 +2024,10 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
IsOutOfDate = true;
}
+ // FIXME: If the file is overridden and we've already opened it,
+ // issue an error (or split it into a separate FileEntry).
- InputFile IF = InputFile(File, Overridden, IsOutOfDate);
+ InputFile IF = InputFile(File, Overridden || Transient, IsOutOfDate);
// Note that we've loaded this input file.
F.InputFilesLoaded[ID-1] = IF;
@@ -2026,24 +2052,114 @@ void ASTReader::ResolveImportedPath(std::string &Filename, StringRef Prefix) {
Filename.assign(Buffer.begin(), Buffer.end());
}
+static bool isDiagnosedResult(ASTReader::ASTReadResult ARR, unsigned Caps) {
+ switch (ARR) {
+ case ASTReader::Failure: return true;
+ case ASTReader::Missing: return !(Caps & ASTReader::ARR_Missing);
+ case ASTReader::OutOfDate: return !(Caps & ASTReader::ARR_OutOfDate);
+ case ASTReader::VersionMismatch:
+ return !(Caps & ASTReader::ARR_VersionMismatch);
+ case ASTReader::ConfigurationMismatch:
+ return !(Caps & ASTReader::ARR_ConfigurationMismatch);
+ case ASTReader::HadErrors: return true;
+ case ASTReader::Success: return false;
+ }
+
+ llvm_unreachable("unknown ASTReadResult");
+}
+
+ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
+ BitstreamCursor &Stream, unsigned ClientLoadCapabilities,
+ bool AllowCompatibleConfigurationMismatch, ASTReaderListener &Listener,
+ std::string &SuggestedPredefines) {
+ if (Stream.EnterSubBlock(OPTIONS_BLOCK_ID))
+ return Failure;
+
+ // Read all of the records in the options block.
+ RecordData Record;
+ ASTReadResult Result = Success;
+ while (1) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ case llvm::BitstreamEntry::SubBlock:
+ return Failure;
+
+ case llvm::BitstreamEntry::EndBlock:
+ return Result;
+
+ case llvm::BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ switch ((OptionsRecordTypes)Stream.readRecord(Entry.ID, Record)) {
+ case LANGUAGE_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (ParseLanguageOptions(Record, Complain, Listener,
+ AllowCompatibleConfigurationMismatch))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case TARGET_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (ParseTargetOptions(Record, Complain, Listener,
+ AllowCompatibleConfigurationMismatch))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case DIAGNOSTIC_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseDiagnosticOptions(Record, Complain, Listener))
+ return OutOfDate;
+ break;
+ }
+
+ case FILE_SYSTEM_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseFileSystemOptions(Record, Complain, Listener))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case HEADER_SEARCH_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseHeaderSearchOptions(Record, Complain, Listener))
+ Result = ConfigurationMismatch;
+ break;
+ }
+
+ case PREPROCESSOR_OPTIONS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParsePreprocessorOptions(Record, Complain, Listener,
+ SuggestedPredefines))
+ Result = ConfigurationMismatch;
+ break;
+ }
+ }
+ }
+}
+
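
ReadOptionsBlock and the import loop later in ReadControlBlock both lean on the ARR_* capability bits: a caller that can recover from a failure kind sets the corresponding bit, and isDiagnosedResult treats a result as already diagnosed exactly when the bit is absent. A toy model, including the narrowing used at import sites (a dependency may only be missing if the importer tolerates being out of date); bit values are assumptions:

    #include <cassert>

    enum LoadCapabilities : unsigned {
      ARR_None      = 0,
      ARR_Missing   = 0x1,
      ARR_OutOfDate = 0x2,
    };

    enum ReadResult { Success, Failure, Missing, OutOfDate };

    static bool isDiagnosed(ReadResult R, unsigned Caps) {
      switch (R) {
      case Failure:   return true; // always diagnosed
      case Missing:   return !(Caps & ARR_Missing);
      case OutOfDate: return !(Caps & ARR_OutOfDate);
      case Success:   return false;
      }
      return true;
    }

    int main() {
      // Mirror of "Capabilities &= ~ARR_Missing" before loading a dependency.
      unsigned Caps = ARR_Missing; // importer cannot tolerate out-of-date
      if (!(Caps & ARR_OutOfDate))
        Caps &= ~ARR_Missing;
      assert(isDiagnosed(Missing, Caps)); // so a missing dependency diagnoses
    }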
ASTReader::ASTReadResult
ASTReader::ReadControlBlock(ModuleFile &F,
SmallVectorImpl<ImportedModule> &Loaded,
const ModuleFile *ImportedBy,
unsigned ClientLoadCapabilities) {
BitstreamCursor &Stream = F.Stream;
+ ASTReadResult Result = Success;
if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) {
Error("malformed block record in AST file");
return Failure;
}
- // Should we allow the configuration of the module file to differ from the
- // configuration of the current translation unit in a compatible way?
- //
- // FIXME: Allow this for files explicitly specified with -include-pch too.
- bool AllowCompatibleConfigurationMismatch = F.Kind == MK_ExplicitModule;
-
// Read all of the records and blocks in the control block.
RecordData Record;
unsigned NumInputs = 0;
@@ -2061,8 +2177,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
PP.getHeaderSearchInfo().getHeaderSearchOpts();
// All user input files reside at the index range [0, NumUserInputs), and
- // system input files reside at [NumUserInputs, NumInputs).
- if (!DisableValidation) {
+ // system input files reside at [NumUserInputs, NumInputs). For explicitly
+ // loaded module files, ignore missing inputs.
+ if (!DisableValidation && F.Kind != MK_ExplicitModule) {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
// If we are reading a module, we will create a verification timestamp,
@@ -2084,7 +2201,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
if (Listener)
- Listener->visitModuleFile(F.FileName);
+ Listener->visitModuleFile(F.FileName, F.Kind);
if (Listener && Listener->needsInputFileVisitation()) {
unsigned N = Listener->needsSystemInputFileVisitation() ? NumInputs
@@ -2092,11 +2209,12 @@ ASTReader::ReadControlBlock(ModuleFile &F,
for (unsigned I = 0; I < N; ++I) {
bool IsSystem = I >= NumUserInputs;
InputFileInfo FI = readInputFileInfo(F, I+1);
- Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden);
+ Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden,
+ F.Kind == MK_ExplicitModule);
}
}
- return Success;
+ return Result;
}
case llvm::BitstreamEntry::SubBlock:
@@ -2110,6 +2228,41 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return Failure;
}
continue;
+
+ case OPTIONS_BLOCK_ID:
+ // If we're reading the first module for this group, check its options
+ // are compatible with ours. For modules it imports, no further checking
+ // is required, because we checked them when we built it.
+ if (Listener && !ImportedBy) {
+ // Should we allow the configuration of the module file to differ from
+ // the configuration of the current translation unit in a compatible
+ // way?
+ //
+ // FIXME: Allow this for files explicitly specified with -include-pch.
+ bool AllowCompatibleConfigurationMismatch =
+ F.Kind == MK_ExplicitModule;
+
+ Result = ReadOptionsBlock(Stream, ClientLoadCapabilities,
+ AllowCompatibleConfigurationMismatch,
+ *Listener, SuggestedPredefines);
+ if (Result == Failure) {
+ Error("malformed block record in AST file");
+ return Result;
+ }
+
+ if (DisableValidation ||
+ (AllowConfigurationMismatch && Result == ConfigurationMismatch))
+ Result = Success;
+
+ // If we've diagnosed a problem, we're done.
+ if (Result != Success &&
+ isDiagnosedResult(Result, ClientLoadCapabilities))
+ return Result;
+ } else if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
default:
if (Stream.SkipBlock()) {
@@ -2136,7 +2289,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return VersionMismatch;
}
- bool hasErrors = Record[5];
+ bool hasErrors = Record[6];
if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
Diag(diag::err_pch_with_compiler_errors);
return HadErrors;
@@ -2147,6 +2300,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
if (F.RelocatablePCH)
F.BaseDirectory = isysroot.empty() ? "/" : isysroot;
+ F.HasTimestamps = Record[5];
+
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
@@ -2178,10 +2333,23 @@ ASTReader::ReadControlBlock(ModuleFile &F,
ASTFileSignature StoredSignature = Record[Idx++];
auto ImportedFile = ReadPath(F, Record, Idx);
+ // If our client can't cope with us being out of date, we can't cope with
+ // our dependency being missing.
+ unsigned Capabilities = ClientLoadCapabilities;
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Capabilities &= ~ARR_Missing;
+
// Load the AST file.
- switch(ReadASTCore(ImportedFile, ImportedKind, ImportLoc, &F, Loaded,
- StoredSize, StoredModTime, StoredSignature,
- ClientLoadCapabilities)) {
+ auto Result = ReadASTCore(ImportedFile, ImportedKind, ImportLoc, &F,
+ Loaded, StoredSize, StoredModTime,
+ StoredSignature, Capabilities);
+
+ // If we diagnosed a problem, produce a backtrace.
+ if (isDiagnosedResult(Result, Capabilities))
+ Diag(diag::note_module_file_imported_by)
+ << F.FileName << !F.ModuleName.empty() << F.ModuleName;
+
+ switch (Result) {
case Failure: return Failure;
// If we have to ignore the dependency, we'll have to ignore this too.
case Missing:
@@ -2195,71 +2363,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
}
- case KNOWN_MODULE_FILES:
- break;
-
- case LANGUAGE_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
- // FIXME: The &F == *ModuleMgr.begin() check is wrong for modules.
- if (Listener && &F == *ModuleMgr.begin() &&
- ParseLanguageOptions(Record, Complain, *Listener,
- AllowCompatibleConfigurationMismatch) &&
- !DisableValidation && !AllowConfigurationMismatch)
- return ConfigurationMismatch;
- break;
- }
-
- case TARGET_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
- if (Listener && &F == *ModuleMgr.begin() &&
- ParseTargetOptions(Record, Complain, *Listener,
- AllowCompatibleConfigurationMismatch) &&
- !DisableValidation && !AllowConfigurationMismatch)
- return ConfigurationMismatch;
- break;
- }
-
- case DIAGNOSTIC_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_OutOfDate)==0;
- if (Listener && &F == *ModuleMgr.begin() &&
- !AllowCompatibleConfigurationMismatch &&
- ParseDiagnosticOptions(Record, Complain, *Listener) &&
- !DisableValidation)
- return OutOfDate;
- break;
- }
-
- case FILE_SYSTEM_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
- if (Listener && &F == *ModuleMgr.begin() &&
- !AllowCompatibleConfigurationMismatch &&
- ParseFileSystemOptions(Record, Complain, *Listener) &&
- !DisableValidation && !AllowConfigurationMismatch)
- return ConfigurationMismatch;
- break;
- }
-
- case HEADER_SEARCH_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
- if (Listener && &F == *ModuleMgr.begin() &&
- !AllowCompatibleConfigurationMismatch &&
- ParseHeaderSearchOptions(Record, Complain, *Listener) &&
- !DisableValidation && !AllowConfigurationMismatch)
- return ConfigurationMismatch;
- break;
- }
-
- case PREPROCESSOR_OPTIONS: {
- bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0;
- if (Listener && &F == *ModuleMgr.begin() &&
- !AllowCompatibleConfigurationMismatch &&
- ParsePreprocessorOptions(Record, Complain, *Listener,
- SuggestedPredefines) &&
- !DisableValidation && !AllowConfigurationMismatch)
- return ConfigurationMismatch;
- break;
- }
-
case ORIGINAL_FILE:
F.OriginalSourceFileID = FileID::get(Record[0]);
F.ActualOriginalSourceFileName = Blob;
@@ -2499,10 +2602,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case TU_UPDATE_LEXICAL: {
DeclContext *TU = Context.getTranslationUnitDecl();
- DeclContextInfo &Info = F.DeclContextInfos[TU];
- Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair *>(Blob.data());
- Info.NumLexicalDecls
- = static_cast<unsigned int>(Blob.size() / sizeof(KindDeclIDPair));
+ LexicalContents Contents(
+ reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
+ Blob.data()),
+ static_cast<unsigned int>(Blob.size() / 4));
+ TULexicalDecls.push_back(std::make_pair(&F, Contents));
TU->setHasExternalLexicalStorage(true);
break;
}
@@ -2510,20 +2614,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case UPDATE_VISIBLE: {
unsigned Idx = 0;
serialization::DeclID ID = ReadDeclID(F, Record, Idx);
- ASTDeclContextNameLookupTable *Table =
- ASTDeclContextNameLookupTable::Create(
- (const unsigned char *)Blob.data() + Record[Idx++],
- (const unsigned char *)Blob.data() + sizeof(uint32_t),
- (const unsigned char *)Blob.data(),
- ASTDeclContextNameLookupTrait(*this, F));
- if (Decl *D = GetExistingDecl(ID)) {
- auto *DC = cast<DeclContext>(D);
- DC->getPrimaryContext()->setHasExternalVisibleStorage(true);
- auto *&LookupTable = F.DeclContextInfos[DC].NameLookupTableData;
- delete LookupTable;
- LookupTable = Table;
- } else
- PendingVisibleUpdates[ID].push_back(std::make_pair(Table, &F));
+ auto *Data = (const unsigned char*)Blob.data();
+ PendingVisibleUpdates[ID].push_back(PendingVisibleUpdate{&F, Data});
+ // If we've already loaded the decl, perform the updates when we finish
+ // loading this block.
+ if (Decl *D = GetExistingDecl(ID))
+ PendingUpdateRecords.push_back(std::make_pair(ID, D));
break;
}
@@ -2568,6 +2664,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
+ case INTERESTING_IDENTIFIERS:
+ F.PreloadIdentifierOffsets.assign(Record.begin(), Record.end());
+ break;
+
case EAGERLY_DESERIALIZED_DECLS:
// FIXME: Skip reading this record if our ASTConsumer doesn't care
// about "interesting" decls (for instance, if we're building a module).
@@ -2696,6 +2796,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
SLocSpaceSize);
+ if (!F.SLocEntryBaseID) {
+ Error("ran out of source locations");
+ break;
+ }
// Make our entry in the range map. BaseID is negative and growing, so
// we invert it. Because we invert it, though, we need the other end of
// the range.
@@ -2744,7 +2848,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
RemapBuilder DeclRemap(F.DeclRemap);
RemapBuilder TypeRemap(F.TypeRemap);
- while(Data < DataEnd) {
+ while (Data < DataEnd) {
using namespace llvm::support;
uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
StringRef Name = StringRef((const char*)Data, Len);
@@ -3060,22 +3164,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
- case LOCAL_REDECLARATIONS: {
- F.RedeclarationChains.swap(Record);
- break;
- }
-
- case LOCAL_REDECLARATIONS_MAP: {
- if (F.LocalNumRedeclarationsInMap != 0) {
- Error("duplicate LOCAL_REDECLARATIONS_MAP record in AST file");
- return Failure;
- }
-
- F.LocalNumRedeclarationsInMap = Record[0];
- F.RedeclarationsMap = (const LocalRedeclarationsInfo *)Blob.data();
- break;
- }
-
case MACRO_OFFSET: {
if (F.LocalNumMacros != 0) {
Error("duplicate MACRO_OFFSET record in AST file");
@@ -3150,11 +3238,18 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
const FileEntry *ModMap = M ? Map.getModuleMapFileForUniquing(M) : nullptr;
if (!ModMap) {
assert(ImportedBy && "top-level import should be verified");
- if ((ClientLoadCapabilities & ARR_Missing) == 0)
- Diag(diag::err_imported_module_not_found) << F.ModuleName << F.FileName
- << ImportedBy->FileName
- << F.ModuleMapPath;
- return Missing;
+ if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) {
+ if (auto *ASTFE = M ? M->getASTFile() : nullptr)
+ // This module was defined by an imported (explicit) module.
+ Diag(diag::err_module_file_conflict) << F.ModuleName << F.FileName
+ << ASTFE->getName();
+ else
+ // This module was built with a different module map.
+ Diag(diag::err_imported_module_not_found)
+ << F.ModuleName << F.FileName << ImportedBy->FileName
+ << F.ModuleMapPath;
+ }
+ return OutOfDate;
}
assert(M->Name == F.ModuleName && "found module with different name");
@@ -3342,6 +3437,36 @@ static void updateModuleTimestamp(ModuleFile &MF) {
OS << "Timestamp file\n";
}
+/// \brief Given a cursor at the start of an AST file, scan ahead and drop the
+/// cursor into the start of the given block ID, returning false on success and
+/// true on failure.
+static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
+ while (1) {
+ llvm::BitstreamEntry Entry = Cursor.advance();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::Error:
+ case llvm::BitstreamEntry::EndBlock:
+ return true;
+
+ case llvm::BitstreamEntry::Record:
+ // Ignore top-level records.
+ Cursor.skipRecord(Entry.ID);
+ break;
+
+ case llvm::BitstreamEntry::SubBlock:
+ if (Entry.ID == BlockID) {
+ if (Cursor.EnterSubBlock(BlockID))
+ return true;
+ // Found it!
+ return false;
+ }
+
+ if (Cursor.SkipBlock())
+ return true;
+ }
+ }
+}
+
ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
ModuleKind Type,
SourceLocation ImportLoc,
@@ -3399,6 +3524,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
if (ASTReadResult Result = ReadASTBlock(F, ClientLoadCapabilities))
return Result;
+ // Read the extension blocks.
+ while (!SkipCursorToBlock(F.Stream, EXTENSION_BLOCK_ID)) {
+ if (ASTReadResult Result = ReadExtensionBlock(F))
+ return Result;
+ }
+
// Once read, set the ModuleFile bit base offset and update the size in
// bits of all files we've seen.
F.GlobalBitOffset = TotalModulesSizeInBits;
@@ -3414,6 +3545,32 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
// SourceManager.
SourceMgr.getLoadedSLocEntryByID(Index);
}
+
+ // Preload all the pending interesting identifiers by marking them out of
+ // date.
+ for (auto Offset : F.PreloadIdentifierOffsets) {
+ const unsigned char *Data = reinterpret_cast<const unsigned char *>(
+ F.IdentifierTableData + Offset);
+
+ ASTIdentifierLookupTrait Trait(*this, F);
+ auto KeyDataLen = Trait.ReadKeyDataLength(Data);
+ auto Key = Trait.ReadKey(Data, KeyDataLen.first);
+ auto &II = PP.getIdentifierTable().getOwn(Key);
+ II.setOutOfDate(true);
+
+ // Mark this identifier as being from an AST file so that we can track
+ // whether we need to serialize it.
+ if (!II.isFromAST()) {
+ II.setIsFromAST();
+ bool IsModule = PP.getCurrentModule() != nullptr;
+ if (isInterestingIdentifier(*this, II, IsModule))
+ II.setChangedSinceDeserialization();
+ }
+
+ // Associate the ID with the identifier so that the writer can reuse it.
+ auto ID = Trait.ReadIdentifierID(Data + KeyDataLen.first);
+ SetIdentifierInfo(ID, &II);
+ }
}
// Setup the import locations and notify the module manager that we've
@@ -3434,13 +3591,20 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
M->ImportLoc.getRawEncoding());
}
- // Mark all of the identifiers in the identifier table as being out of date,
- // so that various accessors know to check the loaded modules when the
- // identifier is used.
- for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
- IdEnd = PP.getIdentifierTable().end();
- Id != IdEnd; ++Id)
- Id->second->setOutOfDate(true);
+ if (!Context.getLangOpts().CPlusPlus ||
+ (Type != MK_ImplicitModule && Type != MK_ExplicitModule)) {
+ // Mark all of the identifiers in the identifier table as being out of date,
+ // so that various accessors know to check the loaded modules when the
+ // identifier is used.
+ //
+ // For C++ modules, we don't need information on many identifiers (just
+ // those that provide macros or are poisoned), so we mark all of
+ // the interesting ones via PreloadIdentifierOffsets.
+ for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
+ IdEnd = PP.getIdentifierTable().end();
+ Id != IdEnd; ++Id)
+ Id->second->setOutOfDate(true);
+ }
// Resolve any unresolved module exports.
for (unsigned I = 0, N = UnresolvedModuleRefs.size(); I != N; ++I) {
@@ -3485,7 +3649,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
DeserializationListener->ReaderInitialized(this);
ModuleFile &PrimaryModule = ModuleMgr.getPrimaryModule();
- if (!PrimaryModule.OriginalSourceFileID.isInvalid()) {
+ if (PrimaryModule.OriginalSourceFileID.isValid()) {
PrimaryModule.OriginalSourceFileID
= FileID::get(PrimaryModule.SLocEntryBaseID
+ PrimaryModule.OriginalSourceFileID.getOpaqueValue() - 1);
@@ -3536,6 +3700,20 @@ static bool startsWithASTFileMagic(BitstreamCursor &Stream) {
Stream.Read(8) == 'H';
}
+static unsigned moduleKindForDiagnostic(ModuleKind Kind) {
+ switch (Kind) {
+ case MK_PCH:
+ return 0; // PCH
+ case MK_ImplicitModule:
+ case MK_ExplicitModule:
+ return 1; // module
+ case MK_MainFile:
+ case MK_Preamble:
+ return 2; // main source file
+ }
+ llvm_unreachable("unknown module kind");
+}
+
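The returned index feeds the leading %select of the new serialization diagnostics, so one diagnostic covers all three file kinds:

    // Index 0 drives a "%select{PCH|module|main source}0 file ..." prefix
    // in the new err_module_file_* diagnostics (simplified; the actual .td
    // entries also thread through the optional ErrorStr arguments), e.g.
    //   "PCH file 'foo.pch' not found"  vs.  "module file 'X.pcm' not found"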
ASTReader::ASTReadResult
ASTReader::ReadASTCore(StringRef FileName,
ModuleKind Type,
@@ -3568,11 +3746,9 @@ ASTReader::ReadASTCore(StringRef FileName,
return Missing;
// Otherwise, return an error.
- {
- std::string Msg = "Unable to load module \"" + FileName.str() + "\": "
- + ErrorStr;
- Error(Msg);
- }
+ Diag(diag::err_module_file_not_found) << moduleKindForDiagnostic(Type)
+ << FileName << ErrorStr.empty()
+ << ErrorStr;
return Failure;
case ModuleManager::OutOfDate:
@@ -3582,11 +3758,9 @@ ASTReader::ReadASTCore(StringRef FileName,
return OutOfDate;
// Otherwise, return an error.
- {
- std::string Msg = "Unable to load module \"" + FileName.str() + "\": "
- + ErrorStr;
- Error(Msg);
- }
+ Diag(diag::err_module_file_out_of_date) << moduleKindForDiagnostic(Type)
+ << FileName << ErrorStr.empty()
+ << ErrorStr;
return Failure;
}
@@ -3607,20 +3781,20 @@ ASTReader::ReadASTCore(StringRef FileName,
// Sniff for the signature.
if (!startsWithASTFileMagic(Stream)) {
- Diag(diag::err_not_a_pch_file) << FileName;
+ Diag(diag::err_module_file_invalid) << moduleKindForDiagnostic(Type)
+ << FileName;
return Failure;
}
// This is used for compatibility with older PCH formats.
bool HaveReadControlBlock = false;
-
while (1) {
llvm::BitstreamEntry Entry = Stream.advance();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
- case llvm::BitstreamEntry::EndBlock:
case llvm::BitstreamEntry::Record:
+ case llvm::BitstreamEntry::EndBlock:
Error("invalid record at top-level of AST file");
return Failure;
@@ -3628,18 +3802,23 @@ ASTReader::ReadASTCore(StringRef FileName,
break;
}
- // We only know the control subblock ID.
switch (Entry.ID) {
- case llvm::bitc::BLOCKINFO_BLOCK_ID:
- if (Stream.ReadBlockInfoBlock()) {
- Error("malformed BlockInfoBlock in AST file");
- return Failure;
- }
- break;
case CONTROL_BLOCK_ID:
HaveReadControlBlock = true;
switch (ReadControlBlock(F, Loaded, ImportedBy, ClientLoadCapabilities)) {
case Success:
+ // Check that we didn't try to load a non-module AST file as a module.
+ //
+ // FIXME: Should we also perform the converse check? Loading a module as
+ // a PCH file sort of works, but it's a bit wonky.
+ if ((Type == MK_ImplicitModule || Type == MK_ExplicitModule) &&
+ F.ModuleName.empty()) {
+ auto Result = (Type == MK_ImplicitModule) ? OutOfDate : Failure;
+ if (Result != OutOfDate ||
+ (ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ Diag(diag::err_module_file_not_module) << FileName;
+ return Result;
+ }
break;
case Failure: return Failure;
@@ -3650,6 +3829,7 @@ ASTReader::ReadASTCore(StringRef FileName,
case HadErrors: return HadErrors;
}
break;
+
case AST_BLOCK_ID:
if (!HaveReadControlBlock) {
if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0)
@@ -3669,7 +3849,78 @@ ASTReader::ReadASTCore(StringRef FileName,
break;
}
}
-
+
+ return Success;
+}
+
+/// Parse a record and blob containing module file extension metadata.
+static bool parseModuleFileExtensionMetadata(
+ const SmallVectorImpl<uint64_t> &Record,
+ StringRef Blob,
+ ModuleFileExtensionMetadata &Metadata) {
+ if (Record.size() < 4) return true;
+
+ Metadata.MajorVersion = Record[0];
+ Metadata.MinorVersion = Record[1];
+
+ unsigned BlockNameLen = Record[2];
+ unsigned UserInfoLen = Record[3];
+
+ if (BlockNameLen + UserInfoLen > Blob.size()) return true;
+
+ Metadata.BlockName = std::string(Blob.data(), Blob.data() + BlockNameLen);
+ Metadata.UserInfo = std::string(Blob.data() + BlockNameLen,
+ Blob.data() + BlockNameLen + UserInfoLen);
+ return false;
+}
+
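A worked example of the layout this parser expects: for a hypothetical version-1.0 extension whose block name is "clang.example" (13 bytes) and whose user info is "abc" (3 bytes), the writer side emits

    // Record = {1, 0, 13, 3};  Blob = "clang.exampleabc"

and the two length fields partition the blob back into BlockName and UserInfo.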
+ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
+ BitstreamCursor &Stream = F.Stream;
+
+ RecordData Record;
+ while (true) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ if (Stream.SkipBlock())
+ return Failure;
+
+ continue;
+
+ case llvm::BitstreamEntry::EndBlock:
+ return Success;
+
+ case llvm::BitstreamEntry::Error:
+ return HadErrors;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case EXTENSION_METADATA: {
+ ModuleFileExtensionMetadata Metadata;
+ if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
+ return Failure;
+
+ // Find a module file extension with this block name.
+ auto Known = ModuleFileExtensions.find(Metadata.BlockName);
+ if (Known == ModuleFileExtensions.end()) break;
+
+ // Form a reader.
+ if (auto Reader = Known->second->createExtensionReader(Metadata, *this,
+ F, Stream)) {
+ F.ExtensionReaders.push_back(std::move(Reader));
+ }
+
+ break;
+ }
+ }
+ }
+
return Success;
}
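Reading an extension block thus only requires a registered ModuleFileExtension whose block name matches the on-disk metadata. A minimal sketch of the reader side, with the signature inferred from the call in ReadExtensionBlock (consult ModuleFileExtension.h for the exact interface; only the members exercised above are shown, and MyExtensionReader is a hypothetical ModuleFileExtensionReader subclass):

    class MyExtension : public ModuleFileExtension {
      ModuleFileExtensionMetadata getExtensionMetadata() const override {
        ModuleFileExtensionMetadata MD;
        MD.BlockName = "my.block"; // must match the serialized block name
        MD.MajorVersion = 1;
        MD.MinorVersion = 0;
        return MD;
      }
      std::unique_ptr<ModuleFileExtensionReader>
      createExtensionReader(const ModuleFileExtensionMetadata &MD,
                            ASTReader &Reader, serialization::ModuleFile &Mod,
                            const llvm::BitstreamCursor &Stream) override {
        // The cursor is already positioned inside the extension block; scan
        // its records here and capture whatever state the extension needs.
        return llvm::make_unique<MyExtensionReader>(MD);
      }
    };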
@@ -3811,36 +4062,6 @@ void ASTReader::finalizeForWriting() {
// Nothing to do for now.
}
-/// \brief Given a cursor at the start of an AST file, scan ahead and drop the
-/// cursor into the start of the given block ID, returning false on success and
-/// true on failure.
-static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
- while (1) {
- llvm::BitstreamEntry Entry = Cursor.advance();
- switch (Entry.Kind) {
- case llvm::BitstreamEntry::Error:
- case llvm::BitstreamEntry::EndBlock:
- return true;
-
- case llvm::BitstreamEntry::Record:
- // Ignore top-level records.
- Cursor.skipRecord(Entry.ID);
- break;
-
- case llvm::BitstreamEntry::SubBlock:
- if (Entry.ID == BlockID) {
- if (Cursor.EnterSubBlock(BlockID))
- return true;
- // Found it!
- return false;
- }
-
- if (Cursor.SkipBlock())
- return true;
- }
- }
-}
-
/// \brief Reads and return the signature record from \p StreamFile's control
/// block, or else returns 0.
static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
@@ -3968,6 +4189,7 @@ namespace {
bool ASTReader::readASTFileControlBlock(
StringRef Filename, FileManager &FileMgr,
const PCHContainerReader &PCHContainerRdr,
+ bool FindModuleFileExtensions,
ASTReaderListener &Listener) {
// Open the AST file.
// FIXME: This allows use of the VFS; we do not allow use of the
@@ -3994,36 +4216,55 @@ bool ASTReader::readASTFileControlBlock(
bool NeedsSystemInputFiles = Listener.needsSystemInputFileVisitation();
bool NeedsImports = Listener.needsImportVisitation();
BitstreamCursor InputFilesCursor;
- if (NeedsInputFiles) {
- InputFilesCursor = Stream;
- if (SkipCursorToBlock(InputFilesCursor, INPUT_FILES_BLOCK_ID))
- return true;
- // Read the abbreviations
- while (true) {
- uint64_t Offset = InputFilesCursor.GetCurrentBitNo();
- unsigned Code = InputFilesCursor.ReadCode();
+ RecordData Record;
+ std::string ModuleDir;
+ bool DoneWithControlBlock = false;
+ while (!DoneWithControlBlock) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock: {
+ switch (Entry.ID) {
+ case OPTIONS_BLOCK_ID: {
+ std::string IgnoredSuggestedPredefines;
+ if (ReadOptionsBlock(Stream, ARR_ConfigurationMismatch | ARR_OutOfDate,
+ /*AllowCompatibleConfigurationMismatch*/ false,
+ Listener, IgnoredSuggestedPredefines) != Success)
+ return true;
+ break;
+ }
+
+ case INPUT_FILES_BLOCK_ID:
+ InputFilesCursor = Stream;
+ if (Stream.SkipBlock() ||
+ (NeedsInputFiles &&
+ ReadBlockAbbrevs(InputFilesCursor, INPUT_FILES_BLOCK_ID)))
+ return true;
+ break;
- // We expect all abbrevs to be at the start of the block.
- if (Code != llvm::bitc::DEFINE_ABBREV) {
- InputFilesCursor.JumpToBit(Offset);
+ default:
+ if (Stream.SkipBlock())
+ return true;
break;
}
- InputFilesCursor.ReadAbbrevRecord();
+
+ continue;
}
- }
-
- // Scan for ORIGINAL_FILE inside the control block.
- RecordData Record;
- std::string ModuleDir;
- while (1) {
- llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
- if (Entry.Kind == llvm::BitstreamEntry::EndBlock)
- return false;
-
- if (Entry.Kind != llvm::BitstreamEntry::Record)
+
+ case llvm::BitstreamEntry::EndBlock:
+ DoneWithControlBlock = true;
+ break;
+
+ case llvm::BitstreamEntry::Error:
return true;
-
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ if (DoneWithControlBlock) break;
+
Record.clear();
StringRef Blob;
unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
@@ -4050,41 +4291,6 @@ bool ASTReader::readASTFileControlBlock(
Listener.ReadModuleMapFile(Path);
break;
}
- case LANGUAGE_OPTIONS:
- if (ParseLanguageOptions(Record, false, Listener,
- /*AllowCompatibleConfigurationMismatch*/false))
- return true;
- break;
-
- case TARGET_OPTIONS:
- if (ParseTargetOptions(Record, false, Listener,
- /*AllowCompatibleConfigurationMismatch*/ false))
- return true;
- break;
-
- case DIAGNOSTIC_OPTIONS:
- if (ParseDiagnosticOptions(Record, false, Listener))
- return true;
- break;
-
- case FILE_SYSTEM_OPTIONS:
- if (ParseFileSystemOptions(Record, false, Listener))
- return true;
- break;
-
- case HEADER_SEARCH_OPTIONS:
- if (ParseHeaderSearchOptions(Record, false, Listener))
- return true;
- break;
-
- case PREPROCESSOR_OPTIONS: {
- std::string IgnoredSuggestedPredefines;
- if (ParsePreprocessorOptions(Record, false, Listener,
- IgnoredSuggestedPredefines))
- return true;
- break;
- }
-
case INPUT_FILE_OFFSETS: {
if (!NeedsInputFiles)
break;
@@ -4112,8 +4318,8 @@ bool ASTReader::readASTFileControlBlock(
bool Overridden = static_cast<bool>(Record[3]);
std::string Filename = Blob;
ResolveImportedPath(Filename, ModuleDir);
- shouldContinue =
- Listener.visitInputFile(Filename, isSystemFile, Overridden);
+ shouldContinue = Listener.visitInputFile(
+ Filename, isSystemFile, Overridden, /*IsExplicitModule*/false);
break;
}
if (!shouldContinue)
@@ -4137,25 +4343,55 @@ bool ASTReader::readASTFileControlBlock(
break;
}
- case KNOWN_MODULE_FILES: {
- // Known-but-not-technically-used module files are treated as imports.
- if (!NeedsImports)
- break;
-
- unsigned Idx = 0, N = Record.size();
- while (Idx < N) {
- std::string Filename = ReadString(Record, Idx);
- ResolveImportedPath(Filename, ModuleDir);
- Listener.visitImport(Filename);
- }
- break;
- }
-
default:
// No other validation to perform.
break;
}
}
+
+ // Look for module file extension blocks, if requested.
+ if (FindModuleFileExtensions) {
+ while (!SkipCursorToBlock(Stream, EXTENSION_BLOCK_ID)) {
+ bool DoneWithExtensionBlock = false;
+ while (!DoneWithExtensionBlock) {
+ llvm::BitstreamEntry Entry = Stream.advance();
+
+ switch (Entry.Kind) {
+ case llvm::BitstreamEntry::SubBlock:
+ if (Stream.SkipBlock())
+ return true;
+
+ continue;
+
+ case llvm::BitstreamEntry::EndBlock:
+ DoneWithExtensionBlock = true;
+ continue;
+
+ case llvm::BitstreamEntry::Error:
+ return true;
+
+ case llvm::BitstreamEntry::Record:
+ break;
+ }
+
+ Record.clear();
+ StringRef Blob;
+ unsigned RecCode = Stream.readRecord(Entry.ID, Record, &Blob);
+ switch (RecCode) {
+ case EXTENSION_METADATA: {
+ ModuleFileExtensionMetadata Metadata;
+ if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
+ return true;
+
+ Listener.readModuleFileExtension(Metadata);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
}
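Together with the new FindModuleFileExtensions flag, this lets tools inspect extension metadata without deserializing the AST. A sketch using a hypothetical listener subclass (readModuleFileExtension is the hook invoked above):

    class DumpExtensionsListener : public ASTReaderListener {
      void readModuleFileExtension(
          const ModuleFileExtensionMetadata &Metadata) override {
        llvm::outs() << Metadata.BlockName << " v" << Metadata.MajorVersion
                     << "." << Metadata.MinorVersion << "\n";
      }
    };

    DumpExtensionsListener L;
    ASTReader::readASTFileControlBlock(FileName, FileMgr, PCHContainerRdr,
                                       /*FindModuleFileExtensions=*/true, L);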
bool ASTReader::isAcceptableASTFile(
@@ -4166,6 +4402,7 @@ bool ASTReader::isAcceptableASTFile(
SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts,
ExistingModuleCachePath, FileMgr);
return !readASTFileControlBlock(Filename, FileMgr, PCHContainerRdr,
+ /*FindModuleFileExtensions=*/false,
validator);
}
@@ -4846,22 +5083,19 @@ namespace {
public:
explicit HeaderFileInfoVisitor(const FileEntry *FE)
: FE(FE) { }
-
- static bool visit(ModuleFile &M, void *UserData) {
- HeaderFileInfoVisitor *This
- = static_cast<HeaderFileInfoVisitor *>(UserData);
-
+
+ bool operator()(ModuleFile &M) {
HeaderFileInfoLookupTable *Table
= static_cast<HeaderFileInfoLookupTable *>(M.HeaderFileInfoTable);
if (!Table)
return false;
// Look in the on-disk hash table for an entry for this file name.
- HeaderFileInfoLookupTable::iterator Pos = Table->find(This->FE);
+ HeaderFileInfoLookupTable::iterator Pos = Table->find(FE);
if (Pos == Table->end())
return false;
- This->HFI = *Pos;
+ HFI = *Pos;
return true;
}
@@ -4871,7 +5105,7 @@ namespace {
HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
HeaderFileInfoVisitor Visitor(FE);
- ModuleMgr.visit(&HeaderFileInfoVisitor::visit, &Visitor);
+ ModuleMgr.visit(Visitor);
if (Optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo())
return *HFI;
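This visitor, like the others converted in this patch, drops the C-style callback-plus-void* protocol in favor of a function object; ModuleMgr.visit() now accepts any callable matching bool(ModuleFile &), so an ad-hoc lambda works just as well — a minimal sketch:

    // Returning true halts the walk, exactly as the old static visit()
    // callbacks did.
    ModuleMgr.visit([&](ModuleFile &M) {
      return M.HeaderFileInfoTable != nullptr; // stop at the first hit
    });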
@@ -5181,9 +5415,9 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
case TYPE_AUTO: {
QualType Deduced = readType(*Loc.F, Record, Idx);
- bool IsDecltypeAuto = Record[Idx++];
+ AutoTypeKeyword Keyword = (AutoTypeKeyword)Record[Idx++];
bool IsDependent = Deduced.isNull() ? Record[Idx++] : false;
- return Context.getAutoType(Deduced, IsDecltypeAuto, IsDependent);
+ return Context.getAutoType(Deduced, Keyword, IsDependent);
}
case TYPE_RECORD: {
@@ -5335,7 +5569,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
unsigned Idx = 0;
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
- const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = GetIdentifierInfo(*Loc.F, Record, Idx);
QualType Canon = readType(*Loc.F, Record, Idx);
if (!Canon.isNull())
Canon = Context.getCanonicalType(Canon);
@@ -5346,7 +5580,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
unsigned Idx = 0;
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
- const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = GetIdentifierInfo(*Loc.F, Record, Idx);
unsigned NumArgs = Record[Idx++];
SmallVector<TemplateArgument, 8> Args;
Args.reserve(NumArgs);
@@ -5692,9 +5926,14 @@ QualType ASTReader::GetType(TypeID ID) {
if (Index < NUM_PREDEF_TYPE_IDS) {
QualType T;
switch ((PredefinedTypeIDs)Index) {
- case PREDEF_TYPE_NULL_ID: return QualType();
- case PREDEF_TYPE_VOID_ID: T = Context.VoidTy; break;
- case PREDEF_TYPE_BOOL_ID: T = Context.BoolTy; break;
+ case PREDEF_TYPE_NULL_ID:
+ return QualType();
+ case PREDEF_TYPE_VOID_ID:
+ T = Context.VoidTy;
+ break;
+ case PREDEF_TYPE_BOOL_ID:
+ T = Context.BoolTy;
+ break;
case PREDEF_TYPE_CHAR_U_ID:
case PREDEF_TYPE_CHAR_S_ID:
@@ -5702,59 +5941,163 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.CharTy;
break;
- case PREDEF_TYPE_UCHAR_ID: T = Context.UnsignedCharTy; break;
- case PREDEF_TYPE_USHORT_ID: T = Context.UnsignedShortTy; break;
- case PREDEF_TYPE_UINT_ID: T = Context.UnsignedIntTy; break;
- case PREDEF_TYPE_ULONG_ID: T = Context.UnsignedLongTy; break;
- case PREDEF_TYPE_ULONGLONG_ID: T = Context.UnsignedLongLongTy; break;
- case PREDEF_TYPE_UINT128_ID: T = Context.UnsignedInt128Ty; break;
- case PREDEF_TYPE_SCHAR_ID: T = Context.SignedCharTy; break;
- case PREDEF_TYPE_WCHAR_ID: T = Context.WCharTy; break;
- case PREDEF_TYPE_SHORT_ID: T = Context.ShortTy; break;
- case PREDEF_TYPE_INT_ID: T = Context.IntTy; break;
- case PREDEF_TYPE_LONG_ID: T = Context.LongTy; break;
- case PREDEF_TYPE_LONGLONG_ID: T = Context.LongLongTy; break;
- case PREDEF_TYPE_INT128_ID: T = Context.Int128Ty; break;
- case PREDEF_TYPE_HALF_ID: T = Context.HalfTy; break;
- case PREDEF_TYPE_FLOAT_ID: T = Context.FloatTy; break;
- case PREDEF_TYPE_DOUBLE_ID: T = Context.DoubleTy; break;
- case PREDEF_TYPE_LONGDOUBLE_ID: T = Context.LongDoubleTy; break;
- case PREDEF_TYPE_OVERLOAD_ID: T = Context.OverloadTy; break;
- case PREDEF_TYPE_BOUND_MEMBER: T = Context.BoundMemberTy; break;
- case PREDEF_TYPE_PSEUDO_OBJECT: T = Context.PseudoObjectTy; break;
- case PREDEF_TYPE_DEPENDENT_ID: T = Context.DependentTy; break;
- case PREDEF_TYPE_UNKNOWN_ANY: T = Context.UnknownAnyTy; break;
- case PREDEF_TYPE_NULLPTR_ID: T = Context.NullPtrTy; break;
- case PREDEF_TYPE_CHAR16_ID: T = Context.Char16Ty; break;
- case PREDEF_TYPE_CHAR32_ID: T = Context.Char32Ty; break;
- case PREDEF_TYPE_OBJC_ID: T = Context.ObjCBuiltinIdTy; break;
- case PREDEF_TYPE_OBJC_CLASS: T = Context.ObjCBuiltinClassTy; break;
- case PREDEF_TYPE_OBJC_SEL: T = Context.ObjCBuiltinSelTy; break;
- case PREDEF_TYPE_IMAGE1D_ID: T = Context.OCLImage1dTy; break;
- case PREDEF_TYPE_IMAGE1D_ARR_ID: T = Context.OCLImage1dArrayTy; break;
- case PREDEF_TYPE_IMAGE1D_BUFF_ID: T = Context.OCLImage1dBufferTy; break;
- case PREDEF_TYPE_IMAGE2D_ID: T = Context.OCLImage2dTy; break;
- case PREDEF_TYPE_IMAGE2D_ARR_ID: T = Context.OCLImage2dArrayTy; break;
- case PREDEF_TYPE_IMAGE3D_ID: T = Context.OCLImage3dTy; break;
- case PREDEF_TYPE_SAMPLER_ID: T = Context.OCLSamplerTy; break;
- case PREDEF_TYPE_EVENT_ID: T = Context.OCLEventTy; break;
- case PREDEF_TYPE_AUTO_DEDUCT: T = Context.getAutoDeductType(); break;
-
- case PREDEF_TYPE_AUTO_RREF_DEDUCT:
- T = Context.getAutoRRefDeductType();
+ case PREDEF_TYPE_UCHAR_ID:
+ T = Context.UnsignedCharTy;
+ break;
+ case PREDEF_TYPE_USHORT_ID:
+ T = Context.UnsignedShortTy;
+ break;
+ case PREDEF_TYPE_UINT_ID:
+ T = Context.UnsignedIntTy;
+ break;
+ case PREDEF_TYPE_ULONG_ID:
+ T = Context.UnsignedLongTy;
+ break;
+ case PREDEF_TYPE_ULONGLONG_ID:
+ T = Context.UnsignedLongLongTy;
+ break;
+ case PREDEF_TYPE_UINT128_ID:
+ T = Context.UnsignedInt128Ty;
+ break;
+ case PREDEF_TYPE_SCHAR_ID:
+ T = Context.SignedCharTy;
+ break;
+ case PREDEF_TYPE_WCHAR_ID:
+ T = Context.WCharTy;
+ break;
+ case PREDEF_TYPE_SHORT_ID:
+ T = Context.ShortTy;
+ break;
+ case PREDEF_TYPE_INT_ID:
+ T = Context.IntTy;
+ break;
+ case PREDEF_TYPE_LONG_ID:
+ T = Context.LongTy;
+ break;
+ case PREDEF_TYPE_LONGLONG_ID:
+ T = Context.LongLongTy;
+ break;
+ case PREDEF_TYPE_INT128_ID:
+ T = Context.Int128Ty;
+ break;
+ case PREDEF_TYPE_HALF_ID:
+ T = Context.HalfTy;
+ break;
+ case PREDEF_TYPE_FLOAT_ID:
+ T = Context.FloatTy;
+ break;
+ case PREDEF_TYPE_DOUBLE_ID:
+ T = Context.DoubleTy;
+ break;
+ case PREDEF_TYPE_LONGDOUBLE_ID:
+ T = Context.LongDoubleTy;
+ break;
+ case PREDEF_TYPE_OVERLOAD_ID:
+ T = Context.OverloadTy;
+ break;
+ case PREDEF_TYPE_BOUND_MEMBER:
+ T = Context.BoundMemberTy;
+ break;
+ case PREDEF_TYPE_PSEUDO_OBJECT:
+ T = Context.PseudoObjectTy;
+ break;
+ case PREDEF_TYPE_DEPENDENT_ID:
+ T = Context.DependentTy;
+ break;
+ case PREDEF_TYPE_UNKNOWN_ANY:
+ T = Context.UnknownAnyTy;
+ break;
+ case PREDEF_TYPE_NULLPTR_ID:
+ T = Context.NullPtrTy;
+ break;
+ case PREDEF_TYPE_CHAR16_ID:
+ T = Context.Char16Ty;
+ break;
+ case PREDEF_TYPE_CHAR32_ID:
+ T = Context.Char32Ty;
+ break;
+ case PREDEF_TYPE_OBJC_ID:
+ T = Context.ObjCBuiltinIdTy;
+ break;
+ case PREDEF_TYPE_OBJC_CLASS:
+ T = Context.ObjCBuiltinClassTy;
+ break;
+ case PREDEF_TYPE_OBJC_SEL:
+ T = Context.ObjCBuiltinSelTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_ID:
+ T = Context.OCLImage1dTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_ARR_ID:
+ T = Context.OCLImage1dArrayTy;
+ break;
+ case PREDEF_TYPE_IMAGE1D_BUFF_ID:
+ T = Context.OCLImage1dBufferTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ID:
+ T = Context.OCLImage2dTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_ID:
+ T = Context.OCLImage2dArrayTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_DEP_ID:
+ T = Context.OCLImage2dDepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_DEP_ID:
+ T = Context.OCLImage2dArrayDepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_MSAA_ID:
+ T = Context.OCLImage2dMSAATy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_MSAA_ID:
+ T = Context.OCLImage2dArrayMSAATy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_MSAA_DEP_ID:
+ T = Context.OCLImage2dMSAADepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE2D_ARR_MSAA_DEPTH_ID:
+ T = Context.OCLImage2dArrayMSAADepthTy;
+ break;
+ case PREDEF_TYPE_IMAGE3D_ID:
+ T = Context.OCLImage3dTy;
+ break;
+ case PREDEF_TYPE_SAMPLER_ID:
+ T = Context.OCLSamplerTy;
+ break;
+ case PREDEF_TYPE_EVENT_ID:
+ T = Context.OCLEventTy;
+ break;
+ case PREDEF_TYPE_CLK_EVENT_ID:
+ T = Context.OCLClkEventTy;
+ break;
+ case PREDEF_TYPE_QUEUE_ID:
+ T = Context.OCLQueueTy;
+ break;
+ case PREDEF_TYPE_NDRANGE_ID:
+ T = Context.OCLNDRangeTy;
+ break;
+ case PREDEF_TYPE_RESERVE_ID_ID:
+ T = Context.OCLReserveIDTy;
+ break;
+ case PREDEF_TYPE_AUTO_DEDUCT:
+ T = Context.getAutoDeductType();
break;
- case PREDEF_TYPE_ARC_UNBRIDGED_CAST:
- T = Context.ARCUnbridgedCastTy;
+ case PREDEF_TYPE_AUTO_RREF_DEDUCT:
+ T = Context.getAutoRRefDeductType();
break;
- case PREDEF_TYPE_VA_LIST_TAG:
- T = Context.getVaListTagType();
+ case PREDEF_TYPE_ARC_UNBRIDGED_CAST:
+ T = Context.ARCUnbridgedCastTy;
break;
case PREDEF_TYPE_BUILTIN_FN:
T = Context.BuiltinFnTy;
break;
+
+ case PREDEF_TYPE_OMP_ARRAY_SECTION:
+ T = Context.OMPArraySectionTy;
+ break;
}
assert(!T.isNull() && "Unknown predefined type");
@@ -5889,17 +6232,25 @@ void ASTReader::CompleteRedeclChain(const Decl *D) {
if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC) ||
isa<CXXRecordDecl>(DC) || isa<EnumDecl>(DC)) {
if (DeclarationName Name = cast<NamedDecl>(D)->getDeclName()) {
- auto *II = Name.getAsIdentifierInfo();
- if (isa<TranslationUnitDecl>(DC) && II) {
+ if (!getContext().getLangOpts().CPlusPlus &&
+ isa<TranslationUnitDecl>(DC)) {
// Outside of C++, we don't have a lookup table for the TU, so update
- // the identifier instead. In C++, either way should work fine.
+ // the identifier instead. (For C++ modules, we don't store decls
+ // in the serialized identifier table, so we do the lookup in the TU.)
+ auto *II = Name.getAsIdentifierInfo();
+ assert(II && "non-identifier name in C?");
if (II->isOutOfDate())
updateOutOfDateIdentifier(*II);
} else
DC->lookup(Name);
} else if (needsAnonymousDeclarationNumber(cast<NamedDecl>(D))) {
- // FIXME: It'd be nice to do something a bit more targeted here.
- D->getDeclContext()->decls_begin();
+ // Find all declarations of this kind from the relevant context.
+ for (auto *DCDecl : cast<Decl>(D->getLexicalDeclContext())->redecls()) {
+ auto *DC = cast<DeclContext>(DCDecl);
+ SmallVector<Decl*, 8> Decls;
+ FindExternalLexicalDecls(
+ DC, [&](Decl::Kind K) { return K == D->getKind(); }, Decls);
+ }
}
}
@@ -6061,8 +6412,17 @@ static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
case PREDEF_DECL_BUILTIN_VA_LIST_ID:
return Context.getBuiltinVaListDecl();
+ case PREDEF_DECL_VA_LIST_TAG:
+ return Context.getVaListTagDecl();
+
+ case PREDEF_DECL_BUILTIN_MS_VA_LIST_ID:
+ return Context.getBuiltinMSVaListDecl();
+
case PREDEF_DECL_EXTERN_C_CONTEXT_ID:
return Context.getExternCContextDecl();
+
+ case PREDEF_DECL_MAKE_INTEGER_SEQ_ID:
+ return Context.getMakeIntegerSeqDecl();
}
llvm_unreachable("PredefinedDeclIDs unknown enum value");
}
@@ -6155,71 +6515,47 @@ Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
return ReadStmtFromStream(*Loc.F);
}
-namespace {
- class FindExternalLexicalDeclsVisitor {
- ASTReader &Reader;
- const DeclContext *DC;
- bool (*isKindWeWant)(Decl::Kind);
-
- SmallVectorImpl<Decl*> &Decls;
- bool PredefsVisited[NUM_PREDEF_DECL_IDS];
-
- public:
- FindExternalLexicalDeclsVisitor(ASTReader &Reader, const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
- SmallVectorImpl<Decl*> &Decls)
- : Reader(Reader), DC(DC), isKindWeWant(isKindWeWant), Decls(Decls)
- {
- for (unsigned I = 0; I != NUM_PREDEF_DECL_IDS; ++I)
- PredefsVisited[I] = false;
- }
+void ASTReader::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Decls) {
+ bool PredefsVisited[NUM_PREDEF_DECL_IDS] = {};
- static bool visitPostorder(ModuleFile &M, void *UserData) {
- FindExternalLexicalDeclsVisitor *This
- = static_cast<FindExternalLexicalDeclsVisitor *>(UserData);
+ auto Visit = [&] (ModuleFile *M, LexicalContents LexicalDecls) {
+ assert(LexicalDecls.size() % 2 == 0 && "expected an even number of entries");
+ for (int I = 0, N = LexicalDecls.size(); I != N; I += 2) {
+ auto K = (Decl::Kind)+LexicalDecls[I];
+ if (!IsKindWeWant(K))
+ continue;
- ModuleFile::DeclContextInfosMap::iterator Info
- = M.DeclContextInfos.find(This->DC);
- if (Info == M.DeclContextInfos.end() || !Info->second.LexicalDecls)
- return false;
+ auto ID = (serialization::DeclID)+LexicalDecls[I + 1];
- // Load all of the declaration IDs
- for (const KindDeclIDPair *ID = Info->second.LexicalDecls,
- *IDE = ID + Info->second.NumLexicalDecls;
- ID != IDE; ++ID) {
- if (This->isKindWeWant && !This->isKindWeWant((Decl::Kind)ID->first))
+ // Don't add predefined declarations to the lexical context more
+ // than once.
+ if (ID < NUM_PREDEF_DECL_IDS) {
+ if (PredefsVisited[ID])
continue;
- // Don't add predefined declarations to the lexical context more
- // than once.
- if (ID->second < NUM_PREDEF_DECL_IDS) {
- if (This->PredefsVisited[ID->second])
- continue;
-
- This->PredefsVisited[ID->second] = true;
- }
-
- if (Decl *D = This->Reader.GetLocalDecl(M, ID->second)) {
- if (!This->DC->isDeclInLexicalTraversal(D))
- This->Decls.push_back(D);
- }
+ PredefsVisited[ID] = true;
}
- return false;
+ if (Decl *D = GetLocalDecl(*M, ID)) {
+ assert(D->getKind() == K && "wrong kind for lexical decl");
+ if (!DC->isDeclInLexicalTraversal(D))
+ Decls.push_back(D);
+ }
}
};
-}
-ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
- bool (*isKindWeWant)(Decl::Kind),
- SmallVectorImpl<Decl*> &Decls) {
- // There might be lexical decls in multiple modules, for the TU at
- // least. Walk all of the modules in the order they were loaded.
- FindExternalLexicalDeclsVisitor Visitor(*this, DC, isKindWeWant, Decls);
- ModuleMgr.visitDepthFirst(
- nullptr, &FindExternalLexicalDeclsVisitor::visitPostorder, &Visitor);
+ if (isa<TranslationUnitDecl>(DC)) {
+ for (auto Lexical : TULexicalDecls)
+ Visit(Lexical.first, Lexical.second);
+ } else {
+ auto I = LexicalDecls.find(DC);
+ if (I != LexicalDecls.end())
+ Visit(I->second.first, I->second.second);
+ }
+
++NumLexicalDeclContextsRead;
- return ELR_Success;
}
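Callers now filter by kind with an inline predicate instead of a function pointer. A sketch of a typical call, given the signature above:

    // Collect the tag declarations lexically stored in DC across all loaded
    // module files.
    SmallVector<Decl *, 16> Decls;
    Reader.FindExternalLexicalDecls(
        DC, [](Decl::Kind K) { return K == Decl::Record || K == Decl::Enum; },
        Decls);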
namespace {
@@ -6298,168 +6634,26 @@ void ASTReader::FindFileRegionDecls(FileID File,
Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
}
-/// \brief Retrieve the "definitive" module file for the definition of the
-/// given declaration context, if there is one.
-///
-/// The "definitive" module file is the only place where we need to look to
-/// find information about the declarations within the given declaration
-/// context. For example, C++ and Objective-C classes, C structs/unions, and
-/// Objective-C protocols, categories, and extensions are all defined in a
-/// single place in the source code, so they have definitive module files
-/// associated with them. C++ namespaces, on the other hand, can have
-/// definitions in multiple different module files.
-///
-/// Note: this needs to be kept in sync with ASTWriter::AddedVisibleDecl's
-/// NDEBUG checking.
-static ModuleFile *getDefinitiveModuleFileFor(const DeclContext *DC,
- ASTReader &Reader) {
- if (const DeclContext *DefDC = getDefinitiveDeclContext(DC))
- return Reader.getOwningModuleFile(cast<Decl>(DefDC));
-
- return nullptr;
-}
-
-namespace {
- /// \brief ModuleFile visitor used to perform name lookup into a
- /// declaration context.
- class DeclContextNameLookupVisitor {
- ASTReader &Reader;
- ArrayRef<const DeclContext *> Contexts;
- DeclarationName Name;
- ASTDeclContextNameLookupTrait::DeclNameKey NameKey;
- unsigned NameHash;
- SmallVectorImpl<NamedDecl *> &Decls;
- llvm::SmallPtrSetImpl<NamedDecl *> &DeclSet;
-
- public:
- DeclContextNameLookupVisitor(ASTReader &Reader,
- DeclarationName Name,
- SmallVectorImpl<NamedDecl *> &Decls,
- llvm::SmallPtrSetImpl<NamedDecl *> &DeclSet)
- : Reader(Reader), Name(Name),
- NameKey(ASTDeclContextNameLookupTrait::GetInternalKey(Name)),
- NameHash(ASTDeclContextNameLookupTrait::ComputeHash(NameKey)),
- Decls(Decls), DeclSet(DeclSet) {}
-
- void visitContexts(ArrayRef<const DeclContext*> Contexts) {
- if (Contexts.empty())
- return;
- this->Contexts = Contexts;
-
- // If we can definitively determine which module file to look into,
- // only look there. Otherwise, look in all module files.
- ModuleFile *Definitive;
- if (Contexts.size() == 1 &&
- (Definitive = getDefinitiveModuleFileFor(Contexts[0], Reader))) {
- visit(*Definitive, this);
- } else {
- Reader.getModuleManager().visit(&visit, this);
- }
- }
-
- private:
- static bool visit(ModuleFile &M, void *UserData) {
- DeclContextNameLookupVisitor *This
- = static_cast<DeclContextNameLookupVisitor *>(UserData);
-
- // Check whether we have any visible declaration information for
- // this context in this module.
- ModuleFile::DeclContextInfosMap::iterator Info;
- bool FoundInfo = false;
- for (auto *DC : This->Contexts) {
- Info = M.DeclContextInfos.find(DC);
- if (Info != M.DeclContextInfos.end() &&
- Info->second.NameLookupTableData) {
- FoundInfo = true;
- break;
- }
- }
-
- if (!FoundInfo)
- return false;
-
- // Look for this name within this module.
- ASTDeclContextNameLookupTable *LookupTable =
- Info->second.NameLookupTableData;
- ASTDeclContextNameLookupTable::iterator Pos
- = LookupTable->find_hashed(This->NameKey, This->NameHash);
- if (Pos == LookupTable->end())
- return false;
-
- bool FoundAnything = false;
- ASTDeclContextNameLookupTrait::data_type Data = *Pos;
- for (; Data.first != Data.second; ++Data.first) {
- NamedDecl *ND = This->Reader.GetLocalDeclAs<NamedDecl>(M, *Data.first);
- if (!ND)
- continue;
-
- if (ND->getDeclName() != This->Name) {
- // A name might be null because the decl's redeclarable part is
- // currently read before reading its name. The lookup is triggered by
- // building that decl (likely indirectly), and so it is later in the
- // sense of "already existing" and can be ignored here.
- // FIXME: This should not happen; deserializing declarations should
- // not perform lookups since that can lead to deserialization cycles.
- continue;
- }
-
- // Record this declaration.
- FoundAnything = true;
- if (This->DeclSet.insert(ND).second)
- This->Decls.push_back(ND);
- }
-
- return FoundAnything;
- }
- };
-}
-
bool
ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) {
- assert(DC->hasExternalVisibleStorage() &&
+ assert(DC->hasExternalVisibleStorage() && DC == DC->getPrimaryContext() &&
"DeclContext has no visible decls in storage");
if (!Name)
return false;
+ auto It = Lookups.find(DC);
+ if (It == Lookups.end())
+ return false;
+
Deserializing LookupResults(this);
+ // Load the list of declarations.
SmallVector<NamedDecl *, 64> Decls;
- llvm::SmallPtrSet<NamedDecl*, 64> DeclSet;
-
- // Compute the declaration contexts we need to look into. Multiple such
- // declaration contexts occur when two declaration contexts from disjoint
- // modules get merged, e.g., when two namespaces with the same name are
- // independently defined in separate modules.
- SmallVector<const DeclContext *, 2> Contexts;
- Contexts.push_back(DC);
-
- if (DC->isNamespace()) {
- auto Key = KeyDecls.find(const_cast<Decl *>(cast<Decl>(DC)));
- if (Key != KeyDecls.end()) {
- for (unsigned I = 0, N = Key->second.size(); I != N; ++I)
- Contexts.push_back(cast<DeclContext>(GetDecl(Key->second[I])));
- }
- }
-
- DeclContextNameLookupVisitor Visitor(*this, Name, Decls, DeclSet);
- Visitor.visitContexts(Contexts);
-
- // If this might be an implicit special member function, then also search
- // all merged definitions of the surrounding class. We need to search them
- // individually, because finding an entity in one of them doesn't imply that
- // we can't find a different entity in another one.
- if (isa<CXXRecordDecl>(DC)) {
- auto Merged = MergedLookups.find(DC);
- if (Merged != MergedLookups.end()) {
- for (unsigned I = 0; I != Merged->second.size(); ++I) {
- const DeclContext *Context = Merged->second[I];
- Visitor.visitContexts(Context);
- // We might have just added some more merged lookups. If so, our
- // iterator is now invalid, so grab a fresh one before continuing.
- Merged = MergedLookups.find(DC);
- }
- }
+ for (DeclID ID : It->second.Table.find(Name)) {
+ NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
+ if (ND->getDeclName() == Name)
+ Decls.push_back(ND);
}
++NumVisibleDeclContextsRead;
@@ -6467,92 +6661,21 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
return !Decls.empty();
}
-namespace {
- /// \brief ModuleFile visitor used to retrieve all visible names in a
- /// declaration context.
- class DeclContextAllNamesVisitor {
- ASTReader &Reader;
- SmallVectorImpl<const DeclContext *> &Contexts;
- DeclsMap &Decls;
- llvm::SmallPtrSet<NamedDecl *, 256> DeclSet;
- bool VisitAll;
-
- public:
- DeclContextAllNamesVisitor(ASTReader &Reader,
- SmallVectorImpl<const DeclContext *> &Contexts,
- DeclsMap &Decls, bool VisitAll)
- : Reader(Reader), Contexts(Contexts), Decls(Decls), VisitAll(VisitAll) { }
-
- static bool visit(ModuleFile &M, void *UserData) {
- DeclContextAllNamesVisitor *This
- = static_cast<DeclContextAllNamesVisitor *>(UserData);
-
- // Check whether we have any visible declaration information for
- // this context in this module.
- ModuleFile::DeclContextInfosMap::iterator Info;
- bool FoundInfo = false;
- for (unsigned I = 0, N = This->Contexts.size(); I != N; ++I) {
- Info = M.DeclContextInfos.find(This->Contexts[I]);
- if (Info != M.DeclContextInfos.end() &&
- Info->second.NameLookupTableData) {
- FoundInfo = true;
- break;
- }
- }
-
- if (!FoundInfo)
- return false;
-
- ASTDeclContextNameLookupTable *LookupTable =
- Info->second.NameLookupTableData;
- bool FoundAnything = false;
- for (ASTDeclContextNameLookupTable::data_iterator
- I = LookupTable->data_begin(), E = LookupTable->data_end();
- I != E;
- ++I) {
- ASTDeclContextNameLookupTrait::data_type Data = *I;
- for (; Data.first != Data.second; ++Data.first) {
- NamedDecl *ND = This->Reader.GetLocalDeclAs<NamedDecl>(M,
- *Data.first);
- if (!ND)
- continue;
-
- // Record this declaration.
- FoundAnything = true;
- if (This->DeclSet.insert(ND).second)
- This->Decls[ND->getDeclName()].push_back(ND);
- }
- }
-
- return FoundAnything && !This->VisitAll;
- }
- };
-}
-
void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) {
if (!DC->hasExternalVisibleStorage())
return;
- DeclsMap Decls;
- // Compute the declaration contexts we need to look into. Multiple such
- // declaration contexts occur when two declaration contexts from disjoint
- // modules get merged, e.g., when two namespaces with the same name are
- // independently defined in separate modules.
- SmallVector<const DeclContext *, 2> Contexts;
- Contexts.push_back(DC);
+ auto It = Lookups.find(DC);
+ assert(It != Lookups.end() &&
+ "have external visible storage but no lookup tables");
- if (DC->isNamespace()) {
- KeyDeclsMap::iterator Key =
- KeyDecls.find(const_cast<Decl *>(cast<Decl>(DC)));
- if (Key != KeyDecls.end()) {
- for (unsigned I = 0, N = Key->second.size(); I != N; ++I)
- Contexts.push_back(cast<DeclContext>(GetDecl(Key->second[I])));
- }
+ DeclsMap Decls;
+
+ for (DeclID ID : It->second.Table.findAll()) {
+ NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
+ Decls[ND->getDeclName()].push_back(ND);
}
- DeclContextAllNamesVisitor Visitor(*this, Contexts, Decls,
- /*VisitAll=*/DC->isFileContext());
- ModuleMgr.visit(&DeclContextAllNamesVisitor::visit, &Visitor);
++NumVisibleDeclContextsRead;
for (DeclsMap::iterator I = Decls.begin(), E = Decls.end(); I != E; ++I) {
@@ -6561,6 +6684,12 @@ void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) {
const_cast<DeclContext *>(DC)->setHasExternalVisibleStorage(false);
}
+const serialization::reader::DeclContextLookupTable *
+ASTReader::getLoadedLookupTables(DeclContext *Primary) const {
+ auto I = Lookups.find(Primary);
+ return I == Lookups.end() ? nullptr : &I->second;
+}
+
/// \brief Under non-PCH compilation the consumer receives the objc methods
/// before receiving the implementation, and codegen depends on this.
/// We simulate this by deserializing and passing to consumer the methods of the
@@ -6824,24 +6953,36 @@ void ASTReader::UpdateSema() {
SemaObj->ActOnPragmaOptimize(/* IsOn = */ false, OptimizeOffPragmaLocation);
}
-IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
+IdentifierInfo *ASTReader::get(StringRef Name) {
// Note that we are loading an identifier.
Deserializing AnIdentifier(this);
- StringRef Name(NameStart, NameEnd - NameStart);
- // If there is a global index, look there first to determine which modules
- // provably do not have any results for this identifier.
- GlobalModuleIndex::HitSet Hits;
- GlobalModuleIndex::HitSet *HitsPtr = nullptr;
- if (!loadGlobalIndex()) {
- if (GlobalIndex->lookupIdentifier(Name, Hits)) {
- HitsPtr = &Hits;
- }
- }
IdentifierLookupVisitor Visitor(Name, /*PriorGeneration=*/0,
NumIdentifierLookups,
NumIdentifierLookupHits);
- ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor, HitsPtr);
+
+ // We don't need to do identifier table lookups in C++ modules (we preload
+ // all interesting declarations, and don't need to use the scope for name
+ // lookups). Perform the lookup in PCH files, though, since we don't build
+ // a complete initial identifier table if we're carrying on from a PCH.
+ if (Context.getLangOpts().CPlusPlus) {
+ for (auto F : ModuleMgr.pch_modules())
+ if (Visitor(*F))
+ break;
+ } else {
+ // If there is a global index, look there first to determine which modules
+ // provably do not have any results for this identifier.
+ GlobalModuleIndex::HitSet Hits;
+ GlobalModuleIndex::HitSet *HitsPtr = nullptr;
+ if (!loadGlobalIndex()) {
+ if (GlobalIndex->lookupIdentifier(Name, Hits)) {
+ HitsPtr = &Hits;
+ }
+ }
+
+ ModuleMgr.visit(Visitor, HitsPtr);
+ }
+
IdentifierInfo *II = Visitor.getIdentifierInfo();
markIdentifierUpToDate(II);
return II;
@@ -6928,41 +7069,37 @@ namespace clang { namespace serialization {
InstanceBits(0), FactoryBits(0), InstanceHasMoreThanOneDecl(false),
FactoryHasMoreThanOneDecl(false) {}
- static bool visit(ModuleFile &M, void *UserData) {
- ReadMethodPoolVisitor *This
- = static_cast<ReadMethodPoolVisitor *>(UserData);
-
+ bool operator()(ModuleFile &M) {
if (!M.SelectorLookupTable)
return false;
// If we've already searched this module file, skip it now.
- if (M.Generation <= This->PriorGeneration)
+ if (M.Generation <= PriorGeneration)
return true;
- ++This->Reader.NumMethodPoolTableLookups;
+ ++Reader.NumMethodPoolTableLookups;
ASTSelectorLookupTable *PoolTable
= (ASTSelectorLookupTable*)M.SelectorLookupTable;
- ASTSelectorLookupTable::iterator Pos = PoolTable->find(This->Sel);
+ ASTSelectorLookupTable::iterator Pos = PoolTable->find(Sel);
if (Pos == PoolTable->end())
return false;
- ++This->Reader.NumMethodPoolTableHits;
- ++This->Reader.NumSelectorsRead;
+ ++Reader.NumMethodPoolTableHits;
+ ++Reader.NumSelectorsRead;
// FIXME: Not quite happy with the statistics here. We probably should
// disable this tracking when called via LoadSelector.
// Also, should entries without methods count as misses?
- ++This->Reader.NumMethodPoolEntriesRead;
+ ++Reader.NumMethodPoolEntriesRead;
ASTSelectorLookupTrait::data_type Data = *Pos;
- if (This->Reader.DeserializationListener)
- This->Reader.DeserializationListener->SelectorRead(Data.ID,
- This->Sel);
-
- This->InstanceMethods.append(Data.Instance.begin(), Data.Instance.end());
- This->FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
- This->InstanceBits = Data.InstanceBits;
- This->FactoryBits = Data.FactoryBits;
- This->InstanceHasMoreThanOneDecl = Data.InstanceHasMoreThanOneDecl;
- This->FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
+ if (Reader.DeserializationListener)
+ Reader.DeserializationListener->SelectorRead(Data.ID, Sel);
+
+ InstanceMethods.append(Data.Instance.begin(), Data.Instance.end());
+ FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
+ InstanceBits = Data.InstanceBits;
+ FactoryBits = Data.FactoryBits;
+ InstanceHasMoreThanOneDecl = Data.InstanceHasMoreThanOneDecl;
+ FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
return true;
}
@@ -7002,8 +7139,8 @@ void ASTReader::ReadMethodPool(Selector Sel) {
// Search for methods defined with this selector.
++NumMethodPoolLookups;
ReadMethodPoolVisitor Visitor(*this, Sel, PriorGeneration);
- ModuleMgr.visit(&ReadMethodPoolVisitor::visit, &Visitor);
-
+ ModuleMgr.visit(Visitor);
+
if (Visitor.getInstanceMethods().empty() &&
Visitor.getFactoryMethods().empty())
return;
@@ -7384,33 +7521,47 @@ Module *ASTReader::getModule(unsigned ID) {
return getSubmodule(ID);
}
-ExternalASTSource::ASTSourceDescriptor
-ASTReader::getSourceDescriptor(const Module &M) {
- StringRef Dir, Filename;
- if (M.Directory)
- Dir = M.Directory->getName();
- if (auto *File = M.getASTFile())
- Filename = File->getName();
- return ASTReader::ASTSourceDescriptor{
- M.getFullModuleName(), Dir, Filename,
- M.Signature
- };
+ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
+ if (ID & 1) {
+ // It's a module, look it up by submodule ID.
+ auto I = GlobalSubmoduleMap.find(getGlobalSubmoduleID(F, ID >> 1));
+ return I == GlobalSubmoduleMap.end() ? nullptr : I->second;
+ } else {
+ // It's a prefix (preamble, PCH, ...). Look it up by index.
+ unsigned IndexFromEnd = ID >> 1;
+ assert(IndexFromEnd && "got reference to unknown module file");
+ return getModuleManager().pch_modules().end()[-IndexFromEnd];
+ }
+}
+
+unsigned ASTReader::getModuleFileID(ModuleFile *F) {
+ if (!F)
+ return 1;
+
+ // For a file representing a module, use the submodule ID of the top-level
+ // module as the file ID. For any other kind of file, the number of such
+ // files loaded beforehand will be the same on reload.
+ // FIXME: Is this true even if we have an explicit module file and a PCH?
+ if (F->isModule())
+ return ((F->BaseSubmoduleID + NUM_PREDEF_SUBMODULE_IDS) << 1) | 1;
+
+ auto PCHModules = getModuleManager().pch_modules();
+ auto I = std::find(PCHModules.begin(), PCHModules.end(), F);
+ assert(I != PCHModules.end() && "emitting reference to unknown file");
+ return (I - PCHModules.end()) << 1;
}
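The low bit of the ID is the discriminator between the two cases; a short summary of the round trip implied by this pair of functions:

    // module file:  ID = (top-level submodule ID << 1) | 1
    //               -> resolved through GlobalSubmoduleMap via ID >> 1
    // PCH/preamble: ID = IndexFromEnd << 1   (IndexFromEnd >= 1)
    //               -> resolved via pch_modules().end()[-(ID >> 1)]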
llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
ASTReader::getSourceDescriptor(unsigned ID) {
if (const Module *M = getSubmodule(ID))
- return getSourceDescriptor(*M);
+ return ExternalASTSource::ASTSourceDescriptor(*M);
// If there is only a single PCH, return it instead.
// Chained PCH are not suported.
if (ModuleMgr.size() == 1) {
ModuleFile &MF = ModuleMgr.getPrimaryModule();
- return ASTReader::ASTSourceDescriptor{
- MF.OriginalSourceFileName, MF.OriginalDir,
- MF.FileName,
- MF.Signature
- };
+ return ASTReader::ASTSourceDescriptor(
+ MF.OriginalSourceFileName, MF.OriginalDir, MF.FileName, MF.Signature);
}
return None;
}
@@ -7619,9 +7770,19 @@ ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
llvm_unreachable("Unhandled template name kind!");
}
-TemplateArgument
-ASTReader::ReadTemplateArgument(ModuleFile &F,
- const RecordData &Record, unsigned &Idx) {
+TemplateArgument ASTReader::ReadTemplateArgument(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx,
+ bool Canonicalize) {
+ if (Canonicalize) {
+ // The caller wants a canonical template argument. Sometimes the AST only
+ // wants template arguments in canonical form (particularly as the template
+ // argument lists of template specializations) so ensure we preserve that
+ // canonical form across serialization.
+ TemplateArgument Arg = ReadTemplateArgument(F, Record, Idx, false);
+ return Context.getCanonicalTemplateArgument(Arg);
+ }
+
TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
switch (Kind) {
case TemplateArgument::Null:
@@ -7655,7 +7816,7 @@ ASTReader::ReadTemplateArgument(ModuleFile &F,
TemplateArgument *Args = new (Context) TemplateArgument[NumArgs];
for (unsigned I = 0; I != NumArgs; ++I)
Args[I] = ReadTemplateArgument(F, Record, Idx);
- return TemplateArgument(Args, NumArgs);
+ return TemplateArgument(llvm::makeArrayRef(Args, NumArgs));
}
}
@@ -7677,7 +7838,7 @@ ASTReader::ReadTemplateParameterList(ModuleFile &F,
TemplateParameterList* TemplateParams =
TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
- Params.data(), Params.size(), RAngleLoc);
+ Params, RAngleLoc);
return TemplateParams;
}
@@ -7685,11 +7846,11 @@ void
ASTReader::
ReadTemplateArgumentList(SmallVectorImpl<TemplateArgument> &TemplArgs,
ModuleFile &F, const RecordData &Record,
- unsigned &Idx) {
+ unsigned &Idx, bool Canonicalize) {
unsigned NumTemplateArgs = Record[Idx++];
TemplArgs.reserve(NumTemplateArgs);
while (NumTemplateArgs--)
- TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx));
+ TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx, Canonicalize));
}
/// \brief Read a UnresolvedSet structure.
@@ -8070,14 +8231,6 @@ void ASTReader::ReadComments() {
}
}
-void ASTReader::getInputFiles(ModuleFile &F,
- SmallVectorImpl<serialization::InputFile> &Files) {
- for (unsigned I = 0, E = F.InputFilesLoaded.size(); I != E; ++I) {
- unsigned ID = I+1;
- Files.push_back(getInputFile(F, ID));
- }
-}
-
std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) {
// If we know the owning module, use it.
if (Module *M = D->getImportedOwningModule())
@@ -8119,11 +8272,8 @@ void ASTReader::finishPendingActions() {
PendingIncompleteDeclChains.clear();
// Load pending declaration chains.
- for (unsigned I = 0; I != PendingDeclChains.size(); ++I) {
- PendingDeclChainsKnown.erase(PendingDeclChains[I]);
- loadPendingDeclChain(PendingDeclChains[I]);
- }
- assert(PendingDeclChainsKnown.empty());
+ for (unsigned I = 0; I != PendingDeclChains.size(); ++I)
+ loadPendingDeclChain(PendingDeclChains[I].first, PendingDeclChains[I].second);
PendingDeclChains.clear();
// Make the most recent of the top-level declarations visible.
@@ -8232,9 +8382,8 @@ void ASTReader::finishPendingActions() {
// Load the bodies of any functions or methods we've encountered. We do
// this now (delayed) so that we can be sure that the declaration chains
- // have been fully wired up.
- // FIXME: There seems to be no point in delaying this, it does not depend
- // on the redecl chains having been wired up.
+ // have been fully wired up (hasBody relies on this).
+ // FIXME: We shouldn't require complete redeclaration chains here.
for (PendingBodiesMap::iterator PB = PendingBodies.begin(),
PBEnd = PendingBodies.end();
PB != PBEnd; ++PB) {
@@ -8310,21 +8459,26 @@ void ASTReader::diagnoseOdrViolations() {
if (Found)
continue;
+ // Quick check failed, time to do the slow thing. Note, we can't just
+ // look up the name of D in CanonDef here, because the member that is
+ // in CanonDef might not be found by name lookup (it might have been
+ // replaced by a more recent declaration in the lookup table), and we
+ // can't necessarily find it in the redeclaration chain because it might
+ // be merely mergeable, not redeclarable.
llvm::SmallVector<const NamedDecl*, 4> Candidates;
- DeclContext::lookup_result R = CanonDef->lookup(D->getDeclName());
- for (DeclContext::lookup_iterator I = R.begin(), E = R.end();
- !Found && I != E; ++I) {
- for (auto RI : (*I)->redecls()) {
- if (RI->getLexicalDeclContext() == CanonDef) {
- // This declaration is present in the canonical definition. If it's
- // in the same redecl chain, it's the one we're looking for.
- if (RI->getCanonicalDecl() == DCanon)
- Found = true;
- else
- Candidates.push_back(cast<NamedDecl>(RI));
- break;
- }
+ for (auto *CanonMember : CanonDef->decls()) {
+ if (CanonMember->getCanonicalDecl() == DCanon) {
+ // This can happen if the declaration is merely mergeable and not
+ // actually redeclarable (we looked for redeclarations earlier).
+ //
+ // FIXME: We should be able to detect this more efficiently, without
+ // pulling in all of the members of CanonDef.
+ Found = true;
+ break;
}
+ if (auto *ND = dyn_cast<NamedDecl>(CanonMember))
+ if (ND->getDeclName() == D->getDeclName())
+ Candidates.push_back(ND);
}
if (!Found) {
@@ -8428,16 +8582,19 @@ void ASTReader::FinishedDeserializing() {
PendingExceptionSpecUpdates.clear();
for (auto Update : Updates) {
auto *FPT = Update.second->getType()->castAs<FunctionProtoType>();
- SemaObj->UpdateExceptionSpec(Update.second,
- FPT->getExtProtoInfo().ExceptionSpec);
+ auto ESI = FPT->getExtProtoInfo().ExceptionSpec;
+ if (auto *Listener = Context.getASTMutationListener())
+ Listener->ResolvedExceptionSpec(cast<FunctionDecl>(Update.second));
+ for (auto *Redecl : Update.second->redecls())
+ Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
}
}
- diagnoseOdrViolations();
-
if (ReadTimer)
ReadTimer->stopTimer();
+ diagnoseOdrViolations();
+
// We are not in recursive loading, so it's safe to pass the "interesting"
// decls to the consumer.
if (Consumer)
@@ -8450,7 +8607,7 @@ void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
// Remove any fake results before adding any real ones.
auto It = PendingFakeLookupResults.find(II);
if (It != PendingFakeLookupResults.end()) {
- for (auto *ND : PendingFakeLookupResults[II])
+ for (auto *ND : It->second)
SemaObj->IdResolver.RemoveDecl(ND);
    // FIXME: this works around a module+PCH performance issue.
// Rather than erase the result from the map, which is O(n), just clear
@@ -8471,13 +8628,15 @@ void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
}
}
-ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
- const PCHContainerReader &PCHContainerRdr,
- StringRef isysroot, bool DisableValidation,
- bool AllowASTWithCompilerErrors,
- bool AllowConfigurationMismatch, bool ValidateSystemInputs,
- bool UseGlobalIndex,
- std::unique_ptr<llvm::Timer> ReadTimer)
+ASTReader::ASTReader(
+ Preprocessor &PP, ASTContext &Context,
+ const PCHContainerReader &PCHContainerRdr,
+ ArrayRef<IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ StringRef isysroot, bool DisableValidation,
+ bool AllowASTWithCompilerErrors,
+ bool AllowConfigurationMismatch, bool ValidateSystemInputs,
+ bool UseGlobalIndex,
+ std::unique_ptr<llvm::Timer> ReadTimer)
: Listener(new PCHValidator(PP, *this)), DeserializationListener(nullptr),
OwnsDeserializationListener(false), SourceMgr(PP.getSourceManager()),
FileMgr(PP.getFileManager()), PCHContainerRdr(PCHContainerRdr),
@@ -8501,19 +8660,21 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0),
PassingDeclsToConsumer(false), ReadingKind(Read_None) {
SourceMgr.setExternalSLocEntrySource(this);
+
+ for (const auto &Ext : Extensions) {
+ auto BlockName = Ext->getExtensionMetadata().BlockName;
+ auto Known = ModuleFileExtensions.find(BlockName);
+ if (Known != ModuleFileExtensions.end()) {
+ Diags.Report(diag::warn_duplicate_module_file_extension)
+ << BlockName;
+ continue;
+ }
+
+ ModuleFileExtensions.insert({BlockName, Ext});
+ }
}
ASTReader::~ASTReader() {
if (OwnsDeserializationListener)
delete DeserializationListener;
-
- for (DeclContextVisibleUpdatesPending::iterator
- I = PendingVisibleUpdates.begin(),
- E = PendingVisibleUpdates.end();
- I != E; ++I) {
- for (DeclContextVisibleUpdates::iterator J = I->second.begin(),
- F = I->second.end();
- J != F; ++J)
- delete J->first;
- }
}
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index 1a0c5b58e7f6..8fb110e4551d 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -26,6 +26,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/SaveAndRestore.h"
+
using namespace clang;
using namespace clang::serialization;
@@ -120,45 +121,20 @@ namespace clang {
static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC,
unsigned Index, NamedDecl *D);
- /// \brief RAII class used to capture the first ID within a redeclaration
- /// chain and to introduce it into the list of pending redeclaration chains
- /// on destruction.
+ /// Results from loading a RedeclarableDecl.
class RedeclarableResult {
- ASTReader &Reader;
GlobalDeclID FirstID;
Decl *MergeWith;
- mutable bool Owning;
bool IsKeyDecl;
- Decl::Kind DeclKind;
-
- void operator=(RedeclarableResult &) = delete;
public:
- RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID,
- Decl *MergeWith, Decl::Kind DeclKind,
- bool IsKeyDecl)
- : Reader(Reader), FirstID(FirstID), MergeWith(MergeWith),
- Owning(true), IsKeyDecl(IsKeyDecl), DeclKind(DeclKind) {}
-
- RedeclarableResult(RedeclarableResult &&Other)
- : Reader(Other.Reader), FirstID(Other.FirstID),
- MergeWith(Other.MergeWith), Owning(Other.Owning),
- IsKeyDecl(Other.IsKeyDecl), DeclKind(Other.DeclKind) {
- Other.Owning = false;
- }
-
- ~RedeclarableResult() {
- if (FirstID && Owning && isRedeclarableDeclKind(DeclKind)) {
- auto Canon = Reader.GetDecl(FirstID)->getCanonicalDecl();
- if (Reader.PendingDeclChainsKnown.insert(Canon).second)
- Reader.PendingDeclChains.push_back(Canon);
- }
- }
+ RedeclarableResult(GlobalDeclID FirstID, Decl *MergeWith, bool IsKeyDecl)
+ : FirstID(FirstID), MergeWith(MergeWith), IsKeyDecl(IsKeyDecl) {}
/// \brief Retrieve the first ID.
GlobalDeclID getFirstID() const { return FirstID; }
- /// \brief Is this declaration the key declaration?
+ /// \brief Is this declaration a key declaration?
bool isKeyDecl() const { return IsKeyDecl; }
/// \brief Get a known declaration that this should be merged with, if
@@ -185,7 +161,7 @@ namespace clang {
public:
FindExistingResult(ASTReader &Reader)
: Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false),
- AnonymousDeclNumber(0), TypedefNameForLinkage(0) {}
+ AnonymousDeclNumber(0), TypedefNameForLinkage(nullptr) {}
FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing,
unsigned AnonymousDeclNumber,
@@ -317,6 +293,7 @@ namespace clang {
DeclID VisitTemplateDecl(TemplateDecl *D);
RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D);
void VisitVarTemplateDecl(VarTemplateDecl *D);
void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
@@ -395,7 +372,7 @@ namespace clang {
}
}
};
-}
+} // end namespace clang
namespace {
/// Iterator over the redeclarations of a declaration that have already
@@ -431,12 +408,12 @@ public:
return A.Current != B.Current;
}
};
-}
+} // end anonymous namespace
+
template<typename DeclT>
llvm::iterator_range<MergedRedeclIterator<DeclT>> merged_redecls(DeclT *D) {
- return llvm::iterator_range<MergedRedeclIterator<DeclT>>(
- MergedRedeclIterator<DeclT>(D),
- MergedRedeclIterator<DeclT>());
+ return llvm::make_range(MergedRedeclIterator<DeclT>(D),
+ MergedRedeclIterator<DeclT>());
}
uint64_t ASTDeclReader::GetCurrentCursorOffset() {
@@ -465,8 +442,8 @@ void ASTDeclReader::Visit(Decl *D) {
// If this is a tag declaration with a typedef name for linkage, it's safe
// to load that typedef now.
if (NamedDeclForTagDecl)
- cast<TagDecl>(D)->NamedDeclOrQualifier =
- cast<NamedDecl>(Reader.GetDecl(NamedDeclForTagDecl));
+ cast<TagDecl>(D)->TypedefNameDeclOrQualifier =
+ cast<TypedefNameDecl>(Reader.GetDecl(NamedDeclForTagDecl));
} else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
    // If we have a fully initialized TypeDecl, we can safely read its type now.
ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
@@ -499,6 +476,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
// placeholder.
GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ if (!LexicalDCIDForTemplateParmDecl)
+ LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl;
Reader.addPendingDeclContextInfo(D,
SemaDCIDForTemplateParmDecl,
LexicalDCIDForTemplateParmDecl);
@@ -506,6 +485,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
} else {
DeclContext *SemaDC = ReadDeclAs<DeclContext>(Record, Idx);
DeclContext *LexicalDC = ReadDeclAs<DeclContext>(Record, Idx);
+ if (!LexicalDC)
+ LexicalDC = SemaDC;
DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
// Avoid calling setLexicalDeclContext() directly because it uses
// Decl::getASTContext() internally which is unsafe during deserialization.
@@ -619,16 +600,13 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
case 1: { // ExtInfo
TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
ReadQualifierInfo(*Info, Record, Idx);
- TD->NamedDeclOrQualifier = Info;
+ TD->TypedefNameDeclOrQualifier = Info;
break;
}
case 2: // TypedefNameForAnonDecl
NamedDeclForTagDecl = ReadDeclID(Record, Idx);
TypedefNameForLinkage = Reader.GetIdentifierInfo(F, Record, Idx);
break;
- case 3: // DeclaratorForAnonDecl
- NamedDeclForTagDecl = ReadDeclID(Record, Idx);
- break;
default:
llvm_unreachable("unexpected tag info kind");
}
@@ -771,8 +749,9 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// Template arguments.
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
-
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
+
// Template args as written.
SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
SourceLocation LAngleLoc, RAngleLoc;
@@ -909,6 +888,7 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
void ASTDeclReader::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
VisitTypedefNameDecl(D);
+
D->Variance = Record[Idx++];
D->Index = Record[Idx++];
D->VarianceLoc = ReadSourceLocation(Record, Idx);
@@ -1121,7 +1101,6 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
D->IvarInitializers = Reader.ReadCXXCtorInitializersRef(F, Record, Idx);
}
-
void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
VisitDecl(D);
D->setAtLoc(ReadSourceLocation(Record, Idx));
@@ -1168,6 +1147,8 @@ void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
for (unsigned I = 0; I != FD->ChainingSize; ++I)
FD->Chaining[I] = ReadDeclAs<NamedDecl>(Record, Idx);
+
+ mergeMergeable(FD);
}
ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
@@ -1208,8 +1189,9 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
};
switch ((VarKind)Record[Idx++]) {
case VarNotTemplate:
- // Only true variables (not parameters or implicit parameters) can be merged
- if (VD->getKind() != Decl::ParmVar && VD->getKind() != Decl::ImplicitParam &&
+ // Only true variables (not parameters or implicit parameters) can be
+ // merged; the other kinds are not really redeclarable at all.
+ if (!isa<ParmVarDecl>(VD) && !isa<ImplicitParamDecl>(VD) &&
!isa<VarTemplateSpecializationDecl>(VD))
mergeRedeclarable(VD, Redecl);
break;
@@ -1290,8 +1272,7 @@ void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr));
}
- BD->setCaptures(Reader.getContext(), captures.begin(),
- captures.end(), capturesCXXThis);
+ BD->setCaptures(Reader.getContext(), captures, capturesCXXThis);
}
void ASTDeclReader::VisitCapturedDecl(CapturedDecl *CD) {
@@ -1319,7 +1300,6 @@ void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
D->setLocStart(ReadSourceLocation(Record, Idx));
}
-
void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
@@ -1506,21 +1486,14 @@ void ASTDeclReader::MergeDefinitionData(
auto &DD = *D->DefinitionData.getNotUpdated();
if (DD.Definition != MergeDD.Definition) {
- // If the new definition has new special members, let the name lookup
- // code know that it needs to look in the new definition too.
- //
- // FIXME: We only need to do this if the merged definition declares members
- // that this definition did not declare, or if it defines members that this
- // definition did not define.
- Reader.MergedLookups[DD.Definition].push_back(MergeDD.Definition);
- DD.Definition->setHasExternalVisibleStorage();
-
// Track that we merged the definitions.
Reader.MergedDeclContexts.insert(std::make_pair(MergeDD.Definition,
DD.Definition));
Reader.PendingDefinitions.erase(MergeDD.Definition);
MergeDD.Definition->IsCompleteDefinition = false;
mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
+ assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
+ "already loaded pending lookups for merged definition");
}
auto PFDI = Reader.PendingFakeDefinitionData.find(&DD);
@@ -1758,7 +1731,7 @@ void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
D->ImportedAndComplete.setPointer(readModule(Record, Idx));
D->ImportedAndComplete.setInt(Record[Idx++]);
- SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(D + 1);
+ SourceLocation *StoredLocs = D->getTrailingObjects<SourceLocation>();
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = ReadSourceLocation(Record, Idx);
++Idx; // The number of stored source locations.
@@ -1776,7 +1749,8 @@ void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
else
D->Friend = GetTypeSourceInfo(Record, Idx);
for (unsigned i = 0; i != D->NumTPLists; ++i)
- D->getTPLists()[i] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->getTrailingObjects<TemplateParameterList *>()[i] =
+ Reader.ReadTemplateParameterList(F, Record, Idx);
D->NextFriend = ReadDeclID(Record, Idx);
D->UnsupportedFriend = (Record[Idx++] != 0);
D->FriendLoc = ReadSourceLocation(Record, Idx);
@@ -1887,6 +1861,10 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
}
+void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
+ llvm_unreachable("BuiltinTemplates are not serialized");
+}
+
/// TODO: Unify with ClassTemplateDecl version?
/// May require unifying ClassTemplateDecl and
/// VarTemplateDecl beyond TemplateDecl...
@@ -1933,7 +1911,8 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
}
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
TemplArgs.size());
D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
@@ -2060,7 +2039,8 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
}
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
+ /*Canonicalize*/ true);
D->TemplateArgs =
TemplateArgumentList::CreateCopy(C, TemplArgs.data(), TemplArgs.size());
D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
@@ -2070,6 +2050,7 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
if (writtenAsCanonicalDecl) {
VarTemplateDecl *CanonPattern = ReadDeclAs<VarTemplateDecl>(Record, Idx);
if (D->isCanonicalDecl()) { // It's kept in the folding set.
+ // FIXME: If it's already present, merge it.
if (VarTemplatePartialSpecializationDecl *Partial =
dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
CanonPattern->getCommonPtr()->PartialSpecializations
@@ -2118,10 +2099,11 @@ void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
D->setDepth(Record[Idx++]);
D->setPosition(Record[Idx++]);
if (D->isExpandedParameterPack()) {
- void **Data = reinterpret_cast<void **>(D + 1);
+ auto TypesAndInfos =
+ D->getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
- Data[2*I] = Reader.readType(F, Record, Idx).getAsOpaquePtr();
- Data[2*I + 1] = GetTypeSourceInfo(Record, Idx);
+ new (&TypesAndInfos[I].first) QualType(Reader.readType(F, Record, Idx));
+ TypesAndInfos[I].second = GetTypeSourceInfo(Record, Idx);
}
} else {
// Rest of NonTypeTemplateParmDecl.
@@ -2137,7 +2119,8 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
D->setDepth(Record[Idx++]);
D->setPosition(Record[Idx++]);
if (D->isExpandedParameterPack()) {
- void **Data = reinterpret_cast<void **>(D + 1);
+ TemplateParameterList **Data =
+ D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
I != N; ++I)
Data[I] = Reader.ReadTemplateParameterList(F, Record, Idx);
@@ -2178,23 +2161,37 @@ ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
DeclID FirstDeclID = ReadDeclID(Record, Idx);
Decl *MergeWith = nullptr;
+
bool IsKeyDecl = ThisDeclID == FirstDeclID;
+ bool IsFirstLocalDecl = false;
+
+ uint64_t RedeclOffset = 0;
// 0 indicates that this declaration was the only declaration of its entity,
// and is used for space optimization.
if (FirstDeclID == 0) {
FirstDeclID = ThisDeclID;
IsKeyDecl = true;
+ IsFirstLocalDecl = true;
} else if (unsigned N = Record[Idx++]) {
- IsKeyDecl = false;
+ // This declaration was the first local declaration, but may have imported
+ // other declarations.
+ IsKeyDecl = N == 1;
+ IsFirstLocalDecl = true;
// We have some declarations that must be before us in our redeclaration
// chain. Read them now, and remember that we ought to merge with one of
// them.
// FIXME: Provide a known merge target to the second and subsequent such
// declaration.
- for (unsigned I = 0; I != N; ++I)
+ for (unsigned I = 0; I != N - 1; ++I)
MergeWith = ReadDecl(Record, Idx/*, MergeWith*/);
+
+ RedeclOffset = Record[Idx++];
+ } else {
+ // This declaration was not the first local declaration. Read the first
+ // local declaration now, to trigger the import of other redeclarations.
+ (void)ReadDecl(Record, Idx);
}
T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
@@ -2206,14 +2203,17 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
D->RedeclLink = Redeclarable<T>::PreviousDeclLink(FirstDecl);
D->First = FirstDecl->getCanonicalDecl();
}
-
- // Note that this declaration has been deserialized.
- Reader.RedeclsDeserialized.insert(static_cast<T *>(D));
-
- // The result structure takes care to note that we need to load the
- // other declaration chains for this ID.
- return RedeclarableResult(Reader, FirstDeclID, MergeWith,
- static_cast<T *>(D)->getKind(), IsKeyDecl);
+
+ T *DAsT = static_cast<T*>(D);
+
+ // Note that we need to load local redeclarations of this decl and build a
+ // decl chain for them. This must happen *after* we perform the preloading
+ // above; this ensures that the redeclaration chain is built in the correct
+ // order.
+ if (IsFirstLocalDecl)
+ Reader.PendingDeclChains.push_back(std::make_pair(DAsT, RedeclOffset));
+
+ return RedeclarableResult(FirstDeclID, MergeWith, IsKeyDecl);
}
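// A sketch of the on-disk encoding the reads above imply (reconstructed from
// this function alone; field names are illustrative, not from the format
// documentation):
//
//   DeclID   FirstDeclID;      // 0 => sole declaration of its entity
//   // Present only when FirstDeclID != 0:
//   unsigned N;                // != 0 => this is the first local declaration
//   DeclID   Imported[N - 1];  // imported preceding decls; last => MergeWith
//   uint64_t RedeclOffset;     // offset of this file's local redecl list
//   // When N == 0 instead:
//   DeclID   FirstLocalDecl;   // read to trigger importing the redecl chain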
/// \brief Attempts to merge the given declaration (D) with another declaration
@@ -2255,9 +2255,8 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
DeclID DsID, bool IsKeyDecl) {
auto *DPattern = D->getTemplatedDecl();
auto *ExistingPattern = Existing->getTemplatedDecl();
- RedeclarableResult Result(Reader, DPattern->getCanonicalDecl()->getGlobalID(),
- /*MergeWith*/ExistingPattern, DPattern->getKind(),
- IsKeyDecl);
+ RedeclarableResult Result(DPattern->getCanonicalDecl()->getGlobalID(),
+ /*MergeWith*/ ExistingPattern, IsKeyDecl);
if (auto *DClass = dyn_cast<CXXRecordDecl>(DPattern)) {
// Merge with any existing definition.
@@ -2323,11 +2322,8 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
TemplatePatternID, Redecl.isKeyDecl());
// If this declaration is a key declaration, make a note of that.
- if (Redecl.isKeyDecl()) {
+ if (Redecl.isKeyDecl())
Reader.KeyDecls[ExistingCanon].push_back(Redecl.getFirstID());
- if (Reader.PendingDeclChainsKnown.insert(ExistingCanon).second)
- Reader.PendingDeclChains.push_back(ExistingCanon);
- }
}
}
@@ -2626,6 +2622,13 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
}
+ // Indirect fields with the same target field match.
+ if (auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
+ auto *IFDY = cast<IndirectFieldDecl>(Y);
+ return IFDX->getAnonField()->getCanonicalDecl() ==
+ IFDY->getAnonField()->getCanonicalDecl();
+ }
+
// Enumerators with the same name match.
if (isa<EnumConstantDecl>(X))
// FIXME: Also check the value is odr-equivalent.
@@ -2749,12 +2752,12 @@ static NamedDecl *getDeclForMerging(NamedDecl *Found,
// declaration, then we want that inner declaration. Declarations from
// AST files are handled via ImportedTypedefNamesForLinkage.
if (Found->isFromASTFile())
- return 0;
+ return nullptr;
if (auto *TND = dyn_cast<TypedefNameDecl>(Found))
return TND->getAnonDeclWithTypedefName();
- return 0;
+ return nullptr;
}
NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader,
@@ -2924,6 +2927,7 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
D->RedeclLink.setPrevious(cast<DeclT>(Previous));
D->First = cast<DeclT>(Previous)->First;
}
+
namespace clang {
template<>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
@@ -2969,7 +2973,8 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
std::make_pair(Canon, IsUnresolved ? PrevFD : FD));
}
}
-}
+} // end namespace clang
+
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) {
llvm_unreachable("attachPreviousDecl on non-redeclarable declaration");
}
@@ -3319,37 +3324,13 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// If this declaration is also a declaration context, get the
// offsets for its tables of lexical and visible declarations.
if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
- // FIXME: This should really be
- // DeclContext *LookupDC = DC->getPrimaryContext();
- // but that can walk the redeclaration chain, which might not work yet.
- DeclContext *LookupDC = DC;
- if (isa<NamespaceDecl>(DC))
- LookupDC = DC->getPrimaryContext();
std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
- if (Offsets.first || Offsets.second) {
- if (Offsets.first != 0)
- DC->setHasExternalLexicalStorage(true);
- if (Offsets.second != 0)
- LookupDC->setHasExternalVisibleStorage(true);
- if (ReadDeclContextStorage(*Loc.F, DeclsCursor, Offsets,
- Loc.F->DeclContextInfos[DC]))
- return nullptr;
- }
-
- // Now add the pending visible updates for this decl context, if it has any.
- DeclContextVisibleUpdatesPending::iterator I =
- PendingVisibleUpdates.find(ID);
- if (I != PendingVisibleUpdates.end()) {
- // There are updates. This means the context has external visible
- // storage, even if the original stored version didn't.
- LookupDC->setHasExternalVisibleStorage(true);
- for (const auto &Update : I->second) {
- DeclContextInfo &Info = Update.second->DeclContextInfos[DC];
- delete Info.NameLookupTableData;
- Info.NameLookupTableData = Update.first;
- }
- PendingVisibleUpdates.erase(I);
- }
+ if (Offsets.first &&
+ ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC))
+ return nullptr;
+ if (Offsets.second &&
+ ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, Offsets.second, ID))
+ return nullptr;
}
assert(Idx == Record.size());
@@ -3372,17 +3353,32 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
}
void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
+ // Load the pending visible updates for this decl context, if it has any.
+ auto I = PendingVisibleUpdates.find(ID);
+ if (I != PendingVisibleUpdates.end()) {
+ auto VisibleUpdates = std::move(I->second);
+ PendingVisibleUpdates.erase(I);
+
+ auto *DC = cast<DeclContext>(D)->getPrimaryContext();
+ for (const PendingVisibleUpdate &Update : VisibleUpdates)
+ Lookups[DC].Table.add(
+ Update.Mod, Update.Data,
+ reader::ASTDeclContextNameLookupTrait(*this, *Update.Mod));
+ DC->setHasExternalVisibleStorage(true);
+ }
+
// The declaration may have been modified by files later in the chain.
// If this is the case, read the record containing the updates from each file
// and pass it to ASTDeclReader to make the modifications.
DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
if (UpdI != DeclUpdateOffsets.end()) {
- FileOffsetsTy &UpdateOffsets = UpdI->second;
+ auto UpdateOffsets = std::move(UpdI->second);
+ DeclUpdateOffsets.erase(UpdI);
+
bool WasInteresting = isConsumerInterestedIn(D, false);
- for (FileOffsetsTy::iterator
- I = UpdateOffsets.begin(), E = UpdateOffsets.end(); I != E; ++I) {
- ModuleFile *F = I->first;
- uint64_t Offset = I->second;
+ for (auto &FileAndOffset : UpdateOffsets) {
+ ModuleFile *F = FileAndOffset.first;
+ uint64_t Offset = FileAndOffset.second;
llvm::BitstreamCursor &Cursor = F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
Cursor.JumpToBit(Offset);
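// Both maps above are drained with the same move-then-erase idiom: reading an
// update record can recursively deserialize more declarations and mutate the
// map, so each entry is moved into a local and erased before it is processed.
// A minimal stand-alone sketch of the idiom (std::map standing in for the
// reader's own containers):
#include <cstdint>
#include <map>
#include <vector>

using UpdateList = std::vector<uint64_t>;

struct Loader {
  std::map<int, UpdateList> PendingUpdates;

  void process(uint64_t Offset) { (void)Offset; /* may re-enter loadUpdates */ }

  void loadUpdates(int ID) {
    auto It = PendingUpdates.find(ID);
    if (It == PendingUpdates.end())
      return;
    UpdateList Updates = std::move(It->second); // take ownership first
    PendingUpdates.erase(It);                   // now safe to mutate the map
    for (uint64_t Offset : Updates)             // re-entrant calls see no
      process(Offset);                          // stale entry for ID
  }
};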
@@ -3407,154 +3403,42 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
}
}
-namespace {
- /// \brief Module visitor class that finds all of the redeclarations of a
- /// redeclarable declaration.
- class RedeclChainVisitor {
- ASTReader &Reader;
- SmallVectorImpl<DeclID> &SearchDecls;
- llvm::SmallPtrSetImpl<Decl *> &Deserialized;
- GlobalDeclID CanonID;
- SmallVector<Decl *, 4> Chain;
-
- public:
- RedeclChainVisitor(ASTReader &Reader, SmallVectorImpl<DeclID> &SearchDecls,
- llvm::SmallPtrSetImpl<Decl *> &Deserialized,
- GlobalDeclID CanonID)
- : Reader(Reader), SearchDecls(SearchDecls), Deserialized(Deserialized),
- CanonID(CanonID) {
- // Ensure that the canonical ID goes at the start of the chain.
- addToChain(Reader.GetDecl(CanonID));
- }
-
- static ModuleManager::DFSPreorderControl
- visitPreorder(ModuleFile &M, void *UserData) {
- return static_cast<RedeclChainVisitor *>(UserData)->visitPreorder(M);
- }
-
- static bool visitPostorder(ModuleFile &M, void *UserData) {
- return static_cast<RedeclChainVisitor *>(UserData)->visitPostorder(M);
- }
-
- void addToChain(Decl *D) {
- if (!D)
- return;
-
- if (Deserialized.erase(D))
- Chain.push_back(D);
- }
-
- void searchForID(ModuleFile &M, GlobalDeclID GlobalID) {
- // Map global ID of the first declaration down to the local ID
- // used in this module file.
- DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID);
- if (!ID)
- return;
-
- // If the search decl was from this module, add it to the chain before any
- // of its redeclarations in this module or users of it, and after any from
- // imported modules.
- if (CanonID != GlobalID && Reader.isDeclIDFromModule(GlobalID, M))
- addToChain(Reader.GetDecl(GlobalID));
-
- // Perform a binary search to find the local redeclarations for this
- // declaration (if any).
- const LocalRedeclarationsInfo Compare = { ID, 0 };
- const LocalRedeclarationsInfo *Result
- = std::lower_bound(M.RedeclarationsMap,
- M.RedeclarationsMap + M.LocalNumRedeclarationsInMap,
- Compare);
- if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap ||
- Result->FirstID != ID)
- return;
-
- // Dig out all of the redeclarations.
- unsigned Offset = Result->Offset;
- unsigned N = M.RedeclarationChains[Offset];
- M.RedeclarationChains[Offset++] = 0; // Don't try to deserialize again
- for (unsigned I = 0; I != N; ++I)
- addToChain(Reader.GetLocalDecl(M, M.RedeclarationChains[Offset++]));
- }
-
- bool needsToVisitImports(ModuleFile &M, GlobalDeclID GlobalID) {
- DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID);
- if (!ID)
- return false;
-
- const LocalRedeclarationsInfo Compare = {ID, 0};
- const LocalRedeclarationsInfo *Result = std::lower_bound(
- M.RedeclarationsMap,
- M.RedeclarationsMap + M.LocalNumRedeclarationsInMap, Compare);
- if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap ||
- Result->FirstID != ID) {
- return true;
- }
- unsigned Offset = Result->Offset;
- unsigned N = M.RedeclarationChains[Offset];
- // We don't need to visit a module or any of its imports if we've already
- // deserialized the redecls from this module.
- return N != 0;
- }
+void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
+ // Attach FirstLocal to the end of the decl chain.
+ Decl *CanonDecl = FirstLocal->getCanonicalDecl();
+ if (FirstLocal != CanonDecl) {
+ Decl *PrevMostRecent = ASTDeclReader::getMostRecentDecl(CanonDecl);
+ ASTDeclReader::attachPreviousDecl(
+ *this, FirstLocal, PrevMostRecent ? PrevMostRecent : CanonDecl,
+ CanonDecl);
+ }
- ModuleManager::DFSPreorderControl visitPreorder(ModuleFile &M) {
- for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I) {
- if (needsToVisitImports(M, SearchDecls[I]))
- return ModuleManager::Continue;
- }
- return ModuleManager::SkipImports;
- }
+ if (!LocalOffset) {
+ ASTDeclReader::attachLatestDecl(CanonDecl, FirstLocal);
+ return;
+ }
- bool visitPostorder(ModuleFile &M) {
- // Visit each of the declarations.
- for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I)
- searchForID(M, SearchDecls[I]);
- return false;
- }
-
- ArrayRef<Decl *> getChain() const {
- return Chain;
- }
- };
-}
+ // Load the list of other redeclarations from this module file.
+ ModuleFile *M = getOwningModuleFile(FirstLocal);
+ assert(M && "imported decl from no module file");
-void ASTReader::loadPendingDeclChain(Decl *CanonDecl) {
- // The decl might have been merged into something else after being added to
- // our list. If it was, just skip it.
- if (!CanonDecl->isCanonicalDecl())
- return;
+ llvm::BitstreamCursor &Cursor = M->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(LocalOffset);
- // Determine the set of declaration IDs we'll be searching for.
- SmallVector<DeclID, 16> SearchDecls;
- GlobalDeclID CanonID = CanonDecl->getGlobalID();
- if (CanonID)
- SearchDecls.push_back(CanonDecl->getGlobalID()); // Always first.
- KeyDeclsMap::iterator KeyPos = KeyDecls.find(CanonDecl);
- if (KeyPos != KeyDecls.end())
- SearchDecls.append(KeyPos->second.begin(), KeyPos->second.end());
-
- // Build up the list of redeclarations.
- RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID);
- ModuleMgr.visitDepthFirst(&RedeclChainVisitor::visitPreorder,
- &RedeclChainVisitor::visitPostorder, &Visitor);
-
- // Retrieve the chains.
- ArrayRef<Decl *> Chain = Visitor.getChain();
- if (Chain.empty() || (Chain.size() == 1 && Chain[0] == CanonDecl))
- return;
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.readRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == LOCAL_REDECLARATIONS &&
+        "expected LOCAL_REDECLARATIONS record!");
- // Hook up the chains.
- //
- // FIXME: We have three different dispatches on decl kind here; maybe
+ // FIXME: We have several different dispatches on decl kind here; maybe
// we should instead generate one loop per kind and dispatch up-front?
- Decl *MostRecent = ASTDeclReader::getMostRecentDecl(CanonDecl);
- if (!MostRecent)
- MostRecent = CanonDecl;
- for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
- if (Chain[I] == CanonDecl)
- continue;
-
- ASTDeclReader::attachPreviousDecl(*this, Chain[I], MostRecent, CanonDecl);
- MostRecent = Chain[I];
+ Decl *MostRecent = FirstLocal;
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ auto *D = GetLocalDecl(*M, Record[N - I - 1]);
+ ASTDeclReader::attachPreviousDecl(*this, D, MostRecent, CanonDecl);
+ MostRecent = D;
}
ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent);
}
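// A toy model of the relinking loop above. Assuming the LOCAL_REDECLARATIONS
// record lists this file's redeclarations newest-first (inferred from the
// reversed indexing; not stated in this patch), walking it back-to-front makes
// each entry the new "most recent" and leaves the record's first entry as the
// latest declaration in the chain.
#include <cstddef>
#include <vector>

struct Node { Node *Previous = nullptr; };

// Returns the most recent node so the caller can attach it as the latest decl.
Node *linkChain(Node *FirstLocal, const std::vector<Node *> &Record) {
  Node *MostRecent = FirstLocal;
  for (size_t I = 0, N = Record.size(); I != N; ++I) {
    Node *D = Record[N - I - 1];
    D->Previous = MostRecent; // mirrors attachPreviousDecl(D, MostRecent, ...)
    MostRecent = D;
  }
  return MostRecent; // mirrors attachLatestDecl(CanonDecl, MostRecent)
}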
@@ -3630,11 +3514,7 @@ namespace {
}
}
- static bool visit(ModuleFile &M, void *UserData) {
- return static_cast<ObjCCategoriesVisitor *>(UserData)->visit(M);
- }
-
- bool visit(ModuleFile &M) {
+ bool operator()(ModuleFile &M) {
// If we've loaded all of the category information we care about from
// this module file, we're done.
if (M.Generation <= PreviousGeneration)
@@ -3672,14 +3552,14 @@ namespace {
return true;
}
};
-}
+} // end anonymous namespace
void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
ObjCInterfaceDecl *D,
unsigned PreviousGeneration) {
ObjCCategoriesVisitor Visitor(*this, ID, D, CategoriesDeserialized,
PreviousGeneration);
- ModuleMgr.visit(ObjCCategoriesVisitor::visit, &Visitor);
+ ModuleMgr.visit(Visitor);
}
template<typename DeclT, typename Fn>
@@ -3716,17 +3596,6 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// FIXME: We should call addHiddenDecl instead, to add the member
// to its DeclContext.
RD->addedMember(MD);
-
- // If we've added a new special member to a class definition that is not
- // the canonical definition, then we need special member lookups in the
- // canonical definition to also look into our class.
- auto *DD = RD->DefinitionData.getNotUpdated();
- if (DD && DD->Definition != RD) {
- auto &Merged = Reader.MergedLookups[DD->Definition];
- // FIXME: Avoid the linear-time scan here.
- if (std::find(Merged.begin(), Merged.end(), RD) == Merged.end())
- Merged.push_back(RD);
- }
break;
}
@@ -3798,10 +3667,8 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// Visible update is handled separately.
uint64_t LexicalOffset = Record[Idx++];
if (!HadRealDefinition && LexicalOffset) {
- RD->setHasExternalLexicalStorage(true);
- Reader.ReadDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor,
- std::make_pair(LexicalOffset, 0),
- ModuleFile.DeclContextInfos[RD]);
+ Reader.ReadLexicalDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor,
+ LexicalOffset, RD);
Reader.PendingFakeDefinitionData.erase(OldDD);
}
diff --git a/lib/Serialization/ASTReaderInternals.h b/lib/Serialization/ASTReaderInternals.h
index 5b1c4f4963e4..d392364a971b 100644
--- a/lib/Serialization/ASTReaderInternals.h
+++ b/lib/Serialization/ASTReaderInternals.h
@@ -15,8 +15,12 @@
#include "clang/AST/DeclarationName.h"
#include "clang/Serialization/ASTBitCodes.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/OnDiskHashTable.h"
+#include "MultiOnDiskHashTable.h"
#include <utility>
namespace clang {
@@ -39,45 +43,86 @@ class ASTDeclContextNameLookupTrait {
ModuleFile &F;
public:
- /// \brief Pair of begin/end iterators for DeclIDs.
- ///
- /// Note that these declaration IDs are local to the module that contains this
- /// particular lookup t
- typedef llvm::support::ulittle32_t LE32DeclID;
- typedef std::pair<LE32DeclID *, LE32DeclID *> data_type;
- typedef unsigned hash_value_type;
- typedef unsigned offset_type;
+ // Maximum number of lookup tables we allow before condensing the tables.
+ static const int MaxTables = 4;
+
+ /// The lookup result is a list of global declaration IDs.
+ typedef llvm::SmallVector<DeclID, 4> data_type;
+ struct data_type_builder {
+ data_type &Data;
+ llvm::DenseSet<DeclID> Found;
- /// \brief Special internal key for declaration names.
- /// The hash table creates keys for comparison; we do not create
- /// a DeclarationName for the internal key to avoid deserializing types.
- struct DeclNameKey {
- DeclarationName::NameKind Kind;
- uint64_t Data;
- DeclNameKey() : Kind((DeclarationName::NameKind)0), Data(0) { }
+ data_type_builder(data_type &D) : Data(D) {}
+ void insert(DeclID ID) {
+ // Just use a linear scan unless we have more than a few IDs.
+ if (Found.empty() && !Data.empty()) {
+ if (Data.size() <= 4) {
+ for (auto I : Data)
+ if (I == ID)
+ return;
+ Data.push_back(ID);
+ return;
+ }
+
+ // Switch to tracking found IDs in the set.
+ Found.insert(Data.begin(), Data.end());
+ }
+
+ if (Found.insert(ID).second)
+ Data.push_back(ID);
+ }
};
+ typedef unsigned hash_value_type;
+ typedef unsigned offset_type;
+ typedef ModuleFile *file_type;
typedef DeclarationName external_key_type;
- typedef DeclNameKey internal_key_type;
+ typedef DeclarationNameKey internal_key_type;
explicit ASTDeclContextNameLookupTrait(ASTReader &Reader, ModuleFile &F)
: Reader(Reader), F(F) { }
- static bool EqualKey(const internal_key_type& a,
- const internal_key_type& b) {
- return a.Kind == b.Kind && a.Data == b.Data;
+ static bool EqualKey(const internal_key_type &a, const internal_key_type &b) {
+ return a == b;
}
- static hash_value_type ComputeHash(const DeclNameKey &Key);
- static internal_key_type GetInternalKey(const external_key_type& Name);
+ static hash_value_type ComputeHash(const internal_key_type &Key) {
+ return Key.getHash();
+ }
+ static internal_key_type GetInternalKey(const external_key_type &Name) {
+ return Name;
+ }
static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d);
+ ReadKeyDataLength(const unsigned char *&d);
- internal_key_type ReadKey(const unsigned char* d, unsigned);
+ internal_key_type ReadKey(const unsigned char *d, unsigned);
- data_type ReadData(internal_key_type, const unsigned char* d,
- unsigned DataLen);
+ void ReadDataInto(internal_key_type, const unsigned char *d,
+ unsigned DataLen, data_type_builder &Val);
+
+ static void MergeDataInto(const data_type &From, data_type_builder &To) {
+ To.Data.reserve(To.Data.size() + From.size());
+ for (DeclID ID : From)
+ To.insert(ID);
+ }
+
+ file_type ReadFileRef(const unsigned char *&d);
+};
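// A self-contained sketch of data_type_builder's duplicate filter above:
// results from several on-disk tables are appended with a linear scan while
// the list is small, switching to set-based lookups once it grows. Standard
// containers stand in for the LLVM ones; DeclID is assumed to be a 32-bit ID.
#include <cstdint>
#include <unordered_set>
#include <vector>

using DeclID = uint32_t;

struct ResultBuilder {
  std::vector<DeclID> &Data;         // deduplicated IDs, in insertion order
  std::unordered_set<DeclID> Found;  // filled lazily for large result lists

  explicit ResultBuilder(std::vector<DeclID> &D) : Data(D) {}

  void insert(DeclID ID) {
    // Just use a linear scan unless we have more than a few IDs.
    if (Found.empty() && !Data.empty()) {
      if (Data.size() <= 4) {
        for (DeclID Existing : Data)
          if (Existing == ID)
            return;
        Data.push_back(ID);
        return;
      }
      // Switch to tracking found IDs in the set.
      Found.insert(Data.begin(), Data.end());
    }
    if (Found.insert(ID).second)
      Data.push_back(ID);
  }
};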
+
+struct DeclContextLookupTable {
+ MultiOnDiskHashTable<ASTDeclContextNameLookupTrait> Table;
+
+ // These look redundant, but don't remove them -- they work around MSVC 2013's
+ // inability to synthesize move operations. Without them, the
+ // MultiOnDiskHashTable will be copied (despite being move-only!).
+ DeclContextLookupTable() : Table() {}
+ DeclContextLookupTable(DeclContextLookupTable &&O)
+ : Table(std::move(O.Table)) {}
+ DeclContextLookupTable &operator=(DeclContextLookupTable &&O) {
+ Table = std::move(O.Table);
+ return *this;
+ }
};
/// \brief Base class for the trait describing the on-disk hash table for the
@@ -137,6 +182,8 @@ public:
const unsigned char* d,
unsigned DataLen);
+ IdentID ReadIdentifierID(const unsigned char *d);
+
ASTReader &getReader() const { return Reader; }
};
@@ -226,7 +273,7 @@ public:
: Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings) { }
static hash_value_type ComputeHash(internal_key_ref ikey);
- static internal_key_type GetInternalKey(const FileEntry *FE);
+ internal_key_type GetInternalKey(const FileEntry *FE);
bool EqualKey(internal_key_ref a, internal_key_ref b);
static std::pair<unsigned, unsigned>
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 76e8334695f7..4082dec48c0a 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -93,6 +93,7 @@ namespace clang {
/// \brief Read and initialize a ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ TemplateArgumentLoc *ArgsLocArray,
unsigned NumTemplateArgs);
/// \brief Read and initialize a ExplicitTemplateArgumentList structure.
void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
@@ -105,9 +106,9 @@ namespace clang {
};
}
-void ASTStmtReader::
-ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
- unsigned NumTemplateArgs) {
+void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ TemplateArgumentLoc *ArgsLocArray,
+ unsigned NumTemplateArgs) {
SourceLocation TemplateKWLoc = ReadSourceLocation(Record, Idx);
TemplateArgumentListInfo ArgInfo;
ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
@@ -115,7 +116,7 @@ ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
for (unsigned i = 0; i != NumTemplateArgs; ++i)
ArgInfo.addArgument(
Reader.ReadTemplateArgumentLoc(F, Record, Idx));
- Args.initializeFrom(TemplateKWLoc, ArgInfo);
+ Args.initializeFrom(TemplateKWLoc, ArgInfo, ArgsLocArray);
}
void ASTStmtReader::VisitStmt(Stmt *S) {
@@ -134,7 +135,7 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
unsigned NumStmts = Record[Idx++];
while (NumStmts--)
Stmts.push_back(Reader.ReadSubStmt());
- S->setStmts(Reader.getContext(), Stmts.data(), Stmts.size());
+ S->setStmts(Reader.getContext(), Stmts);
S->LBraceLoc = ReadSourceLocation(Record, Idx);
S->RBraceLoc = ReadSourceLocation(Record, Idx);
}
@@ -381,6 +382,26 @@ void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
Constraints, Exprs, Clobbers);
}
+void ASTStmtReader::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoreturnStmt(CoreturnStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoawaitExpr(CoawaitExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtReader::VisitCoyieldExpr(CoyieldExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
VisitStmt(S);
++Idx;
@@ -439,15 +460,17 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
NumTemplateArgs = Record[Idx++];
if (E->hasQualifier())
- E->getInternalQualifierLoc()
- = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ new (E->getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(
+ Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
if (E->hasFoundDecl())
- E->getInternalFoundDecl() = ReadDeclAs<NamedDecl>(Record, Idx);
+ *E->getTrailingObjects<NamedDecl *>() = ReadDeclAs<NamedDecl>(Record, Idx);
if (E->hasTemplateKWAndArgsInfo())
- ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
- NumTemplateArgs);
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
E->setDecl(ReadDeclAs<ValueDecl>(Record, Idx));
E->setLocation(ReadSourceLocation(Record, Idx));
@@ -527,7 +550,6 @@ void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
- typedef OffsetOfExpr::OffsetOfNode Node;
VisitExpr(E);
assert(E->getNumComponents() == Record[Idx]);
++Idx;
@@ -537,29 +559,29 @@ void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
E->setRParenLoc(ReadSourceLocation(Record, Idx));
E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
- Node::Kind Kind = static_cast<Node::Kind>(Record[Idx++]);
+ OffsetOfNode::Kind Kind = static_cast<OffsetOfNode::Kind>(Record[Idx++]);
SourceLocation Start = ReadSourceLocation(Record, Idx);
SourceLocation End = ReadSourceLocation(Record, Idx);
switch (Kind) {
- case Node::Array:
- E->setComponent(I, Node(Start, Record[Idx++], End));
+ case OffsetOfNode::Array:
+ E->setComponent(I, OffsetOfNode(Start, Record[Idx++], End));
break;
-
- case Node::Field:
- E->setComponent(I, Node(Start, ReadDeclAs<FieldDecl>(Record, Idx), End));
+
+ case OffsetOfNode::Field:
+ E->setComponent(
+ I, OffsetOfNode(Start, ReadDeclAs<FieldDecl>(Record, Idx), End));
break;
- case Node::Identifier:
- E->setComponent(I,
- Node(Start,
- Reader.GetIdentifierInfo(F, Record, Idx),
- End));
+ case OffsetOfNode::Identifier:
+ E->setComponent(
+ I,
+ OffsetOfNode(Start, Reader.GetIdentifierInfo(F, Record, Idx), End));
break;
-
- case Node::Base: {
+
+ case OffsetOfNode::Base: {
CXXBaseSpecifier *Base = new (Reader.getContext()) CXXBaseSpecifier();
*Base = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
- E->setComponent(I, Node(Base));
+ E->setComponent(I, OffsetOfNode(Base));
break;
}
}
@@ -589,6 +611,15 @@ void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
E->setRBracketLoc(ReadSourceLocation(Record, Idx));
}
+void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setLowerBound(Reader.ReadSubExpr());
+ E->setLength(Reader.ReadSubExpr());
+ E->setColonLoc(ReadSourceLocation(Record, Idx));
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
E->setNumArgs(Reader.getContext(), Record[Idx++]);
@@ -821,6 +852,7 @@ void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
E->setWrittenTypeInfo(GetTypeSourceInfo(Record, Idx));
E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setIsMicrosoftABI(Record[Idx++]);
}
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
@@ -1168,9 +1200,10 @@ void ASTStmtReader::VisitCXXTryStmt(CXXTryStmt *S) {
void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
VisitStmt(S);
- S->setForLoc(ReadSourceLocation(Record, Idx));
- S->setColonLoc(ReadSourceLocation(Record, Idx));
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->ForLoc = ReadSourceLocation(Record, Idx);
+ S->CoawaitLoc = ReadSourceLocation(Record, Idx);
+ S->ColonLoc = ReadSourceLocation(Record, Idx);
+ S->RParenLoc = ReadSourceLocation(Record, Idx);
S->setRangeStmt(Reader.ReadSubStmt());
S->setBeginEndStmt(Reader.ReadSubStmt());
S->setCond(Reader.ReadSubExpr());
@@ -1422,8 +1455,10 @@ ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
if (Record[Idx++]) // HasTemplateKWAndArgsInfo
- ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
- /*NumTemplateArgs=*/Record[Idx++]);
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(),
+ /*NumTemplateArgs=*/Record[Idx++]);
E->Base = Reader.ReadSubExpr();
E->BaseType = Reader.readType(F, Record, Idx);
@@ -1439,8 +1474,10 @@ ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
if (Record[Idx++]) // HasTemplateKWAndArgsInfo
- ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
- /*NumTemplateArgs=*/Record[Idx++]);
+ ReadTemplateKWAndArgsInfo(
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>(),
+ /*NumTemplateArgs=*/Record[Idx++]);
E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
@@ -1462,7 +1499,8 @@ void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
if (Record[Idx++]) // HasTemplateKWAndArgsInfo
- ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ ReadTemplateKWAndArgsInfo(*E->getTrailingASTTemplateKWAndArgsInfo(),
+ E->getTrailingTemplateArgumentLoc(),
/*NumTemplateArgs=*/Record[Idx++]);
unsigned NumDecls = Record[Idx++];
@@ -1544,11 +1582,20 @@ void ASTStmtReader::VisitPackExpansionExpr(PackExpansionExpr *E) {
void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
VisitExpr(E);
+ unsigned NumPartialArgs = Record[Idx++];
E->OperatorLoc = ReadSourceLocation(Record, Idx);
E->PackLoc = ReadSourceLocation(Record, Idx);
E->RParenLoc = ReadSourceLocation(Record, Idx);
- E->Length = Record[Idx++];
- E->Pack = ReadDeclAs<NamedDecl>(Record, Idx);
+ E->Pack = Reader.ReadDeclAs<NamedDecl>(F, Record, Idx);
+ if (E->isPartiallySubstituted()) {
+ assert(E->Length == NumPartialArgs);
+ for (auto *I = reinterpret_cast<TemplateArgument *>(E + 1),
+ *E = I + NumPartialArgs;
+ I != E; ++I)
+ new (I) TemplateArgument(Reader.ReadTemplateArgument(F, Record, Idx));
+ } else if (!E->isValueDependent()) {
+ E->Length = Record[Idx++];
+ }
}
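// The partially-substituted branch above placement-constructs TemplateArguments
// into storage allocated directly after the expression node ("trailing
// objects"). A simplified stand-alone sketch of that allocation idiom; the
// real nodes use ASTContext allocation rather than plain operator new:
#include <new>

struct Arg { int Value; };

struct Pack {
  unsigned NumArgs;
  explicit Pack(unsigned N) : NumArgs(N) {}
  // The arguments live in the bytes immediately following this object.
  Arg *args() { return reinterpret_cast<Arg *>(this + 1); }

  static Pack *create(unsigned N) {
    void *Mem = ::operator new(sizeof(Pack) + N * sizeof(Arg));
    Pack *P = new (Mem) Pack(N);
    for (unsigned I = 0; I != N; ++I)
      new (&P->args()[I]) Arg{0}; // placement-construct each trailing slot
    return P;
  }
};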
void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
@@ -1622,6 +1669,13 @@ void ASTStmtReader::VisitMSPropertyRefExpr(MSPropertyRefExpr *E) {
E->TheDecl = ReadDeclAs<MSPropertyDecl>(Record, Idx);
}
+void ASTStmtReader::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setIdx(Reader.ReadSubExpr());
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
E->setSourceRange(ReadSourceRange(Record, Idx));
@@ -1716,6 +1770,9 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_safelen:
C = new (Context) OMPSafelenClause();
break;
+ case OMPC_simdlen:
+ C = new (Context) OMPSimdlenClause();
+ break;
case OMPC_collapse:
C = new (Context) OMPCollapseClause();
break;
@@ -1755,6 +1812,15 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_seq_cst:
C = new (Context) OMPSeqCstClause();
break;
+ case OMPC_threads:
+ C = new (Context) OMPThreadsClause();
+ break;
+ case OMPC_simd:
+ C = new (Context) OMPSIMDClause();
+ break;
+ case OMPC_nogroup:
+ C = new (Context) OMPNogroupClause();
+ break;
case OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record[Idx++]);
break;
@@ -1788,6 +1854,30 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_depend:
C = OMPDependClause::CreateEmpty(Context, Record[Idx++]);
break;
+ case OMPC_device:
+ C = new (Context) OMPDeviceClause();
+ break;
+ case OMPC_map:
+ C = OMPMapClause::CreateEmpty(Context, Record[Idx++]);
+ break;
+ case OMPC_num_teams:
+ C = new (Context) OMPNumTeamsClause();
+ break;
+ case OMPC_thread_limit:
+ C = new (Context) OMPThreadLimitClause();
+ break;
+ case OMPC_priority:
+ C = new (Context) OMPPriorityClause();
+ break;
+ case OMPC_grainsize:
+ C = new (Context) OMPGrainsizeClause();
+ break;
+ case OMPC_num_tasks:
+ C = new (Context) OMPNumTasksClause();
+ break;
+ case OMPC_hint:
+ C = new (Context) OMPHintClause();
+ break;
}
Visit(C);
C->setLocStart(Reader->ReadSourceLocation(Record, Idx));
@@ -1797,6 +1887,9 @@ OMPClause *OMPClauseReader::readClause() {
}
void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
+ C->setNameModifier(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+ C->setNameModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
C->setCondition(Reader->Reader.ReadSubExpr());
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
}
@@ -1816,6 +1909,11 @@ void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
}
+void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ C->setSimdlen(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
C->setNumForLoops(Reader->Reader.ReadSubExpr());
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
@@ -1838,14 +1936,23 @@ void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
C->setScheduleKind(
static_cast<OpenMPScheduleClauseKind>(Record[Idx++]));
+ C->setFirstScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
+ C->setSecondScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
C->setChunkSize(Reader->Reader.ReadSubExpr());
C->setHelperChunkSize(Reader->Reader.ReadSubExpr());
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setFirstScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setSecondScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
C->setScheduleKindLoc(Reader->ReadSourceLocation(Record, Idx));
C->setCommaLoc(Reader->ReadSourceLocation(Record, Idx));
}
-void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *) {}
+void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ C->setNumForLoops(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
@@ -1863,6 +1970,12 @@ void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
unsigned NumVars = C->varlist_size();
@@ -1950,6 +2063,10 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
C->setLHSExprs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
@@ -1964,6 +2081,8 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setModifier(static_cast<OpenMPLinearClauseKind>(Record[Idx++]));
+ C->setModifierLoc(Reader->ReadSourceLocation(Record, Idx));
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
@@ -1973,6 +2092,10 @@ void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Reader->Reader.ReadSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Reader.ReadSubExpr());
C->setInits(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
@@ -2065,6 +2188,58 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
C->setVarRefs(Vars);
}
+void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ C->setDevice(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setMapTypeModifier(
+ static_cast<OpenMPMapClauseKind>(Record[Idx++]));
+ C->setMapType(
+ static_cast<OpenMPMapClauseKind>(Record[Idx++]));
+ C->setMapLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ auto NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i) {
+ Vars.push_back(Reader->Reader.ReadSubExpr());
+ }
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ C->setNumTeams(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ C->setThreadLimit(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ C->setPriority(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ C->setGrainsize(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ C->setNumTasks(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
+void OMPClauseReader::VisitOMPHintClause(OMPHintClause *C) {
+ C->setHint(Reader->Reader.ReadSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+}
+
//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
@@ -2110,6 +2285,10 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
Sub.push_back(Reader.ReadSubExpr());
+ D->setPrivateCounters(Sub);
+ Sub.clear();
+ for (unsigned i = 0; i < CollapsedNum; ++i)
+ Sub.push_back(Reader.ReadSubExpr());
D->setInits(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
@@ -2126,6 +2305,7 @@ void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
++Idx;
VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
@@ -2134,6 +2314,7 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
@@ -2145,11 +2326,13 @@ void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
++Idx;
VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPSectionDirective(OMPSectionDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPSingleDirective(OMPSingleDirective *D) {
@@ -2166,12 +2349,15 @@ void ASTStmtReader::VisitOMPMasterDirective(OMPMasterDirective *D) {
void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
VisitOMPExecutableDirective(D);
ReadDeclarationNameInfo(D->DirName, Record, Idx);
}
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPParallelForSimdDirective(
@@ -2185,6 +2371,7 @@ void ASTStmtReader::VisitOMPParallelSectionsDirective(
// The NumClauses field was read in ReadStmtFromStream.
++Idx;
VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPTaskDirective(OMPTaskDirective *D) {
@@ -2192,6 +2379,7 @@ void ASTStmtReader::VisitOMPTaskDirective(OMPTaskDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
++Idx;
VisitOMPExecutableDirective(D);
+ D->setHasCancel(Record[Idx++]);
}
void ASTStmtReader::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
@@ -2223,6 +2411,8 @@ void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
VisitOMPExecutableDirective(D);
}
@@ -2246,6 +2436,12 @@ void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
+ VisitStmt(D);
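+ // The NumClauses field was read in ReadStmtFromStream.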
+ ++Idx;
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
@@ -2262,10 +2458,24 @@ void ASTStmtReader::VisitOMPCancellationPointDirective(
void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ ++Idx;
VisitOMPExecutableDirective(D);
D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
}
+void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2497,6 +2707,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ArraySubscriptExpr(Empty);
break;
+ case EXPR_OMP_ARRAY_SECTION:
+ S = new (Context) OMPArraySectionExpr(Empty);
+ break;
+
case EXPR_CALL:
S = new (Context) CallExpr(Context, Stmt::CallExprClass, Empty);
break;
@@ -2800,7 +3014,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_CRITICAL_DIRECTIVE:
- S = OMPCriticalDirective::CreateEmpty(Context, Empty);
+ S = OMPCriticalDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
case STMT_OMP_PARALLEL_FOR_DIRECTIVE: {
@@ -2851,7 +3066,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_ORDERED_DIRECTIVE:
- S = OMPOrderedDirective::CreateEmpty(Context, Empty);
+ S = OMPOrderedDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
case STMT_OMP_ATOMIC_DIRECTIVE:
@@ -2864,6 +3080,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_TARGET_DATA_DIRECTIVE:
+ S = OMPTargetDataDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_TEAMS_DIRECTIVE:
S = OMPTeamsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -2874,9 +3095,34 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_CANCEL_DIRECTIVE:
- S = OMPCancelDirective::CreateEmpty(Context, Empty);
+ S = OMPCancelDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_TASKLOOP_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTaskLoopDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case STMT_OMP_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_DISTRIBUTE_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPDistributeDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
case EXPR_CXX_OPERATOR_CALL:
S = new (Context) CXXOperatorCallExpr(Context, Empty);
break;
@@ -2944,6 +3190,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_PROPERTY_REF_EXPR:
S = new (Context) MSPropertyRefExpr(Empty);
break;
+ case EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR:
+ S = new (Context) MSPropertySubscriptExpr(Empty);
+ break;
case EXPR_CXX_UUIDOF_TYPE:
S = new (Context) CXXUuidofExpr(Empty, false);
break;
@@ -3047,7 +3296,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_SIZEOF_PACK:
- S = new (Context) SizeOfPackExpr(Empty);
+ S = SizeOfPackExpr::CreateDeserialized(
+ Context,
+ /*NumPartialArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 8b6863822c69..128935c5c73b 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -1,4 +1,4 @@
-//===--- ASTWriter.cpp - AST File Writer ----------------------------------===//
+//===--- ASTWriter.cpp - AST File Writer ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,7 +12,10 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ModuleFileExtension.h"
#include "ASTCommon.h"
+#include "ASTReaderInternals.h"
+#include "MultiOnDiskHashTable.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclContextInternals.h"
@@ -41,6 +44,7 @@
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/Sema.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Hashing.h"
@@ -56,6 +60,7 @@
#include <cstdio>
#include <string.h>
#include <utility>
+
using namespace clang;
using namespace clang::serialization;
@@ -98,7 +103,7 @@ namespace {
#define ABSTRACT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
};
-}
+} // end anonymous namespace
void ASTTypeWriter::VisitBuiltinType(const BuiltinType *T) {
llvm_unreachable("Built-in types are never serialized");
@@ -277,7 +282,7 @@ void ASTTypeWriter::VisitUnaryTransformType(const UnaryTransformType *T) {
void ASTTypeWriter::VisitAutoType(const AutoType *T) {
Writer.AddTypeRef(T->getDeducedType(), Record);
- Record.push_back(T->isDecltypeAuto());
+ Record.push_back((unsigned)T->getKeyword());
if (T->getDeducedType().isNull())
Record.push_back(T->isDependentType());
Code = TYPE_AUTO;
@@ -329,9 +334,8 @@ ASTTypeWriter::VisitTemplateSpecializationType(
Record.push_back(T->isDependentType());
Writer.AddTemplateName(T->getTemplateName(), Record);
Record.push_back(T->getNumArgs());
- for (TemplateSpecializationType::iterator ArgI = T->begin(), ArgE = T->end();
- ArgI != ArgE; ++ArgI)
- Writer.AddTemplateArgument(*ArgI, Record);
+ for (const auto &ArgI : *T)
+ Writer.AddTemplateArgument(ArgI, Record);
Writer.AddTypeRef(T->isTypeAlias() ? T->getAliasedType() :
T->isCanonicalUnqualified() ? QualType()
: T->getCanonicalTypeInternal(),
@@ -381,9 +385,8 @@ ASTTypeWriter::VisitDependentTemplateSpecializationType(
Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
Writer.AddIdentifierRef(T->getIdentifier(), Record);
Record.push_back(T->getNumArgs());
- for (DependentTemplateSpecializationType::iterator
- I = T->begin(), E = T->end(); I != E; ++I)
- Writer.AddTemplateArgument(*I, Record);
+ for (const auto &I : *T)
+ Writer.AddTemplateArgument(I, Record);
Code = TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION;
}
@@ -462,7 +465,7 @@ public:
void VisitFunctionTypeLoc(FunctionTypeLoc TyLoc);
};
-}
+} // end anonymous namespace
void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
// nothing to do
@@ -875,15 +878,17 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(METADATA);
RECORD(SIGNATURE);
RECORD(MODULE_NAME);
+ RECORD(MODULE_DIRECTORY);
RECORD(MODULE_MAP_FILE);
RECORD(IMPORTS);
- RECORD(KNOWN_MODULE_FILES);
- RECORD(LANGUAGE_OPTIONS);
- RECORD(TARGET_OPTIONS);
RECORD(ORIGINAL_FILE);
RECORD(ORIGINAL_PCH_DIR);
RECORD(ORIGINAL_FILE_ID);
RECORD(INPUT_FILE_OFFSETS);
+
+ BLOCK(OPTIONS_BLOCK);
+ RECORD(LANGUAGE_OPTIONS);
+ RECORD(TARGET_OPTIONS);
RECORD(DIAGNOSTIC_OPTIONS);
RECORD(FILE_SYSTEM_OPTIONS);
RECORD(HEADER_SEARCH_OPTIONS);
@@ -902,17 +907,17 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(SPECIAL_TYPES);
RECORD(STATISTICS);
RECORD(TENTATIVE_DEFINITIONS);
- RECORD(UNUSED_FILESCOPED_DECLS);
RECORD(SELECTOR_OFFSETS);
RECORD(METHOD_POOL);
RECORD(PP_COUNTER_VALUE);
RECORD(SOURCE_LOCATION_OFFSETS);
RECORD(SOURCE_LOCATION_PRELOADS);
RECORD(EXT_VECTOR_DECLS);
+ RECORD(UNUSED_FILESCOPED_DECLS);
RECORD(PPD_ENTITIES_OFFSETS);
+ RECORD(VTABLE_USES);
RECORD(REFERENCED_SELECTOR_POOL);
RECORD(TU_UPDATE_LEXICAL);
- RECORD(LOCAL_REDECLARATIONS_MAP);
RECORD(SEMA_DECL_REFS);
RECORD(WEAK_UNDECLARED_IDENTIFIERS);
RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
@@ -928,17 +933,20 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(OPENCL_EXTENSIONS);
RECORD(DELEGATING_CTORS);
RECORD(KNOWN_NAMESPACES);
- RECORD(UNDEFINED_BUT_USED);
RECORD(MODULE_OFFSET_MAP);
RECORD(SOURCE_MANAGER_LINE_TABLE);
RECORD(OBJC_CATEGORIES_MAP);
RECORD(FILE_SORTED_DECLS);
RECORD(IMPORTED_MODULES);
- RECORD(LOCAL_REDECLARATIONS);
RECORD(OBJC_CATEGORIES);
RECORD(MACRO_OFFSET);
+ RECORD(INTERESTING_IDENTIFIERS);
+ RECORD(UNDEFINED_BUT_USED);
RECORD(LATE_PARSED_TEMPLATE);
RECORD(OPTIMIZE_PRAGMA_OPTIONS);
+ RECORD(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES);
+ RECORD(CXX_CTOR_INITIALIZERS_OFFSETS);
+ RECORD(DELETE_EXPRS_TO_ANALYZE);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -955,6 +963,29 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PP_MODULE_MACRO);
RECORD(PP_TOKEN);
+ // Submodule Block.
+ BLOCK(SUBMODULE_BLOCK);
+ RECORD(SUBMODULE_METADATA);
+ RECORD(SUBMODULE_DEFINITION);
+ RECORD(SUBMODULE_UMBRELLA_HEADER);
+ RECORD(SUBMODULE_HEADER);
+ RECORD(SUBMODULE_TOPHEADER);
+ RECORD(SUBMODULE_UMBRELLA_DIR);
+ RECORD(SUBMODULE_IMPORTS);
+ RECORD(SUBMODULE_EXPORTS);
+ RECORD(SUBMODULE_REQUIRES);
+ RECORD(SUBMODULE_EXCLUDED_HEADER);
+ RECORD(SUBMODULE_LINK_LIBRARY);
+ RECORD(SUBMODULE_CONFIG_MACRO);
+ RECORD(SUBMODULE_CONFLICT);
+ RECORD(SUBMODULE_PRIVATE_HEADER);
+ RECORD(SUBMODULE_TEXTUAL_HEADER);
+ RECORD(SUBMODULE_PRIVATE_TEXTUAL_HEADER);
+
+ // Comments Block.
+ BLOCK(COMMENTS_BLOCK);
+ RECORD(COMMENTS_RAW_COMMENT);
+
// Decls and Types block.
BLOCK(DECLTYPES_BLOCK);
RECORD(TYPE_EXT_QUAL);
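
The BLOCK/RECORD lines in WriteBlockInfoBlock only register names: they let tools such as llvm-bcanalyzer print symbolic block and record names when dumping an AST file. Judging from the #undef RECORD / #undef BLOCK at the end of this function, the macros expand to roughly the following; treat the exact helper names as an assumption, not a quotation of the source:

    // Presumed expansion: EmitBlockID/EmitRecordID write BLOCKINFO records
    // carrying the stringized identifier as the human-readable name.
    #define BLOCK(X) EmitBlockID(X##_ID, #X, Stream, Record)
    #define RECORD(X) EmitRecordID(X, #X, Stream, Record)
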
@@ -998,6 +1029,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_ATOMIC);
RECORD(TYPE_DECAYED);
RECORD(TYPE_ADJUSTED);
+ RECORD(LOCAL_REDECLARATIONS);
RECORD(DECL_TYPEDEF);
RECORD(DECL_TYPEALIAS);
RECORD(DECL_ENUM);
@@ -1062,7 +1094,11 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PPD_MACRO_EXPANSION);
RECORD(PPD_MACRO_DEFINITION);
RECORD(PPD_INCLUSION_DIRECTIVE);
-
+
+  // Module file extensions block.
+ BLOCK(EXTENSION_BLOCK);
+ RECORD(EXTENSION_METADATA);
+
#undef RECORD
#undef BLOCK
Stream.ExitBlock();
@@ -1074,14 +1110,8 @@ void ASTWriter::WriteBlockInfoBlock() {
/// \return \c true if the path was changed.
static bool cleanPathForOutput(FileManager &FileMgr,
SmallVectorImpl<char> &Path) {
- bool Changed = false;
-
- if (!llvm::sys::path::is_absolute(StringRef(Path.data(), Path.size()))) {
- llvm::sys::fs::make_absolute(Path);
- Changed = true;
- }
-
- return Changed | FileMgr.removeDotPaths(Path);
+ bool Changed = FileMgr.makeAbsolutePath(Path);
+ return Changed | llvm::sys::path::remove_dots(Path);
}
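
The rewritten cleanPathForOutput delegates to two helpers that each report whether they changed the path: FileManager::makeAbsolutePath, which resolves against FileSystemOptions::WorkingDir rather than the raw process cwd, and llvm::sys::path::remove_dots, which folds "./" components in place. A standalone sketch of the same shape without the FileManager dependency (so absolutization here uses the process cwd, unlike the code above):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/Path.h"

    static bool cleanPath(llvm::SmallVectorImpl<char> &Path) {
      bool WasRelative = llvm::sys::path::is_relative(
          llvm::StringRef(Path.data(), Path.size()));
      if (WasRelative)
        llvm::sys::fs::make_absolute(Path);
      // Non-short-circuiting '|' on purpose: remove_dots must always run.
      return WasRelative | llvm::sys::path::remove_dots(Path);
    }
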
/// \brief Adjusts the given filename to only write out the portion of the
@@ -1140,69 +1170,78 @@ static ASTFileSignature getSignature() {
}
/// \brief Write the control block.
-void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
- StringRef isysroot,
- const std::string &OutputFile) {
+uint64_t ASTWriter::WriteControlBlock(Preprocessor &PP,
+ ASTContext &Context,
+ StringRef isysroot,
+ const std::string &OutputFile) {
+ ASTFileSignature Signature = 0;
+
using namespace llvm;
Stream.EnterSubblock(CONTROL_BLOCK_ID, 5);
RecordData Record;
// Metadata
- BitCodeAbbrev *MetadataAbbrev = new BitCodeAbbrev();
+ auto *MetadataAbbrev = new BitCodeAbbrev();
MetadataAbbrev->Add(BitCodeAbbrevOp(METADATA));
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Major
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Minor
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
unsigned MetadataAbbrevCode = Stream.EmitAbbrev(MetadataAbbrev);
- Record.push_back(METADATA);
- Record.push_back(VERSION_MAJOR);
- Record.push_back(VERSION_MINOR);
- Record.push_back(CLANG_VERSION_MAJOR);
- Record.push_back(CLANG_VERSION_MINOR);
assert((!WritingModule || isysroot.empty()) &&
"writing module as a relocatable PCH?");
- Record.push_back(!isysroot.empty());
- Record.push_back(ASTHasCompilerErrors);
- Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
- getClangFullRepositoryVersion());
-
+ {
+ RecordData::value_type Record[] = {METADATA, VERSION_MAJOR, VERSION_MINOR,
+ CLANG_VERSION_MAJOR, CLANG_VERSION_MINOR,
+ !isysroot.empty(), IncludeTimestamps,
+ ASTHasCompilerErrors};
+ Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
+ getClangFullRepositoryVersion());
+ }
if (WritingModule) {
// For implicit modules we output a signature that we can use to ensure
// duplicate module builds don't collide in the cache as their output order
// is non-deterministic.
// FIXME: Remove this when output is deterministic.
if (Context.getLangOpts().ImplicitModules) {
- Record.clear();
- Record.push_back(getSignature());
+ Signature = getSignature();
+ RecordData::value_type Record[] = {Signature};
Stream.EmitRecord(SIGNATURE, Record);
}
// Module name
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
- RecordData Record;
- Record.push_back(MODULE_NAME);
+ RecordData::value_type Record[] = {MODULE_NAME};
Stream.EmitRecordWithBlob(AbbrevCode, Record, WritingModule->Name);
}
if (WritingModule && WritingModule->Directory) {
- // Module directory.
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Directory
- unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
- RecordData Record;
- Record.push_back(MODULE_DIRECTORY);
-
SmallString<128> BaseDir(WritingModule->Directory->getName());
cleanPathForOutput(Context.getSourceManager().getFileManager(), BaseDir);
- Stream.EmitRecordWithBlob(AbbrevCode, Record, BaseDir);
+
+ // If the home of the module is the current working directory, then we
+ // want to pick up the cwd of the build process loading the module, not
+ // our cwd, when we load this module.
+ if (!PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModuleMapFileHomeIsCwd ||
+ WritingModule->Directory->getName() != StringRef(".")) {
+ // Module directory.
+ auto *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Directory
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+
+ RecordData::value_type Record[] = {MODULE_DIRECTORY};
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, BaseDir);
+ }
// Write out all other paths relative to the base directory if possible.
BaseDirectory.assign(BaseDir.begin(), BaseDir.end());
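
A pattern repeated throughout this patch: records with a fixed shape stop reusing the function-wide RecordData vector (push_back, emit, clear) and instead build a local constant-size array. EmitRecord and EmitRecordWithBlob take an ArrayRef, which binds directly to a C array, and the extra braces keep the local Record from shadowing the outer one any longer than needed. A sketch, assuming RecordData is ASTWriter's SmallVector<uint64_t, 64> typedef and with emitRecord standing in for the stream calls:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include <cstdint>

    using RecordData = llvm::SmallVector<uint64_t, 64>;

    // Stand-in for Stream.EmitRecord / Stream.EmitRecordWithBlob.
    void emitRecord(llvm::ArrayRef<uint64_t> Record) { (void)Record; }

    void example(uint64_t Code, uint64_t Payload) {
      // No heap traffic and no Record.clear() bookkeeping.
      RecordData::value_type Record[] = {Code, Payload};
      emitRecord(Record); // the array decays to ArrayRef<uint64_t>
    }
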
@@ -1246,22 +1285,16 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back((unsigned)M->Kind); // FIXME: Stable encoding
AddSourceLocation(M->ImportLoc, Record);
Record.push_back(M->File->getSize());
- Record.push_back(M->File->getModificationTime());
+ Record.push_back(getTimestampForOutput(M->File));
Record.push_back(M->Signature);
AddPath(M->FileName, Record);
}
Stream.EmitRecord(IMPORTS, Record);
-
- // Also emit a list of known module files that were not imported,
- // but are made available by this module.
- // FIXME: Should we also include a signature here?
- Record.clear();
- for (auto *E : Mgr.getAdditionalKnownModuleFiles())
- AddPath(E->getName(), Record);
- if (!Record.empty())
- Stream.EmitRecord(KNOWN_MODULE_FILES, Record);
}
+ // Write the options block.
+ Stream.EnterSubblock(OPTIONS_BLOCK_ID, 4);
+
// Language options.
Record.clear();
const LangOptions &LangOpts = Context.getLangOpts();
@@ -1285,11 +1318,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Comment options.
Record.push_back(LangOpts.CommentOpts.BlockCommandNames.size());
- for (CommentOptions::BlockCommandNamesTy::const_iterator
- I = LangOpts.CommentOpts.BlockCommandNames.begin(),
- IEnd = LangOpts.CommentOpts.BlockCommandNames.end();
- I != IEnd; ++I) {
- AddString(*I, Record);
+ for (const auto &I : LangOpts.CommentOpts.BlockCommandNames) {
+ AddString(I, Record);
}
Record.push_back(LangOpts.CommentOpts.ParseAllComments);
@@ -1332,8 +1362,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// File system options.
Record.clear();
- const FileSystemOptions &FSOpts
- = Context.getSourceManager().getFileManager().getFileSystemOptions();
+ const FileSystemOptions &FSOpts =
+ Context.getSourceManager().getFileManager().getFileSystemOpts();
AddString(FSOpts.WorkingDir, Record);
Stream.EmitRecord(FILE_SYSTEM_OPTIONS, Record);
@@ -1401,10 +1431,13 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back(static_cast<unsigned>(PPOpts.ObjCXXARCStandardLibrary));
Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record);
+ // Leave the options block.
+ Stream.ExitBlock();
+
// Original file name and file ID
SourceManager &SM = Context.getSourceManager();
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- BitCodeAbbrev *FileAbbrev = new BitCodeAbbrev();
+ auto *FileAbbrev = new BitCodeAbbrev();
FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE));
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // File ID
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
@@ -1422,18 +1455,17 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Original PCH directory
if (!OutputFile.empty() && OutputFile != "-") {
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(ORIGINAL_PCH_DIR));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
SmallString<128> OutputPath(OutputFile);
- llvm::sys::fs::make_absolute(OutputPath);
+ SM.getFileManager().makeAbsolutePath(OutputPath);
StringRef origDir = llvm::sys::path::parent_path(OutputPath);
- RecordData Record;
- Record.push_back(ORIGINAL_PCH_DIR);
+ RecordData::value_type Record[] = {ORIGINAL_PCH_DIR};
Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
}
@@ -1441,6 +1473,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
PP.getHeaderSearchInfo().getHeaderSearchOpts(),
PP.getLangOpts().Modules);
Stream.ExitBlock();
+ return Signature;
}
namespace {
@@ -1448,24 +1481,25 @@ namespace {
struct InputFileEntry {
const FileEntry *File;
bool IsSystemFile;
+ bool IsTransient;
bool BufferOverridden;
};
-}
+} // end anonymous namespace
void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
HeaderSearchOptions &HSOpts,
bool Modules) {
using namespace llvm;
Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4);
- RecordData Record;
-
+
// Create input-file abbreviation.
- BitCodeAbbrev *IFAbbrev = new BitCodeAbbrev();
+ auto *IFAbbrev = new BitCodeAbbrev();
IFAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE));
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Overridden
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Transient
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned IFAbbrevCode = Stream.EmitAbbrev(IFAbbrev);
@@ -1487,6 +1521,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
InputFileEntry Entry;
Entry.File = Cache->OrigEntry;
Entry.IsSystemFile = Cache->IsSystemFile;
+ Entry.IsTransient = Cache->IsTransient;
Entry.BufferOverridden = Cache->BufferOverridden;
if (Cache->IsSystemFile)
SortedFiles.push_back(Entry);
@@ -1497,10 +1532,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
unsigned UserFilesNum = 0;
// Write out all of the input files.
std::vector<uint64_t> InputFileOffsets;
- for (std::deque<InputFileEntry>::iterator
- I = SortedFiles.begin(), E = SortedFiles.end(); I != E; ++I) {
- const InputFileEntry &Entry = *I;
-
+ for (const auto &Entry : SortedFiles) {
uint32_t &InputFileID = InputFileIDs[Entry.File];
if (InputFileID != 0)
continue; // already recorded this file.
@@ -1513,16 +1545,15 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
if (!Entry.IsSystemFile)
++UserFilesNum;
- Record.clear();
- Record.push_back(INPUT_FILE);
- Record.push_back(InputFileOffsets.size());
-
// Emit size/modification time for this file.
- Record.push_back(Entry.File->getSize());
- Record.push_back(Entry.File->getModificationTime());
-
- // Whether this file was overridden.
- Record.push_back(Entry.BufferOverridden);
+      // And whether this file was overridden or is transient.
+ RecordData::value_type Record[] = {
+ INPUT_FILE,
+ InputFileOffsets.size(),
+ (uint64_t)Entry.File->getSize(),
+ (uint64_t)getTimestampForOutput(Entry.File),
+ Entry.BufferOverridden,
+ Entry.IsTransient};
EmitRecordWithPath(IFAbbrevCode, Record, Entry.File->getName());
}
@@ -1530,7 +1561,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
Stream.ExitBlock();
// Create input file offsets abbreviation.
- BitCodeAbbrev *OffsetsAbbrev = new BitCodeAbbrev();
+ auto *OffsetsAbbrev = new BitCodeAbbrev();
OffsetsAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE_OFFSETS));
OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # input files
OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # non-system
@@ -1539,10 +1570,8 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
unsigned OffsetsAbbrevCode = Stream.EmitAbbrev(OffsetsAbbrev);
// Write input file offsets.
- Record.clear();
- Record.push_back(INPUT_FILE_OFFSETS);
- Record.push_back(InputFileOffsets.size());
- Record.push_back(UserFilesNum);
+ RecordData::value_type Record[] = {INPUT_FILE_OFFSETS,
+ InputFileOffsets.size(), UserFilesNum};
Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, bytes(InputFileOffsets));
}
@@ -1554,7 +1583,8 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
/// file.
static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_FILE_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
@@ -1572,7 +1602,8 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
/// buffer.
static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
@@ -1586,7 +1617,8 @@ static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
/// buffer's blob.
static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_BLOB));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Blob
return Stream.EmitAbbrev(Abbrev);
@@ -1596,7 +1628,8 @@ static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
/// expansion.
static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
@@ -1631,27 +1664,25 @@ namespace {
typedef unsigned hash_value_type;
typedef unsigned offset_type;
- static hash_value_type ComputeHash(key_type_ref key) {
+ hash_value_type ComputeHash(key_type_ref key) {
// The hash is based only on size/time of the file, so that the reader can
// match even when symlinking or excess path elements ("foo/../", "../")
// change the form of the name. However, complete path is still the key.
- //
- // FIXME: Using the mtime here will cause problems for explicit module
- // imports.
return llvm::hash_combine(key.FE->getSize(),
- key.FE->getModificationTime());
+ Writer.getTimestampForOutput(key.FE));
}
std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
using namespace llvm::support;
- endian::Writer<little> Writer(Out);
+ endian::Writer<little> LE(Out);
unsigned KeyLen = strlen(key.Filename) + 1 + 8 + 8;
- Writer.write<uint16_t>(KeyLen);
+ LE.write<uint16_t>(KeyLen);
unsigned DataLen = 1 + 2 + 4 + 4;
- if (Data.isModuleHeader)
- DataLen += 4;
- Writer.write<uint8_t>(DataLen);
+ for (auto ModInfo : HS.getModuleMap().findAllModulesForHeader(key.FE))
+ if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
+ DataLen += 4;
+ LE.write<uint8_t>(DataLen);
return std::make_pair(KeyLen, DataLen);
}
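
As the comment in ComputeHash explains, the header-search key hashes only the file's size and its (now abstracted) timestamp, so the reader can match an entry even when symlinks or excess path elements ("foo/../", "../") change the spelling of the name; the complete path still participates in the final key equality check. The hash itself, in isolation:

    #include "llvm/ADT/Hashing.h"
    #include <cstdint>

    // Combine size and timestamp into one path-insensitive hash value.
    unsigned computeFileHash(uint64_t Size, uint64_t Timestamp) {
      return static_cast<unsigned>(llvm::hash_combine(Size, Timestamp));
    }

With timestamps routed through getTimestampForOutput, a writer configured to suppress timestamps hashes a stable placeholder instead of a flaky mtime.
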
@@ -1660,7 +1691,7 @@ namespace {
endian::Writer<little> LE(Out);
LE.write<uint64_t>(key.FE->getSize());
KeyLen -= 8;
- LE.write<uint64_t>(key.FE->getModificationTime());
+ LE.write<uint64_t>(Writer.getTimestampForOutput(key.FE));
KeyLen -= 8;
Out.write(key.Filename, KeyLen);
}
@@ -1671,11 +1702,9 @@ namespace {
endian::Writer<little> LE(Out);
uint64_t Start = Out.tell(); (void)Start;
- unsigned char Flags = (Data.HeaderRole << 6)
- | (Data.isImport << 5)
- | (Data.isPragmaOnce << 4)
- | (Data.DirInfo << 2)
- | (Data.Resolved << 1)
+ unsigned char Flags = (Data.isImport << 4)
+ | (Data.isPragmaOnce << 3)
+ | (Data.DirInfo << 1)
| Data.IndexHeaderMapHeader;
LE.write<uint8_t>(Flags);
LE.write<uint16_t>(Data.NumIncludes);
@@ -1702,9 +1731,15 @@ namespace {
}
LE.write<uint32_t>(Offset);
- if (Data.isModuleHeader) {
- Module *Mod = HS.findModuleForHeader(key.FE).getModule();
- LE.write<uint32_t>(Writer.getExistingSubmoduleID(Mod));
+ // FIXME: If the header is excluded, we should write out some
+ // record of that fact.
+ for (auto ModInfo : HS.getModuleMap().findAllModulesForHeader(key.FE)) {
+ if (uint32_t ModID =
+ Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule())) {
+ uint32_t Value = (ModID << 2) | (unsigned)ModInfo.getRole();
+ assert((Value >> 2) == ModID && "overflow in header module info");
+ LE.write<uint32_t>(Value);
+ }
}
assert(Out.tell() - Start == DataLen && "Wrong data length");
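
Each header-to-module association above is packed into a single uint32: the submodule ID occupies the top 30 bits and the two low bits carry the header's role, with the assert guarding against IDs too large for the shift. The reader presumably reverses it with the inverse shift and mask; a sketch of both directions:

    #include <cassert>
    #include <cstdint>

    uint32_t packHeaderModuleInfo(uint32_t ModID, unsigned Role) {
      uint32_t Value = (ModID << 2) | (Role & 3u);
      assert((Value >> 2) == ModID && "module ID needs more than 30 bits");
      return Value;
    }

    void unpackHeaderModuleInfo(uint32_t Value, uint32_t &ModID,
                                unsigned &Role) {
      ModID = Value >> 2;
      Role = Value & 3u;
    }
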
@@ -1734,12 +1769,15 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
if (!File)
continue;
- // Use HeaderSearch's getFileInfo to make sure we get the HeaderFileInfo
- // from the external source if it was not provided already.
- HeaderFileInfo HFI;
- if (!HS.tryGetFileInfo(File, HFI) ||
- (HFI.External && Chain) ||
- (HFI.isModuleHeader && !HFI.isCompilingModuleHeader))
+ // Get the file info. This will load info from the external source if
+ // necessary. Skip emitting this file if we have no information on it
+ // as a header file (in which case HFI will be null) or if it hasn't
+ // changed since it was loaded. Also skip it if it's for a modular header
+ // from a different module; in that case, we rely on the module(s)
+ // containing the header to provide this information.
+ const HeaderFileInfo *HFI =
+ HS.getExistingFileInfo(File, /*WantExternal*/!Chain);
+ if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
continue;
// Massage the file path into an appropriate form.
@@ -1753,7 +1791,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
}
HeaderFileInfoTrait::key_type key = { File, Filename };
- Generator.insert(key, HFI, GeneratorTrait);
+ Generator.insert(key, *HFI, GeneratorTrait);
++NumHeaderSearchEntries;
}
@@ -1770,7 +1808,8 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
// Create a blob abbreviation
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_TABLE));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
@@ -1779,11 +1818,8 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
unsigned TableAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the header search table
- RecordData Record;
- Record.push_back(HEADER_SEARCH_TABLE);
- Record.push_back(BucketOffset);
- Record.push_back(NumHeaderSearchEntries);
- Record.push_back(TableData.size());
+ RecordData::value_type Record[] = {HEADER_SEARCH_TABLE, BucketOffset,
+ NumHeaderSearchEntries, TableData.size()};
TableData.append(GeneratorTrait.strings_begin(),GeneratorTrait.strings_end());
Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData);
@@ -1871,9 +1907,8 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Stream.EmitRecordWithAbbrev(SLocFileAbbrv, Record);
- if (Content->BufferOverridden) {
- Record.clear();
- Record.push_back(SM_SLOC_BUFFER_BLOB);
+ if (Content->BufferOverridden || Content->IsTransient) {
+ RecordData::value_type Record[] = {SM_SLOC_BUFFER_BLOB};
const llvm::MemoryBuffer *Buffer
= Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
@@ -1892,8 +1927,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
const char *Name = Buffer->getBufferIdentifier();
Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
StringRef(Name, strlen(Name) + 1));
- Record.clear();
- Record.push_back(SM_SLOC_BUFFER_BLOB);
+ RecordData::value_type Record[] = {SM_SLOC_BUFFER_BLOB};
Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
StringRef(Buffer->getBufferStart(),
Buffer->getBufferSize() + 1));
@@ -1927,19 +1961,20 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Write the source-location offsets table into the AST block. This
// table is used for lazily loading source-location information.
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SOURCE_LOCATION_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // total size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
-
- Record.clear();
- Record.push_back(SOURCE_LOCATION_OFFSETS);
- Record.push_back(SLocEntryOffsets.size());
- Record.push_back(SourceMgr.getNextLocalOffset() - 1); // skip dummy
- Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, bytes(SLocEntryOffsets));
-
+ {
+ RecordData::value_type Record[] = {
+ SOURCE_LOCATION_OFFSETS, SLocEntryOffsets.size(),
+ SourceMgr.getNextLocalOffset() - 1 /* skip dummy */};
+ Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
+ bytes(SLocEntryOffsets));
+ }
// Write the source location entry preloads array, telling the AST
// reader which source locations entries it should load eagerly.
Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs);
@@ -1950,33 +1985,40 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
LineTableInfo &LineTable = SourceMgr.getLineTable();
Record.clear();
- // Emit the file names.
- Record.push_back(LineTable.getNumFilenames());
- for (unsigned I = 0, N = LineTable.getNumFilenames(); I != N; ++I)
- AddPath(LineTable.getFilename(I), Record);
+
+  // Emit only the file names that local line entries actually reference.
+ llvm::DenseMap<int, int> FilenameMap;
+ for (const auto &L : LineTable) {
+ if (L.first.ID < 0)
+ continue;
+ for (auto &LE : L.second) {
+ if (FilenameMap.insert(std::make_pair(LE.FilenameID,
+ FilenameMap.size())).second)
+ AddPath(LineTable.getFilename(LE.FilenameID), Record);
+ }
+ }
+  Record.push_back(0); // Mark the end of the file name list.
// Emit the line entries
- for (LineTableInfo::iterator L = LineTable.begin(), LEnd = LineTable.end();
- L != LEnd; ++L) {
+ for (const auto &L : LineTable) {
// Only emit entries for local files.
- if (L->first.ID < 0)
+ if (L.first.ID < 0)
continue;
// Emit the file ID
- Record.push_back(L->first.ID);
+ Record.push_back(L.first.ID);
// Emit the line entries
- Record.push_back(L->second.size());
- for (std::vector<LineEntry>::iterator LE = L->second.begin(),
- LEEnd = L->second.end();
- LE != LEEnd; ++LE) {
- Record.push_back(LE->FileOffset);
- Record.push_back(LE->LineNo);
- Record.push_back(LE->FilenameID);
- Record.push_back((unsigned)LE->FileKind);
- Record.push_back(LE->IncludeOffset);
+ Record.push_back(L.second.size());
+ for (const auto &LE : L.second) {
+ Record.push_back(LE.FileOffset);
+ Record.push_back(LE.LineNo);
+ Record.push_back(FilenameMap[LE.FilenameID]);
+ Record.push_back((unsigned)LE.FileKind);
+ Record.push_back(LE.IncludeOffset);
}
}
+
Stream.EmitRecord(SOURCE_MANAGER_LINE_TABLE, Record);
}
}
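
The line table used to dump every filename it had ever recorded; the new code walks the local entries first and renumbers only the filenames actually in use. The idiom hinges on map insertion semantics: the value passed to insert is the map's size before the insertion, so the first use of an ID gets the next dense number and later uses are no-ops. A standalone sketch with standard containers (noteFilenameUse is a hypothetical name):

    #include <map>
    #include <vector>

    std::map<int, int> Renumber;    // old filename ID -> dense new ID
    std::vector<int> FirstUseOrder; // old IDs, in order of first use

    // Returns the dense ID; records the filename only on its first use.
    int noteFilenameUse(int OldID) {
      auto Inserted =
          Renumber.insert({OldID, static_cast<int>(Renumber.size())});
      if (Inserted.second)
        FirstUseOrder.push_back(OldID); // AddPath(...) happens here above
      return Inserted.first->second;
    }
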
@@ -2015,19 +2057,17 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// If the preprocessor __COUNTER__ value has been bumped, remember it.
if (PP.getCounterValue() != 0) {
- Record.push_back(PP.getCounterValue());
+ RecordData::value_type Record[] = {PP.getCounterValue()};
Stream.EmitRecord(PP_COUNTER_VALUE, Record);
- Record.clear();
}
// Enter the preprocessor block.
Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 3);
// If the AST file contains __DATE__ or __TIME__ emit a warning about this.
- // FIXME: use diagnostics subsystem for localization etc.
+ // FIXME: Include a location for the use, and say which one was used.
if (PP.SawDateOrTime())
- fprintf(stderr, "warning: precompiled header used __DATE__ or __TIME__.\n");
-
+ PP.Diag(SourceLocation(), diag::warn_module_uses_date_time) << IsModule;
// Loop over all the macro directives that are live at the end of the file,
// emitting each to the PP section.
@@ -2177,6 +2217,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// Write the offsets table for macro IDs.
using namespace llvm;
+
auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros
@@ -2184,12 +2225,11 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
- Record.clear();
- Record.push_back(MACRO_OFFSET);
- Record.push_back(MacroOffsets.size());
- Record.push_back(FirstMacroID - NUM_PREDEF_MACRO_IDS);
- Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record,
- bytes(MacroOffsets));
+ {
+ RecordData::value_type Record[] = {MACRO_OFFSET, MacroOffsets.size(),
+ FirstMacroID - NUM_PREDEF_MACRO_IDS};
+ Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
+ }
}
void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
@@ -2208,7 +2248,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
// Set up the abbreviation for inclusion directives.
unsigned InclusionAbbrev = 0;
{
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(PPD_INCLUSION_DIRECTIVE));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes
@@ -2232,7 +2272,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
PreprocessedEntityOffsets.push_back(
PPEntityOffset((*E)->getSourceRange(), Stream.GetCurrentBitNo()));
- if (MacroDefinitionRecord *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
+ if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
// Record this macro definition's ID.
MacroDefinitions[MD] = NextPreprocessorEntityID;
@@ -2241,7 +2281,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
continue;
}
- if (MacroExpansion *ME = dyn_cast<MacroExpansion>(*E)) {
+ if (auto *ME = dyn_cast<MacroExpansion>(*E)) {
Record.push_back(ME->isBuiltinMacro());
if (ME->isBuiltinMacro())
AddIdentifierRef(ME->getName(), Record);
@@ -2251,7 +2291,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
continue;
}
- if (InclusionDirective *ID = dyn_cast<InclusionDirective>(*E)) {
+ if (auto *ID = dyn_cast<InclusionDirective>(*E)) {
Record.push_back(PPD_INCLUSION_DIRECTIVE);
Record.push_back(ID->getFileName().size());
Record.push_back(ID->wasInQuotes());
@@ -2277,46 +2317,50 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
// Write the offsets table for preprocessed entity IDs.
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(PPD_ENTITIES_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first pp entity
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned PPEOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
- Record.clear();
- Record.push_back(PPD_ENTITIES_OFFSETS);
- Record.push_back(FirstPreprocessorEntityID - NUM_PREDEF_PP_ENTITY_IDS);
+ RecordData::value_type Record[] = {PPD_ENTITIES_OFFSETS,
+ FirstPreprocessorEntityID -
+ NUM_PREDEF_PP_ENTITY_IDS};
Stream.EmitRecordWithBlob(PPEOffsetAbbrev, Record,
bytes(PreprocessedEntityOffsets));
}
}
-unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+unsigned ASTWriter::getLocalOrImportedSubmoduleID(Module *Mod) {
+ if (!Mod)
+ return 0;
+
llvm::DenseMap<Module *, unsigned>::iterator Known = SubmoduleIDs.find(Mod);
if (Known != SubmoduleIDs.end())
return Known->second;
-
- return SubmoduleIDs[Mod] = NextSubmoduleID++;
-}
-unsigned ASTWriter::getExistingSubmoduleID(Module *Mod) const {
- if (!Mod)
+ if (Mod->getTopLevelModule() != WritingModule)
return 0;
- llvm::DenseMap<Module *, unsigned>::const_iterator
- Known = SubmoduleIDs.find(Mod);
- if (Known != SubmoduleIDs.end())
- return Known->second;
+ return SubmoduleIDs[Mod] = NextSubmoduleID++;
+}
- return 0;
+unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+  // FIXME: This can easily happen if we have a reference to a submodule that
+ // did not result in us loading a module file for that submodule. For
+ // instance, a cross-top-level-module 'conflict' declaration will hit this.
+ unsigned ID = getLocalOrImportedSubmoduleID(Mod);
+ assert((ID || !Mod) &&
+ "asked for module ID for non-local, non-imported module");
+ return ID;
}
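
The old getSubmoduleID/getExistingSubmoduleID pair collapses into getLocalOrImportedSubmoduleID with a deliberate policy: fresh IDs are handed out only for submodules of the module being written, IDs already assigned (including for imported modules) are reused, and everything else maps to the sentinel 0, which emit sites such as the header table above test for. getSubmoduleID is now just an asserting wrapper. A paraphrase of the decision order with deliberately simplified types:

    #include <map>

    struct Module { Module *TopLevelModule; };

    std::map<Module *, unsigned> SubmoduleIDs;
    unsigned NextSubmoduleID = 1;
    Module *WritingModule = nullptr;

    unsigned getLocalOrImportedSubmoduleID(Module *Mod) {
      if (!Mod)
        return 0;
      auto Known = SubmoduleIDs.find(Mod);
      if (Known != SubmoduleIDs.end())
        return Known->second;            // already numbered or imported
      if (Mod->TopLevelModule != WritingModule)
        return 0;                        // non-local: caller must handle 0
      return SubmoduleIDs[Mod] = NextSubmoduleID++;
    }
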
/// \brief Compute the number of modules within the given tree (including the
/// given module).
static unsigned getNumberOfModules(Module *Mod) {
unsigned ChildModules = 0;
- for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
+ for (auto Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub)
ChildModules += getNumberOfModules(*Sub);
@@ -2329,7 +2373,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Write the abbreviations needed for the submodules block.
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
@@ -2408,9 +2453,9 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
unsigned ConflictAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the submodule metadata block.
- RecordData Record;
- Record.push_back(getNumberOfModules(WritingModule));
- Record.push_back(FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS);
+ RecordData::value_type Record[] = {getNumberOfModules(WritingModule),
+ FirstSubmoduleID -
+ NUM_PREDEF_SUBMODULE_IDS};
Stream.EmitRecord(SUBMODULE_METADATA, Record);
// Write all of the submodules.
@@ -2420,46 +2465,37 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Module *Mod = Q.front();
Q.pop();
unsigned ID = getSubmoduleID(Mod);
-
- // Emit the definition of the block.
- Record.clear();
- Record.push_back(SUBMODULE_DEFINITION);
- Record.push_back(ID);
+
+ uint64_t ParentID = 0;
if (Mod->Parent) {
assert(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?");
- Record.push_back(SubmoduleIDs[Mod->Parent]);
- } else {
- Record.push_back(0);
+ ParentID = SubmoduleIDs[Mod->Parent];
}
- Record.push_back(Mod->IsFramework);
- Record.push_back(Mod->IsExplicit);
- Record.push_back(Mod->IsSystem);
- Record.push_back(Mod->IsExternC);
- Record.push_back(Mod->InferSubmodules);
- Record.push_back(Mod->InferExplicitSubmodules);
- Record.push_back(Mod->InferExportWildcard);
- Record.push_back(Mod->ConfigMacrosExhaustive);
- Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
-
+
+ // Emit the definition of the block.
+ {
+ RecordData::value_type Record[] = {
+ SUBMODULE_DEFINITION, ID, ParentID, Mod->IsFramework, Mod->IsExplicit,
+ Mod->IsSystem, Mod->IsExternC, Mod->InferSubmodules,
+ Mod->InferExplicitSubmodules, Mod->InferExportWildcard,
+ Mod->ConfigMacrosExhaustive};
+ Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
+ }
+
// Emit the requirements.
- for (unsigned I = 0, N = Mod->Requirements.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_REQUIRES);
- Record.push_back(Mod->Requirements[I].second);
- Stream.EmitRecordWithBlob(RequiresAbbrev, Record,
- Mod->Requirements[I].first);
+ for (const auto &R : Mod->Requirements) {
+ RecordData::value_type Record[] = {SUBMODULE_REQUIRES, R.second};
+ Stream.EmitRecordWithBlob(RequiresAbbrev, Record, R.first);
}
// Emit the umbrella header, if there is one.
if (auto UmbrellaHeader = Mod->getUmbrellaHeader()) {
- Record.clear();
- Record.push_back(SUBMODULE_UMBRELLA_HEADER);
+ RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_HEADER};
Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
UmbrellaHeader.NameAsWritten);
} else if (auto UmbrellaDir = Mod->getUmbrellaDir()) {
- Record.clear();
- Record.push_back(SUBMODULE_UMBRELLA_DIR);
- Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
+ RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_DIR};
+ Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
UmbrellaDir.NameAsWritten);
}
@@ -2477,8 +2513,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
{SUBMODULE_EXCLUDED_HEADER, ExcludedHeaderAbbrev, Module::HK_Excluded}
};
for (auto &HL : HeaderLists) {
- Record.clear();
- Record.push_back(HL.RecordKind);
+ RecordData::value_type Record[] = {HL.RecordKind};
for (auto &H : Mod->Headers[HL.HeaderKind])
Stream.EmitRecordWithBlob(HL.Abbrev, Record, H.NameAsWritten);
}
@@ -2486,35 +2521,27 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Emit the top headers.
{
auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
- Record.clear();
- Record.push_back(SUBMODULE_TOPHEADER);
+ RecordData::value_type Record[] = {SUBMODULE_TOPHEADER};
for (auto *H : TopHeaders)
Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
}
// Emit the imports.
if (!Mod->Imports.empty()) {
- Record.clear();
- for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
- unsigned ImportedID = getSubmoduleID(Mod->Imports[I]);
- assert(ImportedID && "Unknown submodule!");
- Record.push_back(ImportedID);
- }
+ RecordData Record;
+ for (auto *I : Mod->Imports)
+ Record.push_back(getSubmoduleID(I));
Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
}
// Emit the exports.
if (!Mod->Exports.empty()) {
- Record.clear();
- for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) {
- if (Module *Exported = Mod->Exports[I].getPointer()) {
- unsigned ExportedID = getSubmoduleID(Exported);
- Record.push_back(ExportedID);
- } else {
- Record.push_back(0);
- }
-
- Record.push_back(Mod->Exports[I].getInt());
+ RecordData Record;
+ for (const auto &E : Mod->Exports) {
+ // FIXME: This may fail; we don't require that all exported modules
+ // are local or imported.
+ Record.push_back(getSubmoduleID(E.getPointer()));
+ Record.push_back(E.getInt());
}
Stream.EmitRecord(SUBMODULE_EXPORTS, Record);
}
@@ -2524,45 +2551,34 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// module itself.
// Emit the link libraries.
- for (unsigned I = 0, N = Mod->LinkLibraries.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_LINK_LIBRARY);
- Record.push_back(Mod->LinkLibraries[I].IsFramework);
- Stream.EmitRecordWithBlob(LinkLibraryAbbrev, Record,
- Mod->LinkLibraries[I].Library);
+ for (const auto &LL : Mod->LinkLibraries) {
+ RecordData::value_type Record[] = {SUBMODULE_LINK_LIBRARY,
+ LL.IsFramework};
+ Stream.EmitRecordWithBlob(LinkLibraryAbbrev, Record, LL.Library);
}
// Emit the conflicts.
- for (unsigned I = 0, N = Mod->Conflicts.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_CONFLICT);
- unsigned OtherID = getSubmoduleID(Mod->Conflicts[I].Other);
- assert(OtherID && "Unknown submodule!");
- Record.push_back(OtherID);
- Stream.EmitRecordWithBlob(ConflictAbbrev, Record,
- Mod->Conflicts[I].Message);
+ for (const auto &C : Mod->Conflicts) {
+ // FIXME: This may fail; we don't require that all conflicting modules
+ // are local or imported.
+ RecordData::value_type Record[] = {SUBMODULE_CONFLICT,
+ getSubmoduleID(C.Other)};
+ Stream.EmitRecordWithBlob(ConflictAbbrev, Record, C.Message);
}
// Emit the configuration macros.
- for (unsigned I = 0, N = Mod->ConfigMacros.size(); I != N; ++I) {
- Record.clear();
- Record.push_back(SUBMODULE_CONFIG_MACRO);
- Stream.EmitRecordWithBlob(ConfigMacroAbbrev, Record,
- Mod->ConfigMacros[I]);
+ for (const auto &CM : Mod->ConfigMacros) {
+ RecordData::value_type Record[] = {SUBMODULE_CONFIG_MACRO};
+ Stream.EmitRecordWithBlob(ConfigMacroAbbrev, Record, CM);
}
// Queue up the submodules of this module.
- for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub)
- Q.push(*Sub);
+ for (auto *M : Mod->submodules())
+ Q.push(M);
}
Stream.ExitBlock();
- // FIXME: This can easily happen, if we have a reference to a submodule that
- // did not result in us loading a module file for that submodule. For
- // instance, a cross-top-level-module 'conflict' declaration will hit this.
assert((NextSubmoduleID - FirstSubmoduleID ==
getNumberOfModules(WritingModule)) &&
"Wrong # of submodules; found a reference to a non-local, "
@@ -2614,11 +2630,10 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
if (DiagStateID == 0) {
DiagStateID = ++CurrID;
- for (DiagnosticsEngine::DiagState::const_iterator
- I = point.State->begin(), E = point.State->end(); I != E; ++I) {
- if (I->second.isPragma()) {
- Record.push_back(I->first);
- Record.push_back((unsigned)I->second.getSeverity());
+ for (const auto &I : *(point.State)) {
+ if (I.second.isPragma()) {
+ Record.push_back(I.first);
+ Record.push_back((unsigned)I.second.getSeverity());
}
}
Record.push_back(-1); // mark the end of the diag/map pairs for this
@@ -2634,21 +2649,18 @@ void ASTWriter::WriteCXXCtorInitializersOffsets() {
if (CXXCtorInitializersOffsets.empty())
return;
- RecordData Record;
-
// Create a blob abbreviation for the C++ ctor initializer offsets.
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(CXX_CTOR_INITIALIZERS_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned CtorInitializersOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the base specifier offsets table.
- Record.clear();
- Record.push_back(CXX_CTOR_INITIALIZERS_OFFSETS);
- Record.push_back(CXXCtorInitializersOffsets.size());
+ RecordData::value_type Record[] = {CXX_CTOR_INITIALIZERS_OFFSETS,
+ CXXCtorInitializersOffsets.size()};
Stream.EmitRecordWithBlob(CtorInitializersOffsetAbbrev, Record,
bytes(CXXCtorInitializersOffsets));
}
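
This table and WriteCXXBaseSpecifiersOffsets below share one shape: a record carrying the element count plus a blob that is the raw memory of the offsets vector, turned into a StringRef by ASTWriter's bytes() helper. A sketch of what such a helper looks like (the real one lives near the top of this file; note that host endianness leaks into the blob, which is acceptable for a non-portable AST file):

    #include "llvm/ADT/StringRef.h"
    #include <vector>

    template <typename T>
    llvm::StringRef bytes(const std::vector<T> &V) {
      if (V.empty())
        return llvm::StringRef();
      return llvm::StringRef(reinterpret_cast<const char *>(V.data()),
                             sizeof(T) * V.size());
    }
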
@@ -2657,21 +2669,18 @@ void ASTWriter::WriteCXXBaseSpecifiersOffsets() {
if (CXXBaseSpecifiersOffsets.empty())
return;
- RecordData Record;
-
// Create a blob abbreviation for the C++ base specifiers offsets.
using namespace llvm;
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(CXX_BASE_SPECIFIER_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned BaseSpecifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the base specifier offsets table.
- Record.clear();
- Record.push_back(CXX_BASE_SPECIFIER_OFFSETS);
- Record.push_back(CXXBaseSpecifiersOffsets.size());
+ RecordData::value_type Record[] = {CXX_BASE_SPECIFIER_OFFSETS,
+ CXXBaseSpecifiersOffsets.size()};
Stream.EmitRecordWithBlob(BaseSpecifierOffsetAbbrev, Record,
bytes(CXXBaseSpecifiersOffsets));
}
@@ -2742,33 +2751,34 @@ uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
return 0;
uint64_t Offset = Stream.GetCurrentBitNo();
- RecordData Record;
- Record.push_back(DECL_CONTEXT_LEXICAL);
- SmallVector<KindDeclIDPair, 64> Decls;
- for (const auto *D : DC->decls())
- Decls.push_back(std::make_pair(D->getKind(), GetDeclRef(D)));
+ SmallVector<uint32_t, 128> KindDeclPairs;
+ for (const auto *D : DC->decls()) {
+ KindDeclPairs.push_back(D->getKind());
+ KindDeclPairs.push_back(GetDeclRef(D));
+ }
++NumLexicalDeclContexts;
- Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record, bytes(Decls));
+ RecordData::value_type Record[] = {DECL_CONTEXT_LEXICAL};
+ Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record,
+ bytes(KindDeclPairs));
return Offset;
}
void ASTWriter::WriteTypeDeclOffsets() {
using namespace llvm;
- RecordData Record;
// Write the type offsets array
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(TYPE_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of types
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base type index
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // types block
unsigned TypeOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
- Record.clear();
- Record.push_back(TYPE_OFFSET);
- Record.push_back(TypeOffsets.size());
- Record.push_back(FirstTypeID - NUM_PREDEF_TYPE_IDS);
- Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, bytes(TypeOffsets));
+ {
+ RecordData::value_type Record[] = {TYPE_OFFSET, TypeOffsets.size(),
+ FirstTypeID - NUM_PREDEF_TYPE_IDS};
+ Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, bytes(TypeOffsets));
+ }
// Write the declaration offsets array
Abbrev = new BitCodeAbbrev();
@@ -2777,16 +2787,15 @@ void ASTWriter::WriteTypeDeclOffsets() {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base decl ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // declarations block
unsigned DeclOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
- Record.clear();
- Record.push_back(DECL_OFFSET);
- Record.push_back(DeclOffsets.size());
- Record.push_back(FirstDeclID - NUM_PREDEF_DECL_IDS);
- Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, bytes(DeclOffsets));
+ {
+ RecordData::value_type Record[] = {DECL_OFFSET, DeclOffsets.size(),
+ FirstDeclID - NUM_PREDEF_DECL_IDS};
+ Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, bytes(DeclOffsets));
+ }
}
void ASTWriter::WriteFileDeclIDsMap() {
using namespace llvm;
- RecordData Record;
SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
FileDeclIDs.begin(), FileDeclIDs.end());
@@ -2802,13 +2811,13 @@ void ASTWriter::WriteFileDeclIDsMap() {
FileGroupedDeclIDs.push_back(LocDeclEntry.second);
}
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
- Record.push_back(FILE_SORTED_DECLS);
- Record.push_back(FileGroupedDeclIDs.size());
+ RecordData::value_type Record[] = {FILE_SORTED_DECLS,
+ FileGroupedDeclIDs.size()};
Stream.EmitRecordWithBlob(AbbrevCode, Record, bytes(FileGroupedDeclIDs));
}
@@ -2816,14 +2825,12 @@ void ASTWriter::WriteComments() {
Stream.EnterSubblock(COMMENTS_BLOCK_ID, 3);
ArrayRef<RawComment *> RawComments = Context->Comments.getComments();
RecordData Record;
- for (ArrayRef<RawComment *>::iterator I = RawComments.begin(),
- E = RawComments.end();
- I != E; ++I) {
+ for (const auto *I : RawComments) {
Record.clear();
- AddSourceRange((*I)->getSourceRange(), Record);
- Record.push_back((*I)->getKind());
- Record.push_back((*I)->isTrailingComment());
- Record.push_back((*I)->isAlmostTrailingComment());
+ AddSourceRange(I->getSourceRange(), Record);
+ Record.push_back(I->getKind());
+ Record.push_back(I->isTrailingComment());
+ Record.push_back(I->isAlmostTrailingComment());
Stream.EmitRecord(COMMENTS_RAW_COMMENT, Record);
}
Stream.ExitBlock();
@@ -3010,7 +3017,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
}
// Create a blob abbreviation
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(METHOD_POOL));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
@@ -3018,11 +3025,11 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
unsigned MethodPoolAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the method pool
- RecordData Record;
- Record.push_back(METHOD_POOL);
- Record.push_back(BucketOffset);
- Record.push_back(NumTableEntries);
- Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool);
+ {
+ RecordData::value_type Record[] = {METHOD_POOL, BucketOffset,
+ NumTableEntries};
+ Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool);
+ }
// Create a blob abbreviation for the selector table offsets.
Abbrev = new BitCodeAbbrev();
@@ -3033,12 +3040,13 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
unsigned SelectorOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the selector offsets table.
- Record.clear();
- Record.push_back(SELECTOR_OFFSETS);
- Record.push_back(SelectorOffsets.size());
- Record.push_back(FirstSelectorID - NUM_PREDEF_SELECTOR_IDS);
- Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record,
- bytes(SelectorOffsets));
+ {
+ RecordData::value_type Record[] = {
+ SELECTOR_OFFSETS, SelectorOffsets.size(),
+ FirstSelectorID - NUM_PREDEF_SELECTOR_IDS};
+ Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record,
+ bytes(SelectorOffsets));
+ }
}
}
@@ -3102,18 +3110,20 @@ class ASTIdentifierTableTrait {
ASTWriter &Writer;
Preprocessor &PP;
IdentifierResolver &IdResolver;
+ bool IsModule;
+ bool NeedDecls;
+ ASTWriter::RecordData *InterestingIdentifierOffsets;
/// \brief Determines whether this is an "interesting" identifier that needs a
/// full IdentifierInfo structure written into the hash table. Notably, this
/// doesn't check whether the name has macros defined; use PublicMacroIterator
/// to check that.
- bool isInterestingIdentifier(IdentifierInfo *II, uint64_t MacroOffset) {
+ bool isInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset) {
if (MacroOffset ||
II->isPoisoned() ||
- II->isExtensionToken() ||
- II->getObjCOrBuiltinID() ||
+ (IsModule ? II->hasRevertedBuiltin() : II->getObjCOrBuiltinID()) ||
II->hasRevertedTokenIDToIdentifier() ||
- II->getFETokenInfo<void>())
+ (NeedDecls && II->getFETokenInfo<void>()))
return true;
return false;
@@ -3130,13 +3140,24 @@ public:
typedef unsigned offset_type;
ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP,
- IdentifierResolver &IdResolver)
- : Writer(Writer), PP(PP), IdResolver(IdResolver) {}
+ IdentifierResolver &IdResolver, bool IsModule,
+ ASTWriter::RecordData *InterestingIdentifierOffsets)
+ : Writer(Writer), PP(PP), IdResolver(IdResolver), IsModule(IsModule),
+ NeedDecls(!IsModule || !Writer.getLangOpts().CPlusPlus),
+ InterestingIdentifierOffsets(InterestingIdentifierOffsets) {}
static hash_value_type ComputeHash(const IdentifierInfo* II) {
return llvm::HashString(II->getName());
}
+ bool isInterestingIdentifier(const IdentifierInfo *II) {
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+ return isInterestingIdentifier(II, MacroOffset);
+ }
+ bool isInterestingNonMacroIdentifier(const IdentifierInfo *II) {
+ return isInterestingIdentifier(II, 0);
+ }
+
std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
unsigned KeyLen = II->getLength() + 1;
@@ -3148,10 +3169,12 @@ public:
if (MacroOffset)
DataLen += 4; // MacroDirectives offset.
- for (IdentifierResolver::iterator D = IdResolver.begin(II),
- DEnd = IdResolver.end();
- D != DEnd; ++D)
- DataLen += 4;
+ if (NeedDecls) {
+ for (IdentifierResolver::iterator D = IdResolver.begin(II),
+ DEnd = IdResolver.end();
+ D != DEnd; ++D)
+ DataLen += 4;
+ }
}
using namespace llvm::support;
endian::Writer<little> LE(Out);
@@ -3170,6 +3193,12 @@ public:
// Record the location of the key data. This is used when generating
// the mapping from persistent IDs to strings.
Writer.SetIdentifierOffset(II, Out.tell());
+
+ // Emit the offset of the key/data length information to the interesting
+ // identifiers table if necessary.
+ if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
+ InterestingIdentifierOffsets->push_back(Out.tell() - 4);
+
Out.write(II->getNameStart(), KeyLen);
}
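
The "Out.tell() - 4" works because, by the time EmitKey runs, the on-disk hash table generator has already written this entry's two little-endian uint16 length fields; backing up four bytes therefore yields the offset of the entry's length prefix, which is exactly what a reader needs to jump straight to an interesting identifier without hashing. A sketch of the assumed layout (field order inferred from the trait's EmitKeyDataLength, which emits the data length before the key length):

    #include <cstdint>

    // Presumed per-entry layout: <datalen:u16le><keylen:u16le><key><data>.
    struct EntryHeader { uint16_t DataLen; uint16_t KeyLen; };

    uint32_t lengthPrefixOffset(uint32_t KeyStartOffset) {
      return KeyStartOffset - sizeof(EntryHeader); // == KeyStartOffset - 4
    }
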
@@ -3193,6 +3222,7 @@ public:
Bits = (Bits << 1) | unsigned(HadMacroDefinition);
Bits = (Bits << 1) | unsigned(II->isExtensionToken());
Bits = (Bits << 1) | unsigned(II->isPoisoned());
+ Bits = (Bits << 1) | unsigned(II->hasRevertedBuiltin());
Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
LE.write<uint16_t>(Bits);
@@ -3200,18 +3230,21 @@ public:
if (HadMacroDefinition)
LE.write<uint32_t>(MacroOffset);
- // Emit the declaration IDs in reverse order, because the
- // IdentifierResolver provides the declarations as they would be
- // visible (e.g., the function "stat" would come before the struct
- // "stat"), but the ASTReader adds declarations to the end of the list
- // (so we need to see the struct "stat" before the function "stat").
- // Only emit declarations that aren't from a chained PCH, though.
- SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II), IdResolver.end());
- for (SmallVectorImpl<NamedDecl *>::reverse_iterator D = Decls.rbegin(),
- DEnd = Decls.rend();
- D != DEnd; ++D)
- LE.write<uint32_t>(
- Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), *D)));
+ if (NeedDecls) {
+ // Emit the declaration IDs in reverse order, because the
+ // IdentifierResolver provides the declarations as they would be
+ // visible (e.g., the function "stat" would come before the struct
+ // "stat"), but the ASTReader adds declarations to the end of the list
+ // (so we need to see the struct "stat" before the function "stat").
+ // Only emit declarations that aren't from a chained PCH, though.
+ SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II),
+ IdResolver.end());
+ for (SmallVectorImpl<NamedDecl *>::reverse_iterator D = Decls.rbegin(),
+ DEnd = Decls.rend();
+ D != DEnd; ++D)
+ LE.write<uint32_t>(
+ Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), *D)));
+ }
}
};
} // end anonymous namespace
@@ -3226,11 +3259,15 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
bool IsModule) {
using namespace llvm;
+ RecordData InterestingIdents;
+
// Create and write out the blob that contains the identifier
// strings.
{
llvm::OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
- ASTIdentifierTableTrait Trait(*this, PP, IdResolver);
+ ASTIdentifierTableTrait Trait(
+ *this, PP, IdResolver, IsModule,
+ (getLangOpts().CPlusPlus && IsModule) ? &InterestingIdents : nullptr);
// Look for any identifiers that were named while processing the
// headers, but are otherwise not needed. We add these to the hash
@@ -3238,21 +3275,20 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// where the user adds new macro definitions when building the AST
// file.
SmallVector<const IdentifierInfo *, 128> IIs;
- for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
- IDEnd = PP.getIdentifierTable().end();
- ID != IDEnd; ++ID)
- IIs.push_back(ID->second);
+ for (const auto &ID : PP.getIdentifierTable())
+ IIs.push_back(ID.second);
// Sort the identifiers lexicographically before getting them references so
// that their order is stable.
std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
for (const IdentifierInfo *II : IIs)
- getIdentifierRef(II);
+ if (Trait.isInterestingNonMacroIdentifier(II))
+ getIdentifierRef(II);
// Create the on-disk hash table representation. We only store offsets
// for identifiers that appear here for the first time.
IdentifierOffsets.resize(NextIdentID - FirstIdentID);
for (auto IdentIDPair : IdentifierIDs) {
- IdentifierInfo *II = const_cast<IdentifierInfo *>(IdentIDPair.first);
+ auto *II = const_cast<IdentifierInfo *>(IdentIDPair.first);
IdentID ID = IdentIDPair.second;
assert(II && "NULL identifier in identifier table");
if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
@@ -3271,21 +3307,19 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
}
// Create a blob abbreviation
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_TABLE));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the identifier table
- RecordData Record;
- Record.push_back(IDENTIFIER_TABLE);
- Record.push_back(BucketOffset);
+ RecordData::value_type Record[] = {IDENTIFIER_TABLE, BucketOffset};
Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable);
}
// Write the offsets table for identifier IDs.
- BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of identifiers
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
@@ -3296,13 +3330,17 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
for (unsigned I = 0, N = IdentifierOffsets.size(); I != N; ++I)
assert(IdentifierOffsets[I] && "Missing identifier offset?");
#endif
-
- RecordData Record;
- Record.push_back(IDENTIFIER_OFFSET);
- Record.push_back(IdentifierOffsets.size());
- Record.push_back(FirstIdentID - NUM_PREDEF_IDENT_IDS);
+
+ RecordData::value_type Record[] = {IDENTIFIER_OFFSET,
+ IdentifierOffsets.size(),
+ FirstIdentID - NUM_PREDEF_IDENT_IDS};
Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record,
bytes(IdentifierOffsets));
+
+ // In C++, write the list of interesting identifiers (those that are
+ // defined as macros, poisoned, or similar unusual things).
+ if (!InterestingIdents.empty())
+ Stream.EmitRecord(INTERESTING_IDENTIFIERS, InterestingIdents);
}
//===----------------------------------------------------------------------===//
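A note on the recurring RecordData::value_type Record[] = {...} rewrite throughout this patch: the bitstream emitters only need a contiguous view of the record's values, so a record whose shape is known at compile time can live in a stack array instead of a heap-allocated vector. A minimal standalone sketch of the idea (the emitter is a stand-in, not the LLVM bitstream API):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for record emission: consumes any contiguous run of values.
static void emitRecord(const uint64_t *Data, size_t N) {
  for (size_t I = 0; I != N; ++I)
    std::printf("field[%zu] = %llu\n", I, (unsigned long long)Data[I]);
}

int main() {
  // Old style: build the record dynamically, paying for a heap allocation.
  std::vector<uint64_t> Old;
  Old.push_back(42); // record kind
  Old.push_back(7);  // payload
  emitRecord(Old.data(), Old.size());

  // New style: the record's shape is fixed, so a stack array suffices.
  uint64_t New[] = {42, 7};
  emitRecord(New, sizeof(New) / sizeof(New[0]));
}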
@@ -3313,12 +3351,14 @@ namespace {
// Trait used for the on-disk hash table used in the method pool.
class ASTDeclContextNameLookupTrait {
ASTWriter &Writer;
+ llvm::SmallVector<DeclID, 64> DeclIDs;
public:
- typedef DeclarationName key_type;
+ typedef DeclarationNameKey key_type;
typedef key_type key_type_ref;
- typedef DeclContext::lookup_result data_type;
+ /// A start and end index into DeclIDs, representing a sequence of decls.
+ typedef std::pair<unsigned, unsigned> data_type;
typedef const data_type& data_type_ref;
typedef unsigned hash_value_type;
@@ -3326,42 +3366,47 @@ public:
explicit ASTDeclContextNameLookupTrait(ASTWriter &Writer) : Writer(Writer) { }
- hash_value_type ComputeHash(DeclarationName Name) {
- llvm::FoldingSetNodeID ID;
- ID.AddInteger(Name.getNameKind());
-
- switch (Name.getNameKind()) {
- case DeclarationName::Identifier:
- ID.AddString(Name.getAsIdentifierInfo()->getName());
- break;
- case DeclarationName::ObjCZeroArgSelector:
- case DeclarationName::ObjCOneArgSelector:
- case DeclarationName::ObjCMultiArgSelector:
- ID.AddInteger(serialization::ComputeHash(Name.getObjCSelector()));
- break;
- case DeclarationName::CXXConstructorName:
- case DeclarationName::CXXDestructorName:
- case DeclarationName::CXXConversionFunctionName:
- break;
- case DeclarationName::CXXOperatorName:
- ID.AddInteger(Name.getCXXOverloadedOperator());
- break;
- case DeclarationName::CXXLiteralOperatorName:
- ID.AddString(Name.getCXXLiteralIdentifier()->getName());
- case DeclarationName::CXXUsingDirective:
- break;
+ template<typename Coll>
+ data_type getData(const Coll &Decls) {
+ unsigned Start = DeclIDs.size();
+ for (NamedDecl *D : Decls) {
+ DeclIDs.push_back(
+ Writer.GetDeclRef(getDeclForLocalLookup(Writer.getLangOpts(), D)));
}
+ return std::make_pair(Start, DeclIDs.size());
+ }
- return ID.ComputeHash();
+ data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) {
+ unsigned Start = DeclIDs.size();
+ for (auto ID : FromReader)
+ DeclIDs.push_back(ID);
+ return std::make_pair(Start, DeclIDs.size());
}
- std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, DeclarationName Name,
- data_type_ref Lookup) {
+ static bool EqualKey(key_type_ref a, key_type_ref b) {
+ return a == b;
+ }
+
+ hash_value_type ComputeHash(DeclarationNameKey Name) {
+ return Name.getHash();
+ }
+
+ void EmitFileRef(raw_ostream &Out, ModuleFile *F) const {
+ assert(Writer.hasChain() &&
+ "have reference to loaded module file but no chain?");
+
+ using namespace llvm::support;
+ endian::Writer<little>(Out)
+ .write<uint32_t>(Writer.getChain()->getModuleFileID(F));
+ }
+
+ std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &Out,
+ DeclarationNameKey Name,
+ data_type_ref Lookup) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
unsigned KeyLen = 1;
- switch (Name.getNameKind()) {
+ switch (Name.getKind()) {
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
@@ -3380,33 +3425,33 @@ public:
}
LE.write<uint16_t>(KeyLen);
- // 2 bytes for num of decls and 4 for each DeclID.
- unsigned DataLen = 2 + 4 * Lookup.size();
+ // 4 bytes for each DeclID.
+ unsigned DataLen = 4 * (Lookup.second - Lookup.first);
+ assert(uint16_t(DataLen) == DataLen &&
+ "too many decls for serialized lookup result");
LE.write<uint16_t>(DataLen);
return std::make_pair(KeyLen, DataLen);
}
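On the framing computed by EmitKeyDataLength above: every hash-table entry is prefixed with two little-endian uint16 lengths, and the data payload is now exactly four bytes per DeclID, which is why the added assert checks that DataLen still fits in sixteen bits. A standalone sketch of that framing, with stand-in helpers rather than clang's llvm::support::endian writer:

#include <cassert>
#include <cstdint>
#include <vector>

// Write a uint16 low byte first, matching the little-endian framing.
static void writeLE16(std::vector<uint8_t> &Out, uint16_t V) {
  Out.push_back(uint8_t(V & 0xff));
  Out.push_back(uint8_t((V >> 8) & 0xff));
}

static void emitEntryHeader(std::vector<uint8_t> &Out, unsigned KeyLen,
                            unsigned NumIDs) {
  unsigned DataLen = 4 * NumIDs; // four bytes per serialized DeclID
  assert(uint16_t(DataLen) == DataLen && "too many decls for one entry");
  writeLE16(Out, uint16_t(KeyLen));
  writeLE16(Out, uint16_t(DataLen));
}

int main() {
  std::vector<uint8_t> Out;
  emitEntryHeader(Out, /*KeyLen=*/5, /*NumIDs=*/3);
  return Out.size() == 4 ? 0 : 1;
}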
- void EmitKey(raw_ostream& Out, DeclarationName Name, unsigned) {
+ void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
- LE.write<uint8_t>(Name.getNameKind());
- switch (Name.getNameKind()) {
+ LE.write<uint8_t>(Name.getKind());
+ switch (Name.getKind()) {
case DeclarationName::Identifier:
- LE.write<uint32_t>(Writer.getIdentifierRef(Name.getAsIdentifierInfo()));
+ case DeclarationName::CXXLiteralOperatorName:
+ LE.write<uint32_t>(Writer.getIdentifierRef(Name.getIdentifier()));
return;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- LE.write<uint32_t>(Writer.getSelectorRef(Name.getObjCSelector()));
+ LE.write<uint32_t>(Writer.getSelectorRef(Name.getSelector()));
return;
case DeclarationName::CXXOperatorName:
- assert(Name.getCXXOverloadedOperator() < NUM_OVERLOADED_OPERATORS &&
+ assert(Name.getOperatorKind() < NUM_OVERLOADED_OPERATORS &&
"Invalid operator?");
- LE.write<uint8_t>(Name.getCXXOverloadedOperator());
- return;
- case DeclarationName::CXXLiteralOperatorName:
- LE.write<uint32_t>(Writer.getIdentifierRef(Name.getCXXLiteralIdentifier()));
+ LE.write<uint8_t>(Name.getOperatorKind());
return;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
@@ -3418,17 +3463,13 @@ public:
llvm_unreachable("Invalid name kind?");
}
- void EmitData(raw_ostream& Out, key_type_ref,
- data_type Lookup, unsigned DataLen) {
+ void EmitData(raw_ostream &Out, key_type_ref, data_type Lookup,
+ unsigned DataLen) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
uint64_t Start = Out.tell(); (void)Start;
- LE.write<uint16_t>(Lookup.size());
- for (DeclContext::lookup_iterator I = Lookup.begin(), E = Lookup.end();
- I != E; ++I)
- LE.write<uint32_t>(
- Writer.GetDeclRef(getDeclForLocalLookup(Writer.getLangOpts(), *I)));
-
+ for (unsigned I = Lookup.first, N = Lookup.second; I != N; ++I)
+ LE.write<uint32_t>(DeclIDs[I]);
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
};
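The data_type change above swaps owned lookup results for a (start, end) pair indexing into the trait's own DeclIDs buffer, which lets getData and ImportData funnel both local and imported results through one cheap representation. A standalone sketch of the scheme (illustrative names, not clang's):

#include <cstdint>
#include <utility>
#include <vector>

struct LookupData {
  std::vector<uint32_t> IDs; // shared backing store for all results
  using Range = std::pair<unsigned, unsigned>;

  // Append a result set once; callers keep only a half-open [Start, End).
  template <typename Coll> Range add(const Coll &Decls) {
    unsigned Start = IDs.size();
    for (uint32_t ID : Decls)
      IDs.push_back(ID);
    return {Start, (unsigned)IDs.size()};
  }

  // Emission later just walks the range; no per-entry ownership or copies.
  template <typename Fn> void forEach(Range R, Fn F) const {
    for (unsigned I = R.first; I != R.second; ++I)
      F(IDs[I]);
  }
};

int main() {
  LookupData D;
  auto R = D.add(std::vector<uint32_t>{10, 11, 12});
  unsigned Sum = 0;
  D.forEach(R, [&](uint32_t ID) { Sum += ID; });
  return Sum == 33 ? 0 : 1;
}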
@@ -3448,7 +3489,7 @@ bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result,
return true;
}
-uint32_t
+void
ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
llvm::SmallVectorImpl<char> &LookupTable) {
assert(!ConstDC->HasLazyLocalLexicalLookups &&
@@ -3456,12 +3497,12 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
"must call buildLookups first");
// FIXME: We need to build the lookups table, which is logically const.
- DeclContext *DC = const_cast<DeclContext*>(ConstDC);
+ auto *DC = const_cast<DeclContext*>(ConstDC);
assert(DC == DC->getPrimaryContext() && "only primary DC has lookup table");
// Create the on-disk hash table representation.
- llvm::OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait>
- Generator;
+ MultiOnDiskHashTableGenerator<reader::ASTDeclContextNameLookupTrait,
+ ASTDeclContextNameLookupTrait> Generator;
ASTDeclContextNameLookupTrait Trait(*this);
// The first step is to collect the declaration names which we need to
@@ -3477,11 +3518,11 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
auto &Name = Lookup.first;
auto &Result = Lookup.second;
- // If there are no local declarations in our lookup result, we don't
- // need to write an entry for the name at all unless we're rewriting
- // the decl context. If we can't write out a lookup set without
- // performing more deserialization, just skip this entry.
- if (isLookupResultExternal(Result, DC) && !isRewritten(cast<Decl>(DC)) &&
+ // If there are no local declarations in our lookup result, we
+ // don't need to write an entry for the name at all. If we can't
+ // write out a lookup set without performing more deserialization,
+ // just skip this entry.
+ if (isLookupResultExternal(Result, DC) &&
isLookupResultEntirelyExternal(Result, DC))
continue;
@@ -3596,7 +3637,7 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
switch (Name.getNameKind()) {
default:
- Generator.insert(Name, Result, Trait);
+ Generator.insert(Name, Trait.getData(Result), Trait);
break;
case DeclarationName::CXXConstructorName:
@@ -3614,17 +3655,15 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
// the key, only the kind of name is used.
if (!ConstructorDecls.empty())
Generator.insert(ConstructorDecls.front()->getDeclName(),
- DeclContext::lookup_result(ConstructorDecls), Trait);
+ Trait.getData(ConstructorDecls), Trait);
if (!ConversionDecls.empty())
Generator.insert(ConversionDecls.front()->getDeclName(),
- DeclContext::lookup_result(ConversionDecls), Trait);
+ Trait.getData(ConversionDecls), Trait);
- // Create the on-disk hash table in a buffer.
- llvm::raw_svector_ostream Out(LookupTable);
- // Make sure that no bucket is at offset 0
- using namespace llvm::support;
- endian::Writer<little>(Out).write<uint32_t>(0);
- return Generator.Emit(Out, Trait);
+ // Create the on-disk hash table. Also emit the existing imported and
+ // merged table if there is one.
+ auto *Lookups = Chain ? Chain->getLoadedLookupTables(DC) : nullptr;
+ Generator.emit(LookupTable, Trait, Lookups ? &Lookups->Table : nullptr);
}
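GenerateNameLookupTable now emits through a generator that can chain onto lookup tables loaded from imported module files, instead of returning a bucket offset for a single fresh table. A toy sketch of the chaining idea under those assumptions (std::unordered_map standing in for the on-disk table):

#include <optional>
#include <string>
#include <unordered_map>

struct ChainedTable {
  std::unordered_map<std::string, int> Local; // newly written entries
  const ChainedTable *Imported = nullptr;     // previously emitted table

  std::optional<int> lookup(const std::string &K) const {
    auto It = Local.find(K);
    if (It != Local.end())
      return It->second;
    if (Imported)
      return Imported->lookup(K); // fall back to the imported/merged data
    return std::nullopt;
  }
};

int main() {
  ChainedTable Base;
  Base.Local["foo"] = 1;
  ChainedTable Update;
  Update.Imported = &Base; // chain instead of rewriting Base's entries
  Update.Local["bar"] = 2;
  return (Update.lookup("foo") == 1 && Update.lookup("bar") == 2) ? 0 : 1;
}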
/// \brief Write the block containing all of the declaration IDs
@@ -3640,7 +3679,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
if (isa<NamespaceDecl>(DC) && Chain &&
Chain->getKeyDeclaration(cast<Decl>(DC))->isFromASTFile()) {
// Only do this once, for the first local declaration of the namespace.
- for (NamespaceDecl *Prev = cast<NamespaceDecl>(DC)->getPreviousDecl(); Prev;
+ for (auto *Prev = cast<NamespaceDecl>(DC)->getPreviousDecl(); Prev;
Prev = Prev->getPreviousDecl())
if (!Prev->isFromASTFile())
return 0;
@@ -3707,12 +3746,10 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
// Create the on-disk hash table in a buffer.
SmallString<4096> LookupTable;
- uint32_t BucketOffset = GenerateNameLookupTable(DC, LookupTable);
+ GenerateNameLookupTable(DC, LookupTable);
// Write the lookup table
- RecordData Record;
- Record.push_back(DECL_CONTEXT_VISIBLE);
- Record.push_back(BucketOffset);
+ RecordData::value_type Record[] = {DECL_CONTEXT_VISIBLE};
Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record,
LookupTable);
++NumVisibleDeclContexts;
@@ -3732,7 +3769,7 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
// Create the on-disk hash table in a buffer.
SmallString<4096> LookupTable;
- uint32_t BucketOffset = GenerateNameLookupTable(DC, LookupTable);
+ GenerateNameLookupTable(DC, LookupTable);
// If we're updating a namespace, select a key declaration as the key for the
// update record; those are the only ones that will be checked on reload.
@@ -3740,17 +3777,13 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
DC = cast<DeclContext>(Chain->getKeyDeclaration(cast<Decl>(DC)));
// Write the lookup table
- RecordData Record;
- Record.push_back(UPDATE_VISIBLE);
- Record.push_back(getDeclID(cast<Decl>(DC)));
- Record.push_back(BucketOffset);
+ RecordData::value_type Record[] = {UPDATE_VISIBLE, getDeclID(cast<Decl>(DC))};
Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable);
}
/// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
- RecordData Record;
- Record.push_back(Opts.fp_contract);
+ RecordData::value_type Record[] = {Opts.fp_contract};
Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
}
@@ -3766,81 +3799,6 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
-void ASTWriter::WriteRedeclarations() {
- RecordData LocalRedeclChains;
- SmallVector<serialization::LocalRedeclarationsInfo, 2> LocalRedeclsMap;
-
- for (unsigned I = 0, N = Redeclarations.size(); I != N; ++I) {
- const Decl *Key = Redeclarations[I];
- assert((Chain ? Chain->getKeyDeclaration(Key) == Key
- : Key->isFirstDecl()) &&
- "not the key declaration");
-
- const Decl *First = Key->getCanonicalDecl();
- const Decl *MostRecent = First->getMostRecentDecl();
-
- assert((getDeclID(First) >= NUM_PREDEF_DECL_IDS || First == Key) &&
- "should not have imported key decls for predefined decl");
-
- // If we only have a single declaration, there is no point in storing
- // a redeclaration chain.
- if (First == MostRecent)
- continue;
-
- unsigned Offset = LocalRedeclChains.size();
- unsigned Size = 0;
- LocalRedeclChains.push_back(0); // Placeholder for the size.
-
- // Collect the set of local redeclarations of this declaration.
- for (const Decl *Prev = MostRecent; Prev;
- Prev = Prev->getPreviousDecl()) {
- if (!Prev->isFromASTFile() && Prev != Key) {
- AddDeclRef(Prev, LocalRedeclChains);
- ++Size;
- }
- }
-
- LocalRedeclChains[Offset] = Size;
-
- // Reverse the set of local redeclarations, so that we store them in
- // order (since we found them in reverse order).
- std::reverse(LocalRedeclChains.end() - Size, LocalRedeclChains.end());
-
- // Add the mapping from the first ID from the AST to the set of local
- // declarations.
- LocalRedeclarationsInfo Info = { getDeclID(Key), Offset };
- LocalRedeclsMap.push_back(Info);
-
- assert(N == Redeclarations.size() &&
- "Deserialized a declaration we shouldn't have");
- }
-
- if (LocalRedeclChains.empty())
- return;
-
- // Sort the local redeclarations map by the first declaration ID,
- // since the reader will be performing binary searches on this information.
- llvm::array_pod_sort(LocalRedeclsMap.begin(), LocalRedeclsMap.end());
-
- // Emit the local redeclarations map.
- using namespace llvm;
- llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(LOCAL_REDECLARATIONS_MAP));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
- unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
-
- RecordData Record;
- Record.push_back(LOCAL_REDECLARATIONS_MAP);
- Record.push_back(LocalRedeclsMap.size());
- Stream.EmitRecordWithBlob(AbbrevID, Record,
- reinterpret_cast<char*>(LocalRedeclsMap.data()),
- LocalRedeclsMap.size() * sizeof(LocalRedeclarationsInfo));
-
- // Emit the redeclaration chains.
- Stream.EmitRecord(LOCAL_REDECLARATIONS, LocalRedeclChains);
-}
-
void ASTWriter::WriteObjCCategories() {
SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
RecordData Categories;
@@ -3877,19 +3835,18 @@ void ASTWriter::WriteObjCCategories() {
// Emit the categories map.
using namespace llvm;
- llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(OBJC_CATEGORIES_MAP));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
-
- RecordData Record;
- Record.push_back(OBJC_CATEGORIES_MAP);
- Record.push_back(CategoriesMap.size());
- Stream.EmitRecordWithBlob(AbbrevID, Record,
- reinterpret_cast<char*>(CategoriesMap.data()),
+
+ RecordData::value_type Record[] = {OBJC_CATEGORIES_MAP, CategoriesMap.size()};
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char *>(CategoriesMap.data()),
CategoriesMap.size() * sizeof(ObjCCategoriesInfo));
-
+
// Emit the category lists.
Stream.EmitRecord(OBJC_CATEGORIES, Categories);
}
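The categories map above is emitted with the usual blob trick: a vector of trivially copyable structs is written out as raw bytes by reinterpreting its storage, with the entry count carried in the record. A standalone sketch (stand-in emitter, not Stream.EmitRecordWithBlob):

#include <string>
#include <vector>

struct CategoriesInfo { // trivially copyable, so byte-copying it is safe
  unsigned DefinitionID;
  unsigned Offset;
};

static std::string emitBlob(const std::vector<CategoriesInfo> &Map) {
  return std::string(reinterpret_cast<const char *>(Map.data()),
                     Map.size() * sizeof(CategoriesInfo));
}

int main() {
  std::vector<CategoriesInfo> Map = {{1, 0}, {2, 5}};
  return emitBlob(Map).size() == 2 * sizeof(CategoriesInfo) ? 0 : 1;
}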
@@ -3908,10 +3865,8 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
AddDeclRef(LPT->D, Record);
Record.push_back(LPT->Toks.size());
- for (CachedTokens::iterator TokIt = LPT->Toks.begin(),
- TokEnd = LPT->Toks.end();
- TokIt != TokEnd; ++TokIt) {
- AddToken(*TokIt, Record);
+ for (const auto &Tok : LPT->Toks) {
+ AddToken(Tok, Record);
}
}
Stream.EmitRecord(LATE_PARSED_TEMPLATE, Record);
@@ -3925,6 +3880,41 @@ void ASTWriter::WriteOptimizePragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(OPTIMIZE_PRAGMA_OPTIONS, Record);
}
+void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
+ ModuleFileExtensionWriter &Writer) {
+ // Enter the extension block.
+ Stream.EnterSubblock(EXTENSION_BLOCK_ID, 4);
+
+ // Emit the metadata record abbreviation.
+ auto *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(EXTENSION_METADATA));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ unsigned Abbrev = Stream.EmitAbbrev(Abv);
+
+ // Emit the metadata record.
+ RecordData Record;
+ auto Metadata = Writer.getExtension()->getExtensionMetadata();
+ Record.push_back(EXTENSION_METADATA);
+ Record.push_back(Metadata.MajorVersion);
+ Record.push_back(Metadata.MinorVersion);
+ Record.push_back(Metadata.BlockName.size());
+ Record.push_back(Metadata.UserInfo.size());
+ SmallString<64> Buffer;
+ Buffer += Metadata.BlockName;
+ Buffer += Metadata.UserInfo;
+ Stream.EmitRecordWithBlob(Abbrev, Record, Buffer);
+
+ // Emit the contents of the extension block.
+ Writer.writeExtensionContents(SemaRef, Stream);
+
+ // Exit the extension block.
+ Stream.ExitBlock();
+}
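The metadata record above packs the extension's BlockName and UserInfo into a single blob, with their lengths carried in the record so a reader can split the blob apart again. A standalone sketch of that pack/unpack round trip (illustrative types, not the ModuleFileExtension API):

#include <string>
#include <vector>

struct Metadata {
  unsigned MajorVersion, MinorVersion;
  std::string BlockName, UserInfo;
};

// Writer side: one blob, with the split points carried in the record.
static std::vector<unsigned> packRecord(const Metadata &M, std::string &Blob) {
  Blob = M.BlockName + M.UserInfo;
  return {M.MajorVersion, M.MinorVersion, (unsigned)M.BlockName.size(),
          (unsigned)M.UserInfo.size()};
}

int main() {
  Metadata M{1, 0, "my.extension", "user-data"};
  std::string Blob;
  std::vector<unsigned> Rec = packRecord(M, Blob);
  // Reader side: slice the blob back apart using the recorded lengths.
  std::string Name = Blob.substr(0, Rec[2]);
  std::string Info = Blob.substr(Rec[2], Rec[3]);
  return (Name == M.BlockName && Info == M.UserInfo) ? 0 : 1;
}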
+
//===----------------------------------------------------------------------===//
// General Serialization Routines
//===----------------------------------------------------------------------===//
@@ -3933,9 +3923,7 @@ void ASTWriter::WriteOptimizePragmaOptions(Sema &SemaRef) {
void ASTWriter::WriteAttributes(ArrayRef<const Attr*> Attrs,
RecordDataImpl &Record) {
Record.push_back(Attrs.size());
- for (ArrayRef<const Attr *>::iterator i = Attrs.begin(),
- e = Attrs.end(); i != e; ++i){
- const Attr *A = *i;
+ for (const auto *A : Attrs) {
Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
AddSourceRange(A->getRange(), Record);
@@ -3986,7 +3974,7 @@ void ASTWriter::AddPath(StringRef Path, RecordDataImpl &Record) {
AddString(FilePath, Record);
}
-void ASTWriter::EmitRecordWithPath(unsigned Abbrev, RecordDataImpl &Record,
+void ASTWriter::EmitRecordWithPath(unsigned Abbrev, RecordDataRef Record,
StringRef Path) {
SmallString<128> FilePath(Path);
PreparePathForOutput(FilePath);
@@ -4028,28 +4016,35 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
SelectorOffsets[ID - FirstSelectorID] = Offset;
}
-ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
+ASTWriter::ASTWriter(
+ llvm::BitstreamWriter &Stream,
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ bool IncludeTimestamps)
: Stream(Stream), Context(nullptr), PP(nullptr), Chain(nullptr),
- WritingModule(nullptr), WritingAST(false),
- DoneWritingDeclsAndTypes(false), ASTHasCompilerErrors(false),
- FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
- FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
- FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
- FirstMacroID(NUM_PREDEF_MACRO_IDS), NextMacroID(FirstMacroID),
- FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
+ WritingModule(nullptr), IncludeTimestamps(IncludeTimestamps),
+ WritingAST(false), DoneWritingDeclsAndTypes(false),
+ ASTHasCompilerErrors(false), FirstDeclID(NUM_PREDEF_DECL_IDS),
+ NextDeclID(FirstDeclID), FirstTypeID(NUM_PREDEF_TYPE_IDS),
+ NextTypeID(FirstTypeID), FirstIdentID(NUM_PREDEF_IDENT_IDS),
+ NextIdentID(FirstIdentID), FirstMacroID(NUM_PREDEF_MACRO_IDS),
+ NextMacroID(FirstMacroID), FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
NextSubmoduleID(FirstSubmoduleID),
FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0),
NumLexicalDeclContexts(0), NumVisibleDeclContexts(0),
NextCXXBaseSpecifiersID(1), NextCXXCtorInitializersID(1),
- TypeExtQualAbbrev(0),
- TypeFunctionProtoAbbrev(0), DeclParmVarAbbrev(0),
+ TypeExtQualAbbrev(0), TypeFunctionProtoAbbrev(0), DeclParmVarAbbrev(0),
DeclContextLexicalAbbrev(0), DeclContextVisibleLookupAbbrev(0),
UpdateVisibleAbbrev(0), DeclRecordAbbrev(0), DeclTypedefAbbrev(0),
DeclVarAbbrev(0), DeclFieldAbbrev(0), DeclEnumAbbrev(0),
DeclObjCIvarAbbrev(0), DeclCXXMethodAbbrev(0), DeclRefExprAbbrev(0),
CharacterLiteralAbbrev(0), IntegerLiteralAbbrev(0),
- ExprImplicitCastAbbrev(0) {}
+ ExprImplicitCastAbbrev(0) {
+ for (const auto &Ext : Extensions) {
+ if (auto Writer = Ext->createExtensionWriter(*this))
+ ModuleFileExtensionWriters.push_back(std::move(Writer));
+ }
+}
ASTWriter::~ASTWriter() {
llvm::DeleteContainerSeconds(FileDeclIDs);
@@ -4060,12 +4055,15 @@ const LangOptions &ASTWriter::getLangOpts() const {
return Context->getLangOpts();
}
-void ASTWriter::WriteAST(Sema &SemaRef,
- const std::string &OutputFile,
- Module *WritingModule, StringRef isysroot,
- bool hasErrors) {
+time_t ASTWriter::getTimestampForOutput(const FileEntry *E) const {
+ return IncludeTimestamps ? E->getModificationTime() : 0;
+}
+
+uint64_t ASTWriter::WriteAST(Sema &SemaRef, const std::string &OutputFile,
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors) {
WritingAST = true;
-
+
ASTHasCompilerErrors = hasErrors;
// Emit the file header.
@@ -4079,13 +4077,15 @@ void ASTWriter::WriteAST(Sema &SemaRef,
Context = &SemaRef.Context;
PP = &SemaRef.PP;
this->WritingModule = WritingModule;
- WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule);
+ ASTFileSignature Signature =
+ WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule);
Context = nullptr;
PP = nullptr;
this->WritingModule = nullptr;
this->BaseDirectory.clear();
WritingAST = false;
+ return Signature;
}
template<typename Vector>
@@ -4097,10 +4097,9 @@ static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
}
}
-void ASTWriter::WriteASTCore(Sema &SemaRef,
- StringRef isysroot,
- const std::string &OutputFile,
- Module *WritingModule) {
+uint64_t ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
+ const std::string &OutputFile,
+ Module *WritingModule) {
using namespace llvm;
bool isModule = WritingModule != nullptr;
@@ -4117,8 +4116,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
if (D) {
assert(D->isCanonicalDecl() && "predefined decl is not canonical");
DeclIDs[D] = ID;
- if (D->getMostRecentDecl() != D)
- Redeclarations.push_back(D);
}
};
RegisterPredefDecl(Context.getTranslationUnitDecl(),
@@ -4133,7 +4130,12 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
RegisterPredefDecl(Context.ObjCInstanceTypeDecl,
PREDEF_DECL_OBJC_INSTANCETYPE_ID);
RegisterPredefDecl(Context.BuiltinVaListDecl, PREDEF_DECL_BUILTIN_VA_LIST_ID);
+ RegisterPredefDecl(Context.VaListTagDecl, PREDEF_DECL_VA_LIST_TAG);
+ RegisterPredefDecl(Context.BuiltinMSVaListDecl,
+ PREDEF_DECL_BUILTIN_MS_VA_LIST_ID);
RegisterPredefDecl(Context.ExternCContext, PREDEF_DECL_EXTERN_C_CONTEXT_ID);
+ RegisterPredefDecl(Context.MakeIntegerSeqDecl,
+ PREDEF_DECL_MAKE_INTEGER_SEQ_ID);
// Build a record containing all of the tentative definitions in this file, in
// TentativeDefinitions order. Generally, this record will be empty for
@@ -4187,11 +4189,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
// Build a record containing all of pending implicit instantiations.
RecordData PendingInstantiations;
- for (std::deque<Sema::PendingImplicitInstantiation>::iterator
- I = SemaRef.PendingInstantiations.begin(),
- N = SemaRef.PendingInstantiations.end(); I != N; ++I) {
- AddDeclRef(I->first, PendingInstantiations);
- AddSourceLocation(I->second, PendingInstantiations);
+ for (const auto &I : SemaRef.PendingInstantiations) {
+ AddDeclRef(I.first, PendingInstantiations);
+ AddSourceLocation(I.second, PendingInstantiations);
}
assert(SemaRef.PendingLocalImplicitInstantiations.empty() &&
"There are local ones at end of translation unit!");
@@ -4210,12 +4210,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
// Build a record containing all of the known namespaces.
RecordData KnownNamespaces;
- for (llvm::MapVector<NamespaceDecl*, bool>::iterator
- I = SemaRef.KnownNamespaces.begin(),
- IEnd = SemaRef.KnownNamespaces.end();
- I != IEnd; ++I) {
- if (!I->second)
- AddDeclRef(I->first, KnownNamespaces);
+ for (const auto &I : SemaRef.KnownNamespaces) {
+ if (!I.second)
+ AddDeclRef(I.first, KnownNamespaces);
}
// Build a record of all used, undefined objects that require definitions.
@@ -4223,10 +4220,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
SemaRef.getUndefinedButUsed(Undefined);
- for (SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> >::iterator
- I = Undefined.begin(), E = Undefined.end(); I != E; ++I) {
- AddDeclRef(I->first, UndefinedButUsed);
- AddSourceLocation(I->second, UndefinedButUsed);
+ for (const auto &I : Undefined) {
+ AddDeclRef(I.first, UndefinedButUsed);
+ AddSourceLocation(I.second, UndefinedButUsed);
}
// Build a record containing all delete-expressions that we would like to
@@ -4244,41 +4240,43 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
}
// Write the control block
- WriteControlBlock(PP, Context, isysroot, OutputFile);
+ uint64_t Signature = WriteControlBlock(PP, Context, isysroot, OutputFile);
// Write the remaining AST contents.
- RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
// This is so that older clang versions, before the introduction
// of the control block, can read and reject the newer PCH format.
- Record.clear();
- Record.push_back(VERSION_MAJOR);
- Stream.EmitRecord(METADATA_OLD_FORMAT, Record);
+ {
+ RecordData Record = {VERSION_MAJOR};
+ Stream.EmitRecord(METADATA_OLD_FORMAT, Record);
+ }
// Create a lexical update block containing all of the declarations in the
// translation unit that do not come from other AST files.
const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
- SmallVector<KindDeclIDPair, 64> NewGlobalDecls;
- for (const auto *I : TU->noload_decls()) {
- if (!I->isFromASTFile())
- NewGlobalDecls.push_back(std::make_pair(I->getKind(), GetDeclRef(I)));
+ SmallVector<uint32_t, 128> NewGlobalKindDeclPairs;
+ for (const auto *D : TU->noload_decls()) {
+ if (!D->isFromASTFile()) {
+ NewGlobalKindDeclPairs.push_back(D->getKind());
+ NewGlobalKindDeclPairs.push_back(GetDeclRef(D));
+ }
}
- llvm::BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
+ auto *Abv = new llvm::BitCodeAbbrev();
Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
unsigned TuUpdateLexicalAbbrev = Stream.EmitAbbrev(Abv);
- Record.clear();
- Record.push_back(TU_UPDATE_LEXICAL);
- Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
- bytes(NewGlobalDecls));
-
+ {
+ RecordData::value_type Record[] = {TU_UPDATE_LEXICAL};
+ Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
+ bytes(NewGlobalKindDeclPairs));
+ }
+
// And a visible updates block for the translation unit.
Abv = new llvm::BitCodeAbbrev();
Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 32));
Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv);
WriteDeclContextVisibleUpdate(TU);
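The lexical update block above now pushes each declaration's kind and ID as two consecutive uint32_t values rather than serializing a vector of pairs; one plausible motivation is that the flat array gives the blob a fixed, padding-free layout. A tiny sketch:

#include <cstdint>
#include <vector>

int main() {
  struct DeclStub { uint32_t Kind, ID; };
  std::vector<DeclStub> Decls = {{3, 100}, {7, 101}};

  std::vector<uint32_t> Flat;
  for (const DeclStub &D : Decls) {
    Flat.push_back(D.Kind); // kind and ID interleaved:
    Flat.push_back(D.ID);   // two u32 slots per declaration
  }
  return Flat.size() == 2 * Decls.size() ? 0 : 1;
}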
@@ -4310,29 +4308,27 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
// Make sure visible decls, added to DeclContexts previously loaded from
// an AST file, are registered for serialization.
- for (SmallVectorImpl<const Decl *>::iterator
- I = UpdatingVisibleDecls.begin(),
- E = UpdatingVisibleDecls.end(); I != E; ++I) {
- GetDeclRef(*I);
+ for (const auto *I : UpdatingVisibleDecls) {
+ GetDeclRef(I);
}
// Make sure all decls associated with an identifier are registered for
- // serialization.
- llvm::SmallVector<const IdentifierInfo*, 256> IIs;
- for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
- IDEnd = PP.getIdentifierTable().end();
- ID != IDEnd; ++ID) {
- const IdentifierInfo *II = ID->second;
- if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
- IIs.push_back(II);
- }
- // Sort the identifiers to visit based on their name.
- std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
- for (const IdentifierInfo *II : IIs) {
- for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
- DEnd = SemaRef.IdResolver.end();
- D != DEnd; ++D) {
- GetDeclRef(*D);
+ // serialization, if we're storing decls with identifiers.
+ if (!WritingModule || !getLangOpts().CPlusPlus) {
+ llvm::SmallVector<const IdentifierInfo*, 256> IIs;
+ for (const auto &ID : PP.getIdentifierTable()) {
+ const IdentifierInfo *II = ID.second;
+ if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
+ IIs.push_back(II);
+ }
+ // Sort the identifiers to visit based on their name.
+ std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ for (const IdentifierInfo *II : IIs) {
+ for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
+ DEnd = SemaRef.IdResolver.end();
+ D != DEnd; ++D) {
+ GetDeclRef(*D);
+ }
}
}
@@ -4363,7 +4359,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
// c++-base-specifiers-id:i32
// type-id:i32)
//
- llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ auto *Abbrev = new BitCodeAbbrev();
Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned ModuleOffsetMapAbbrev = Stream.EmitAbbrev(Abbrev);
@@ -4402,8 +4398,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
writeBaseIDOrNone(M->BaseTypeIndex, M->LocalNumTypes);
}
}
- Record.clear();
- Record.push_back(MODULE_OFFSET_MAP);
+ RecordData::value_type Record[] = {MODULE_OFFSET_MAP};
Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
Buffer.data(), Buffer.size());
}
@@ -4415,10 +4410,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
WriteTypeAbbrevs();
WriteDeclAbbrevs();
- for (DeclsToRewriteTy::iterator I = DeclsToRewrite.begin(),
- E = DeclsToRewrite.end();
- I != E; ++I)
- DeclTypesToEmit.push(const_cast<Decl*>(*I));
do {
WriteDeclUpdatesBlocks(DeclUpdatesOffsetsRecord);
while (!DeclTypesToEmit.empty()) {
@@ -4442,12 +4433,12 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
WriteCXXCtorInitializersOffsets();
WriteFileDeclIDsMap();
WriteSourceManagerBlock(Context.getSourceManager(), PP);
-
WriteComments();
WritePreprocessor(PP, isModule);
WriteHeaderSearch(PP.getHeaderSearchInfo());
WriteSelectors(SemaRef);
WriteReferencedSelectorsPool(SemaRef);
+ WriteLateParsedTemplates(SemaRef);
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
@@ -4561,20 +4552,21 @@ void ASTWriter::WriteASTCore(Sema &SemaRef,
}
WriteDeclReplacementsBlock();
- WriteRedeclarations();
WriteObjCCategories();
- WriteLateParsedTemplates(SemaRef);
if (!WritingModule)
WriteOptimizePragmaOptions(SemaRef);
// Some simple statistics
- Record.clear();
- Record.push_back(NumStatements);
- Record.push_back(NumMacros);
- Record.push_back(NumLexicalDeclContexts);
- Record.push_back(NumVisibleDeclContexts);
+ RecordData::value_type Record[] = {
+ NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts};
Stream.EmitRecord(STATISTICS, Record);
Stream.ExitBlock();
+
+ // Write the module file extension blocks.
+ for (const auto &ExtWriter : ModuleFileExtensionWriters)
+ WriteModuleFileExtension(SemaRef, *ExtWriter);
+
+ return Signature;
}
void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
@@ -4586,8 +4578,6 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
for (auto &DeclUpdate : LocalUpdates) {
const Decl *D = DeclUpdate.first;
- if (isRewritten(D))
- continue; // The decl will be written completely, no need to store updates.
bool HasUpdatedBody = false;
RecordData Record;
@@ -4699,7 +4689,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
}
if (HasUpdatedBody) {
- const FunctionDecl *Def = cast<FunctionDecl>(D);
+ const auto *Def = cast<FunctionDecl>(D);
Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
Record.push_back(Def->isInlined());
AddSourceLocation(Def->getInnerLocStart(), Record);
@@ -4720,11 +4710,10 @@ void ASTWriter::WriteDeclReplacementsBlock() {
return;
RecordData Record;
- for (SmallVectorImpl<ReplacedDeclInfo>::iterator
- I = ReplacedDecls.begin(), E = ReplacedDecls.end(); I != E; ++I) {
- Record.push_back(I->ID);
- Record.push_back(I->Offset);
- Record.push_back(I->Loc);
+ for (const auto &I : ReplacedDecls) {
+ Record.push_back(I.ID);
+ Record.push_back(I.Offset);
+ Record.push_back(I.Loc);
}
Stream.EmitRecord(DECL_REPLACEMENTS, Record);
}
@@ -5247,9 +5236,8 @@ void ASTWriter::AddTemplateName(TemplateName Name, RecordDataImpl &Record) {
case TemplateName::OverloadedTemplate: {
OverloadedTemplateStorage *OvT = Name.getAsOverloadedTemplate();
Record.push_back(OvT->size());
- for (OverloadedTemplateStorage::iterator I = OvT->begin(), E = OvT->end();
- I != E; ++I)
- AddDeclRef(*I, Record);
+ for (const auto &I : *OvT)
+ AddDeclRef(I, Record);
break;
}
@@ -5339,10 +5327,8 @@ ASTWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
AddSourceLocation(TemplateParams->getLAngleLoc(), Record);
AddSourceLocation(TemplateParams->getRAngleLoc(), Record);
Record.push_back(TemplateParams->size());
- for (TemplateParameterList::const_iterator
- P = TemplateParams->begin(), PEnd = TemplateParams->end();
- P != PEnd; ++P)
- AddDeclRef(*P, Record);
+ for (const auto &P : *TemplateParams)
+ AddDeclRef(P, Record);
}
/// \brief Emit a template argument list.
@@ -5657,7 +5643,7 @@ void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
assert(D->isCompleteDefinition());
assert(!WritingAST && "Already writing the AST!");
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
// We are interested when a PCH decl is modified.
if (RD->isFromASTFile()) {
// A forward reference was mutated into a definition. Rewrite it.
@@ -5671,26 +5657,52 @@ void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
}
}
+static bool isImportedDeclContext(ASTReader *Chain, const Decl *D) {
+ if (D->isFromASTFile())
+ return true;
+
+ // If we've not loaded any modules, this can't be imported.
+ if (!Chain || !Chain->getModuleManager().size())
+ return false;
+
+ // The predefined __va_list_tag struct is imported if we imported any decls.
+ // FIXME: This is a gross hack.
+ return D == D->getASTContext().getVaListTagDecl();
+}
+
void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
// TU and namespaces are handled elsewhere.
if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC))
return;
- if (!(!D->isFromASTFile() && cast<Decl>(DC)->isFromASTFile()))
- return; // Not a source decl added to a DeclContext from PCH.
+ // We're only interested in cases where a local declaration is added to an
+ // imported context.
+ if (D->isFromASTFile() || !isImportedDeclContext(Chain, cast<Decl>(DC)))
+ return;
+ assert(DC == DC->getPrimaryContext() && "added to non-primary context");
assert(!getDefinitiveDeclContext(DC) && "DeclContext not definitive!");
assert(!WritingAST && "Already writing the AST!");
- UpdatedDeclContexts.insert(DC);
+ if (UpdatedDeclContexts.insert(DC) && !cast<Decl>(DC)->isFromASTFile()) {
+ // We're adding a visible declaration to a predefined decl context. Ensure
+ // that we write out all of its lookup results so we don't get a nasty
+ // surprise when we try to emit its lookup table.
+ for (auto *Child : DC->decls())
+ UpdatingVisibleDecls.push_back(Child);
+ }
UpdatingVisibleDecls.push_back(D);
}
void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
assert(D->isImplicit());
- if (!(!D->isFromASTFile() && RD->isFromASTFile()))
- return; // Not a source member added to a class from PCH.
+
+ // We're only interested in cases where a local declaration is added to an
+ // imported context.
+ if (D->isFromASTFile() || !isImportedDeclContext(Chain, RD))
+ return;
+
if (!isa<CXXMethodDecl>(D))
- return; // We are interested in lazily declared implicit methods.
+ return;
// A decl coming from PCH was modified.
assert(RD->isCompleteDefinition());
@@ -5698,42 +5710,6 @@ void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
DeclUpdates[RD].push_back(DeclUpdate(UPD_CXX_ADDED_IMPLICIT_MEMBER, D));
}
-void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
- const ClassTemplateSpecializationDecl *D) {
- // The specializations set is kept in the canonical template.
- TD = TD->getCanonicalDecl();
- if (!(!D->isFromASTFile() && TD->isFromASTFile()))
- return; // Not a source specialization added to a template from PCH.
-
- assert(!WritingAST && "Already writing the AST!");
- DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
- D));
-}
-
-void ASTWriter::AddedCXXTemplateSpecialization(
- const VarTemplateDecl *TD, const VarTemplateSpecializationDecl *D) {
- // The specializations set is kept in the canonical template.
- TD = TD->getCanonicalDecl();
- if (!(!D->isFromASTFile() && TD->isFromASTFile()))
- return; // Not a source specialization added to a template from PCH.
-
- assert(!WritingAST && "Already writing the AST!");
- DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
- D));
-}
-
-void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
- const FunctionDecl *D) {
- // The specializations set is kept in the canonical template.
- TD = TD->getCanonicalDecl();
- if (!(!D->isFromASTFile() && TD->isFromASTFile()))
- return; // Not a source specialization added to a template from PCH.
-
- assert(!WritingAST && "Already writing the AST!");
- DeclUpdates[TD].push_back(DeclUpdate(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
- D));
-}
-
void ASTWriter::ResolvedExceptionSpec(const FunctionDecl *FD) {
assert(!DoneWritingDeclsAndTypes && "Already done writing updates!");
if (!Chain) return;
@@ -5807,21 +5783,6 @@ void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const_cast<ObjCInterfaceDecl *>(IFD->getDefinition()));
}
-
-void ASTWriter::AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
- const ObjCPropertyDecl *OrigProp,
- const ObjCCategoryDecl *ClassExt) {
- const ObjCInterfaceDecl *D = ClassExt->getClassInterface();
- if (!D)
- return;
-
- assert(!WritingAST && "Already writing the AST!");
- if (!D->isFromASTFile())
- return; // Declaration not imported from PCH.
-
- RewriteDecl(D);
-}
-
void ASTWriter::DeclarationMarkedUsed(const Decl *D) {
assert(!WritingAST && "Already writing the AST!");
if (!D->isFromASTFile())
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index fd6708dd5c3f..20ca6d6fd512 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -159,6 +159,22 @@ namespace clang {
Writer.AddStmt(FD->getBody());
}
+ /// Add to the record the first declaration from each module file that
+ /// provides a declaration of D. The intent is to provide a sufficient
+ /// set such that reloading this set will load all current redeclarations.
+ void AddFirstDeclFromEachModule(const Decl *D, bool IncludeLocal) {
+ llvm::MapVector<ModuleFile*, const Decl*> Firsts;
+ // FIXME: We can skip entries that we know are implied by others.
+ for (const Decl *R = D->getMostRecentDecl(); R; R = R->getPreviousDecl()) {
+ if (R->isFromASTFile())
+ Firsts[Writer.Chain->getOwningModuleFile(R)] = R;
+ else if (IncludeLocal)
+ Firsts[nullptr] = R;
+ }
+ for (const auto &F : Firsts)
+ Writer.AddDeclRef(F.second, Record);
+ }
+
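AddFirstDeclFromEachModule walks the redeclaration chain from newest to oldest and lets older hits overwrite newer ones in a per-module map, so each module file ends up contributing its earliest declaration. A standalone sketch of that walk (std::map standing in for llvm::MapVector, and stand-in Decl fields):

#include <map>
#include <vector>

struct Decl {
  int Module;       // owning module file (0 would mean "local" here)
  const Decl *Prev; // previous declaration in the redecl chain
};

static std::vector<const Decl *> firstPerModule(const Decl *MostRecent) {
  std::map<int, const Decl *> Firsts;
  for (const Decl *R = MostRecent; R; R = R->Prev)
    Firsts[R->Module] = R; // older decls overwrite newer ones per module
  std::vector<const Decl *> Out;
  for (const auto &Entry : Firsts)
    Out.push_back(Entry.second);
  return Out;
}

int main() {
  Decl A{1, nullptr}, B{2, &A}, C{1, &B}; // chain, newest first: C -> B -> A
  return firstPerModule(&C).size() == 2 ? 0 : 1;
}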
/// Get the specialization decl from an entry in the specialization list.
template <typename EntryType>
typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
@@ -192,22 +208,48 @@ namespace clang {
auto &&PartialSpecializations = getPartialSpecializations(Common);
ArrayRef<DeclID> LazySpecializations;
if (auto *LS = Common->LazySpecializations)
- LazySpecializations = ArrayRef<DeclID>(LS + 1, LS + 1 + LS[0]);
+ LazySpecializations = llvm::makeArrayRef(LS + 1, LS[0]);
+
+ // Add a slot to the record for the number of specializations.
+ unsigned I = Record.size();
+ Record.push_back(0);
- Record.push_back(Specializations.size() +
- PartialSpecializations.size() +
- LazySpecializations.size());
for (auto &Entry : Specializations) {
auto *D = getSpecializationDecl(Entry);
assert(D->isCanonicalDecl() && "non-canonical decl in set");
- Writer.AddDeclRef(D, Record);
+ AddFirstDeclFromEachModule(D, /*IncludeLocal*/true);
}
for (auto &Entry : PartialSpecializations) {
auto *D = getSpecializationDecl(Entry);
assert(D->isCanonicalDecl() && "non-canonical decl in set");
- Writer.AddDeclRef(D, Record);
+ AddFirstDeclFromEachModule(D, /*IncludeLocal*/true);
}
Record.append(LazySpecializations.begin(), LazySpecializations.end());
+
+ // Update the size entry we added earlier.
+ Record[I] = Record.size() - I - 1;
+ }
+
+ /// Ensure that this template specialization is associated with the specified
+ /// template on reload.
+ void RegisterTemplateSpecialization(const Decl *Template,
+ const Decl *Specialization) {
+ Template = Template->getCanonicalDecl();
+
+ // If the canonical template is local, we'll write out this specialization
+ // when we emit it.
+ // FIXME: We can do the same thing if there is any local declaration of
+ // the template, to avoid emitting an update record.
+ if (!Template->isFromASTFile())
+ return;
+
+ // We only need to associate the first local declaration of the
+ // specialization. The other declarations will get pulled in by it.
+ if (Writer.getFirstLocalDecl(Specialization) != Specialization)
+ return;
+
+ Writer.DeclUpdates[Template].push_back(ASTWriter::DeclUpdate(
+ UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION, Specialization));
}
};
}
@@ -218,7 +260,7 @@ void ASTDeclWriter::Visit(Decl *D) {
// Source locations require array (variable-length) abbreviations. The
// abbreviation infrastructure requires that arrays are encoded last, so
// we handle it here in the case of those classes derived from DeclaratorDecl
- if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)){
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
Writer.AddTypeSourceInfo(DD->getTypeSourceInfo(), Record);
}
@@ -234,7 +276,10 @@ void ASTDeclWriter::Visit(Decl *D) {
void ASTDeclWriter::VisitDecl(Decl *D) {
Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
- Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
+ if (D->getDeclContext() != D->getLexicalDeclContext())
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
+ else
+ Record.push_back(0);
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
if (D->hasAttrs())
@@ -298,7 +343,8 @@ void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
VisitTypedefNameDecl(D);
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
@@ -336,9 +382,6 @@ void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
Record.push_back(2);
Writer.AddDeclRef(TD, Record);
Writer.AddIdentifierRef(TD->getDeclName().getAsIdentifierInfo(), Record);
- } else if (auto *DD = D->getDeclaratorForAnonDecl()) {
- Record.push_back(3);
- Writer.AddDeclRef(DD, Record);
} else {
Record.push_back(0);
}
@@ -363,12 +406,12 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
Writer.AddDeclRef(nullptr, Record);
}
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
!D->getTypedefNameForAnonDecl() &&
- !D->getDeclaratorForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
@@ -392,12 +435,12 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
Record.push_back(D->hasObjectMember());
Record.push_back(D->hasVolatileMember());
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
!D->getTypedefNameForAnonDecl() &&
- !D->getDeclaratorForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
@@ -479,6 +522,9 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
case FunctionDecl::TK_FunctionTemplateSpecialization: {
FunctionTemplateSpecializationInfo *
FTSInfo = D->getTemplateSpecializationInfo();
+
+ RegisterTemplateSpecialization(FTSInfo->getTemplate(), D);
+
Writer.AddDeclRef(FTSInfo->getTemplate(), Record);
Record.push_back(FTSInfo->getTemplateSpecializationKind());
@@ -648,7 +694,8 @@ void ASTDeclWriter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
Record.push_back(D->getAccessControl());
Record.push_back(D->getSynthesize());
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->isInvalidDecl() &&
@@ -780,7 +827,8 @@ void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
if (!D->getDeclName())
Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->isInvalidDecl() &&
@@ -854,7 +902,8 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(VarNotTemplate);
}
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->isInvalidDecl() &&
@@ -902,7 +951,8 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
// we dynamically check for the properties that we optimize for, but don't
// know are true of all PARM_VAR_DECLs.
- if (!D->hasAttrs() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->hasAttrs() &&
!D->hasExtInfo() &&
!D->isImplicit() &&
!D->isUsed(false) &&
@@ -1122,7 +1172,8 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
Record.push_back(0);
}
- if (D->getFirstDecl() == D->getMostRecentDecl() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ D->getFirstDecl() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->hasAttrs() &&
!D->isTopLevelDeclInObjCContainer() &&
@@ -1249,6 +1300,8 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
+ RegisterTemplateSpecialization(D->getSpecializedTemplate(), D);
+
VisitCXXRecordDecl(D);
llvm::PointerUnion<ClassTemplateDecl *,
@@ -1308,6 +1361,8 @@ void ASTDeclWriter::VisitVarTemplateDecl(VarTemplateDecl *D) {
void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
+ RegisterTemplateSpecialization(D->getSpecializedTemplate(), D);
+
VisitVarDecl(D);
llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *>
@@ -1478,48 +1533,77 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
Record.push_back(VisibleOffset);
}
-/// Determine whether D is the first declaration in its redeclaration chain that
-/// is not from an AST file.
-template <typename T>
-static bool isFirstLocalDecl(Redeclarable<T> *D) {
- assert(D && !static_cast<T*>(D)->isFromASTFile());
- do
- D = D->getPreviousDecl();
- while (D && static_cast<T*>(D)->isFromASTFile());
- return !D;
+const Decl *ASTWriter::getFirstLocalDecl(const Decl *D) {
+ /// \brief Is this a local declaration (that is, one that will be written to
+ /// our AST file)? This is the case for declarations that are neither imported
+ /// from another AST file nor predefined.
+ auto IsLocalDecl = [&](const Decl *D) -> bool {
+ if (D->isFromASTFile())
+ return false;
+ auto I = DeclIDs.find(D);
+ return (I == DeclIDs.end() || I->second >= NUM_PREDEF_DECL_IDS);
+ };
+
+ assert(IsLocalDecl(D) && "expected a local declaration");
+
+ const Decl *Canon = D->getCanonicalDecl();
+ if (IsLocalDecl(Canon))
+ return Canon;
+
+ const Decl *&CacheEntry = FirstLocalDeclCache[Canon];
+ if (CacheEntry)
+ return CacheEntry;
+
+ for (const Decl *Redecl = D; Redecl; Redecl = Redecl->getPreviousDecl())
+ if (IsLocalDecl(Redecl))
+ D = Redecl;
+ return CacheEntry = D;
}
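getFirstLocalDecl above memoizes per canonical declaration: it takes a reference into the cache before walking the chain, then stores the oldest local redeclaration through that reference so the result is recorded without a second map lookup. A standalone sketch under those assumptions (toy Decl, not clang's):

#include <unordered_map>

struct Decl {
  bool Local;        // will this decl be written to our AST file?
  const Decl *Prev;  // previous redeclaration
  const Decl *Canon; // canonical declaration of the whole chain
};

static std::unordered_map<const Decl *, const Decl *> FirstLocalCache;

static const Decl *getFirstLocal(const Decl *D) {
  if (D->Canon->Local)
    return D->Canon; // fast path: the canonical declaration is local
  const Decl *&Entry = FirstLocalCache[D->Canon];
  if (Entry)
    return Entry;    // memoized by an earlier call
  for (const Decl *R = D; R; R = R->Prev)
    if (R->Local)
      D = R;         // keep the oldest local redeclaration seen
  return Entry = D;
}

int main() {
  Decl A{false, nullptr, nullptr}, B{true, &A, &A}, C{true, &B, &A};
  A.Canon = &A;
  return getFirstLocal(&C) == &B ? 0 : 1;
}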
template <typename T>
void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
T *First = D->getFirstDecl();
T *MostRecent = First->getMostRecentDecl();
+ T *DAsT = static_cast<T *>(D);
if (MostRecent != First) {
- assert(isRedeclarableDeclKind(static_cast<T *>(D)->getKind()) &&
+ assert(isRedeclarableDeclKind(DAsT->getKind()) &&
"Not considered redeclarable?");
Writer.AddDeclRef(First, Record);
- // In a modules build, emit a list of all imported key declarations
- // (excluding First, if it was imported), so that we can be sure that all
- // redeclarations visible to this module are before D in the redecl chain.
- unsigned I = Record.size();
- Record.push_back(0);
- if (Context.getLangOpts().Modules && Writer.Chain) {
- if (isFirstLocalDecl(D)) {
- Writer.Chain->forEachImportedKeyDecl(First, [&](const Decl *D) {
- if (D != First)
- Writer.AddDeclRef(D, Record);
- });
- Record[I] = Record.size() - I - 1;
-
- // Write a redeclaration chain, attached to the first key decl.
- Writer.Redeclarations.push_back(Writer.Chain->getKeyDeclaration(First));
+ // Write out a list of local redeclarations of this declaration if it's the
+ // first local declaration in the chain.
+ const Decl *FirstLocal = Writer.getFirstLocalDecl(DAsT);
+ if (DAsT == FirstLocal) {
+ // Emit a list of all imported first declarations so that we can be sure
+ // that all redeclarations visible to this module are before D in the
+ // redecl chain.
+ unsigned I = Record.size();
+ Record.push_back(0);
+ if (Writer.Chain)
+ AddFirstDeclFromEachModule(DAsT, /*IncludeLocal*/false);
+ // This is the number of imported first declarations + 1.
+ Record[I] = Record.size() - I;
+
+ // Collect the set of local redeclarations of this declaration, from
+ // newest to oldest.
+ RecordData LocalRedecls;
+ for (const Decl *Prev = FirstLocal->getMostRecentDecl();
+ Prev != FirstLocal; Prev = Prev->getPreviousDecl())
+ if (!Prev->isFromASTFile())
+ Writer.AddDeclRef(Prev, LocalRedecls);
+
+ // If we have any redecls, write them now as a separate record preceding
+ // the declaration itself.
+ if (LocalRedecls.empty())
+ Record.push_back(0);
+ else {
+ Record.push_back(Writer.Stream.GetCurrentBitNo());
+ Writer.Stream.EmitRecord(LOCAL_REDECLARATIONS, LocalRedecls);
}
- } else if (D == First || D->getPreviousDecl()->isFromASTFile()) {
- assert(isFirstLocalDecl(D) && "imported decl after local decl");
-
- // Write a redeclaration chain attached to the first decl.
- Writer.Redeclarations.push_back(First);
+ } else {
+ Record.push_back(0);
+ Writer.AddDeclRef(FirstLocal, Record);
}
// Make sure that we serialize both the previous and the most-recent
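The layout VisitRedeclarable now produces for the first local declaration: either a literal 0 (no local redeclarations) or the bit offset of a LOCAL_REDECLARATIONS record, emitted just before the declaration itself, listing the local redeclarations newest first. A toy sketch of that offset-to-preceding-record scheme (stand-in stream, not llvm::BitstreamWriter):

#include <cstdint>
#include <vector>

struct Stream { // stand-in for the bitstream: tell() + append-only emit()
  std::vector<uint64_t> Bits;
  uint64_t tell() const { return Bits.size(); }
  void emit(const std::vector<uint64_t> &Rec) {
    Bits.insert(Bits.end(), Rec.begin(), Rec.end());
  }
};

static void writeRedecls(Stream &S, std::vector<uint64_t> &DeclRecord,
                         const std::vector<uint64_t> &LocalRedecls) {
  if (LocalRedecls.empty()) {
    DeclRecord.push_back(0);        // no local redeclarations
  } else {
    DeclRecord.push_back(S.tell()); // offset of the record emitted next
    S.emit(LocalRedecls);           // LOCAL_REDECLARATIONS analogue
  }
}

int main() {
  Stream S;
  std::vector<uint64_t> Rec;
  writeRedecls(S, Rec, {101, 100}); // local redecl IDs, newest first
  return (Rec.size() == 1 && S.Bits.size() == 2) ? 0 : 1;
}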
@@ -1558,7 +1642,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(serialization::DECL_FIELD));
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1591,7 +1675,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(serialization::DECL_OBJC_IVAR));
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1629,7 +1713,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1677,7 +1761,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1720,7 +1804,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1767,7 +1851,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1796,7 +1880,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
@@ -1842,7 +1926,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // CanonicalDecl
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
Abv->Add(BitCodeAbbrevOp(0)); // Invalid
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Implicit
@@ -1977,7 +2061,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_VISIBLE));
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
DeclContextVisibleLookupAbbrev = Stream.EmitAbbrev(Abv);
}
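On the repeated LexicalDeclContext change in these abbreviations: when the lexical context equals the semantic one, VisitDecl now writes a literal 0, and a literal field in a bitcode abbreviation is part of the abbreviation definition itself, so matching records spend no bits on it. A toy encoder making the point (not LLVM's bitstream):

#include <cassert>
#include <cstdint>
#include <vector>

struct FieldAbbrev {
  bool IsLiteral;
  uint64_t Literal; // the fixed value, when IsLiteral is set
};

static void encode(std::vector<uint64_t> &Out, const FieldAbbrev &F,
                   uint64_t Value) {
  if (F.IsLiteral)
    assert(Value == F.Literal && "record does not match abbreviation");
  else
    Out.push_back(Value); // non-literal fields cost bits in every record
}

int main() {
  std::vector<uint64_t> Out;
  encode(Out, {true, 0}, 0);   // literal field: nothing is emitted
  encode(Out, {false, 0}, 42); // variable field: the value is emitted
  return Out.size() == 1 ? 0 : 1;
}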
@@ -1994,14 +2077,19 @@ void ASTWriter::WriteDeclAbbrevs() {
/// clients to use a separate API call to "realize" the decl. This should be
/// relatively painless since they would presumably only do it for top-level
/// decls.
-static bool isRequiredDecl(const Decl *D, ASTContext &Context) {
+static bool isRequiredDecl(const Decl *D, ASTContext &Context,
+ bool WritingModule) {
// An ObjCMethodDecl is never considered as "required" because its
// implementation container always is.
- // File scoped assembly or obj-c implementation must be seen. ImportDecl is
- // used by codegen to determine the set of imported modules to search for
- // inputs for automatic linking.
- if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D) || isa<ImportDecl>(D))
+ // File scoped assembly or obj-c implementation must be seen.
+ if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
+ return true;
+
+ // ImportDecl is used by codegen to determine the set of imported modules to
+ // search for inputs for automatic linking; include it if it has a semantic
+ // effect.
+ if (isa<ImportDecl>(D) && !WritingModule)
return true;
return Context.DeclMustBeEmitted(D);
@@ -2016,16 +2104,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
// Determine the ID for this declaration.
serialization::DeclID ID;
- if (D->isFromASTFile()) {
- assert(isRewritten(D) && "should not be emitting imported decl");
- ID = getDeclID(D);
- } else {
- serialization::DeclID &IDR = DeclIDs[D];
- if (IDR == 0)
- IDR = NextDeclID++;
+ assert(!D->isFromASTFile() && "should not be emitting imported decl");
+ serialization::DeclID &IDR = DeclIDs[D];
+ if (IDR == 0)
+ IDR = NextDeclID++;
- ID= IDR;
- }
+ ID = IDR;
bool isReplacingADecl = ID < FirstDeclID;
@@ -2050,6 +2134,13 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
VisibleOffset = WriteDeclContextVisibleBlock(Context, DC);
}
+ // Build a record for this declaration
+ Record.clear();
+ W.Code = (serialization::DeclCode)0;
+ W.AbbrevToUse = 0;
+ W.Visit(D);
+ if (DC) W.VisitDeclContext(DC, LexicalOffset, VisibleOffset);
+
if (isReplacingADecl) {
// We're replacing a decl in a previous file.
ReplacedDecls.push_back(ReplacedDeclInfo(ID, Stream.GetCurrentBitNo(),
@@ -2066,19 +2157,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
DeclOffsets[Index].setLocation(Loc);
DeclOffsets[Index].BitOffset = Stream.GetCurrentBitNo();
}
-
+
SourceManager &SM = Context.getSourceManager();
if (Loc.isValid() && SM.isLocalSourceLocation(Loc))
associateDeclWithFile(D, ID);
}
- // Build and emit a record for this declaration
- Record.clear();
- W.Code = (serialization::DeclCode)0;
- W.AbbrevToUse = 0;
- W.Visit(D);
- if (DC) W.VisitDeclContext(DC, LexicalOffset, VisibleOffset);
-
if (!W.Code)
llvm::report_fatal_error(StringRef("unexpected declaration kind '") +
D->getDeclKindName() + "'");
@@ -2090,7 +2174,7 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
// Note declarations that should be deserialized eagerly so that we can add
// them to a record in the AST file later.
- if (isRequiredDecl(D, Context))
+ if (isRequiredDecl(D, Context, WritingModule))
EagerlyDeserializedDecls.push_back(ID);
}
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 0dd809036a3d..e52ed052d3bc 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -40,7 +40,8 @@ namespace clang {
ASTStmtWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
: Writer(Writer), Record(Record) { }
- void AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args);
+ void AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &ArgInfo,
+ const TemplateArgumentLoc *Args);
void VisitStmt(Stmt *S);
#define STMT(Type, Base) \
@@ -49,13 +50,13 @@ namespace clang {
};
}
-void ASTStmtWriter::
-AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args) {
- Writer.AddSourceLocation(Args.getTemplateKeywordLoc(), Record);
- Writer.AddSourceLocation(Args.LAngleLoc, Record);
- Writer.AddSourceLocation(Args.RAngleLoc, Record);
- for (unsigned i=0; i != Args.NumTemplateArgs; ++i)
- Writer.AddTemplateArgumentLoc(Args.getTemplateArgs()[i], Record);
+void ASTStmtWriter::AddTemplateKWAndArgsInfo(
+ const ASTTemplateKWAndArgsInfo &ArgInfo, const TemplateArgumentLoc *Args) {
+ Writer.AddSourceLocation(ArgInfo.TemplateKWLoc, Record);
+ Writer.AddSourceLocation(ArgInfo.LAngleLoc, Record);
+ Writer.AddSourceLocation(ArgInfo.RAngleLoc, Record);
+ for (unsigned i = 0; i != ArgInfo.NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(Args[i], Record);
}
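
The two-pointer signature mirrors the TrailingObjects layout these expressions now use: the ASTTemplateKWAndArgsInfo header no longer owns its argument array, so the writer receives the header and the trailing TemplateArgumentLoc array separately. A minimal sketch of that layout with a hypothetical node type (ints stand in for TemplateArgumentLoc):

    #include "llvm/Support/TrailingObjects.h"
    #include <new>

    class Node final : private llvm::TrailingObjects<Node, int> {
      friend TrailingObjects;
      unsigned NumArgs;
      explicit Node(unsigned N) : NumArgs(N) {}

    public:
      static Node *create(unsigned N) {
        // One allocation: the header immediately followed by N ints.
        void *Mem = ::operator new(totalSizeToAlloc<int>(N));
        return new (Mem) Node(N);
      }
      // Callers see the header and the trailing array as two pointers,
      // exactly the shape the new AddTemplateKWAndArgsInfo takes.
      int *args() { return getTrailingObjects<int>(); }
      unsigned size() const { return NumArgs; }
    };
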
void ASTStmtWriter::VisitStmt(Stmt *S) {
@@ -287,6 +288,26 @@ void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
Code = serialization::STMT_MSASM;
}
+void ASTStmtWriter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoreturnStmt(CoreturnStmt *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoawaitExpr(CoawaitExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
+void ASTStmtWriter::VisitCoyieldExpr(CoyieldExpr *S) {
+ // FIXME: Implement coroutine serialization.
+ llvm_unreachable("unimplemented");
+}
+
void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
VisitStmt(S);
// NumCaptures
@@ -366,7 +387,8 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
Writer.AddDeclRef(E->getFoundDecl(), Record);
if (E->hasTemplateKWAndArgsInfo())
- AddTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo());
+ AddTemplateKWAndArgsInfo(*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
+ E->getTrailingObjects<TemplateArgumentLoc>());
Writer.AddDeclRef(E->getDecl(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
@@ -462,24 +484,24 @@ void ASTStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) {
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
- const OffsetOfExpr::OffsetOfNode &ON = E->getComponent(I);
+ const OffsetOfNode &ON = E->getComponent(I);
Record.push_back(ON.getKind()); // FIXME: Stable encoding
Writer.AddSourceLocation(ON.getSourceRange().getBegin(), Record);
Writer.AddSourceLocation(ON.getSourceRange().getEnd(), Record);
switch (ON.getKind()) {
- case OffsetOfExpr::OffsetOfNode::Array:
+ case OffsetOfNode::Array:
Record.push_back(ON.getArrayExprIndex());
break;
-
- case OffsetOfExpr::OffsetOfNode::Field:
+
+ case OffsetOfNode::Field:
Writer.AddDeclRef(ON.getField(), Record);
break;
-
- case OffsetOfExpr::OffsetOfNode::Identifier:
+
+ case OffsetOfNode::Identifier:
Writer.AddIdentifierRef(ON.getFieldName(), Record);
break;
-
- case OffsetOfExpr::OffsetOfNode::Base:
+
+ case OffsetOfNode::Base:
Writer.AddCXXBaseSpecifier(*ON.getBase(), Record);
break;
}
@@ -511,6 +533,16 @@ void ASTStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Code = serialization::EXPR_ARRAY_SUBSCRIPT;
}
+void ASTStmtWriter::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddStmt(E->getLowerBound());
+ Writer.AddStmt(E->getLength());
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_OMP_ARRAY_SECTION;
+}
+
void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
@@ -761,6 +793,7 @@ void ASTStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
Writer.AddTypeSourceInfo(E->getWrittenTypeInfo(), Record);
Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Record.push_back(E->isMicrosoftABI());
Code = serialization::EXPR_VA_ARG;
}
@@ -1124,6 +1157,7 @@ void ASTStmtWriter::VisitCXXTryStmt(CXXTryStmt *S) {
void ASTStmtWriter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
VisitStmt(S);
Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getCoawaitLoc(), Record);
Writer.AddSourceLocation(S->getColonLoc(), Record);
Writer.AddSourceLocation(S->getRParenLoc(), Record);
Writer.AddStmt(S->getRangeStmt());
@@ -1408,9 +1442,11 @@ ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
Record.push_back(E->HasTemplateKWAndArgsInfo);
if (E->HasTemplateKWAndArgsInfo) {
- const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
- Record.push_back(Args.NumTemplateArgs);
- AddTemplateKWAndArgsInfo(Args);
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo,
+ E->getTrailingObjects<TemplateArgumentLoc>());
}
if (!E->isImplicitAccess())
@@ -1435,9 +1471,11 @@ ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
Record.push_back(E->HasTemplateKWAndArgsInfo);
if (E->HasTemplateKWAndArgsInfo) {
- const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
- Record.push_back(Args.NumTemplateArgs);
- AddTemplateKWAndArgsInfo(Args);
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo,
+ E->getTrailingObjects<TemplateArgumentLoc>());
}
Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
@@ -1466,9 +1504,10 @@ void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
Record.push_back(E->HasTemplateKWAndArgsInfo);
if (E->HasTemplateKWAndArgsInfo) {
- const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
- Record.push_back(Args.NumTemplateArgs);
- AddTemplateKWAndArgsInfo(Args);
+ const ASTTemplateKWAndArgsInfo &ArgInfo =
+ *E->getTrailingASTTemplateKWAndArgsInfo();
+ Record.push_back(ArgInfo.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(ArgInfo, E->getTrailingTemplateArgumentLoc());
}
Record.push_back(E->getNumDecls());
@@ -1547,11 +1586,18 @@ void ASTStmtWriter::VisitPackExpansionExpr(PackExpansionExpr *E) {
void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
VisitExpr(E);
+ Record.push_back(E->isPartiallySubstituted() ? E->getPartialArguments().size()
+ : 0);
Writer.AddSourceLocation(E->OperatorLoc, Record);
Writer.AddSourceLocation(E->PackLoc, Record);
Writer.AddSourceLocation(E->RParenLoc, Record);
- Record.push_back(E->Length);
Writer.AddDeclRef(E->Pack, Record);
+ if (E->isPartiallySubstituted()) {
+ for (const auto &TA : E->getPartialArguments())
+ Writer.AddTemplateArgument(TA, Record);
+ } else if (!E->isValueDependent()) {
+ Record.push_back(E->getPackLength());
+ }
Code = serialization::EXPR_SIZEOF_PACK;
}
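
The partial-argument count moves to the front of the record because the node is now variable-length: on reload, the reader must know how many trailing arguments to allocate before it can construct the expression and consume the remaining fields. A toy version of the writer-side ordering (a simplified, hypothetical encoding, not the real bitstream):

    #include <cstdint>
    #include <vector>

    void writeSizeOfPack(std::vector<uint64_t> &Record, bool Partial,
                         bool ValueDependent,
                         const std::vector<uint64_t> &PartialArgs,
                         uint64_t PackLength) {
      // Length prefix first, so the reader can size the allocation.
      Record.push_back(Partial ? PartialArgs.size() : 0);
      // ... source locations and the pack declaration go here ...
      if (Partial)
        Record.insert(Record.end(), PartialArgs.begin(), PartialArgs.end());
      else if (!ValueDependent)
        Record.push_back(PackLength); // fully-known pack: just a count
    }
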
@@ -1650,6 +1696,14 @@ void ASTStmtWriter::VisitMSPropertyRefExpr(MSPropertyRefExpr *E) {
Code = serialization::EXPR_CXX_PROPERTY_REF_EXPR;
}
+void ASTStmtWriter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddStmt(E->getIdx());
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR;
+}
+
void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
Writer.AddSourceRange(E->getSourceRange(), Record);
@@ -1718,6 +1772,9 @@ void OMPClauseWriter::writeClause(OMPClause *C) {
}
void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
+ Record.push_back(C->getNameModifier());
+ Writer->Writer.AddSourceLocation(C->getNameModifierLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
Writer->Writer.AddStmt(C->getCondition());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
}
@@ -1737,6 +1794,11 @@ void OMPClauseWriter::VisitOMPSafelenClause(OMPSafelenClause *C) {
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
}
+void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ Writer->Writer.AddStmt(C->getSimdlen());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
Writer->Writer.AddStmt(C->getNumForLoops());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
@@ -1756,14 +1818,21 @@ void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) {
void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) {
Record.push_back(C->getScheduleKind());
+ Record.push_back(C->getFirstScheduleModifier());
+ Record.push_back(C->getSecondScheduleModifier());
Writer->Writer.AddStmt(C->getChunkSize());
Writer->Writer.AddStmt(C->getHelperChunkSize());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getFirstScheduleModifierLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getSecondScheduleModifierLoc(), Record);
Writer->Writer.AddSourceLocation(C->getScheduleKindLoc(), Record);
Writer->Writer.AddSourceLocation(C->getCommaLoc(), Record);
}
-void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *) {}
+void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ Writer->Writer.AddStmt(C->getNumForLoops());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
void OMPClauseWriter::VisitOMPNowaitClause(OMPNowaitClause *) {}
@@ -1781,6 +1850,12 @@ void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
@@ -1836,6 +1911,8 @@ void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
Writer->Writer.AddDeclarationNameInfo(C->getNameInfo(), Record);
for (auto *VE : C->varlists())
Writer->Writer.AddStmt(VE);
+ for (auto *VE : C->privates())
+ Writer->Writer.AddStmt(VE);
for (auto *E : C->lhs_exprs())
Writer->Writer.AddStmt(E);
for (auto *E : C->rhs_exprs())
@@ -1848,9 +1925,14 @@ void OMPClauseWriter::VisitOMPLinearClause(OMPLinearClause *C) {
Record.push_back(C->varlist_size());
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ Record.push_back(C->getModifier());
+ Writer->Writer.AddSourceLocation(C->getModifierLoc(), Record);
for (auto *VE : C->varlists()) {
Writer->Writer.AddStmt(VE);
}
+ for (auto *VE : C->privates()) {
+ Writer->Writer.AddStmt(VE);
+ }
for (auto *VE : C->inits()) {
Writer->Writer.AddStmt(VE);
}
@@ -1916,6 +1998,52 @@ void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Writer->Writer.AddStmt(VE);
}
+void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ Writer->Writer.AddStmt(C->getDevice());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
+ Record.push_back(C->varlist_size());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+ Record.push_back(C->getMapTypeModifier());
+ Record.push_back(C->getMapType());
+ Writer->Writer.AddSourceLocation(C->getMapLoc(), Record);
+ Writer->Writer.AddSourceLocation(C->getColonLoc(), Record);
+ for (auto *VE : C->varlists())
+ Writer->Writer.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ Writer->Writer.AddStmt(C->getNumTeams());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ Writer->Writer.AddStmt(C->getThreadLimit());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ Writer->Writer.AddStmt(C->getPriority());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ Writer->Writer.AddStmt(C->getGrainsize());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ Writer->Writer.AddStmt(C->getNumTasks());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
+void OMPClauseWriter::VisitOMPHintClause(OMPHintClause *C) {
+ Writer->Writer.AddStmt(C->getHint());
+ Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
+}
+
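
All of these clause writers rely on one invariant: the reader consumes fields in exactly the order the writer pushed them, so every new field (modifiers, colon locations, privates) must land in the same position on both sides. A toy round-trip showing the pattern (hypothetical flat encoding, not the real bitstream):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Cursor {
      const std::vector<uint64_t> &R;
      size_t I = 0;
      uint64_t next() { return R[I++]; }
    };

    // Writer: expression first, then its '(' location.
    void writeClause(std::vector<uint64_t> &R, uint64_t ExprID,
                     uint64_t LParenLoc) {
      R.push_back(ExprID);
      R.push_back(LParenLoc);
    }

    // Reader: must mirror writeClause field-for-field.
    void readClause(Cursor &C, uint64_t &ExprID, uint64_t &LParenLoc) {
      ExprID = C.next();
      LParenLoc = C.next();
    }
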
//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
@@ -1954,6 +2082,9 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
for (auto I : D->counters()) {
Writer.AddStmt(I);
}
+ for (auto I : D->private_counters()) {
+ Writer.AddStmt(I);
+ }
for (auto I : D->inits()) {
Writer.AddStmt(I);
}
@@ -1969,6 +2100,7 @@ void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
}
@@ -1979,6 +2111,7 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_FOR_DIRECTIVE;
}
@@ -1991,12 +2124,14 @@ void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_SECTIONS_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPSectionDirective(OMPSectionDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_SECTION_DIRECTIVE;
}
@@ -2015,6 +2150,7 @@ void ASTStmtWriter::VisitOMPMasterDirective(OMPMasterDirective *D) {
void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
VisitStmt(D);
+ Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Writer.AddDeclarationNameInfo(D->getDirectiveName(), Record);
Code = serialization::STMT_OMP_CRITICAL_DIRECTIVE;
@@ -2022,6 +2158,7 @@ void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
}
@@ -2036,6 +2173,7 @@ void ASTStmtWriter::VisitOMPParallelSectionsDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE;
}
@@ -2043,6 +2181,7 @@ void ASTStmtWriter::VisitOMPTaskDirective(OMPTaskDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TASK_DIRECTIVE;
}
@@ -2066,6 +2205,13 @@ void ASTStmtWriter::VisitOMPTargetDirective(OMPTargetDirective *D) {
Code = serialization::STMT_OMP_TARGET_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TARGET_DATA_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2099,6 +2245,7 @@ void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
+ Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_ORDERED_DIRECTIVE;
}
@@ -2120,11 +2267,27 @@ void ASTStmtWriter::VisitOMPCancellationPointDirective(
void ASTStmtWriter::VisitOMPCancelDirective(OMPCancelDirective *D) {
VisitStmt(D);
+ Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Record.push_back(D->getCancelRegion());
Code = serialization::STMT_OMP_CANCEL_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TASKLOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TASKLOOP_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_DISTRIBUTE_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/Serialization/CMakeLists.txt b/lib/Serialization/CMakeLists.txt
index d885db22975e..95b33c388c56 100644
--- a/lib/Serialization/CMakeLists.txt
+++ b/lib/Serialization/CMakeLists.txt
@@ -15,6 +15,7 @@ add_clang_library(clangSerialization
GeneratePCH.cpp
GlobalModuleIndex.cpp
Module.cpp
+ ModuleFileExtension.cpp
ModuleManager.cpp
ADDITIONAL_HEADERS
diff --git a/lib/Serialization/GeneratePCH.cpp b/lib/Serialization/GeneratePCH.cpp
index c461fe936ee4..4a2255ab6d39 100644
--- a/lib/Serialization/GeneratePCH.cpp
+++ b/lib/Serialization/GeneratePCH.cpp
@@ -23,12 +23,15 @@
using namespace clang;
-PCHGenerator::PCHGenerator(const Preprocessor &PP, StringRef OutputFile,
- clang::Module *Module, StringRef isysroot,
- std::shared_ptr<PCHBuffer> Buffer,
- bool AllowASTWithErrors)
+PCHGenerator::PCHGenerator(
+ const Preprocessor &PP, StringRef OutputFile,
+ clang::Module *Module, StringRef isysroot,
+ std::shared_ptr<PCHBuffer> Buffer,
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ bool AllowASTWithErrors, bool IncludeTimestamps)
: PP(PP), OutputFile(OutputFile), Module(Module), isysroot(isysroot.str()),
- SemaPtr(nullptr), Buffer(Buffer), Stream(Buffer->Data), Writer(Stream),
+ SemaPtr(nullptr), Buffer(Buffer), Stream(Buffer->Data),
+ Writer(Stream, Extensions, IncludeTimestamps),
AllowASTWithErrors(AllowASTWithErrors) {
Buffer->IsComplete = false;
}
@@ -47,7 +50,8 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Emit the PCH file to the Buffer.
assert(SemaPtr && "No Sema?");
- Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot, hasErrors);
+ Buffer->Signature =
+ Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot, hasErrors);
Buffer->IsComplete = true;
}
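
WriteAST now returns the AST file's signature so it can travel with the buffer, letting consumers match a PCH against its users without re-reading the file. As a toy stand-in (an assumption about shape, not the real signature computation), any stable hash over the emitted bytes would do:

    #include <cstdint>
    #include <vector>

    // FNV-1a over the serialized buffer; illustrative only.
    uint64_t signatureOf(const std::vector<char> &Data) {
      uint64_t H = 1469598103934665603ull;
      for (char C : Data) {
        H ^= static_cast<unsigned char>(C);
        H *= 1099511628211ull;
      }
      return H;
    }
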
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 17c7914243e7..af5f94a5cdc4 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -757,9 +757,7 @@ void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
// Write the identifier table
- Record.clear();
- Record.push_back(IDENTIFIER_INDEX);
- Record.push_back(BucketOffset);
+ uint64_t Record[] = {IDENTIFIER_INDEX, BucketOffset};
Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable);
}
diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp
index 3b237d5529c6..4884f0b09480 100644
--- a/lib/Serialization/Module.cpp
+++ b/lib/Serialization/Module.cpp
@@ -40,19 +40,11 @@ ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(nullptr),
LocalNumCXXCtorInitializers(0), CXXCtorInitializersOffsets(nullptr),
FileSortedDecls(nullptr), NumFileSortedDecls(0),
- RedeclarationsMap(nullptr), LocalNumRedeclarationsInMap(0),
ObjCCategoriesMap(nullptr), LocalNumObjCCategoriesInMap(0),
LocalNumTypes(0), TypeOffsets(nullptr), BaseTypeIndex(0)
{}
ModuleFile::~ModuleFile() {
- for (DeclContextInfosMap::iterator I = DeclContextInfos.begin(),
- E = DeclContextInfos.end();
- I != E; ++I) {
- if (I->second.NameLookupTableData)
- delete I->second.NameLookupTableData;
- }
-
delete static_cast<ASTIdentifierLookupTable *>(IdentifierLookupTable);
delete static_cast<HeaderFileInfoLookupTable *>(HeaderFileInfoTable);
delete static_cast<ASTSelectorLookupTable *>(SelectorLookupTable);
diff --git a/lib/Serialization/ModuleFileExtension.cpp b/lib/Serialization/ModuleFileExtension.cpp
new file mode 100644
index 000000000000..81dcfd60ce8e
--- /dev/null
+++ b/lib/Serialization/ModuleFileExtension.cpp
@@ -0,0 +1,22 @@
+//===-- ModuleFileExtension.cpp - Module File Extensions ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Serialization/ModuleFileExtension.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+ModuleFileExtension::~ModuleFileExtension() { }
+
+llvm::hash_code ModuleFileExtension::hashExtension(llvm::hash_code Code) const {
+ return Code;
+}
+
+ModuleFileExtensionWriter::~ModuleFileExtensionWriter() { }
+
+ModuleFileExtensionReader::~ModuleFileExtensionReader() { }
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 271619404d2f..74f75a103f7a 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -95,6 +95,8 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
New->File = Entry;
New->ImportLoc = ImportLoc;
Chain.push_back(New);
+ if (!New->isModule())
+ PCHChain.push_back(New);
if (!ImportedBy)
Roots.push_back(New);
NewModule = true;
@@ -159,6 +161,8 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
Modules.erase(Entry);
assert(Chain.back() == ModuleEntry);
Chain.pop_back();
+ if (!ModuleEntry->isModule())
+ PCHChain.pop_back();
if (Roots.back() == ModuleEntry)
Roots.pop_back();
else
@@ -190,6 +194,9 @@ void ModuleManager::removeModules(
if (first == last)
return;
+ // Explicitly clear VisitOrder since we might not notice it is stale.
+ VisitOrder.clear();
+
// Collect the set of module file pointers that we'll be removing.
llvm::SmallPtrSet<ModuleFile *, 4> victimSet(first, last);
@@ -203,6 +210,15 @@ void ModuleManager::removeModules(
Roots.erase(std::remove_if(Roots.begin(), Roots.end(), IsVictim),
Roots.end());
+ // Remove the modules from the PCH chain.
+ for (auto I = first; I != last; ++I) {
+ if (!(*I)->isModule()) {
+ PCHChain.erase(std::find(PCHChain.begin(), PCHChain.end(), *I),
+ PCHChain.end());
+ break;
+ }
+ }
+
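
The PCHChain bookkeeping appears to assume PCH files form a chain loaded in order, so removal is always a suffix: once the first victim is found, everything after it goes too, which is why the loop breaks after the first non-module hit. The same idea in isolation, with ints standing in for ModuleFile pointers:

    #include <algorithm>
    #include <vector>

    // Erase FirstVictim and everything loaded after it.
    void removeSuffix(std::vector<int> &Chain, int FirstVictim) {
      Chain.erase(std::find(Chain.begin(), Chain.end(), FirstVictim),
                  Chain.end());
    }
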
// Delete the modules and erase them from the various structures.
for (ModuleIterator victim = first; victim != last; ++victim) {
Modules.erase((*victim)->File);
@@ -236,15 +252,6 @@ ModuleManager::addInMemoryBuffer(StringRef FileName,
InMemoryBuffers[Entry] = std::move(Buffer);
}
-bool ModuleManager::addKnownModuleFile(StringRef FileName) {
- const FileEntry *File;
- if (lookupModuleFile(FileName, 0, 0, File))
- return true;
- if (!Modules.count(File))
- AdditionalKnownModuleFiles.insert(File);
- return false;
-}
-
ModuleManager::VisitState *ModuleManager::allocateVisitState() {
// Fast path: if we have a cached state, use it.
if (FirstVisitState) {
@@ -281,8 +288,6 @@ void ModuleManager::setGlobalIndex(GlobalModuleIndex *Index) {
}
void ModuleManager::moduleFileAccepted(ModuleFile *MF) {
- AdditionalKnownModuleFiles.remove(MF->File);
-
if (!GlobalIndex || GlobalIndex->loadedModuleFile(MF))
return;
@@ -300,10 +305,8 @@ ModuleManager::~ModuleManager() {
delete FirstVisitState;
}
-void
-ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
- void *UserData,
- llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
+void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
+ llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
// If the visitation order vector is the wrong size, recompute the order.
if (VisitOrder.size() != Chain.size()) {
unsigned N = size();
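
Replacing the function-pointer-plus-void* callback with llvm::function_ref lets callers pass capturing lambdas directly and drops the UserData threading; function_ref is a non-owning view, which is safe here because the visitor never outlives the call. A minimal sketch of the mechanics with a toy module type (the real visit() prunes dependents on a true return; this toy just stops on false):

    #include "llvm/ADT/STLExtras.h" // llvm::function_ref
    #include <vector>

    struct ToyModule { int Index; };

    void visitAll(std::vector<ToyModule> &Mods,
                  llvm::function_ref<bool(ToyModule &)> Visitor) {
      for (ToyModule &M : Mods)
        if (!Visitor(M))
          return;
    }

    // Usage: state is captured by the lambda, no void* needed.
    //   int Hits = 0;
    //   visitAll(Mods, [&](ToyModule &M) { ++Hits; return true; });
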
@@ -316,28 +319,24 @@ ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
SmallVector<ModuleFile *, 4> Queue;
Queue.reserve(N);
llvm::SmallVector<unsigned, 4> UnusedIncomingEdges;
- UnusedIncomingEdges.reserve(size());
- for (ModuleIterator M = begin(), MEnd = end(); M != MEnd; ++M) {
- if (unsigned Size = (*M)->ImportedBy.size())
- UnusedIncomingEdges.push_back(Size);
- else {
- UnusedIncomingEdges.push_back(0);
+ UnusedIncomingEdges.resize(size());
+ for (auto M = rbegin(), MEnd = rend(); M != MEnd; ++M) {
+ unsigned Size = (*M)->ImportedBy.size();
+ UnusedIncomingEdges[(*M)->Index] = Size;
+ if (!Size)
Queue.push_back(*M);
- }
}
// Traverse the graph, making sure to visit a module before visiting any
// of its dependencies.
- unsigned QueueStart = 0;
- while (QueueStart < Queue.size()) {
- ModuleFile *CurrentModule = Queue[QueueStart++];
+ while (!Queue.empty()) {
+ ModuleFile *CurrentModule = Queue.pop_back_val();
VisitOrder.push_back(CurrentModule);
// For any module that this module depends on, push it on the
// stack (if it hasn't already been marked as visited).
- for (llvm::SetVector<ModuleFile *>::iterator
- M = CurrentModule->Imports.begin(),
- MEnd = CurrentModule->Imports.end();
+ for (auto M = CurrentModule->Imports.rbegin(),
+ MEnd = CurrentModule->Imports.rend();
M != MEnd; ++M) {
// Remove our current module as an impediment to visiting the
// module we depend on. If we were the last unvisited module
@@ -379,7 +378,7 @@ ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
// Visit the module.
assert(State->VisitNumber[CurrentModule->Index] == VisitNumber - 1);
State->VisitNumber[CurrentModule->Index] = VisitNumber;
- if (!Visitor(*CurrentModule, UserData))
+ if (!Visitor(*CurrentModule))
continue;
// The visitor has requested that cut off visitation of any
@@ -410,71 +409,6 @@ ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
returnVisitState(State);
}
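
The rewritten loop is Kahn's topological sort over the import graph: seed the worklist with modules nobody imports, and release a module's imports once all of their importers have been visited; iterating in reverse with a LIFO queue keeps the resulting order stable. The same algorithm in isolation over an index graph:

    #include <vector>

    // Imports[M] lists the modules M imports; ImportedBy[M] counts M's
    // importers. Returns each module before anything it depends on.
    std::vector<int> visitOrder(const std::vector<std::vector<int>> &Imports,
                                std::vector<int> ImportedBy) {
      std::vector<int> Order, Stack;
      for (int M = static_cast<int>(Imports.size()) - 1; M >= 0; --M)
        if (ImportedBy[M] == 0)
          Stack.push_back(M);            // roots of the import graph
      while (!Stack.empty()) {
        int Cur = Stack.back();
        Stack.pop_back();
        Order.push_back(Cur);
        for (auto I = Imports[Cur].rbegin(); I != Imports[Cur].rend(); ++I)
          if (--ImportedBy[*I] == 0)     // last importer just visited
            Stack.push_back(*I);
      }
      return Order;
    }
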
-static void markVisitedDepthFirst(ModuleFile &M,
- SmallVectorImpl<bool> &Visited) {
- for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(),
- IMEnd = M.Imports.end();
- IM != IMEnd; ++IM) {
- if (Visited[(*IM)->Index])
- continue;
- Visited[(*IM)->Index] = true;
- if (!M.DirectlyImported)
- markVisitedDepthFirst(**IM, Visited);
- }
-}
-
-/// \brief Perform a depth-first visit of the current module.
-static bool visitDepthFirst(
- ModuleFile &M,
- ModuleManager::DFSPreorderControl (*PreorderVisitor)(ModuleFile &M,
- void *UserData),
- bool (*PostorderVisitor)(ModuleFile &M, void *UserData), void *UserData,
- SmallVectorImpl<bool> &Visited) {
- if (PreorderVisitor) {
- switch (PreorderVisitor(M, UserData)) {
- case ModuleManager::Abort:
- return true;
- case ModuleManager::SkipImports:
- markVisitedDepthFirst(M, Visited);
- return false;
- case ModuleManager::Continue:
- break;
- }
- }
-
- // Visit children
- for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(),
- IMEnd = M.Imports.end();
- IM != IMEnd; ++IM) {
- if (Visited[(*IM)->Index])
- continue;
- Visited[(*IM)->Index] = true;
-
- if (visitDepthFirst(**IM, PreorderVisitor, PostorderVisitor, UserData, Visited))
- return true;
- }
-
- if (PostorderVisitor)
- return PostorderVisitor(M, UserData);
-
- return false;
-}
-
-void ModuleManager::visitDepthFirst(
- ModuleManager::DFSPreorderControl (*PreorderVisitor)(ModuleFile &M,
- void *UserData),
- bool (*PostorderVisitor)(ModuleFile &M, void *UserData), void *UserData) {
- SmallVector<bool, 16> Visited(size(), false);
- for (unsigned I = 0, N = Roots.size(); I != N; ++I) {
- if (Visited[Roots[I]->Index])
- continue;
- Visited[Roots[I]->Index] = true;
-
- if (::visitDepthFirst(*Roots[I], PreorderVisitor, PostorderVisitor, UserData, Visited))
- return;
- }
-}
-
bool ModuleManager::lookupModuleFile(StringRef FileName,
off_t ExpectedSize,
time_t ExpectedModTime,
diff --git a/lib/Serialization/MultiOnDiskHashTable.h b/lib/Serialization/MultiOnDiskHashTable.h
new file mode 100644
index 000000000000..04dea831695c
--- /dev/null
+++ b/lib/Serialization/MultiOnDiskHashTable.h
@@ -0,0 +1,330 @@
+//===--- MultiOnDiskHashTable.h - Merged set of hash tables -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a hash table data structure suitable for incremental and
+// distributed storage across a set of files.
+//
+// Multiple hash tables from different files are implicitly merged to improve
+// performance, and on reload the merged table will override those from other
+// files.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
+#define LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
+
+namespace clang {
+namespace serialization {
+
+class ModuleFile;
+
+/// \brief A collection of on-disk hash tables, merged when relevant for
+/// performance.
+template<typename Info> class MultiOnDiskHashTable {
+public:
+ /// A handle to a file, used when overriding tables.
+ typedef typename Info::file_type file_type;
+ /// A pointer to an on-disk representation of the hash table.
+ typedef const unsigned char *storage_type;
+
+ typedef typename Info::external_key_type external_key_type;
+ typedef typename Info::internal_key_type internal_key_type;
+ typedef typename Info::data_type data_type;
+ typedef typename Info::data_type_builder data_type_builder;
+ typedef unsigned hash_value_type;
+
+private:
+ /// \brief A hash table stored on disk.
+ struct OnDiskTable {
+ typedef llvm::OnDiskIterableChainedHashTable<Info> HashTable;
+
+ file_type File;
+ HashTable Table;
+
+ OnDiskTable(file_type File, unsigned NumBuckets, unsigned NumEntries,
+ storage_type Buckets, storage_type Payload, storage_type Base,
+ const Info &InfoObj)
+ : File(File),
+ Table(NumBuckets, NumEntries, Buckets, Payload, Base, InfoObj) {}
+ };
+
+ struct MergedTable {
+ std::vector<file_type> Files;
+ llvm::DenseMap<internal_key_type, data_type> Data;
+ };
+
+ typedef llvm::PointerUnion<OnDiskTable*, MergedTable*> Table;
+ typedef llvm::TinyPtrVector<void*> TableVector;
+
+ /// \brief The current set of on-disk and merged tables.
+ /// We manually store the opaque value of the Table because TinyPtrVector
+ /// can't cope with holding a PointerUnion directly.
+ /// There can be at most one MergedTable in this vector, and if present,
+ /// it is the first table.
+ TableVector Tables;
+
+ /// \brief Files corresponding to overridden tables that we've not yet
+ /// discarded.
+ llvm::TinyPtrVector<file_type> PendingOverrides;
+
+ struct AsOnDiskTable {
+ typedef OnDiskTable *result_type;
+ result_type operator()(void *P) const {
+ return Table::getFromOpaqueValue(P).template get<OnDiskTable *>();
+ }
+ };
+ typedef llvm::mapped_iterator<TableVector::iterator, AsOnDiskTable>
+ table_iterator;
+ typedef llvm::iterator_range<table_iterator> table_range;
+
+ /// \brief The current set of on-disk tables.
+ table_range tables() {
+ auto Begin = Tables.begin(), End = Tables.end();
+ if (getMergedTable())
+ ++Begin;
+ return llvm::make_range(llvm::map_iterator(Begin, AsOnDiskTable()),
+ llvm::map_iterator(End, AsOnDiskTable()));
+ }
+
+ MergedTable *getMergedTable() const {
+ // If we already have a merged table, it's the first one.
+ return Tables.empty() ? nullptr : Table::getFromOpaqueValue(*Tables.begin())
+ .template dyn_cast<MergedTable*>();
+ }
+
+ /// \brief Delete all our current on-disk tables.
+ void clear() {
+ for (auto *T : tables())
+ delete T;
+ if (auto *M = getMergedTable())
+ delete M;
+ Tables.clear();
+ }
+
+ void removeOverriddenTables() {
+ llvm::DenseSet<file_type> Files;
+ Files.insert(PendingOverrides.begin(), PendingOverrides.end());
+ // Explicitly capture Files to work around an MSVC 2015 rejects-valid bug.
+ auto ShouldRemove = [&Files](void *T) -> bool {
+ auto *ODT = Table::getFromOpaqueValue(T).template get<OnDiskTable *>();
+ bool Remove = Files.count(ODT->File);
+ if (Remove)
+ delete ODT;
+ return Remove;
+ };
+ Tables.erase(std::remove_if(tables().begin().getCurrent(), Tables.end(),
+ ShouldRemove),
+ Tables.end());
+ PendingOverrides.clear();
+ }
+
+ void condense() {
+ MergedTable *Merged = getMergedTable();
+ if (!Merged)
+ Merged = new MergedTable;
+
+ // Read in all the tables and merge them together.
+ // FIXME: Be smarter about which tables we merge.
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ Info &InfoObj = HT.getInfoObj();
+
+ for (auto I = HT.data_begin(), E = HT.data_end(); I != E; ++I) {
+ auto *LocalPtr = I.getItem();
+
+ // FIXME: Don't rely on the OnDiskHashTable format here.
+ auto L = InfoObj.ReadKeyDataLength(LocalPtr);
+ const internal_key_type &Key = InfoObj.ReadKey(LocalPtr, L.first);
+ data_type_builder ValueBuilder(Merged->Data[Key]);
+ InfoObj.ReadDataInto(Key, LocalPtr + L.first, L.second,
+ ValueBuilder);
+ }
+
+ Merged->Files.push_back(ODT->File);
+ delete ODT;
+ }
+
+ Tables.clear();
+ Tables.push_back(Table(Merged).getOpaqueValue());
+ }
+
+ /// The generator is permitted to read our merged table.
+ template<typename ReaderInfo, typename WriterInfo>
+ friend class MultiOnDiskHashTableGenerator;
+
+public:
+ MultiOnDiskHashTable() {}
+ MultiOnDiskHashTable(MultiOnDiskHashTable &&O)
+ : Tables(std::move(O.Tables)),
+ PendingOverrides(std::move(O.PendingOverrides)) {
+ O.Tables.clear();
+ }
+ MultiOnDiskHashTable &operator=(MultiOnDiskHashTable &&O) {
+ if (&O == this)
+ return *this;
+ clear();
+ Tables = std::move(O.Tables);
+ O.Tables.clear();
+ PendingOverrides = std::move(O.PendingOverrides);
+ return *this;
+ }
+ ~MultiOnDiskHashTable() { clear(); }
+
+ /// \brief Add the table \p Data loaded from file \p File.
+ void add(file_type File, storage_type Data, Info InfoObj = Info()) {
+ using namespace llvm::support;
+ storage_type Ptr = Data;
+
+ uint32_t BucketOffset = endian::readNext<uint32_t, little, unaligned>(Ptr);
+
+ // Read the list of overridden files.
+ uint32_t NumFiles = endian::readNext<uint32_t, little, unaligned>(Ptr);
+ // FIXME: Add a reserve() to TinyPtrVector so that we don't need to make
+ // an additional copy.
+ llvm::SmallVector<file_type, 16> OverriddenFiles;
+ OverriddenFiles.reserve(NumFiles);
+ for (/**/; NumFiles != 0; --NumFiles)
+ OverriddenFiles.push_back(InfoObj.ReadFileRef(Ptr));
+ PendingOverrides.insert(PendingOverrides.end(), OverriddenFiles.begin(),
+ OverriddenFiles.end());
+
+ // Read the OnDiskChainedHashTable header.
+ storage_type Buckets = Data + BucketOffset;
+ auto NumBucketsAndEntries =
+ OnDiskTable::HashTable::readNumBucketsAndEntries(Buckets);
+
+ // Register the table.
+ Table NewTable = new OnDiskTable(File, NumBucketsAndEntries.first,
+ NumBucketsAndEntries.second,
+ Buckets, Ptr, Data, std::move(InfoObj));
+ Tables.push_back(NewTable.getOpaqueValue());
+ }
+
+ /// \brief Find and read the lookup results for \p EKey.
+ data_type find(const external_key_type &EKey) {
+ data_type Result;
+
+ if (!PendingOverrides.empty())
+ removeOverriddenTables();
+
+ if (Tables.size() > static_cast<unsigned>(Info::MaxTables))
+ condense();
+
+ internal_key_type Key = Info::GetInternalKey(EKey);
+ auto KeyHash = Info::ComputeHash(Key);
+
+ if (MergedTable *M = getMergedTable()) {
+ auto It = M->Data.find(Key);
+ if (It != M->Data.end())
+ Result = It->second;
+ }
+
+ data_type_builder ResultBuilder(Result);
+
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ auto It = HT.find_hashed(Key, KeyHash);
+ if (It != HT.end())
+ HT.getInfoObj().ReadDataInto(Key, It.getDataPtr(), It.getDataLen(),
+ ResultBuilder);
+ }
+
+ return Result;
+ }
+
+ /// \brief Read all the lookup results into a single value. This only makes
+ /// sense if merging values across keys is meaningful.
+ data_type findAll() {
+ data_type Result;
+ data_type_builder ResultBuilder(Result);
+
+ if (!PendingOverrides.empty())
+ removeOverriddenTables();
+
+ if (MergedTable *M = getMergedTable()) {
+ for (auto &KV : M->Data)
+ Info::MergeDataInto(KV.second, ResultBuilder);
+ }
+
+ for (auto *ODT : tables()) {
+ auto &HT = ODT->Table;
+ Info &InfoObj = HT.getInfoObj();
+ for (auto I = HT.data_begin(), E = HT.data_end(); I != E; ++I) {
+ auto *LocalPtr = I.getItem();
+
+ // FIXME: Don't rely on the OnDiskHashTable format here.
+ auto L = InfoObj.ReadKeyDataLength(LocalPtr);
+ const internal_key_type &Key = InfoObj.ReadKey(LocalPtr, L.first);
+ InfoObj.ReadDataInto(Key, LocalPtr + L.first, L.second, ResultBuilder);
+ }
+ }
+
+ return Result;
+ }
+};
+
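
The Tables member above uses an opaque-value trick: TinyPtrVector cannot hold a PointerUnion directly, so each union is flattened to void* on insert and reconstituted on access. The trick in isolation, with hypothetical payload types:

    #include "llvm/ADT/PointerUnion.h"
    #include "llvm/ADT/TinyPtrVector.h"

    struct A { int x; };
    struct B { int y; };
    using AB = llvm::PointerUnion<A *, B *>;

    void demo(llvm::TinyPtrVector<void *> &V, A *P) {
      V.push_back(AB(P).getOpaqueValue());       // store flattened
      AB U = AB::getFromOpaqueValue(V.front());  // rebuild the union
      if (auto *AP = U.dyn_cast<A *>())
        (void)AP->x;
    }
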
+/// \brief Writer for the on-disk hash table.
+template<typename ReaderInfo, typename WriterInfo>
+class MultiOnDiskHashTableGenerator {
+ typedef MultiOnDiskHashTable<ReaderInfo> BaseTable;
+ typedef llvm::OnDiskChainedHashTableGenerator<WriterInfo> Generator;
+
+ Generator Gen;
+
+public:
+ MultiOnDiskHashTableGenerator() : Gen() {}
+
+ void insert(typename WriterInfo::key_type_ref Key,
+ typename WriterInfo::data_type_ref Data, WriterInfo &Info) {
+ Gen.insert(Key, Data, Info);
+ }
+
+ void emit(llvm::SmallVectorImpl<char> &Out, WriterInfo &Info,
+ const BaseTable *Base) {
+ using namespace llvm::support;
+ llvm::raw_svector_ostream OutStream(Out);
+
+ // Write our header information.
+ {
+ endian::Writer<little> Writer(OutStream);
+
+ // Reserve four bytes for the bucket offset.
+ Writer.write<uint32_t>(0);
+
+ if (auto *Merged = Base ? Base->getMergedTable() : nullptr) {
+ // Write list of overridden files.
+ Writer.write<uint32_t>(Merged->Files.size());
+ for (const auto &F : Merged->Files)
+ Info.EmitFileRef(OutStream, F);
+
+ // Add all merged entries from Base to the generator.
+ for (auto &KV : Merged->Data) {
+ if (!Gen.contains(KV.first, Info))
+ Gen.insert(KV.first, Info.ImportData(KV.second), Info);
+ }
+ } else {
+ Writer.write<uint32_t>(0);
+ }
+ }
+
+ // Write the table itself.
+ uint32_t BucketOffset = Gen.Emit(OutStream, Info);
+
+ // Replace the first four bytes with the bucket offset.
+ endian::write32le(Out.data(), BucketOffset);
+ }
+};
+
+} // end namespace serialization
+} // end namespace clang
+
+#endif
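
The generator's emit() uses a classic backpatch: reserve four bytes up front, stream the overridden-file list and the table, then overwrite the placeholder with the bucket offset once it is known (little-endian, as in the code above). The pattern in isolation, as a hypothetical standalone helper:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    uint32_t emitWithBackpatch(std::vector<char> &Out) {
      size_t Patch = Out.size();
      Out.insert(Out.end(), 4, '\0');    // placeholder for the offset
      // ... write the overridden-file list and table payload here ...
      uint32_t BucketOffset = static_cast<uint32_t>(Out.size());
      unsigned char LE[4] = {
          static_cast<unsigned char>(BucketOffset),
          static_cast<unsigned char>(BucketOffset >> 8),
          static_cast<unsigned char>(BucketOffset >> 16),
          static_cast<unsigned char>(BucketOffset >> 24)};
      std::memcpy(Out.data() + Patch, LE, 4);  // patch the header
      return BucketOffset;
    }
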
diff --git a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 166471aaa1a6..a052d83f5afa 100644
--- a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -101,7 +101,7 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
else if (isa<BlockDecl>(D)) {
output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
}
-
+
NumBlocksUnreachable += unreachable;
NumBlocks += total;
std::string NameOfRootFunction = output.str();
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 557439b28881..c092610afe2b 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -23,7 +23,7 @@ using namespace clang;
using namespace ento;
namespace {
-class ArrayBoundChecker :
+class ArrayBoundChecker :
public Checker<check::Location> {
mutable std::unique_ptr<BuiltinBug> BT;
@@ -55,17 +55,17 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
ProgramStateRef state = C.getState();
// Get the size of the array.
- DefinedOrUnknownSVal NumElements
- = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.generateSink(StOutBound);
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
-
+
if (!BT)
BT.reset(new BuiltinBug(
this, "Out-of-bound array access",
@@ -82,7 +82,7 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
C.emitReport(std::move(report));
return;
}
-
+
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
C.addTransition(StInBound);
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index d8dc2aaa6363..f4de733bd794 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -26,15 +26,15 @@ using namespace clang;
using namespace ento;
namespace {
-class ArrayBoundCheckerV2 :
+class ArrayBoundCheckerV2 :
public Checker<check::Location> {
mutable std::unique_ptr<BuiltinBug> BT;
enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
-
+
void reportOOB(CheckerContext &C, ProgramStateRef errorState,
OOB_Kind kind) const;
-
+
public:
void checkLocation(SVal l, bool isLoad, const Stmt*S,
CheckerContext &C) const;
@@ -55,7 +55,7 @@ public:
NonLoc getByteOffset() const { return byteOffset.castAs<NonLoc>(); }
const SubRegion *getRegion() const { return baseRegion; }
-
+
static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
SValBuilder &svalBuilder,
SVal location);
@@ -65,12 +65,12 @@ public:
};
}
-static SVal computeExtentBegin(SValBuilder &svalBuilder,
+static SVal computeExtentBegin(SValBuilder &svalBuilder,
const MemRegion *region) {
while (true)
switch (region->getKind()) {
default:
- return svalBuilder.makeZeroArrayIndex();
+ return svalBuilder.makeZeroArrayIndex();
case MemRegion::SymbolicRegionKind:
// FIXME: improve this later by tracking symbolic lower bounds
// for symbolic regions.
@@ -94,22 +94,22 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// memory access is within the extent of the base region. Since we
// have some flexibility in defining the base region, we can achieve
// various levels of conservatism in our buffer overflow checking.
- ProgramStateRef state = checkerContext.getState();
+ ProgramStateRef state = checkerContext.getState();
ProgramStateRef originalState = state;
SValBuilder &svalBuilder = checkerContext.getSValBuilder();
- const RegionRawOffsetV2 &rawOffset =
+ const RegionRawOffsetV2 &rawOffset =
RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
if (!rawOffset.getRegion())
return;
- // CHECK LOWER BOUND: Is byteOffset < extent begin?
+ // CHECK LOWER BOUND: Is byteOffset < extent begin?
// If so, we are doing a load/store
// before the first valid offset in the memory region.
SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
-
+
if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
SVal lowerBound =
svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(), *NV,
@@ -118,7 +118,7 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
Optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
if (!lowerBoundToCheck)
return;
-
+
ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
std::tie(state_precedesLowerBound, state_withinLowerBound) =
state->assume(*lowerBoundToCheck);
@@ -128,12 +128,12 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
return;
}
-
+
// Otherwise, assume the constraint of the lower bound.
assert(state_withinLowerBound);
state = state_withinLowerBound;
}
-
+
do {
// CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
// we are doing a load/store after the last valid offset.
@@ -146,11 +146,11 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
= svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
extentVal.castAs<NonLoc>(),
svalBuilder.getConditionType());
-
+
Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
if (!upperboundToCheck)
break;
-
+
ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
std::tie(state_exceedsUpperBound, state_withinUpperBound) =
state->assume(*upperboundToCheck);
@@ -161,19 +161,19 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
return;
}
-
+
// If we are constrained enough to definitely exceed the upper bound, report.
if (state_exceedsUpperBound) {
assert(!state_withinUpperBound);
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
return;
}
-
+
assert(state_withinUpperBound);
state = state_withinUpperBound;
}
while (false);
-
+
if (state != originalState)
checkerContext.addTransition(state);
}
@@ -181,8 +181,8 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
ProgramStateRef errorState,
OOB_Kind kind) const {
-
- ExplodedNode *errorNode = checkerContext.generateSink(errorState);
+
+ ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
if (!errorNode)
return;
@@ -259,7 +259,7 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
{
const MemRegion *region = location.getAsRegion();
SVal offset = UndefinedVal();
-
+
while (region) {
switch (region->getKind()) {
default: {
@@ -280,7 +280,7 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
ASTContext &astContext = svalBuilder.getContext();
if (elemType->isIncompleteType())
return RegionRawOffsetV2();
-
+
// Update the offset.
offset = addValue(state,
getValue(offset, svalBuilder),
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index f763284aa2c9..26d42ba59c22 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -140,10 +140,10 @@ void NilArgChecker::warnIfNilExpr(const Expr *E,
ProgramStateRef State = C.getState();
if (State->isNull(C.getSVal(E)).isConstrainedTrue()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
generateBugReport(N, Msg, E->getSourceRange(), E, C);
}
-
+
}
}
@@ -156,8 +156,8 @@ void NilArgChecker::warnIfNilArg(CheckerContext &C,
ProgramStateRef State = C.getState();
if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
return;
-
- if (ExplodedNode *N = C.generateSink()) {
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
@@ -193,7 +193,7 @@ void NilArgChecker::warnIfNilArg(CheckerContext &C,
os << "' cannot be nil";
}
}
-
+
generateBugReport(N, os.str(), msg.getArgSourceRange(Arg),
msg.getArgExpr(Arg), C);
}
@@ -224,7 +224,7 @@ void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
static const unsigned InvalidArgIndex = UINT_MAX;
unsigned Arg = InvalidArgIndex;
bool CanBeSubscript = false;
-
+
if (Class == FC_NSString) {
Selector S = msg.getSelector();
@@ -307,8 +307,7 @@ void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
warnIfNilArg(C, msg, /* Arg */1, Class);
} else if (S == SetObjectForKeyedSubscriptSel) {
CanBeSubscript = true;
- Arg = 0;
- warnIfNilArg(C, msg, /* Arg */1, Class, CanBeSubscript);
+ Arg = 1;
} else if (S == RemoveObjectForKeySel) {
Arg = 0;
}
@@ -433,7 +432,7 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
-
+
ASTContext &Ctx = C.getASTContext();
if (!II)
II = &Ctx.Idents.get("CFNumberCreate");
@@ -489,23 +488,24 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
if (SourceSize == TargetSize)
return;
- // Generate an error. Only generate a sink if 'SourceSize < TargetSize';
- // otherwise generate a regular node.
+ // Generate an error. Only generate a sink error node
+ // if 'SourceSize < TargetSize'; otherwise generate a non-fatal error node.
//
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
//
- if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
- : C.addTransition()) {
+ ExplodedNode *N = SourceSize < TargetSize ? C.generateErrorNode()
+ : C.generateNonFatalErrorNode();
+ if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
-
+
os << (SourceSize == 8 ? "An " : "A ")
<< SourceSize << " bit integer is used to initialize a CFNumber "
"object that represents "
<< (TargetSize == 8 ? "an " : "a ")
<< TargetSize << " bit integer. ";
-
+
if (SourceSize < TargetSize)
os << (TargetSize - SourceSize)
<< " bits of the CFNumber value will be garbage." ;
@@ -549,7 +549,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
-
+
if (!BT) {
ASTContext &Ctx = C.getASTContext();
Retain = &Ctx.Idents.get("CFRetain");
@@ -589,7 +589,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
std::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
if (stateTrue && !stateFalse) {
- ExplodedNode *N = C.generateSink(stateTrue);
+ ExplodedNode *N = C.generateErrorNode(stateTrue);
if (!N)
return;
@@ -635,7 +635,7 @@ public:
void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
-
+
if (!BT) {
BT.reset(new APIMisuse(
this, "message incorrectly sent to class instead of class instance"));
@@ -646,7 +646,7 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
autoreleaseS = GetNullarySelector("autorelease", Ctx);
drainS = GetNullarySelector("drain", Ctx);
}
-
+
if (msg.isInstanceMessage())
return;
const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
@@ -655,8 +655,8 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
Selector S = msg.getSelector();
if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
return;
-
- if (ExplodedNode *N = C.addTransition()) {
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
@@ -665,7 +665,7 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
os << "' message should be sent to instances "
"of class '" << Class->getName()
<< "' and not the class directly";
-
+
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(msg.getSourceRange());
C.emitReport(std::move(report));
@@ -699,12 +699,12 @@ public:
bool
VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
const ObjCMethodDecl *MD = msg.getDecl();
-
+
if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
return false;
-
+
Selector S = msg.getSelector();
-
+
if (msg.isInstanceMessage()) {
// FIXME: Ideally we'd look at the receiver interface here, but that's not
// useful for init, because alloc returns 'id'. In theory, this could lead
@@ -751,7 +751,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
ASTContext &Ctx = C.getASTContext();
arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
- dictionaryWithObjectsAndKeysS =
+ dictionaryWithObjectsAndKeysS =
GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
@@ -789,18 +789,18 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
// Ignore pointer constants.
if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
continue;
-
+
// Ignore pointer types annotated with 'NSObject' attribute.
if (C.getASTContext().isObjCNSObjectType(ArgTy))
continue;
-
+
// Ignore CF references, which can be toll-free bridged.
if (coreFoundation::isCFObjectRef(ArgTy))
continue;
// Generate only one error node to use for all bug reports.
if (!errorNode.hasValue())
- errorNode = C.addTransition();
+ errorNode = C.generateNonFatalErrorNode();
if (!errorNode.getValue())
continue;
@@ -861,7 +861,7 @@ static bool isKnownNonNilCollectionType(QualType T) {
const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
-
+
const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
if (!ID)
return false;
@@ -992,9 +992,7 @@ static bool alreadyExecutedAtLeastOneLoopIteration(const ExplodedNode *N,
ProgramPoint P = N->getLocation();
if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
- if (BE->getSrc()->getLoopTarget() == FCS)
- return true;
- return false;
+ return BE->getSrc()->getLoopTarget() == FCS;
}
// Keep looking for a block edge.
@@ -1023,9 +1021,9 @@ void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
State = checkElementNonNil(C, State, FCS);
State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/true);
}
-
+
if (!State)
- C.generateSink();
+ C.generateSink(C.getState(), C.getPredecessor());
else if (State != C.getState())
C.addTransition(State);
}
@@ -1038,11 +1036,8 @@ bool ObjCLoopChecker::isCollectionCountMethod(const ObjCMethodCall &M,
CountSelectorII = &C.getASTContext().Idents.get("count");
// If the method returns collection count, record the value.
- if (S.isUnarySelector() &&
- (S.getIdentifierInfoForSlot(0) == CountSelectorII))
- return true;
-
- return false;
+ return S.isUnarySelector() &&
+ (S.getIdentifierInfoForSlot(0) == CountSelectorII);
}
void ObjCLoopChecker::checkPostObjCMessage(const ObjCMethodCall &M,
@@ -1069,7 +1064,7 @@ void ObjCLoopChecker::checkPostObjCMessage(const ObjCMethodCall &M,
// a call to "count" and add it to the map.
if (!isCollectionCountMethod(M, C))
return;
-
+
const Expr *MsgExpr = M.getOriginExpr();
SymbolRef CountS = C.getSVal(MsgExpr).getAsSymbol();
if (CountS) {
diff --git a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index e945c38e77c7..f26f73129e78 100644
--- a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -32,7 +32,7 @@ namespace {
void BoolAssignmentChecker::emitReport(ProgramStateRef state,
CheckerContext &C) const {
- if (ExplodedNode *N = C.addTransition(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
C.emitReport(llvm::make_unique<BugReport>(*BT, BT->getDescription(), N));
@@ -42,45 +42,45 @@ void BoolAssignmentChecker::emitReport(ProgramStateRef state,
static bool isBooleanType(QualType Ty) {
if (Ty->isBooleanType()) // C++ or C99
return true;
-
+
if (const TypedefType *TT = Ty->getAs<TypedefType>())
return TT->getDecl()->getName() == "BOOL" || // Objective-C
TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
TT->getDecl()->getName() == "Boolean"; // MacTypes.h
-
+
return false;
}
void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
CheckerContext &C) const {
-
+
// We are only interested in stores into Booleans.
const TypedValueRegion *TR =
dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
-
+
if (!TR)
return;
-
+
QualType valTy = TR->getValueType();
-
+
if (!isBooleanType(valTy))
return;
-
+
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
if (!DV)
return;
-
+
// Check if the assigned value meets our criteria for correctness. It must
// be a value that is either 0 or 1. One way to check this is to see if
// the value is possibly < 0 (for a negative value) or greater than 1.
- ProgramStateRef state = C.getState();
+ ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
ConstraintManager &CM = C.getConstraintManager();
-
- // First, ensure that the value is >= 0.
+
+ // First, ensure that the value is >= 0.
DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
SVal greaterThanOrEqualToZeroVal =
svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
@@ -91,13 +91,13 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
if (!greaterThanEqualToZero) {
// The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
+ // This means we cannot properly reason about it.
return;
}
-
+
ProgramStateRef stateLT, stateGE;
std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
-
+
// Is it possible for the value to be less than zero?
if (stateLT) {
// It is possible for the value to be less than zero. We only
@@ -106,15 +106,15 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// value is underconstrained and there is nothing left to be done.
if (!stateGE)
emitReport(stateLT, C);
-
+
// In either case, we are done.
return;
}
-
+
// If we reach here, it must be the case that the value is constrained
// to only be >= 0.
assert(stateGE == state);
-
+
// At this point we know that the value is >= 0.
// Now check to ensure that the value is <= 1.
DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
@@ -127,13 +127,13 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
if (!lessThanEqToOne) {
// The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
+ // This means we cannot properly reason about it.
return;
}
-
+
ProgramStateRef stateGT, stateLE;
std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
-
+
// Is it possible for the value to be greater than one?
if (stateGT) {
// It is possible for the value to be greater than one. We only
@@ -142,11 +142,11 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// value is underconstrained and there is nothing left to be done.
if (!stateLE)
emitReport(stateGT, C);
-
+
// In either case, we are done.
return;
}
-
+
// If we reach here, it must be the case that the value is constrained
// to only be <= 1.
assert(stateLE == state);
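For reference, a condensed example of the code this checker flags (hypothetical; BOOL stands in for the Objective-C typedef that isBooleanType() recognizes):

typedef signed char BOOL;

void assign() {
  BOOL b = -1;  // flagged: the value is provably outside {0, 1}
  BOOL c = 1;   // not flagged
  (void)b; (void)c;
}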
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 104a81eac4c7..dab2f61229a0 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -41,11 +41,12 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
default:
return false;
+ case Builtin::BI__builtin_unpredictable:
case Builtin::BI__builtin_expect:
case Builtin::BI__builtin_assume_aligned:
case Builtin::BI__builtin_addressof: {
- // For __builtin_expect and __builtin_assume_aligned, just return the value
- // of the subexpression.
+ // For __builtin_unpredictable, __builtin_expect, and
+ // __builtin_assume_aligned, just return the value of the subexpression.
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
assert (CE->arg_begin() != CE->arg_end());
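Context for the hunk above: the analyzer models these builtins as identity functions on their first argument, so __builtin_unpredictable simply joins the existing list. A hypothetical illustration (GCC/Clang builtins):

bool isZero(int x) {
  // Analyzed exactly like "if (x == 0)": the hint influences code
  // generation, not the analyzer's path splitting.
  if (__builtin_expect(x == 0, 0))
    return true;
  return false;
}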
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 9fb22ecc852b..58ff48d6a22a 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -33,12 +33,14 @@ add_clang_library(clangStaticAnalyzerCheckers
DirectIvarAssignment.cpp
DivZeroChecker.cpp
DynamicTypePropagation.cpp
+ DynamicTypeChecker.cpp
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
GenericTaintChecker.cpp
IdenticalExprChecker.cpp
IvarInvalidationChecker.cpp
LLVMConventionsChecker.cpp
+ LocalizationChecker.cpp
MacOSKeychainAPIChecker.cpp
MacOSXAPIChecker.cpp
MallocChecker.cpp
@@ -48,12 +50,14 @@ add_clang_library(clangStaticAnalyzerCheckers
NSErrorChecker.cpp
NoReturnFunctionChecker.cpp
NonNullParamChecker.cpp
+ NullabilityChecker.cpp
ObjCAtSyncChecker.cpp
ObjCContainersASTChecker.cpp
ObjCContainersChecker.cpp
ObjCMissingSuperCallChecker.cpp
ObjCSelfInitChecker.cpp
ObjCUnusedIVarsChecker.cpp
+ PaddingChecker.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
@@ -73,6 +77,7 @@ add_clang_library(clangStaticAnalyzerCheckers
UndefinedAssignmentChecker.cpp
UnixAPIChecker.cpp
UnreachableCodeChecker.cpp
+ VforkChecker.cpp
VLASizeChecker.cpp
VirtualCallChecker.cpp
@@ -83,5 +88,6 @@ add_clang_library(clangStaticAnalyzerCheckers
clangAST
clangAnalysis
clangBasic
+ clangLex
clangStaticAnalyzerCore
)
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 54b12410aa56..5d78d9b02e6b 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -65,7 +65,7 @@ public:
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
bool wantsRegionChangeUpdate(ProgramStateRef state) const;
- ProgramStateRef
+ ProgramStateRef
checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
@@ -92,7 +92,7 @@ public:
void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
void evalstrLengthCommon(CheckerContext &C,
- const CallExpr *CE,
+ const CallExpr *CE,
bool IsStrnlen = false) const;
void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
@@ -137,15 +137,16 @@ public:
SVal Buf,
bool hypothetical = false) const;
- const StringLiteral *getCStringLiteral(CheckerContext &C,
+ const StringLiteral *getCStringLiteral(CheckerContext &C,
ProgramStateRef &state,
- const Expr *expr,
+ const Expr *expr,
SVal val) const;
static ProgramStateRef InvalidateBuffer(CheckerContext &C,
ProgramStateRef state,
const Expr *Ex, SVal V,
- bool IsSourceBuffer);
+ bool IsSourceBuffer,
+ const Expr *Size);
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
@@ -193,6 +194,14 @@ public:
ProgramStateRef state,
NonLoc left,
NonLoc right) const;
+
+ // Return true if the destination buffer of the copy function may be in bounds.
+ // Expects SVal of Size to be positive and unsigned.
+ // Expects SVal of FirstBuf to be a FieldRegion.
+ static bool IsFirstBufInBound(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *FirstBuf,
+ const Expr *Size);
};
} //end anonymous namespace
@@ -229,7 +238,7 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
if (!Filter.CheckCStringNullArg)
return nullptr;
- ExplodedNode *N = C.generateSink(stateNull);
+ ExplodedNode *N = C.generateErrorNode(stateNull);
if (!N)
return nullptr;
@@ -282,7 +291,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
SValBuilder &svalBuilder = C.getSValBuilder();
- SVal Extent =
+ SVal Extent =
svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
@@ -292,7 +301,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.generateSink(StOutBound);
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return nullptr;
@@ -327,7 +336,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
C.emitReport(std::move(report));
return nullptr;
}
-
+
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
@@ -442,7 +451,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
return state;
// Are the two values the same?
- SValBuilder &svalBuilder = C.getSValBuilder();
+ SValBuilder &svalBuilder = C.getSValBuilder();
std::tie(stateTrue, stateFalse) =
state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
@@ -489,7 +498,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
+ SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
First->getType());
Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
if (!FirstStartLoc)
@@ -525,7 +534,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
const Stmt *First, const Stmt *Second) const {
- ExplodedNode *N = C.generateSink(state);
+ ExplodedNode *N = C.generateErrorNode(state);
if (!N)
return;
@@ -568,7 +577,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
} else {
// Try switching the operands. (The order of these two assignments is
// important!)
- maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
sizeTy);
left = right;
}
@@ -585,7 +594,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
if (stateOverflow && !stateOkay) {
// We have an overflow. Emit a bug report.
- ExplodedNode *N = C.generateSink(stateOverflow);
+ ExplodedNode *N = C.generateErrorNode(stateOverflow);
if (!N)
return nullptr;
@@ -706,7 +715,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
if (!Filter.CheckCStringNotNullTerm)
return UndefinedVal();
- if (ExplodedNode *N = C.addTransition(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
if (!BT_NotCString)
BT_NotCString.reset(new BuiltinBug(
Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
@@ -723,7 +732,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
report->addRange(Ex->getSourceRange());
- C.emitReport(std::move(report));
+ C.emitReport(std::move(report));
}
return UndefinedVal();
@@ -766,7 +775,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
if (!Filter.CheckCStringNotNullTerm)
return UndefinedVal();
- if (ExplodedNode *N = C.addTransition(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
if (!BT_NotCString)
BT_NotCString.reset(new BuiltinBug(
Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
@@ -787,7 +796,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
report->addRange(Ex->getSourceRange());
- C.emitReport(std::move(report));
+ C.emitReport(std::move(report));
}
return UndefinedVal();
@@ -814,10 +823,74 @@ const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
return strRegion->getStringLiteral();
}
+bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *FirstBuf,
+ const Expr *Size) {
+ // If we do not know that the buffer is long enough, we return 'true'.
+ // Otherwise the parent region of this field region would also get
+ // invalidated, which would lead to warnings based on an unknown state.
+
+ // Originally copied from CheckBufferAccess and CheckLocation.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ QualType sizeTy = Size->getType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal BufVal = state->getSVal(FirstBuf, LCtx);
+
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ if (!Length)
+ return true; // cf top comment.
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+ NonLoc LastOffset =
+ svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy)
+ .castAs<NonLoc>();
+
+ // Check that the first buffer is sufficiently long.
+ SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ Optional<Loc> BufLoc = BufStart.getAs<Loc>();
+ if (!BufLoc)
+ return true; // cf top comment.
+
+ SVal BufEnd =
+ svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc, LastOffset, PtrTy);
+
+ // Check for out of bound array element access.
+ const MemRegion *R = BufEnd.getAsRegion();
+ if (!R)
+ return true; // cf top comment.
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return true; // cf top comment.
+
+ assert(ER->getValueType() == C.getASTContext().CharTy &&
+ "IsFirstBufInBound should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+ SVal Extent =
+ svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+ DefinedOrUnknownSVal ExtentSize = Extent.castAs<DefinedOrUnknownSVal>();
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ExtentSize, true);
+
+ return static_cast<bool>(StInBound);
+}
+
ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
ProgramStateRef state,
const Expr *E, SVal V,
- bool IsSourceBuffer) {
+ bool IsSourceBuffer,
+ const Expr *Size) {
Optional<Loc> L = V.getAs<Loc>();
if (!L)
return state;
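A worked reading of the helper above, with hypothetical values: for a copy with Size == 4 into FirstBuf, the last byte written sits at offset 3, so the code builds buf + 3 and asks assumeInBound() whether that element fits within the extent of the buffer's super-region. Whenever a step cannot be answered, the helper returns true, which per the top comment keeps the super-region out of the invalidation below.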
@@ -843,13 +916,23 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
// Invalidate and escape only indirect regions accessible through the source
// buffer.
if (IsSourceBuffer) {
- ITraits.setTrait(R,
+ ITraits.setTrait(R,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
ITraits.setTrait(R, RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
CausesPointerEscape = true;
+ } else {
+ const MemRegion::Kind& K = R->getKind();
+ if (K == MemRegion::FieldRegionKind)
+ if (Size && IsFirstBufInBound(C, state, E, Size)) {
+ // If the destination buffer is a field region and the access is in bounds,
+ // do not invalidate its super region.
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ }
}
- return state->invalidateRegions(R, E, C.blockCount(), LCtx,
+ return state->invalidateRegions(R, E, C.blockCount(), LCtx,
CausesPointerEscape, nullptr, nullptr,
&ITraits);
}
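The motivation for the new TK_DoNotInvalidateSuperRegion path, as a hypothetical example (names invented):

#include <cstring>

struct Packet { int len; char buf[16]; };

void fill(Packet &p, const char *src) {
  p.len = 3;
  // An in-bounds write confined to p.buf. Previously all of p was
  // invalidated here, losing the binding p.len == 3; with the new trait
  // only p.buf is invalidated and len stays known.
  std::memcpy(p.buf, src, 3);
}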
@@ -901,7 +984,7 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
-void CStringChecker::evalCopyCommon(CheckerContext &C,
+void CStringChecker::evalCopyCommon(CheckerContext &C,
const CallExpr *CE,
ProgramStateRef state,
const Expr *Size, const Expr *Dest,
@@ -941,7 +1024,7 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// Get the value of the Src.
SVal srcVal = state->getSVal(Source, LCtx);
-
+
// Ensure the source is not null. If it is NULL there will be a
// NULL pointer dereference.
state = checkNonNull(C, state, Source, srcVal);
@@ -959,11 +1042,11 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
if (!state)
return;
- // If this is mempcpy, get the byte after the last byte copied and
+ // If this is mempcpy, get the byte after the last byte copied and
// bind the expr.
if (IsMempcpy) {
loc::MemRegionVal destRegVal = destVal.castAs<loc::MemRegionVal>();
-
+
// Get the length to copy.
if (Optional<NonLoc> lenValNonLoc = sizeVal.getAs<NonLoc>()) {
// Get the byte after the last byte copied.
@@ -972,11 +1055,11 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
loc::MemRegionVal DestRegCharVal = SvalBuilder.evalCast(destRegVal,
CharPtrTy, Dest->getType()).castAs<loc::MemRegionVal>();
- SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
+ SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
DestRegCharVal,
- *lenValNonLoc,
+ *lenValNonLoc,
Dest->getType());
-
+
// The byte after the last byte copied is the return value.
state = state->BindExpr(CE, LCtx, lastElement);
} else {
@@ -999,13 +1082,13 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
- /*IsSourceBuffer*/false);
+ state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
+ /*IsSourceBuffer*/false, Size);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
- /*IsSourceBuffer*/true);
+ state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
+ /*IsSourceBuffer*/true, nullptr);
C.addTransition(state);
}
@@ -1032,7 +1115,7 @@ void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
// The return value is a pointer to the byte following the last written byte.
const Expr *Dest = CE->getArg(0);
ProgramStateRef state = C.getState();
-
+
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
}
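For reference, the library semantics evalMempcpy models (mempcpy is a GNU extension; this sketch assumes _GNU_SOURCE is in effect):

#include <cstring>

void demo(char *dst, const char *src) {
  // mempcpy returns the byte after the last byte copied, i.e. dst + 3.
  // That is exactly the value bound to the call expression above.
  char *end = static_cast<char *>(mempcpy(dst, src, 3));
  (void)end;
}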
@@ -1053,7 +1136,7 @@ void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
return;
// void bcopy(const void *src, void *dst, size_t n);
- evalCopyCommon(C, CE, C.getState(),
+ evalCopyCommon(C, CE, C.getState(),
CE->getArg(2), CE->getArg(1), CE->getArg(0));
}
@@ -1244,7 +1327,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
state, BO_LE, resultNL, *strLengthNL, cmpTy)
.castAs<DefinedOrUnknownSVal>(), true);
}
-
+
if (maxlenValNL) {
state = state->assume(C.getSValBuilder().evalBinOpNN(
state, BO_LE, resultNL, *maxlenValNL, cmpTy)
@@ -1275,8 +1358,8 @@ void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
return;
// char *strcpy(char *restrict dst, const char *restrict src);
- evalStrcpyCommon(C, CE,
- /* returnEnd = */ false,
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
/* isBounded = */ false,
/* isAppending = */ false);
}
@@ -1286,8 +1369,8 @@ void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
return;
// char *strncpy(char *restrict dst, const char *restrict src, size_t n);
- evalStrcpyCommon(C, CE,
- /* returnEnd = */ false,
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
/* isBounded = */ true,
/* isAppending = */ false);
}
@@ -1297,8 +1380,8 @@ void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
return;
// char *stpcpy(char *restrict dst, const char *restrict src);
- evalStrcpyCommon(C, CE,
- /* returnEnd = */ true,
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ true,
/* isBounded = */ false,
/* isAppending = */ false);
}
@@ -1308,8 +1391,8 @@ void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
return;
//char *strcat(char *restrict s1, const char *restrict s2);
- evalStrcpyCommon(C, CE,
- /* returnEnd = */ false,
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
/* isBounded = */ false,
/* isAppending = */ true);
}
@@ -1319,8 +1402,8 @@ void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
return;
//char *strncat(char *restrict s1, const char *restrict s2, size_t n);
- evalStrcpyCommon(C, CE,
- /* returnEnd = */ false,
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
/* isBounded = */ true,
/* isAppending = */ true);
}
@@ -1515,7 +1598,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
Optional<NonLoc> srcStrLengthNL = amountCopied.getAs<NonLoc>();
Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
-
+
// If we know both string lengths, we might know the final string length.
if (srcStrLengthNL && dstStrLengthNL) {
// Make sure the two lengths together don't overflow a size_t.
@@ -1523,7 +1606,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (!state)
return;
- finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
+ finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
*dstStrLengthNL, sizeTy);
}
@@ -1586,7 +1669,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
*maxLastNL, ptrTy);
- state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
+ state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
boundWarning);
if (!state)
return;
@@ -1620,11 +1703,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
state = InvalidateBuffer(C, state, Dst, *dstRegVal,
- /*IsSourceBuffer*/false);
+ /*IsSourceBuffer*/false, nullptr);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true);
+ state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true,
+ nullptr);
// Set the C string length of the destination, if we know it.
if (isBounded && !isAppending) {
@@ -1667,7 +1751,7 @@ void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
}
-void CStringChecker::evalStrcasecmp(CheckerContext &C,
+void CStringChecker::evalStrcasecmp(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
@@ -1676,7 +1760,7 @@ void CStringChecker::evalStrcasecmp(CheckerContext &C,
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
}
-void CStringChecker::evalStrncasecmp(CheckerContext &C,
+void CStringChecker::evalStrncasecmp(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
@@ -1848,7 +1932,7 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
State = InvalidateBuffer(C, State, SearchStrPtr, Result,
- /*IsSourceBuffer*/false);
+ /*IsSourceBuffer*/false, nullptr);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
@@ -1915,7 +1999,7 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
evalFunction = &CStringChecker::evalBcopy;
else if (C.isCLibraryFunction(FDecl, "bcmp"))
evalFunction = &CStringChecker::evalMemcmp;
-
+
// If the callee isn't a string function, let another checker handle it.
if (!evalFunction)
return false;
@@ -1929,10 +2013,7 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// properties are held. However, if the user chooses to turn off some of these
// checks, we ignore the issues and leave the call evaluation to a generic
// handler.
- if (!C.isDifferent())
- return false;
-
- return true;
+ return C.isDifferent();
}
void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
@@ -1975,7 +2056,7 @@ bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
return !Entries.isEmpty();
}
-ProgramStateRef
+ProgramStateRef
CStringChecker::checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 26423b7368ef..145908376996 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -40,6 +40,7 @@ class CallAndMessageChecker
: public Checker< check::PreStmt<CallExpr>,
check::PreStmt<CXXDeleteExpr>,
check::PreObjCMessage,
+ check::ObjCMessageNil,
check::PreCall > {
mutable std::unique_ptr<BugType> BT_call_null;
mutable std::unique_ptr<BugType> BT_call_undef;
@@ -60,6 +61,12 @@ public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+
+ /// Fill in the return value that results from messaging nil based on the
+ /// return type and architecture and diagnose if the return value will be
+ /// garbage.
+ void checkObjCMessageNil(const ObjCMethodCall &msg, CheckerContext &C) const;
+
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
private:
@@ -82,7 +89,7 @@ private:
BT.reset(new BuiltinBug(this, desc));
}
bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
- const SourceRange &ArgRange,
+ SourceRange ArgRange,
const Expr *ArgEx, std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl, const char *BD) const;
};
@@ -90,7 +97,7 @@ private:
void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
const Expr *BadE) {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
@@ -131,7 +138,7 @@ static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
bool CallAndMessageChecker::uninitRefOrPointer(CheckerContext &C,
const SVal &V,
- const SourceRange &ArgRange,
+ SourceRange ArgRange,
const Expr *ArgEx,
std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl,
@@ -162,7 +169,7 @@ bool CallAndMessageChecker::uninitRefOrPointer(CheckerContext &C,
const ProgramStateRef State = C.getState();
const SVal PSV = State->getSVal(SValMemRegion);
if (PSV.isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
auto R = llvm::make_unique<BugReport>(*BT, Message, N);
R->addRange(ArgRange);
@@ -193,7 +200,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return true;
if (V.isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
// Generate a report for this bug.
@@ -258,7 +265,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
D->getStore());
if (F.Find(D->getRegion())) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
@@ -331,7 +338,7 @@ void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
SVal Arg = C.getSVal(DE->getArgument());
if (Arg.isUndef()) {
StringRef Desc;
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
if (!BT_cxx_delete_undef)
@@ -388,7 +395,7 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
// the function.
unsigned Params = FD->getNumParams();
if (Call.getNumArgs() < Params) {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
@@ -436,7 +443,7 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
SVal recVal = msg.getReceiverSVal();
if (recVal.isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
BugType *BT = nullptr;
switch (msg.getMessageKind()) {
case OCM_Message:
@@ -471,22 +478,14 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
C.emitReport(std::move(R));
}
return;
- } else {
- // Bifurcate the state into nil and non-nil ones.
- DefinedOrUnknownSVal receiverVal = recVal.castAs<DefinedOrUnknownSVal>();
-
- ProgramStateRef state = C.getState();
- ProgramStateRef notNilState, nilState;
- std::tie(notNilState, nilState) = state->assume(receiverVal);
-
- // Handle receiver must be nil.
- if (nilState && !notNilState) {
- HandleNilReceiver(C, state, msg);
- return;
- }
}
}
+void CallAndMessageChecker::checkObjCMessageNil(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ HandleNilReceiver(C, C.getState(), msg);
+}
+
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
const ObjCMethodCall &msg,
ExplodedNode *N) const {
@@ -523,7 +522,8 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
return (triple.getVendor() == llvm::Triple::Apple &&
- (triple.isiOS() || !triple.isMacOSXVersionLT(10,5)));
+ (triple.isiOS() || triple.isWatchOS() ||
+ !triple.isMacOSXVersionLT(10,5)));
}
void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
@@ -560,7 +560,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
Ctx.LongDoubleTy == CanRetTy ||
Ctx.LongLongTy == CanRetTy ||
Ctx.UnsignedLongLongTy == CanRetTy)))) {
- if (ExplodedNode *N = C.generateSink(state, nullptr, &Tag))
+ if (ExplodedNode *N = C.generateErrorNode(state, &Tag))
emitNilReceiverBug(C, Msg, N);
return;
}
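The receiver-state bifurcation deleted above now happens once in the analyzer core, which dispatches to the new callback when the receiver is provably nil. A minimal sketch of a checker opting in (hypothetical checker; assumes the check::ObjCMessageNil trait introduced by this patch):

namespace {
class NilReceiverLogger : public Checker<check::ObjCMessageNil> {
public:
  void checkObjCMessageNil(const ObjCMethodCall &M, CheckerContext &C) const {
    // The receiver is definitely nil on this path; for this message,
    // check{Pre,Post}ObjCMessage and check{Pre,Post}Call are not invoked.
  }
};
} // end anonymous namespace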
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 0d683f96df08..2337400750c7 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -82,10 +82,7 @@ static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
if (Left.isNegative())
return false;
- if (Left % FlexSize == 0)
- return true;
-
- return false;
+ return Left % FlexSize == 0;
}
void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
@@ -131,7 +128,7 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
return;
- if (ExplodedNode *errorNode = C.generateSink()) {
+ if (ExplodedNode *errorNode = C.generateErrorNode()) {
if (!BT)
BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
"Cast a region whose size is not a multiple"
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index ba3024d78a19..fa7841356efb 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -56,7 +56,7 @@ void CastToStructChecker::checkPreStmt(const CastExpr *CE,
// Now the cast-to-type is struct pointer, the original type is not void*.
if (!OrigPointeeTy->isRecordType()) {
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Cast from non-struct type to struct type",
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 12eb0bde28ad..25caa0002598 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -51,7 +51,7 @@ static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
if (E->getDecl()->getIdentifier() == SelfII)
if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
ME->getNumArgs() == 1 &&
- ME->getArg(0)->isNullPointerConstant(Ctx,
+ ME->getArg(0)->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull))
return true;
@@ -61,7 +61,7 @@ static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
if (ObjCPropertyRefExpr *PRE =
dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
- if (BO->getRHS()->isNullPointerConstant(Ctx,
+ if (BO->getRHS()->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull)) {
// This is only a 'release' if the property kind is not
// 'assign'.
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index e0c113c86262..60f16188bcf8 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -86,8 +86,7 @@ public:
// Helpers.
bool checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD);
- typedef void (WalkAST::*FnCheck)(const CallExpr *,
- const FunctionDecl *);
+ typedef void (WalkAST::*FnCheck)(const CallExpr *, const FunctionDecl *);
// Checker-specific methods.
void checkLoopConditionForFloat(const ForStmt *FS);
@@ -115,7 +114,7 @@ void WalkAST::VisitChildren(Stmt *S) {
}
void WalkAST::VisitCallExpr(CallExpr *CE) {
- // Get the callee.
+ // Get the callee.
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
@@ -307,7 +306,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
if (!filter.check_gets)
return;
-
+
const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
if (!FPT)
return;
@@ -434,18 +433,18 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
.Case("mkdtemp", std::make_pair(0,-1))
.Case("mkstemps", std::make_pair(0,1))
.Default(std::make_pair(-1, -1));
-
+
assert(ArgSuffix.first >= 0 && "Unsupported function");
// Check if the number of arguments is consistent with our expectations.
unsigned numArgs = CE->getNumArgs();
if ((signed) numArgs <= ArgSuffix.first)
return;
-
+
const StringLiteral *strArg =
dyn_cast<StringLiteral>(CE->getArg((unsigned)ArgSuffix.first)
->IgnoreParenImpCasts());
-
+
// Currently we only handle string literals. It is possible to do better,
// either by looking at references to const variables, or by doing real
// flow analysis.
@@ -470,13 +469,13 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
suffix = (unsigned) Result.getZExtValue();
n = (n > suffix) ? n - suffix : 0;
}
-
+
for (unsigned i = 0; i < n; ++i)
if (str[i] == 'X') ++numX;
-
+
if (numX >= 6)
return;
-
+
// Issue a warning.
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
@@ -502,13 +501,13 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
// Check: Any use of 'strcpy' is insecure.
//
-// CWE-119: Improper Restriction of Operations within
-// the Bounds of a Memory Buffer
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
if (!filter.check_strcpy)
return;
-
+
if (!checkCall_strCommon(CE, FD))
return;
@@ -529,8 +528,8 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
// Check: Any use of 'strcat' is insecure.
//
-// CWE-119: Improper Restriction of Operations within
-// the Bounds of a Memory Buffer
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
if (!filter.check_strcpy)
@@ -684,7 +683,7 @@ void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
if (!filter.check_UncheckedReturn)
return;
-
+
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return;
@@ -749,7 +748,7 @@ namespace {
class SecuritySyntaxChecker : public Checker<check::ASTCodeBody> {
public:
ChecksFilter filter;
-
+
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
WalkAST walker(BR, mgr.getAnalysisDeclContext(D), filter);
diff --git a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index 81a20063f972..e079a8cb12be 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -55,8 +55,8 @@ void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
QualType T = E->getTypeOfArgument();
if (T->isPointerType()) {
- // Many false positives have the form 'sizeof *p'. This is reasonable
- // because people know what they are doing when they intentionally
+ // Many false positives have the form 'sizeof *p'. This is reasonable
+ // because people know what they are doing when they intentionally
// dereference the pointer.
Expr *ArgEx = E->getArgumentExpr();
if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 956dca7d9258..37b84480f892 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -38,6 +38,7 @@ class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::PostStmt<DeclStmt>,
check::PreObjCMessage,
check::PostObjCMessage,
+ check::ObjCMessageNil,
check::PreCall,
check::PostCall,
check::BranchCondition,
@@ -95,6 +96,15 @@ public:
/// check::PostObjCMessage
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+ /// \brief Visit an Objective-C message whose receiver is nil.
+ ///
+ /// This will be called when the analyzer core processes a method call whose
+ /// receiver is definitely nil. In this case, check{Pre/Post}ObjCMessage and
+ /// check{Pre/Post}Call will not be called.
+ ///
+ /// check::ObjCMessageNil
+ void checkObjCMessageNil(const ObjCMethodCall &M, CheckerContext &C) const {}
+
/// \brief Pre-visit an abstract "call" event.
///
/// This is used for checkers that want to check arguments or attributed
@@ -222,7 +232,7 @@ public:
/// changed, this allows the analyzer core to skip the more expensive
/// #checkRegionChanges when no checkers are tracking any state.
bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
-
+
/// \brief Called when the contents of one or more regions change.
///
/// This can occur in many different ways: an explicit bind, a blanket
@@ -246,7 +256,7 @@ public:
/// #wantsRegionChangeUpdate returns \c true.
///
/// check::RegionChanges
- ProgramStateRef
+ ProgramStateRef
checkRegionChanges(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
@@ -259,12 +269,12 @@ public:
///
/// This notifies the checkers about pointer escape, which occurs whenever
/// the analyzer cannot track the symbol any more. For example, as a
- /// result of assigning a pointer into a global or when it's passed to a
+ /// result of assigning a pointer into a global or when it's passed to a
/// function call the analyzer cannot model.
- ///
+ ///
/// \param State The state at the point of escape.
/// \param Escaped The list of escaped symbols.
- /// \param Call The corresponding CallEvent, if the symbols escape as
+ /// \param Call The corresponding CallEvent, if the symbols escape as
/// parameters to the given call.
/// \param Kind How the symbols have escaped.
/// \returns Checkers can modify the state by returning a new state.
@@ -285,7 +295,7 @@ public:
PointerEscapeKind Kind) const {
return State;
}
-
+
/// check::Event<ImplicitNullDerefEvent>
void checkEvent(ImplicitNullDerefEvent Event) const {}
diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td
index d1d6ac277ffe..8133d290d886 100644
--- a/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -13,6 +13,11 @@ include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
// Packages.
//===----------------------------------------------------------------------===//
+// The Alpha package is for checkers that have too many false positives to be
+// turned on by default. The hierarchy under Alpha should be organized in the
+// hierarchy checkers would have had if they were truly at the top level.
+// (For example, a Cocoa-specific checker that is alpha should be in
+// alpha.osx.cocoa).
def Alpha : Package<"alpha">;
def Core : Package<"core">;
@@ -20,16 +25,33 @@ def CoreBuiltin : Package<"builtin">, InPackage<Core>;
def CoreUninitialized : Package<"uninitialized">, InPackage<Core>;
def CoreAlpha : Package<"core">, InPackage<Alpha>, Hidden;
+// The OptIn package is for checkers that are not alpha and that would normally
+// be on by default but where the driver does not have enough information to
+// determine when they are applicable. For example, localizability checkers fit
+// this criterion because the driver cannot determine whether a project is
+// localized or not -- this is best determined at the IDE or build-system level.
+//
+// The checker hierarchy under OptIn should mirror that in Alpha: checkers
+// should be organized as if they were at the top level.
+//
+// Note: OptIn is *not* intended for checkers that are too noisy to be on by
+// default. Such checkers belong in the alpha package.
+def OptIn : Package<"optin">;
+
+def Nullability : Package<"nullability">;
+
def Cplusplus : Package<"cplusplus">;
def CplusplusAlpha : Package<"cplusplus">, InPackage<Alpha>, Hidden;
def DeadCode : Package<"deadcode">;
def DeadCodeAlpha : Package<"deadcode">, InPackage<Alpha>, Hidden;
+def Performance : Package<"performance">, InPackage<OptIn>;
+
def Security : Package <"security">;
def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
def SecurityAlpha : Package<"security">, InPackage<Alpha>, Hidden;
-def Taint : Package<"taint">, InPackage<SecurityAlpha>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityAlpha>, Hidden;
def Unix : Package<"unix">;
def UnixAlpha : Package<"unix">, InPackage<Alpha>, Hidden;
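In practice the package nesting determines a checker's user-visible name: the PaddingChecker declared further down, for instance, sits in the Performance package under OptIn and is therefore enabled as optin.performance.Padding.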
@@ -38,11 +60,18 @@ def CStringAlpha : Package<"cstring">, InPackage<UnixAlpha>, Hidden;
def OSX : Package<"osx">;
def OSXAlpha : Package<"osx">, InPackage<Alpha>, Hidden;
+def OSXOptIn : Package<"osx">, InPackage<OptIn>;
+
def Cocoa : Package<"cocoa">, InPackage<OSX>;
def CocoaAlpha : Package<"cocoa">, InPackage<OSXAlpha>, Hidden;
+def CocoaOptIn : Package<"cocoa">, InPackage<OSXOptIn>;
+
def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
def Containers : Package<"containers">, InPackage<CoreFoundation>;
+def LocalizabilityAlpha : Package<"localizability">, InPackage<CocoaAlpha>;
+def LocalizabilityOptIn : Package<"localizability">, InPackage<CocoaOptIn>;
+
def LLVM : Package<"llvm">;
def Debug : Package<"debug">;
@@ -128,8 +157,36 @@ def TestAfterDivZeroChecker : Checker<"TestAfterDivZero">,
HelpText<"Check for division by variable that is later compared against 0. Either the comparison is useless or there is division by zero.">,
DescFile<"TestAfterDivZeroChecker.cpp">;
+def DynamicTypeChecker : Checker<"DynamicTypeChecker">,
+ HelpText<"Check for cases where the dynamic and the static type of an object are unrelated.">,
+ DescFile<"DynamicTypeChecker.cpp">;
+
} // end "alpha.core"
+let ParentPackage = Nullability in {
+
+def NullPassedToNonnullChecker : Checker<"NullPassedToNonnull">,
+ HelpText<"Warns when a null pointer is passed to a pointer which has a _Nonnull type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullReturnedFromNonnullChecker : Checker<"NullReturnedFromNonnull">,
+ HelpText<"Warns when a null pointer is returned from a function that has _Nonnull return type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullableDereferencedChecker : Checker<"NullableDereferenced">,
+ HelpText<"Warns when a nullable pointer is dereferenced.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullablePassedToNonnullChecker : Checker<"NullablePassedToNonnull">,
+ HelpText<"Warns when a nullable pointer is passed to a pointer which has a _Nonnull type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+def NullableReturnedFromNonnullChecker : Checker<"NullableReturnedFromNonnull">,
+ HelpText<"Warns when a nullable pointer is returned from a function that has _Nonnull return type.">,
+ DescFile<"NullabilityChecker.cpp">;
+
+} // end "nullability"
+
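As a quick illustration of what this checker family flags (hypothetical function; assumes clang's _Nonnull/_Nullable type qualifiers):

int *_Nonnull unwrap(int *_Nullable p) {
  if (!p)
    return p;   // NullReturnedFromNonnull: a nil value escapes the
                // _Nonnull return type
  return p;     // fine: p is non-null on this path
}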
//===----------------------------------------------------------------------===//
// Evaluate "builtin" functions.
//===----------------------------------------------------------------------===//
@@ -167,7 +224,7 @@ def UndefBranchChecker : Checker<"Branch">,
def UndefCapturedBlockVarChecker : Checker<"CapturedBlockVariable">,
HelpText<"Check for blocks that capture uninitialized values">,
DescFile<"UndefCapturedBlockVarChecker.cpp">;
-
+
def ReturnUndefChecker : Checker<"UndefReturn">,
HelpText<"Check for uninitialized values being returned to the caller">,
DescFile<"ReturnUndefChecker.cpp">;
@@ -181,11 +238,11 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
let ParentPackage = Cplusplus in {
def NewDeleteChecker : Checker<"NewDelete">,
- HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">,
+ HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">,
DescFile<"MallocChecker.cpp">;
def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
- HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
+ HelpText<"Check for memory leaks. Traces memory managed by new/delete.">,
DescFile<"MallocChecker.cpp">;
} // end: "cplusplus"
@@ -193,7 +250,7 @@ def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
let ParentPackage = CplusplusAlpha in {
def VirtualCallChecker : Checker<"VirtualCall">,
- HelpText<"Check virtual function calls during construction or destruction">,
+ HelpText<"Check virtual function calls during construction or destruction">,
DescFile<"VirtualCallChecker.cpp">;
} // end: "alpha.cplusplus"
@@ -218,6 +275,18 @@ def UnreachableCodeChecker : Checker<"UnreachableCode">,
} // end "alpha.deadcode"
//===----------------------------------------------------------------------===//
+// Performance checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Performance in {
+
+def PaddingChecker : Checker<"Padding">,
+ HelpText<"Check for excessively padded structs.">,
+ DescFile<"PaddingChecker.cpp">;
+
+} // end: "performance"
+
+//===----------------------------------------------------------------------===//
// Security checkers.
//===----------------------------------------------------------------------===//
@@ -257,7 +326,7 @@ let ParentPackage = SecurityAlpha in {
def ArrayBoundChecker : Checker<"ArrayBound">,
HelpText<"Warn about buffer overflows (older checker)">,
- DescFile<"ArrayBoundChecker.cpp">;
+ DescFile<"ArrayBoundChecker.cpp">;
def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
HelpText<"Warn about buffer overflows (newer checker)">,
@@ -298,7 +367,7 @@ def UnixAPIChecker : Checker<"API">,
def MallocChecker: Checker<"Malloc">,
HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free().">,
DescFile<"MallocChecker.cpp">;
-
+
def MallocSizeofChecker : Checker<"MallocSizeof">,
HelpText<"Check for dubious malloc arguments involving sizeof">,
DescFile<"MallocSizeofChecker.cpp">;
@@ -306,7 +375,11 @@ def MallocSizeofChecker : Checker<"MallocSizeof">,
def MismatchedDeallocatorChecker : Checker<"MismatchedDeallocator">,
HelpText<"Check for mismatched deallocators.">,
DescFile<"MallocChecker.cpp">;
-
+
+def VforkChecker : Checker<"Vfork">,
+ HelpText<"Check for proper usage of vfork">,
+ DescFile<"VforkChecker.cpp">;
+
} // end "unix"
let ParentPackage = UnixAlpha in {
@@ -337,7 +410,7 @@ def CStringNullArg : Checker<"NullArg">,
def CStringSyntaxChecker : Checker<"BadSizeArg">,
HelpText<"Check the size argument passed into C string functions for common erroneous patterns">,
- DescFile<"CStringSyntaxChecker.cpp">;
+ DescFile<"CStringSyntaxChecker.cpp">;
}
let ParentPackage = CStringAlpha in {
@@ -428,6 +501,10 @@ def RetainCountChecker : Checker<"RetainCount">,
HelpText<"Check for leaks and improper reference count management">,
DescFile<"RetainCountChecker.cpp">;
+def ObjCGenericsChecker : Checker<"ObjCGenerics">,
+ HelpText<"Check for type errors when using Objective-C generics">,
+ DescFile<"DynamicTypePropagation.cpp">;
+
} // end "osx.cocoa"
let ParentPackage = CocoaAlpha in {
@@ -477,8 +554,25 @@ def ObjCContainersASTChecker : Checker<"PointerSizedValues">,
def ObjCContainersChecker : Checker<"OutOfBounds">,
HelpText<"Checks for index out-of-bounds when using 'CFArray' API">,
DescFile<"ObjCContainersChecker.cpp">;
-
+
}
+
+let ParentPackage = LocalizabilityOptIn in {
+def NonLocalizedStringChecker : Checker<"NonLocalizedStringChecker">,
+ HelpText<"Warns about uses of non-localized NSStrings passed to UI methods expecting localized NSStrings">,
+ DescFile<"LocalizationChecker.cpp">;
+
+def EmptyLocalizationContextChecker : Checker<"EmptyLocalizationContextChecker">,
+ HelpText<"Check that NSLocalizedString macros include a comment for context">,
+ DescFile<"LocalizationChecker.cpp">;
+}
+
+let ParentPackage = LocalizabilityAlpha in {
+def PluralMisuseChecker : Checker<"PluralMisuseChecker">,
+ HelpText<"Warns against using one vs. many plural pattern in code when generating localized strings.">,
+ DescFile<"LocalizationChecker.cpp">;
+}
+
//===----------------------------------------------------------------------===//
// Checkers for LLVM development.
//===----------------------------------------------------------------------===//
@@ -546,4 +640,8 @@ def ExplodedGraphViewer : Checker<"ViewExplodedGraph">,
HelpText<"View Exploded Graphs using GraphViz">,
DescFile<"DebugCheckers.cpp">;
+def BugHashDumper : Checker<"DumpBugHash">,
+ HelpText<"Dump the bug hash for all statements.">,
+ DescFile<"DebugCheckers.cpp">;
+
} // end "debug"
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 804e83c0fb2a..3ad1996db893 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -27,7 +27,7 @@ namespace {
// enum value that represent the jail state
enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
-
+
bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
@@ -50,7 +50,7 @@ public:
static int x;
return &x;
}
-
+
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -87,8 +87,8 @@ bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
-
- // Once encouter a chroot(), set the enum value ROOT_CHANGED directly in
+
+ // Once we encounter a chroot(), set the enum value ROOT_CHANGED directly in
// the GDM.
state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
C.addTransition(state);
@@ -106,7 +106,7 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
// After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
const Expr *ArgExpr = CE->getArg(0);
SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
-
+
if (const MemRegion *R = ArgVal.getAsRegion()) {
R = R->StripCasts();
if (const StringRegion* StrRegion= dyn_cast<StringRegion>(R)) {
@@ -135,12 +135,12 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
// Ignore chroot and chdir.
if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
return;
-
+
// If jail state is ROOT_CHANGED, generate BugReport.
void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
if (k)
if (isRootChanged((intptr_t) *k))
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT_BreakJail)
BT_BreakJail.reset(new BuiltinBug(
this, "Break out of jail", "No call of chdir(\"/\") immediately "
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index f4be5b3e82f4..f2a269a3335c 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -28,36 +28,36 @@
using namespace clang;
using namespace ento;
-namespace {
-
+namespace {
+
/// A simple visitor to record what VarDecls occur in EH-handling code.
class EHCodeVisitor : public RecursiveASTVisitor<EHCodeVisitor> {
public:
bool inEH;
llvm::DenseSet<const VarDecl *> &S;
-
+
bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
SaveAndRestore<bool> inFinally(inEH, true);
return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtFinallyStmt(S);
}
-
+
bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) {
SaveAndRestore<bool> inCatch(inEH, true);
return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtCatchStmt(S);
}
-
+
bool TraverseCXXCatchStmt(CXXCatchStmt *S) {
SaveAndRestore<bool> inCatch(inEH, true);
return TraverseStmt(S->getHandlerBlock());
}
-
+
bool VisitDeclRefExpr(DeclRefExpr *DR) {
if (inEH)
if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
S.insert(D);
return true;
}
-
+
EHCodeVisitor(llvm::DenseSet<const VarDecl *> &S) :
inEH(false), S(S) {}
};
@@ -70,9 +70,9 @@ class ReachableCode {
public:
ReachableCode(const CFG &cfg)
: cfg(cfg), reachable(cfg.getNumBlockIDs(), false) {}
-
+
void computeReachableBlocks();
-
+
bool isReachable(const CFGBlock *block) const {
return reachable[block->getBlockID()];
}
@@ -82,7 +82,7 @@ public:
void ReachableCode::computeReachableBlocks() {
if (!cfg.getNumBlockIDs())
return;
-
+
SmallVector<const CFGBlock*, 10> worklist;
worklist.push_back(&cfg.getEntry());
@@ -160,19 +160,19 @@ public:
// to analyze that yet.
return InEH->count(D);
}
-
+
void Report(const VarDecl *V, DeadStoreKind dsk,
PathDiagnosticLocation L, SourceRange R) {
if (Escaped.count(V))
return;
-
+
// Compute reachable blocks within the CFG for trivial cases
// where a bogus dead store can be reported because itself is unreachable.
if (!reachableCode.get()) {
reachableCode.reset(new ReachableCode(cfg));
reachableCode->computeReachableBlocks();
}
-
+
if (!reachableCode->isReachable(currentBlock))
return;
@@ -196,7 +196,7 @@ public:
case Enclosing:
// Don't report issues in this case, e.g.: "if (x = foo())",
- // where 'x' is unused later. We have yet to see a case where
+ // where 'x' is unused later. We have yet to see a case where
// this is a real bug.
return;
}
@@ -259,7 +259,7 @@ public:
const LiveVariables::LivenessValues &Live) override {
currentBlock = block;
-
+
// Skip statements in macros.
if (S->getLocStart().isMacroID())
return;
@@ -276,7 +276,7 @@ public:
const Expr *RHS =
LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
RHS = RHS->IgnoreParenCasts();
-
+
QualType T = VD->getType();
if (T->isPointerType() || T->isObjCObjectPointerType()) {
if (RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull))
@@ -318,27 +318,27 @@ public:
if (!V)
continue;
-
- if (V->hasLocalStorage()) {
+
+ if (V->hasLocalStorage()) {
// Reference types confuse the dead stores checker. Skip them
// for now.
if (V->getType()->getAs<ReferenceType>())
return;
-
+
if (const Expr *E = V->getInit()) {
while (const ExprWithCleanups *exprClean =
dyn_cast<ExprWithCleanups>(E))
E = exprClean->getSubExpr();
-
+
// Look through transitive assignments, e.g.:
// int x = y = 0;
E = LookThroughTransitiveAssignmentsAndCommaOperators(E);
-
+
// Don't warn on C++ objects (yet) until we can show that their
// constructors/destructors don't have side effects.
if (isa<CXXConstructExpr>(E))
return;
-
+
// A dead initialization is a variable that is dead after it
// is initialized. We don't flag warnings for those variables
// marked 'unused' or 'objc_precise_lifetime'.
@@ -401,6 +401,11 @@ public:
// Check for '&'. Any VarDecl whose address has been taken we treat as
// escaped.
// FIXME: What about references?
+ if (auto *LE = dyn_cast<LambdaExpr>(S)) {
+ findLambdaReferenceCaptures(LE);
+ return;
+ }
+
const UnaryOperator *U = dyn_cast<UnaryOperator>(S);
if (!U)
return;
@@ -412,6 +417,28 @@ public:
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
Escaped.insert(VD);
}
+
+ // Treat local variables captured by reference in C++ lambdas as escaped.
+ void findLambdaReferenceCaptures(const LambdaExpr *LE) {
+ const CXXRecordDecl *LambdaClass = LE->getLambdaClass();
+ llvm::DenseMap<const VarDecl *, FieldDecl *> CaptureFields;
+ FieldDecl *ThisCaptureField;
+ LambdaClass->getCaptureFields(CaptureFields, ThisCaptureField);
+
+ for (const LambdaCapture &C : LE->captures()) {
+ if (!C.capturesVariable())
+ continue;
+
+ VarDecl *VD = C.getCapturedVar();
+ const FieldDecl *FD = CaptureFields[VD];
+ if (!FD)
+ continue;
+
+      // If the capture field is a reference type, the capture is by reference.
+ if (FD->getType()->isReferenceType())
+ Escaped.insert(VD);
+ }
+ }
};
} // end anonymous namespace
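// Illustrative sketch (hypothetical input, not part of this patch) of the
// pattern the new findLambdaReferenceCaptures() handles: a variable written
// and then only observed through a by-reference capture must be treated as
// escaped rather than flagged as a dead store.
//
//   int observeThroughLambda() {
//     int v = 0;
//     auto reader = [&v] { return v; }; // by-reference capture escapes 'v'
//     v = 42;                           // live store: 'reader' observes it
//     return reader();
//   }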
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 51e7a3d3ce34..2eef1688d4c4 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -16,7 +16,10 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/Support/Process.h"
@@ -209,3 +212,36 @@ public:
void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<ExplodedGraphViewer>();
}
+
+//===----------------------------------------------------------------------===//
+// DumpBugHash
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BugHashDumper : public Checker<check::PostStmt<Stmt>> {
+public:
+ mutable std::unique_ptr<BugType> BT;
+
+ void checkPostStmt(const Stmt *S, CheckerContext &C) const {
+ if (!BT)
+ BT.reset(new BugType(this, "Dump hash components", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ const LangOptions &Opts = C.getLangOpts();
+ const SourceManager &SM = C.getSourceManager();
+ FullSourceLoc FL(S->getLocStart(), SM);
+ std::string HashContent =
+ GetIssueString(SM, FL, getCheckName().getName(), BT->getCategory(),
+ C.getLocationContext()->getDecl(), Opts);
+
+ C.emitReport(llvm::make_unique<BugReport>(*BT, HashContent, N));
+ }
+};
+}
+
+void ento::registerBugHashDumper(CheckerManager &mgr) {
+ mgr.registerChecker<BugHashDumper>();
+}
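// A short note on what the new checker emits, per the GetIssueString() call
// above: one report per statement whose message is the raw string the issue
// hash is computed from (full source location, checker name, bug category,
// and enclosing declaration). A hypothetical invocation for a lit test,
// assuming the checker is registered under the debug package like the other
// checkers in this file:
//
//   clang -cc1 -analyze -analyzer-checker=debug.DumpBugHash test.m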
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 2ba7ea4a4e4d..5dd28320f88f 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -14,10 +14,12 @@
#include "ClangSACheckers.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -83,14 +85,14 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
SourceLocation L = IV->getLocation();
Ranges.push_back(SourceRange(L, L));
break;
- }
+ }
}
}
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
CheckerContext &C, bool IsBind) const {
// Generate an error node.
- ExplodedNode *N = C.generateSink(State);
+ ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
@@ -110,15 +112,11 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
S = expr->IgnoreParenLValueCasts();
if (IsBind) {
- if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
- if (BO->isAssignmentOp())
- S = BO->getRHS();
- } else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- assert(DS->isSingleDecl() && "We process decls one by one");
- if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
- if (const Expr *Init = VD->getAnyInitializer())
- S = Init;
- }
+ const VarDecl *VD;
+ const Expr *Init;
+ std::tie(VD, Init) = parseAssignment(S);
+ if (VD && Init)
+ S = Init;
}
switch (S->getStmtClass()) {
@@ -130,6 +128,14 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
os << " results in a null pointer dereference";
break;
}
+ case Stmt::OMPArraySectionExprClass: {
+ os << "Array access";
+ const OMPArraySectionExpr *AE = cast<OMPArraySectionExpr>(S);
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ State.get(), N->getLocationContext());
+ os << " results in a null pointer dereference";
+ break;
+ }
case Stmt::UnaryOperatorClass: {
os << "Dereference of null pointer";
const UnaryOperator *U = cast<UnaryOperator>(S);
@@ -159,7 +165,6 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
break;
}
- os.flush();
auto report = llvm::make_unique<BugReport>(
*BT_null, buf.empty() ? BT_null->getDescription() : StringRef(buf), N);
@@ -176,7 +181,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
CheckerContext &C) const {
// Check for dereference of an undefined value.
if (l.isUndef()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_undef)
BT_undef.reset(
new BuiltinBug(this, "Dereference of undefined pointer value"));
@@ -211,8 +216,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// Otherwise, we have the case where the location could either be
// null or not-null. Record the error node as an "implicit" null
// dereference.
- if (ExplodedNode *N = C.generateSink(nullState)) {
- ImplicitNullDerefEvent event = { l, isLoad, N, &C.getBugReporter() };
+ if (ExplodedNode *N = C.generateSink(nullState, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {l, isLoad, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/false};
dispatchEvent(event);
}
}
@@ -248,9 +254,10 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
// At this point the value could be either null or non-null.
// Record this as an "implicit" null dereference.
- if (ExplodedNode *N = C.generateSink(StNull)) {
- ImplicitNullDerefEvent event = { V, /*isLoad=*/true, N,
- &C.getBugReporter() };
+ if (ExplodedNode *N = C.generateSink(StNull, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {V, /*isLoad=*/true, N,
+ &C.getBugReporter(),
+ /*IsDirectDereference=*/false};
dispatchEvent(event);
}
}
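// The refactored IsBind path earlier in this file's diff now recognizes both
// assignment forms through parseAssignment() instead of open-coding the
// BinaryOperator and DeclStmt cases. A sketch of hypothetical inputs (not
// from this patch):
//
//   int *p;
//   p = 0;       // BinaryOperator: S is rewritten to the RHS '0'
//   int *q = 0;  // DeclStmt with initializer: S is rewritten to the init '0'
//
// so the diagnostic points at the expression that produced the null value.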
diff --git a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index a71def23c0bc..ad478cbf7829 100644
--- a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -41,13 +41,12 @@ namespace {
/// Checks for the init, dealloc, and any other functions that might be allowed
/// to perform direct instance variable assignment based on their name.
static bool DefaultMethodFilter(const ObjCMethodDecl *M) {
- if (M->getMethodFamily() == OMF_init || M->getMethodFamily() == OMF_dealloc ||
- M->getMethodFamily() == OMF_copy ||
- M->getMethodFamily() == OMF_mutableCopy ||
- M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
- M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos)
- return true;
- return false;
+ return M->getMethodFamily() == OMF_init ||
+ M->getMethodFamily() == OMF_dealloc ||
+ M->getMethodFamily() == OMF_copy ||
+ M->getMethodFamily() == OMF_mutableCopy ||
+ M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
+ M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos;
}
class DirectIvarAssignment :
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 79f9479b1448..598502305633 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -29,13 +29,13 @@ class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
CheckerContext &C) const ;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
-};
+};
} // end anonymous namespace
void DivZeroChecker::reportBug(const char *Msg,
ProgramStateRef StateZero,
CheckerContext &C) const {
- if (ExplodedNode *N = C.generateSink(StateZero)) {
+ if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Division by zero"));
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
new file mode 100644
index 000000000000..7e0cb8e93395
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -0,0 +1,213 @@
+//== DynamicTypeChecker.cpp ------------------------------------ -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker looks for cases where the dynamic type of an object is unrelated
+// to its static type. The type information utilized by this check is collected
+// by the DynamicTypePropagation checker. This check does not report any type
+// error for ObjC Generic types, in order to avoid duplicate errors from the
+// ObjC Generics checker. This checker is not supposed to modify the program
+// state; it is just an observer of the type information provided by other
+// checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DynamicTypeChecker : public Checker<check::PostStmt<ImplicitCastExpr>> {
+ mutable std::unique_ptr<BugType> BT;
+ void initBugType() const {
+ if (!BT)
+ BT.reset(
+ new BugType(this, "Dynamic and static type mismatch", "Type Error"));
+ }
+
+ class DynamicTypeBugVisitor
+ : public BugReporterVisitorImpl<DynamicTypeBugVisitor> {
+ public:
+ DynamicTypeBugVisitor(const MemRegion *Reg) : Reg(Reg) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Reg);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked region.
+ const MemRegion *Reg;
+ };
+
+ void reportTypeError(QualType DynamicType, QualType StaticType,
+ const MemRegion *Reg, const Stmt *ReportedNode,
+ CheckerContext &C) const;
+
+public:
+ void checkPostStmt(const ImplicitCastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void DynamicTypeChecker::reportTypeError(QualType DynamicType,
+ QualType StaticType,
+ const MemRegion *Reg,
+ const Stmt *ReportedNode,
+ CheckerContext &C) const {
+ initBugType();
+ SmallString<192> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Object has a dynamic type '";
+ QualType::print(DynamicType.getTypePtr(), Qualifiers(), OS, C.getLangOpts(),
+ llvm::Twine());
+ OS << "' which is incompatible with static type '";
+ QualType::print(StaticType.getTypePtr(), Qualifiers(), OS, C.getLangOpts(),
+ llvm::Twine());
+ OS << "'";
+ std::unique_ptr<BugReport> R(
+ new BugReport(*BT, OS.str(), C.generateNonFatalErrorNode()));
+ R->markInteresting(Reg);
+ R->addVisitor(llvm::make_unique<DynamicTypeBugVisitor>(Reg));
+ R->addRange(ReportedNode->getSourceRange());
+ C.emitReport(std::move(R));
+}
+
+PathDiagnosticPiece *DynamicTypeChecker::DynamicTypeBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = PrevN->getState();
+
+ DynamicTypeInfo TrackedType = getDynamicTypeInfo(State, Reg);
+ DynamicTypeInfo TrackedTypePrev = getDynamicTypeInfo(StatePrev, Reg);
+ if (!TrackedType.isValid())
+ return nullptr;
+
+ if (TrackedTypePrev.isValid() &&
+ TrackedTypePrev.getType() == TrackedType.getType())
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = nullptr;
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+
+ if (!S)
+ return nullptr;
+
+ const LangOptions &LangOpts = BRC.getASTContext().getLangOpts();
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Type '";
+ QualType::print(TrackedType.getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "' is inferred from ";
+
+ if (const auto *ExplicitCast = dyn_cast<ExplicitCastExpr>(S)) {
+ OS << "explicit cast (from '";
+ QualType::print(ExplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ExplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else if (const auto *ImplicitCast = dyn_cast<ImplicitCastExpr>(S)) {
+ OS << "implicit cast (from '";
+ QualType::print(ImplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ImplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else {
+ OS << "this context";
+ }
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, OS.str(), true, nullptr);
+}
+
+static bool hasDefinition(const ObjCObjectPointerType *ObjPtr) {
+ const ObjCInterfaceDecl *Decl = ObjPtr->getInterfaceDecl();
+ if (!Decl)
+ return false;
+
+ return Decl->getDefinition();
+}
+
+// TODO: consider checking explicit casts?
+void DynamicTypeChecker::checkPostStmt(const ImplicitCastExpr *CE,
+ CheckerContext &C) const {
+ // TODO: C++ support.
+ if (CE->getCastKind() != CK_BitCast)
+ return;
+
+ const MemRegion *Region = C.getSVal(CE).getAsRegion();
+ if (!Region)
+ return;
+
+ ProgramStateRef State = C.getState();
+ DynamicTypeInfo DynTypeInfo = getDynamicTypeInfo(State, Region);
+
+ if (!DynTypeInfo.isValid())
+ return;
+
+ QualType DynType = DynTypeInfo.getType();
+ QualType StaticType = CE->getType();
+
+ const auto *DynObjCType = DynType->getAs<ObjCObjectPointerType>();
+ const auto *StaticObjCType = StaticType->getAs<ObjCObjectPointerType>();
+
+ if (!DynObjCType || !StaticObjCType)
+ return;
+
+ if (!hasDefinition(DynObjCType) || !hasDefinition(StaticObjCType))
+ return;
+
+ ASTContext &ASTCtxt = C.getASTContext();
+
+  // Strip kindofness to correctly detect subtyping relationships.
+ DynObjCType = DynObjCType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+ StaticObjCType = StaticObjCType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+
+ // Specialized objects are handled by the generics checker.
+ if (StaticObjCType->isSpecialized())
+ return;
+
+ if (ASTCtxt.canAssignObjCInterfaces(StaticObjCType, DynObjCType))
+ return;
+
+ if (DynTypeInfo.canBeASubClass() &&
+ ASTCtxt.canAssignObjCInterfaces(DynObjCType, StaticObjCType))
+ return;
+
+ reportTypeError(DynType, StaticType, Region, CE, C);
+}
+
+void ento::registerDynamicTypeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DynamicTypeChecker>();
+}
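// Hypothetical Objective-C input (not from this patch) that this checker
// diagnoses; the implicit cast from 'id' is a CK_BitCast, so the compiler is
// silent, but the tracked dynamic type is unrelated to the static type:
//
//   id obj = [[NSString alloc] init];  // dynamic type: NSString *
//   NSNumber *num = obj;               // implicit bitcast from 'id'
//   // warning: Object has a dynamic type 'NSString *' which is
//   // incompatible with static type 'NSNumber *'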
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 43a281218775..30f629830c61 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -7,42 +7,139 @@
//
//===----------------------------------------------------------------------===//
//
+// This file contains two checkers. One helps the static analyzer core to track
+// types, the other does type inference on Obj-C generics and reports type
+// errors.
+//
+// Dynamic Type Propagation:
// This checker defines the rules for dynamic type gathering and propagation.
//
+// Generics Checker for Objective-C:
+// This checker tries to find type errors that the compiler is not able to catch
+// due to the implicit conversions that were introduced for backward
+// compatibility.
+//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
using namespace clang;
using namespace ento;
+// ProgramState trait - The type information is tracked by DynamicTypeMap.
+// This is an auxiliary map that tracks more information about generic types,
+// because in some cases the most derived type is not the most informative one
+// about the type parameters. The types that are stored for each symbol in
+// this map must be specialized.
+// TODO: In some cases the type stored in this map is exactly the same as the
+// one stored in DynamicTypeMap. We should not store duplicated information in
+// those cases.
+REGISTER_MAP_WITH_PROGRAMSTATE(MostSpecializedTypeArgsMap, SymbolRef,
+ const ObjCObjectPointerType *)
+
namespace {
class DynamicTypePropagation:
public Checker< check::PreCall,
check::PostCall,
- check::PostStmt<ImplicitCastExpr>,
- check::PostStmt<CXXNewExpr> > {
+ check::DeadSymbols,
+ check::PostStmt<CastExpr>,
+ check::PostStmt<CXXNewExpr>,
+ check::PreObjCMessage,
+ check::PostObjCMessage > {
const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const;
/// \brief Return a better dynamic type if one can be derived from the cast.
const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
CheckerContext &C) const;
+
+ ExplodedNode *dynamicTypePropagationOnCasts(const CastExpr *CE,
+ ProgramStateRef &State,
+ CheckerContext &C) const;
+
+ mutable std::unique_ptr<BugType> ObjCGenericsBugType;
+ void initBugType() const {
+ if (!ObjCGenericsBugType)
+ ObjCGenericsBugType.reset(
+ new BugType(this, "Generics", categories::CoreFoundationObjectiveC));
+ }
+
+ class GenericsBugVisitor : public BugReporterVisitorImpl<GenericsBugVisitor> {
+ public:
+ GenericsBugVisitor(SymbolRef S) : Sym(S) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked symbol.
+ SymbolRef Sym;
+ };
+
+ void reportGenericsBug(const ObjCObjectPointerType *From,
+ const ObjCObjectPointerType *To, ExplodedNode *N,
+ SymbolRef Sym, CheckerContext &C,
+ const Stmt *ReportedNode = nullptr) const;
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPostStmt(const ImplicitCastExpr *CastE, CheckerContext &C) const;
+ void checkPostStmt(const CastExpr *CastE, CheckerContext &C) const;
void checkPostStmt(const CXXNewExpr *NewE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+
+  /// This value is set to true when the Generics checker is turned on.
+ DefaultBool CheckGenerics;
};
}
+void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ DynamicTypeMapImpl TypeMap = State->get<DynamicTypeMap>();
+ for (DynamicTypeMapImpl::iterator I = TypeMap.begin(), E = TypeMap.end();
+ I != E; ++I) {
+ if (!SR.isLiveRegion(I->first)) {
+ State = State->remove<DynamicTypeMap>(I->first);
+ }
+ }
+
+ if (!SR.hasDeadSymbols()) {
+ C.addTransition(State);
+ return;
+ }
+
+ MostSpecializedTypeArgsMapTy TyArgMap =
+ State->get<MostSpecializedTypeArgsMap>();
+ for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
+ E = TyArgMap.end();
+ I != E; ++I) {
+ if (SR.isDead(I->first)) {
+ State = State->remove<MostSpecializedTypeArgsMap>(I->first);
+ }
+ }
+
+ C.addTransition(State);
+}
+
static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
CheckerContext &C) {
assert(Region);
@@ -52,7 +149,7 @@ static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
ProgramStateRef State = C.getState();
- State = State->setDynamicTypeInfo(Region, Ty, /*CanBeSubclass=*/false);
+ State = setDynamicTypeInfo(State, Region, Ty, /*CanBeSubclass=*/false);
C.addTransition(State);
return;
}
@@ -113,7 +210,7 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
ProgramStateRef State = C.getState();
const ObjCMethodDecl *D = Msg->getDecl();
-
+
if (D && D->hasRelatedResultType()) {
switch (Msg->getMethodFamily()) {
default:
@@ -131,7 +228,7 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
return;
QualType DynResTy =
C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
- C.addTransition(State->setDynamicTypeInfo(RetReg, DynResTy, false));
+ C.addTransition(setDynamicTypeInfo(State, RetReg, DynResTy, false));
break;
}
case OMF_init: {
@@ -140,8 +237,8 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
const MemRegion *RecReg = Msg->getReceiverSVal().getAsRegion();
if (!RecReg)
return;
- DynamicTypeInfo RecDynType = State->getDynamicTypeInfo(RecReg);
- C.addTransition(State->setDynamicTypeInfo(RetReg, RecDynType));
+ DynamicTypeInfo RecDynType = getDynamicTypeInfo(State, RecReg);
+ C.addTransition(setDynamicTypeInfo(State, RetReg, RecDynType));
break;
}
}
@@ -173,23 +270,25 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
}
}
-void DynamicTypePropagation::checkPostStmt(const ImplicitCastExpr *CastE,
- CheckerContext &C) const {
- // We only track dynamic type info for regions.
- const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+/// TODO: Handle explicit casts.
+/// Handle C++ casts.
+///
+/// Precondition: the cast is between ObjCObjectPointers.
+ExplodedNode *DynamicTypePropagation::dynamicTypePropagationOnCasts(
+ const CastExpr *CE, ProgramStateRef &State, CheckerContext &C) const {
+ // We only track type info for regions.
+ const MemRegion *ToR = C.getSVal(CE).getAsRegion();
if (!ToR)
- return;
+ return C.getPredecessor();
+
+ if (isa<ExplicitCastExpr>(CE))
+ return C.getPredecessor();
- switch (CastE->getCastKind()) {
- default:
- break;
- case CK_BitCast:
- // Only handle ObjCObjects for now.
- if (const Type *NewTy = getBetterObjCType(CastE, C))
- C.addTransition(C.getState()->setDynamicTypeInfo(ToR, QualType(NewTy,0)));
- break;
+ if (const Type *NewTy = getBetterObjCType(CE, C)) {
+ State = setDynamicTypeInfo(State, ToR, QualType(NewTy, 0));
+ return C.addTransition(State);
}
- return;
+ return C.getPredecessor();
}
void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
@@ -201,9 +300,9 @@ void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
const MemRegion *MR = C.getSVal(NewE).getAsRegion();
if (!MR)
return;
-
- C.addTransition(C.getState()->setDynamicTypeInfo(MR, NewE->getType(),
- /*CanBeSubclass=*/false));
+
+ C.addTransition(setDynamicTypeInfo(C.getState(), MR, NewE->getType(),
+ /*CanBeSubclass=*/false));
}
const ObjCObjectType *
@@ -254,7 +353,7 @@ DynamicTypePropagation::getBetterObjCType(const Expr *CastE,
CastE->getType()->getAs<ObjCObjectPointerType>();
if (!NewTy)
return nullptr;
- QualType OldDTy = C.getState()->getDynamicTypeInfo(ToR).getType();
+ QualType OldDTy = getDynamicTypeInfo(C.getState(), ToR).getType();
if (OldDTy.isNull()) {
return NewTy;
}
@@ -276,6 +375,566 @@ DynamicTypePropagation::getBetterObjCType(const Expr *CastE,
return nullptr;
}
+static const ObjCObjectPointerType *getMostInformativeDerivedClassImpl(
+ const ObjCObjectPointerType *From, const ObjCObjectPointerType *To,
+ const ObjCObjectPointerType *MostInformativeCandidate, ASTContext &C) {
+  // Check whether From and To are the same class modulo specialization.
+ if (From->getInterfaceDecl()->getCanonicalDecl() ==
+ To->getInterfaceDecl()->getCanonicalDecl()) {
+ if (To->isSpecialized()) {
+ assert(MostInformativeCandidate->isSpecialized());
+ return MostInformativeCandidate;
+ }
+ return From;
+ }
+ const auto *SuperOfTo =
+ To->getObjectType()->getSuperClassType()->getAs<ObjCObjectType>();
+ assert(SuperOfTo);
+ QualType SuperPtrOfToQual =
+ C.getObjCObjectPointerType(QualType(SuperOfTo, 0));
+ const auto *SuperPtrOfTo = SuperPtrOfToQual->getAs<ObjCObjectPointerType>();
+ if (To->isUnspecialized())
+ return getMostInformativeDerivedClassImpl(From, SuperPtrOfTo, SuperPtrOfTo,
+ C);
+ else
+ return getMostInformativeDerivedClassImpl(From, SuperPtrOfTo,
+ MostInformativeCandidate, C);
+}
+
+/// A downcast may lose specialization information. E.g.:
+///   MutableMap<T, U> : Map
+/// The downcast to MutableMap loses the information about the types of the
+/// Map (because the type parameters are not forwarded to Map), and in
+/// general there is no way to recover that information from the
+/// declaration. In order to have the most information, let's find the most
+/// derived type that has all the type parameters forwarded.
+///
+/// Get the subclass of \p From (which has a lower bound \p To) that does not
+/// lose information about type parameters. \p To has to be a subclass of
+/// \p From. \p From has to be specialized.
+static const ObjCObjectPointerType *
+getMostInformativeDerivedClass(const ObjCObjectPointerType *From,
+ const ObjCObjectPointerType *To, ASTContext &C) {
+ return getMostInformativeDerivedClassImpl(From, To, To, C);
+}
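// Worked example using the MutableMap<T, U> : Map hierarchy from the comment
// above (hypothetical types, not from this patch): when a value tracked as
// Map<NSString *, NSNumber *> * is downcast to an unspecialized MutableMap *,
// the type arguments are not forwarded to MutableMap, so the recursive walk
// climbs back up to Map and keeps Map<NSString *, NSNumber *> * as the most
// informative type to track.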
+
+/// Inputs:
+/// \param StaticLowerBound Static lower bound for a symbol. The dynamic lower
+///        bound might be a subclass of this type.
+/// \param StaticUpperBound A static upper bound for a symbol.
+/// \p StaticLowerBound is expected to be a subclass of \p StaticUpperBound.
+/// \param Current The type that was inferred for a symbol in a previous
+/// context. Might be null when this is the first time that inference happens.
+/// Precondition:
+/// \p StaticLowerBound or \p StaticUpperBound is specialized. If \p Current
+/// is not null, it is specialized.
+/// Possible cases:
+/// (1) The \p Current is null and \p StaticLowerBound <: \p StaticUpperBound
+/// (2) \p StaticLowerBound <: \p Current <: \p StaticUpperBound
+/// (3) \p Current <: \p StaticLowerBound <: \p StaticUpperBound
+/// (4) \p StaticLowerBound <: \p StaticUpperBound <: \p Current
+/// Effect:
+/// Use getMostInformativeDerivedClass with the upper and lower bound of the
+/// set {\p StaticLowerBound, \p Current, \p StaticUpperBound}. The computed
+/// lower bound must be specialized. If the result differs from \p Current or
+/// \p Current is null, store the result.
+static bool
+storeWhenMoreInformative(ProgramStateRef &State, SymbolRef Sym,
+ const ObjCObjectPointerType *const *Current,
+ const ObjCObjectPointerType *StaticLowerBound,
+ const ObjCObjectPointerType *StaticUpperBound,
+ ASTContext &C) {
+ // Precondition
+ assert(StaticUpperBound->isSpecialized() ||
+ StaticLowerBound->isSpecialized());
+ assert(!Current || (*Current)->isSpecialized());
+
+ // Case (1)
+ if (!Current) {
+ if (StaticUpperBound->isUnspecialized()) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, StaticLowerBound);
+ return true;
+ }
+ // Upper bound is specialized.
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(StaticUpperBound, StaticLowerBound, C);
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ // Case (3)
+ if (C.canAssignObjCInterfaces(StaticLowerBound, *Current)) {
+ return false;
+ }
+
+ // Case (4)
+ if (C.canAssignObjCInterfaces(*Current, StaticUpperBound)) {
+ // The type arguments might not be forwarded at any point of inheritance.
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(*Current, StaticUpperBound, C);
+ WithMostInfo =
+ getMostInformativeDerivedClass(WithMostInfo, StaticLowerBound, C);
+ if (WithMostInfo == *Current)
+ return false;
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ // Case (2)
+ const ObjCObjectPointerType *WithMostInfo =
+ getMostInformativeDerivedClass(*Current, StaticLowerBound, C);
+ if (WithMostInfo != *Current) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, WithMostInfo);
+ return true;
+ }
+
+ return false;
+}
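// Example of case (1) above (hypothetical types, not from this patch): the
// first time a symbol statically typed NSArray * (unspecialized upper bound)
// is cast down to NSMutableArray<NSString *> * (specialized lower bound),
// there is no Current entry yet, so the specialized lower bound is stored in
// MostSpecializedTypeArgsMap and the function returns true.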
+
+/// Type inference based on static type information that is available for the
+/// cast and the tracked type information for the given symbol. When the tracked
+/// symbol and the destination type of the cast are unrelated, report an error.
+void DynamicTypePropagation::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getCastKind() != CK_BitCast)
+ return;
+
+ QualType OriginType = CE->getSubExpr()->getType();
+ QualType DestType = CE->getType();
+
+ const auto *OrigObjectPtrType = OriginType->getAs<ObjCObjectPointerType>();
+ const auto *DestObjectPtrType = DestType->getAs<ObjCObjectPointerType>();
+
+ if (!OrigObjectPtrType || !DestObjectPtrType)
+ return;
+
+ ProgramStateRef State = C.getState();
+ ExplodedNode *AfterTypeProp = dynamicTypePropagationOnCasts(CE, State, C);
+
+ ASTContext &ASTCtxt = C.getASTContext();
+
+ // This checker detects the subtyping relationships using the assignment
+ // rules. In order to be able to do this the kindofness must be stripped
+  // first. The checker treats every type as a kindof type anyway: when the
+  // tracked type is a subtype of the static type it tries to look up the
+ // methods in the tracked type first.
+ OrigObjectPtrType = OrigObjectPtrType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+ DestObjectPtrType = DestObjectPtrType->stripObjCKindOfTypeAndQuals(ASTCtxt);
+
+  // TODO: erase tracked information when there is a cast to an unrelated type
+ // and everything is unspecialized statically.
+ if (OrigObjectPtrType->isUnspecialized() &&
+ DestObjectPtrType->isUnspecialized())
+ return;
+
+ SymbolRef Sym = State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ return;
+
+ // Check which assignments are legal.
+ bool OrigToDest =
+ ASTCtxt.canAssignObjCInterfaces(DestObjectPtrType, OrigObjectPtrType);
+ bool DestToOrig =
+ ASTCtxt.canAssignObjCInterfaces(OrigObjectPtrType, DestObjectPtrType);
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(Sym);
+
+  // Downcasts and upcasts are handled in a uniform way regardless of being
+  // explicit. Explicit casts, however, can happen between mismatched types.
+ if (isa<ExplicitCastExpr>(CE) && !OrigToDest && !DestToOrig) {
+    // Mismatched types. If the DestType is specialized, store it. Forget the
+ // tracked type otherwise.
+ if (DestObjectPtrType->isSpecialized()) {
+ State = State->set<MostSpecializedTypeArgsMap>(Sym, DestObjectPtrType);
+ C.addTransition(State, AfterTypeProp);
+ } else if (TrackedType) {
+ State = State->remove<MostSpecializedTypeArgsMap>(Sym);
+ C.addTransition(State, AfterTypeProp);
+ }
+ return;
+ }
+
+  // The tracked type should be a sub- or superclass of the static destination
+ // type. When an (implicit) upcast or a downcast happens according to static
+ // types, and there is no subtyping relationship between the tracked and the
+ // static destination types, it indicates an error.
+ if (TrackedType &&
+ !ASTCtxt.canAssignObjCInterfaces(DestObjectPtrType, *TrackedType) &&
+ !ASTCtxt.canAssignObjCInterfaces(*TrackedType, DestObjectPtrType)) {
+ static CheckerProgramPointTag IllegalConv(this, "IllegalConversion");
+ ExplodedNode *N = C.addTransition(State, AfterTypeProp, &IllegalConv);
+ reportGenericsBug(*TrackedType, DestObjectPtrType, N, Sym, C);
+ return;
+ }
+
+ // Handle downcasts and upcasts.
+
+ const ObjCObjectPointerType *LowerBound = DestObjectPtrType;
+ const ObjCObjectPointerType *UpperBound = OrigObjectPtrType;
+ if (OrigToDest && !DestToOrig)
+ std::swap(LowerBound, UpperBound);
+
+ // The id type is not a real bound. Eliminate it.
+ LowerBound = LowerBound->isObjCIdType() ? UpperBound : LowerBound;
+ UpperBound = UpperBound->isObjCIdType() ? LowerBound : UpperBound;
+
+ if (storeWhenMoreInformative(State, Sym, TrackedType, LowerBound, UpperBound,
+ ASTCtxt)) {
+ C.addTransition(State, AfterTypeProp);
+ }
+}
+
+static const Expr *stripCastsAndSugar(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+ E = POE->getSyntacticForm()->IgnoreParenImpCasts();
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+ E = OVE->getSourceExpr()->IgnoreParenImpCasts();
+ return E;
+}
+
+static bool isObjCTypeParamDependent(QualType Type) {
+  // It is illegal to typedef parameterized types inside an interface.
+  // Therefore an Objective-C type can only be dependent on a type parameter
+  // when the type parameter is structurally present in the type itself.
+ class IsObjCTypeParamDependentTypeVisitor
+ : public RecursiveASTVisitor<IsObjCTypeParamDependentTypeVisitor> {
+ public:
+ IsObjCTypeParamDependentTypeVisitor() : Result(false) {}
+ bool VisitTypedefType(const TypedefType *Type) {
+ if (isa<ObjCTypeParamDecl>(Type->getDecl())) {
+ Result = true;
+ return false;
+ }
+ return true;
+ }
+
+ bool Result;
+ };
+
+ IsObjCTypeParamDependentTypeVisitor Visitor;
+ Visitor.TraverseType(Type);
+ return Visitor.Result;
+}
+
+/// A method might not be available in the interface indicated by the static
+/// type. However it might be available in the tracked type. In order to
+/// properly substitute the type parameters we need the declaration context of
+/// the method. The more specialized the enclosing class of the method is, the
+/// more likely that the parameter substitution will be successful.
+static const ObjCMethodDecl *
+findMethodDecl(const ObjCMessageExpr *MessageExpr,
+ const ObjCObjectPointerType *TrackedType, ASTContext &ASTCtxt) {
+ const ObjCMethodDecl *Method = nullptr;
+
+ QualType ReceiverType = MessageExpr->getReceiverType();
+ const auto *ReceiverObjectPtrType =
+ ReceiverType->getAs<ObjCObjectPointerType>();
+
+ // Do this "devirtualization" on instance and class methods only. Trust the
+ // static type on super and super class calls.
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Instance ||
+ MessageExpr->getReceiverKind() == ObjCMessageExpr::Class) {
+ // When the receiver type is id, Class, or some super class of the tracked
+ // type, look up the method in the tracked type, not in the receiver type.
+ // This way we preserve more information.
+ if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType() ||
+ ASTCtxt.canAssignObjCInterfaces(ReceiverObjectPtrType, TrackedType)) {
+ const ObjCInterfaceDecl *InterfaceDecl = TrackedType->getInterfaceDecl();
+ // The method might not be found.
+ Selector Sel = MessageExpr->getSelector();
+ Method = InterfaceDecl->lookupInstanceMethod(Sel);
+ if (!Method)
+ Method = InterfaceDecl->lookupClassMethod(Sel);
+ }
+ }
+
+  // Fall back to static method lookup when the one based on the tracked type
+  // fails.
+ return Method ? Method : MessageExpr->getMethodDecl();
+}
+
+/// Get the ObjCObjectPointerType returned by a method based on the tracked
+/// type information, or a null pointer when the returned type is not an
+/// ObjCObjectPointerType.
+static QualType getReturnTypeForMethod(
+ const ObjCMethodDecl *Method, ArrayRef<QualType> TypeArgs,
+ const ObjCObjectPointerType *SelfType, ASTContext &C) {
+ QualType StaticResultType = Method->getReturnType();
+
+ // Is the return type declared as instance type?
+ if (StaticResultType == C.getObjCInstanceType())
+ return QualType(SelfType, 0);
+
+ // Check whether the result type depends on a type parameter.
+ if (!isObjCTypeParamDependent(StaticResultType))
+ return QualType();
+
+ QualType ResultType = StaticResultType.substObjCTypeArgs(
+ C, TypeArgs, ObjCSubstitutionContext::Result);
+
+ return ResultType;
+}
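// Example of the substitution above (hypothetical types, not from this
// patch): for a receiver tracked as NSArray<NSString *> *, the declared
// return type 'ObjectType' of -firstObject depends on the type parameter,
// so it is substituted to yield NSString * as the inferred return type; a
// method declared to return 'instancetype' yields the tracked
// NSArray<NSString *> * itself.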
+
+/// When the receiver has a tracked type, use that type to validate the
+/// arguments of the message expression and the return value.
+void DynamicTypePropagation::checkPreObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = M.getReceiverSVal().getAsSymbol();
+ if (!Sym)
+ return;
+
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(Sym);
+ if (!TrackedType)
+ return;
+
+  // Get the type arguments from the tracked type and substitute them before
+  // doing the semantic check.
+
+ ASTContext &ASTCtxt = C.getASTContext();
+ const ObjCMessageExpr *MessageExpr = M.getOriginExpr();
+ const ObjCMethodDecl *Method =
+ findMethodDecl(MessageExpr, *TrackedType, ASTCtxt);
+
+ // It is possible to call non-existent methods in Obj-C.
+ if (!Method)
+ return;
+
+ Optional<ArrayRef<QualType>> TypeArgs =
+ (*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
+ // This case might happen when there is an unspecialized override of a
+ // specialized method.
+ if (!TypeArgs)
+ return;
+
+ for (unsigned i = 0; i < Method->param_size(); i++) {
+ const Expr *Arg = MessageExpr->getArg(i);
+ const ParmVarDecl *Param = Method->parameters()[i];
+
+ QualType OrigParamType = Param->getType();
+ if (!isObjCTypeParamDependent(OrigParamType))
+ continue;
+
+ QualType ParamType = OrigParamType.substObjCTypeArgs(
+ ASTCtxt, *TypeArgs, ObjCSubstitutionContext::Parameter);
+    // Check if the argument can be assigned to the parameter.
+ const auto *ParamObjectPtrType = ParamType->getAs<ObjCObjectPointerType>();
+ const auto *ArgObjectPtrType =
+ stripCastsAndSugar(Arg)->getType()->getAs<ObjCObjectPointerType>();
+ if (!ParamObjectPtrType || !ArgObjectPtrType)
+ continue;
+
+    // Check if we have a more concrete tracked type that is not a supertype of
+ // the static argument type.
+ SVal ArgSVal = M.getArgSVal(i);
+ SymbolRef ArgSym = ArgSVal.getAsSymbol();
+ if (ArgSym) {
+ const ObjCObjectPointerType *const *TrackedArgType =
+ State->get<MostSpecializedTypeArgsMap>(ArgSym);
+ if (TrackedArgType &&
+ ASTCtxt.canAssignObjCInterfaces(ArgObjectPtrType, *TrackedArgType)) {
+ ArgObjectPtrType = *TrackedArgType;
+ }
+ }
+
+ // Warn when argument is incompatible with the parameter.
+ if (!ASTCtxt.canAssignObjCInterfaces(ParamObjectPtrType,
+ ArgObjectPtrType)) {
+ static CheckerProgramPointTag Tag(this, "ArgTypeMismatch");
+ ExplodedNode *N = C.addTransition(State, &Tag);
+ reportGenericsBug(ArgObjectPtrType, ParamObjectPtrType, N, Sym, C, Arg);
+ return;
+ }
+ }
+}
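// Hypothetical Objective-C input (not from this patch) that this callback
// flags; the compiler accepts it because the upcast erases the type
// arguments, but the tracked type does not:
//
//   NSMutableArray<NSNumber *> *nums = [[NSMutableArray alloc] init];
//   NSMutableArray *anyArr = nums;      // tracked: NSMutableArray<NSNumber *> *
//   [anyArr addObject:@"not a number"]; // warning: Conversion from value of
//   // type 'NSString *' to incompatible type 'NSNumber *'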
+
+/// This callback is used to infer the types for Class variables. This info is
+/// used later to validate messages that are sent to classes. Class variables
+/// are initialized by invoking the 'class' method on a class.
+/// This method is also used to infer the type information for the return
+/// types.
+// TODO: right now it only tracks generic types. Extend this to track every
+// type in the DynamicTypeMap and diagnose type errors!
+void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ const ObjCMessageExpr *MessageExpr = M.getOriginExpr();
+
+ SymbolRef RetSym = M.getReturnValue().getAsSymbol();
+ if (!RetSym)
+ return;
+
+ Selector Sel = MessageExpr->getSelector();
+ ProgramStateRef State = C.getState();
+ // Inference for class variables.
+ // We are only interested in cases where the class method is invoked on a
+ // class. This method is provided by the runtime and available on all classes.
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class &&
+ Sel.getAsString() == "class") {
+
+ QualType ReceiverType = MessageExpr->getClassReceiver();
+ const auto *ReceiverClassType = ReceiverType->getAs<ObjCObjectType>();
+ QualType ReceiverClassPointerType =
+ C.getASTContext().getObjCObjectPointerType(
+ QualType(ReceiverClassType, 0));
+
+ if (!ReceiverClassType->isSpecialized())
+ return;
+ const auto *InferredType =
+ ReceiverClassPointerType->getAs<ObjCObjectPointerType>();
+ assert(InferredType);
+
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
+ C.addTransition(State);
+ return;
+ }
+
+ // Tracking for return types.
+ SymbolRef RecSym = M.getReceiverSVal().getAsSymbol();
+ if (!RecSym)
+ return;
+
+ const ObjCObjectPointerType *const *TrackedType =
+ State->get<MostSpecializedTypeArgsMap>(RecSym);
+ if (!TrackedType)
+ return;
+
+ ASTContext &ASTCtxt = C.getASTContext();
+ const ObjCMethodDecl *Method =
+ findMethodDecl(MessageExpr, *TrackedType, ASTCtxt);
+ if (!Method)
+ return;
+
+ Optional<ArrayRef<QualType>> TypeArgs =
+ (*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
+ if (!TypeArgs)
+ return;
+
+ QualType ResultType =
+ getReturnTypeForMethod(Method, *TypeArgs, *TrackedType, ASTCtxt);
+ // The static type is the same as the deduced type.
+ if (ResultType.isNull())
+ return;
+
+ const MemRegion *RetRegion = M.getReturnValue().getAsRegion();
+ ExplodedNode *Pred = C.getPredecessor();
+ // When there is an entry available for the return symbol in DynamicTypeMap,
+  // the call was inlined, and the information in the DynamicTypeMap should
+ // be precise.
+ if (RetRegion && !State->get<DynamicTypeMap>(RetRegion)) {
+ // TODO: we have duplicated information in DynamicTypeMap and
+    // MostSpecializedTypeArgsMap. We should only store anything in the latter if
+ // the stored data differs from the one stored in the former.
+ State = setDynamicTypeInfo(State, RetRegion, ResultType,
+ /*CanBeSubclass=*/true);
+ Pred = C.addTransition(State);
+ }
+
+ const auto *ResultPtrType = ResultType->getAs<ObjCObjectPointerType>();
+
+ if (!ResultPtrType || ResultPtrType->isUnspecialized())
+ return;
+
+ // When the result is a specialized type and it is not tracked yet, track it
+ // for the result symbol.
+ if (!State->get<MostSpecializedTypeArgsMap>(RetSym)) {
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, ResultPtrType);
+ C.addTransition(State, Pred);
+ }
+}
+
+void DynamicTypePropagation::reportGenericsBug(
+ const ObjCObjectPointerType *From, const ObjCObjectPointerType *To,
+ ExplodedNode *N, SymbolRef Sym, CheckerContext &C,
+ const Stmt *ReportedNode) const {
+ if (!CheckGenerics)
+ return;
+
+ initBugType();
+ SmallString<192> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Conversion from value of type '";
+ QualType::print(From, Qualifiers(), OS, C.getLangOpts(), llvm::Twine());
+ OS << "' to incompatible type '";
+ QualType::print(To, Qualifiers(), OS, C.getLangOpts(), llvm::Twine());
+ OS << "'";
+ std::unique_ptr<BugReport> R(
+ new BugReport(*ObjCGenericsBugType, OS.str(), N));
+ R->markInteresting(Sym);
+ R->addVisitor(llvm::make_unique<GenericsBugVisitor>(Sym));
+ if (ReportedNode)
+ R->addRange(ReportedNode->getSourceRange());
+ C.emitReport(std::move(R));
+}
+
+PathDiagnosticPiece *DynamicTypePropagation::GenericsBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef state = N->getState();
+ ProgramStateRef statePrev = PrevN->getState();
+
+ const ObjCObjectPointerType *const *TrackedType =
+ state->get<MostSpecializedTypeArgsMap>(Sym);
+ const ObjCObjectPointerType *const *TrackedTypePrev =
+ statePrev->get<MostSpecializedTypeArgsMap>(Sym);
+ if (!TrackedType)
+ return nullptr;
+
+ if (TrackedTypePrev && *TrackedTypePrev == *TrackedType)
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = nullptr;
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+
+ if (!S)
+ return nullptr;
+
+ const LangOptions &LangOpts = BRC.getASTContext().getLangOpts();
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Type '";
+ QualType::print(*TrackedType, Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' is inferred from ";
+
+ if (const auto *ExplicitCast = dyn_cast<ExplicitCastExpr>(S)) {
+ OS << "explicit cast (from '";
+ QualType::print(ExplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ExplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else if (const auto *ImplicitCast = dyn_cast<ImplicitCastExpr>(S)) {
+ OS << "implicit cast (from '";
+ QualType::print(ImplicitCast->getSubExpr()->getType().getTypePtr(),
+ Qualifiers(), OS, LangOpts, llvm::Twine());
+ OS << "' to '";
+ QualType::print(ImplicitCast->getType().getTypePtr(), Qualifiers(), OS,
+ LangOpts, llvm::Twine());
+ OS << "')";
+ } else {
+ OS << "this context";
+ }
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, OS.str(), true, nullptr);
+}
+
+/// Register checkers.
+void ento::registerObjCGenericsChecker(CheckerManager &mgr) {
+ DynamicTypePropagation *checker =
+ mgr.registerChecker<DynamicTypePropagation>();
+ checker->CheckGenerics = true;
+}
+
void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypePropagation>();
}
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 7dc0a8745958..8f6c20ab1906 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -17,22 +17,26 @@ using namespace clang;
using namespace ento;
namespace {
-class ExprInspectionChecker : public Checker< eval::Call > {
+class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols> {
mutable std::unique_ptr<BugType> BT;
void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnIfReached(const CallExpr *CE, CheckerContext &C) const;
void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerWarnOnDeadSymbol(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
public:
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
};
}
+REGISTER_SET_WITH_PROGRAMSTATE(MarkedSymbols, const void *)
+
bool ExprInspectionChecker::evalCall(const CallExpr *CE,
CheckerContext &C) const {
// These checks should have no effect on the surrounding environment
@@ -42,7 +46,10 @@ bool ExprInspectionChecker::evalCall(const CallExpr *CE,
.Case("clang_analyzer_checkInlined",
&ExprInspectionChecker::analyzerCheckInlined)
.Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
- .Case("clang_analyzer_warnIfReached", &ExprInspectionChecker::analyzerWarnIfReached)
+ .Case("clang_analyzer_warnIfReached",
+ &ExprInspectionChecker::analyzerWarnIfReached)
+ .Case("clang_analyzer_warnOnDeadSymbol",
+ &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
.Default(nullptr);
if (!Handler)
@@ -86,8 +93,7 @@ static const char *getArgumentValueString(const CallExpr *CE,
void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
CheckerContext &C) const {
- ExplodedNode *N = C.getPredecessor();
- const LocationContext *LC = N->getLocationContext();
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
// A specific instantiation of an inlined function may have more constrained
// values than can generally be assumed. Skip the check.
@@ -97,24 +103,28 @@ void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
C.emitReport(
llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
}
void ExprInspectionChecker::analyzerWarnIfReached(const CallExpr *CE,
CheckerContext &C) const {
- ExplodedNode *N = C.getPredecessor();
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
C.emitReport(llvm::make_unique<BugReport>(*BT, "REACHABLE", N));
}
void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
CheckerContext &C) const {
- ExplodedNode *N = C.getPredecessor();
- const LocationContext *LC = N->getLocationContext();
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
// An inlined function could conceivably also be analyzed as a top-level
// function. We ignore this case and only emit a message (TRUE or FALSE)
@@ -127,10 +137,48 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
C.emitReport(
llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
}
+void ExprInspectionChecker::analyzerWarnOnDeadSymbol(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() == 0)
+ return;
+ SVal Val = C.getSVal(CE->getArg(0));
+ SymbolRef Sym = Val.getAsSymbol();
+ if (!Sym)
+ return;
+
+ ProgramStateRef State = C.getState();
+ State = State->add<MarkedSymbols>(Sym);
+ C.addTransition(State);
+}
+
+void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const MarkedSymbolsTy &Syms = State->get<MarkedSymbols>();
+ for (auto I = Syms.begin(), E = Syms.end(); I != E; ++I) {
+ SymbolRef Sym = static_cast<SymbolRef>(*I);
+ if (!SymReaper.isDead(Sym))
+ continue;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ C.emitReport(llvm::make_unique<BugReport>(*BT, "SYMBOL DEAD", N));
+ C.addTransition(State->remove<MarkedSymbols>(Sym), N);
+ }
+}
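// Sketch of how the new hook is exercised from an analyzer test (the
// declaration is provided by the test file itself, as is conventional for
// these debug builtins; hypothetical input, not from this patch):
//
//   void clang_analyzer_warnOnDeadSymbol(int);
//   int getValue();
//   void test() {
//     int x = getValue();
//     clang_analyzer_warnOnDeadSymbol(x);
//   } // a "SYMBOL DEAD" report is emitted once x's symbol dies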
+
void ExprInspectionChecker::analyzerCrash(const CallExpr *CE,
CheckerContext &C) const {
LLVM_BUILTIN_TRAP;
diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 48d6bd4b37e6..3fe89f96a43b 100644
--- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -23,7 +23,7 @@ using namespace clang;
using namespace ento;
namespace {
-class FixedAddressChecker
+class FixedAddressChecker
: public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
@@ -50,7 +50,7 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
if (!RV.isConstant() || RV.isZeroConstant())
return;
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Use fixed address",
diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 2cf508ff086c..8c8acc637f1f 100644
--- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -100,8 +100,8 @@ private:
/// Generate a report if the expression is tainted or points to tainted data.
bool generateReportIfTainted(const Expr *E, const char Msg[],
CheckerContext &C) const;
-
-
+
+
typedef SmallVector<unsigned, 2> ArgVector;
/// \brief A struct used to specify taint propagation rules for a function.
@@ -441,7 +441,7 @@ SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
return Val.getAsSymbol();
}
-ProgramStateRef
+ProgramStateRef
GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -640,7 +640,7 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
return false;
// Generate diagnostic.
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
initBugType();
auto report = llvm::make_unique<BugReport>(*BT, Msg, N);
report->addRange(E->getSourceRange());
@@ -658,17 +658,15 @@ bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
return false;
// If either the format string content or the pointer itself are tainted, warn.
- if (generateReportIfTainted(CE->getArg(ArgNum),
- MsgUncontrolledFormatString, C))
- return true;
- return false;
+ return generateReportIfTainted(CE->getArg(ArgNum),
+ MsgUncontrolledFormatString, C);
}
bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
StringRef Name,
CheckerContext &C) const {
- // TODO: It might make sense to run this check on demand. In some cases,
- // we should check if the environment has been cleansed here. We also might
+ // TODO: It might make sense to run this check on demand. In some cases,
+ // we should check if the environment has been cleansed here. We also might
// need to know if the user was reset before these calls(seteuid).
unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
.Case("system", 0)
@@ -686,11 +684,7 @@ bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
return false;
- if (generateReportIfTainted(CE->getArg(ArgNum),
- MsgSanitizeSystemArgs, C))
- return true;
-
- return false;
+ return generateReportIfTainted(CE->getArg(ArgNum), MsgSanitizeSystemArgs, C);
}
// TODO: Should this check be a part of the CString checker?
@@ -728,11 +722,8 @@ bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
ArgNum = 2;
}
- if (ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
- generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C))
- return true;
-
- return false;
+ return ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
+ generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C);
}
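// Hypothetical input (not from this patch) for the system-call check in this
// file; scanf is one of the taint sources the checker models:
//
//   char cmd[64];
//   scanf("%63s", cmd);  // 'cmd' becomes tainted
//   system(cmd);         // tainted argument 0: a report is generated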
void ento::registerGenericTaintChecker(CheckerManager &mgr) {
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index 58d0783f3974..0c3bff5b63b8 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -96,7 +96,7 @@ void FindIdenticalExprVisitor::checkBitwiseOrLogicalOp(const BinaryOperator *B,
}
LHS = B2->getLHS();
}
-
+
if (isIdenticalStmt(AC->getASTContext(), RHS, LHS)) {
Sr[0] = RHS->getSourceRange();
Sr[1] = LHS->getSourceRange();
@@ -108,6 +108,24 @@ bool FindIdenticalExprVisitor::VisitIfStmt(const IfStmt *I) {
const Stmt *Stmt1 = I->getThen();
const Stmt *Stmt2 = I->getElse();
+ // Check for identical inner condition:
+ //
+ // if (x<10) {
+ // if (x<10) {
+ // ..
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(Stmt1)) {
+ if (!CS->body_empty()) {
+ const IfStmt *InnerIf = dyn_cast<IfStmt>(*CS->body_begin());
+ if (InnerIf && isIdenticalStmt(AC->getASTContext(), I->getCond(), InnerIf->getCond(), /*ignoreSideEffects=*/ false)) {
+ PathDiagnosticLocation ELoc(InnerIf->getCond(), BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
+ categories::LogicError,
+ "conditions of the inner and outer statements are identical",
+ ELoc);
+ }
+ }
+ }
+
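// Hypothetical input (not from this patch) caught by the new check; the
// inner condition can never differ from the outer one, which usually
// indicates a copy-paste mistake:
//
//   if (x < 10) {
//     if (x < 10) { // warning: conditions of the inner and outer
//       doWork();   // statements are identical
//     }
//   }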
// Check for identical conditions:
//
// if (b) {
@@ -287,9 +305,7 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
const Stmt *Stmt2, bool IgnoreSideEffects) {
if (!Stmt1 || !Stmt2) {
- if (!Stmt1 && !Stmt2)
- return true;
- return false;
+ return !Stmt1 && !Stmt2;
}
// If Stmt1 & Stmt2 are of different class then they are not
@@ -332,6 +348,7 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
return false;
case Stmt::CallExprClass:
case Stmt::ArraySubscriptExprClass:
+ case Stmt::OMPArraySectionExprClass:
case Stmt::ImplicitCastExprClass:
case Stmt::ParenExprClass:
case Stmt::BreakStmtClass:
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 3df5fa034a43..dffff38c91a2 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -20,8 +20,8 @@
// been called on them. An invalidation method should either invalidate all
// the ivars or call another invalidation method (on self).
//
-// Partial invalidor annotation allows to addess cases when ivars are
-// invalidated by other methods, which might or might not be called from
+// Partial invalidator annotation allows addressing cases when ivars are
+// invalidated by other methods, which might or might not be called from
// the invalidation method. The checker checks that each invalidation
// method and all the partial methods cumulatively invalidate all ivars.
// __attribute__((annotate("objc_instance_variable_invalidator_partial")));
@@ -310,7 +310,7 @@ const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
// Lookup for the synthesized case.
IvarD = Prop->getPropertyIvarDecl();
- // We only track the ivars/properties that are defined in the current
+ // We only track the ivars/properties that are defined in the current
// class (not the parent).
if (IvarD && IvarD->getContainingInterface() == InterfaceD) {
if (TrackedIvars.count(IvarD)) {
diff --git a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 4e3f9b73acb2..db4fbca36deb 100644
--- a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -276,7 +276,6 @@ void ASTFieldVisitor::ReportError(QualType T) {
}
}
os << " (type " << FieldChain.back()->getType().getAsString() << ")";
- os.flush();
// Note that this will fire for every translation unit that uses this
// class. This is suboptimal, but at least scan-build will merge
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
new file mode 100644
index 000000000000..56346cd4f706
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -0,0 +1,1201 @@
+//=- LocalizationChecker.cpp -------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of checks for localizability including:
+// 1) A checker that warns about uses of non-localized NSStrings passed to
+// UI methods expecting localized strings
+// 2) A syntactic checker that warns against the bad practice of
+// not including a comment in NSLocalizedString macros.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/Unicode.h"
+#include "llvm/ADT/StringSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct LocalizedState {
+private:
+ enum Kind { NonLocalized, Localized } K;
+ LocalizedState(Kind InK) : K(InK) {}
+
+public:
+ bool isLocalized() const { return K == Localized; }
+ bool isNonLocalized() const { return K == NonLocalized; }
+
+ static LocalizedState getLocalized() { return LocalizedState(Localized); }
+ static LocalizedState getNonLocalized() {
+ return LocalizedState(NonLocalized);
+ }
+
+ // Overload the == operator
+ bool operator==(const LocalizedState &X) const { return K == X.K; }
+
+  // LLVM's equivalent of a hash function
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
+};
+
+class NonLocalizedStringChecker
+ : public Checker<check::PostCall, check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::PostStmt<ObjCStringLiteral>> {
+
+ mutable std::unique_ptr<BugType> BT;
+
+ // Methods that require a localized string
+ mutable llvm::DenseMap<const IdentifierInfo *,
+ llvm::DenseMap<Selector, uint8_t>> UIMethods;
+ // Methods that return a localized string
+ mutable llvm::SmallSet<std::pair<const IdentifierInfo *, Selector>, 12> LSM;
+ // C Functions that return a localized string
+ mutable llvm::SmallSet<const IdentifierInfo *, 5> LSF;
+
+ void initUIMethods(ASTContext &Ctx) const;
+ void initLocStringsMethods(ASTContext &Ctx) const;
+
+ bool hasNonLocalizedState(SVal S, CheckerContext &C) const;
+ bool hasLocalizedState(SVal S, CheckerContext &C) const;
+ void setNonLocalizedState(SVal S, CheckerContext &C) const;
+ void setLocalizedState(SVal S, CheckerContext &C) const;
+
+ bool isAnnotatedAsLocalized(const Decl *D) const;
+ void reportLocalizationError(SVal S, const ObjCMethodCall &M,
+ CheckerContext &C, int argumentNumber = 0) const;
+
+ int getLocalizedArgumentForSelector(const IdentifierInfo *Receiver,
+ Selector S) const;
+
+public:
+ NonLocalizedStringChecker();
+
+ // When this parameter is set to true, the checker assumes all
+ // methods that return NSStrings are unlocalized. Thus, more false
+ // positives will be reported.
+ DefaultBool IsAggressive;
+
+ void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+ void checkPostStmt(const ObjCStringLiteral *SL, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(LocalizedMemMap, const MemRegion *,
+ LocalizedState)
+
+NonLocalizedStringChecker::NonLocalizedStringChecker() {
+ BT.reset(new BugType(this, "Unlocalizable string",
+ "Localizability Issue (Apple)"));
+}
+
+#define NEW_RECEIVER(receiver) \
+ llvm::DenseMap<Selector, uint8_t> &receiver##M = \
+ UIMethods.insert({&Ctx.Idents.get(#receiver), \
+ llvm::DenseMap<Selector, uint8_t>()}) \
+ .first->second;
+#define ADD_NULLARY_METHOD(receiver, method, argument) \
+ receiver##M.insert( \
+ {Ctx.Selectors.getNullarySelector(&Ctx.Idents.get(#method)), argument});
+#define ADD_UNARY_METHOD(receiver, method, argument) \
+ receiver##M.insert( \
+ {Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(#method)), argument});
+#define ADD_METHOD(receiver, method_list, count, argument) \
+ receiver##M.insert({Ctx.Selectors.getSelector(count, method_list), argument});
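+
+// Illustrative note (not itself part of the tables below): a pair such as
+//   NEW_RECEIVER(UILabel)
+//   ADD_UNARY_METHOD(UILabel, setText, 0)
+// expands to roughly
+//   llvm::DenseMap<Selector, uint8_t> &UILabelM =
+//       UIMethods.insert({&Ctx.Idents.get("UILabel"),
+//                         llvm::DenseMap<Selector, uint8_t>()})
+//           .first->second;
+//   UILabelM.insert(
+//       {Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("setText")), 0});
+// i.e. UIMethods maps a class name to a selector -> argument-index table.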
+
+/// Initializes a list of methods that require a localized string
+/// Format: {"ClassName", {{"selectorName:", LocStringArg#}, ...}, ...}
+void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
+ if (!UIMethods.empty())
+ return;
+
+ // UI Methods
+ NEW_RECEIVER(UISearchDisplayController)
+ ADD_UNARY_METHOD(UISearchDisplayController, setSearchResultsTitle, 0)
+
+ NEW_RECEIVER(UITabBarItem)
+ IdentifierInfo *initWithTitleUITabBarItemTag[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("tag")};
+ ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemTag, 3, 0)
+ IdentifierInfo *initWithTitleUITabBarItemImage[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("selectedImage")};
+ ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemImage, 3, 0)
+
+ NEW_RECEIVER(NSDockTile)
+ ADD_UNARY_METHOD(NSDockTile, setBadgeLabel, 0)
+
+ NEW_RECEIVER(NSStatusItem)
+ ADD_UNARY_METHOD(NSStatusItem, setTitle, 0)
+ ADD_UNARY_METHOD(NSStatusItem, setToolTip, 0)
+
+ NEW_RECEIVER(UITableViewRowAction)
+ IdentifierInfo *rowActionWithStyleUITableViewRowAction[] = {
+ &Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(UITableViewRowAction, rowActionWithStyleUITableViewRowAction, 3, 1)
+ ADD_UNARY_METHOD(UITableViewRowAction, setTitle, 0)
+
+ NEW_RECEIVER(NSBox)
+ ADD_UNARY_METHOD(NSBox, setTitle, 0)
+
+ NEW_RECEIVER(NSButton)
+ ADD_UNARY_METHOD(NSButton, setTitle, 0)
+ ADD_UNARY_METHOD(NSButton, setAlternateTitle, 0)
+
+ NEW_RECEIVER(NSSavePanel)
+ ADD_UNARY_METHOD(NSSavePanel, setPrompt, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setTitle, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setNameFieldLabel, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setNameFieldStringValue, 0)
+ ADD_UNARY_METHOD(NSSavePanel, setMessage, 0)
+
+ NEW_RECEIVER(UIPrintInfo)
+ ADD_UNARY_METHOD(UIPrintInfo, setJobName, 0)
+
+ NEW_RECEIVER(NSTabViewItem)
+ ADD_UNARY_METHOD(NSTabViewItem, setLabel, 0)
+ ADD_UNARY_METHOD(NSTabViewItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSBrowser)
+ IdentifierInfo *setTitleNSBrowser[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("ofColumn")};
+ ADD_METHOD(NSBrowser, setTitleNSBrowser, 2, 0)
+
+ NEW_RECEIVER(UIAccessibilityElement)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityHint, 0)
+ ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityValue, 0)
+
+ NEW_RECEIVER(UIAlertAction)
+ IdentifierInfo *actionWithTitleUIAlertAction[] = {
+ &Ctx.Idents.get("actionWithTitle"), &Ctx.Idents.get("style"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(UIAlertAction, actionWithTitleUIAlertAction, 3, 0)
+
+ NEW_RECEIVER(NSPopUpButton)
+ ADD_UNARY_METHOD(NSPopUpButton, addItemWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSPopUpButton[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSPopUpButton, insertItemWithTitleNSPopUpButton, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, removeItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, selectItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButton, setTitle, 0)
+
+ NEW_RECEIVER(NSTableViewRowAction)
+ IdentifierInfo *rowActionWithStyleNSTableViewRowAction[] = {
+ &Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
+ &Ctx.Idents.get("handler")};
+ ADD_METHOD(NSTableViewRowAction, rowActionWithStyleNSTableViewRowAction, 3, 1)
+ ADD_UNARY_METHOD(NSTableViewRowAction, setTitle, 0)
+
+ NEW_RECEIVER(NSImage)
+ ADD_UNARY_METHOD(NSImage, setAccessibilityDescription, 0)
+
+ NEW_RECEIVER(NSUserActivity)
+ ADD_UNARY_METHOD(NSUserActivity, setTitle, 0)
+
+ NEW_RECEIVER(NSPathControlItem)
+ ADD_UNARY_METHOD(NSPathControlItem, setTitle, 0)
+
+ NEW_RECEIVER(NSCell)
+ ADD_UNARY_METHOD(NSCell, initTextCell, 0)
+ ADD_UNARY_METHOD(NSCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSCell, setStringValue, 0)
+
+ NEW_RECEIVER(NSPathControl)
+ ADD_UNARY_METHOD(NSPathControl, setPlaceholderString, 0)
+
+ NEW_RECEIVER(UIAccessibility)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityHint, 0)
+ ADD_UNARY_METHOD(UIAccessibility, setAccessibilityValue, 0)
+
+ NEW_RECEIVER(NSTableColumn)
+ ADD_UNARY_METHOD(NSTableColumn, setTitle, 0)
+ ADD_UNARY_METHOD(NSTableColumn, setHeaderToolTip, 0)
+
+ NEW_RECEIVER(NSSegmentedControl)
+ IdentifierInfo *setLabelNSSegmentedControl[] = {
+ &Ctx.Idents.get("setLabel"), &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedControl, setLabelNSSegmentedControl, 2, 0)
+
+ NEW_RECEIVER(NSButtonCell)
+ ADD_UNARY_METHOD(NSButtonCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSButtonCell, setAlternateTitle, 0)
+
+ NEW_RECEIVER(NSSliderCell)
+ ADD_UNARY_METHOD(NSSliderCell, setTitle, 0)
+
+ NEW_RECEIVER(NSControl)
+ ADD_UNARY_METHOD(NSControl, setStringValue, 0)
+
+ NEW_RECEIVER(NSAccessibility)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityValueDescription, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityLabel, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityTitle, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityPlaceholderValue, 0)
+ ADD_UNARY_METHOD(NSAccessibility, setAccessibilityHelp, 0)
+
+ NEW_RECEIVER(NSMatrix)
+ IdentifierInfo *setToolTipNSMatrix[] = {&Ctx.Idents.get("setToolTip"),
+ &Ctx.Idents.get("forCell")};
+ ADD_METHOD(NSMatrix, setToolTipNSMatrix, 2, 0)
+
+ NEW_RECEIVER(NSPrintPanel)
+ ADD_UNARY_METHOD(NSPrintPanel, setDefaultButtonTitle, 0)
+
+ NEW_RECEIVER(UILocalNotification)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertBody, 0)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertAction, 0)
+ ADD_UNARY_METHOD(UILocalNotification, setAlertTitle, 0)
+
+ NEW_RECEIVER(NSSlider)
+ ADD_UNARY_METHOD(NSSlider, setTitle, 0)
+
+ NEW_RECEIVER(UIMenuItem)
+ IdentifierInfo *initWithTitleUIMenuItem[] = {&Ctx.Idents.get("initWithTitle"),
+ &Ctx.Idents.get("action")};
+ ADD_METHOD(UIMenuItem, initWithTitleUIMenuItem, 2, 0)
+ ADD_UNARY_METHOD(UIMenuItem, setTitle, 0)
+
+ NEW_RECEIVER(UIAlertController)
+ IdentifierInfo *alertControllerWithTitleUIAlertController[] = {
+ &Ctx.Idents.get("alertControllerWithTitle"), &Ctx.Idents.get("message"),
+ &Ctx.Idents.get("preferredStyle")};
+ ADD_METHOD(UIAlertController, alertControllerWithTitleUIAlertController, 3, 1)
+ ADD_UNARY_METHOD(UIAlertController, setTitle, 0)
+ ADD_UNARY_METHOD(UIAlertController, setMessage, 0)
+
+ NEW_RECEIVER(UIApplicationShortcutItem)
+ IdentifierInfo *initWithTypeUIApplicationShortcutItemIcon[] = {
+ &Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle"),
+ &Ctx.Idents.get("localizedSubtitle"), &Ctx.Idents.get("icon"),
+ &Ctx.Idents.get("userInfo")};
+ ADD_METHOD(UIApplicationShortcutItem,
+ initWithTypeUIApplicationShortcutItemIcon, 5, 1)
+ IdentifierInfo *initWithTypeUIApplicationShortcutItem[] = {
+ &Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle")};
+ ADD_METHOD(UIApplicationShortcutItem, initWithTypeUIApplicationShortcutItem,
+ 2, 1)
+
+ NEW_RECEIVER(UIActionSheet)
+ IdentifierInfo *initWithTitleUIActionSheet[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("delegate"),
+ &Ctx.Idents.get("cancelButtonTitle"),
+ &Ctx.Idents.get("destructiveButtonTitle"),
+ &Ctx.Idents.get("otherButtonTitles")};
+ ADD_METHOD(UIActionSheet, initWithTitleUIActionSheet, 5, 0)
+ ADD_UNARY_METHOD(UIActionSheet, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(UIActionSheet, setTitle, 0)
+
+ NEW_RECEIVER(NSURLSessionTask)
+ ADD_UNARY_METHOD(NSURLSessionTask, setTaskDescription, 0)
+
+ NEW_RECEIVER(UIAccessibilityCustomAction)
+ IdentifierInfo *initWithNameUIAccessibilityCustomAction[] = {
+ &Ctx.Idents.get("initWithName"), &Ctx.Idents.get("target"),
+ &Ctx.Idents.get("selector")};
+ ADD_METHOD(UIAccessibilityCustomAction,
+ initWithNameUIAccessibilityCustomAction, 3, 0)
+ ADD_UNARY_METHOD(UIAccessibilityCustomAction, setName, 0)
+
+ NEW_RECEIVER(UISearchBar)
+ ADD_UNARY_METHOD(UISearchBar, setText, 0)
+ ADD_UNARY_METHOD(UISearchBar, setPrompt, 0)
+ ADD_UNARY_METHOD(UISearchBar, setPlaceholder, 0)
+
+ NEW_RECEIVER(UIBarItem)
+ ADD_UNARY_METHOD(UIBarItem, setTitle, 0)
+
+ NEW_RECEIVER(UITextView)
+ ADD_UNARY_METHOD(UITextView, setText, 0)
+
+ NEW_RECEIVER(NSView)
+ ADD_UNARY_METHOD(NSView, setToolTip, 0)
+
+ NEW_RECEIVER(NSTextField)
+ ADD_UNARY_METHOD(NSTextField, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSAttributedString)
+ ADD_UNARY_METHOD(NSAttributedString, initWithString, 0)
+ IdentifierInfo *initWithStringNSAttributedString[] = {
+ &Ctx.Idents.get("initWithString"), &Ctx.Idents.get("attributes")};
+ ADD_METHOD(NSAttributedString, initWithStringNSAttributedString, 2, 0)
+
+ NEW_RECEIVER(NSText)
+ ADD_UNARY_METHOD(NSText, setString, 0)
+
+ NEW_RECEIVER(UIKeyCommand)
+ IdentifierInfo *keyCommandWithInputUIKeyCommand[] = {
+ &Ctx.Idents.get("keyCommandWithInput"), &Ctx.Idents.get("modifierFlags"),
+ &Ctx.Idents.get("action"), &Ctx.Idents.get("discoverabilityTitle")};
+ ADD_METHOD(UIKeyCommand, keyCommandWithInputUIKeyCommand, 4, 3)
+ ADD_UNARY_METHOD(UIKeyCommand, setDiscoverabilityTitle, 0)
+
+ NEW_RECEIVER(UILabel)
+ ADD_UNARY_METHOD(UILabel, setText, 0)
+
+ NEW_RECEIVER(NSAlert)
+ IdentifierInfo *alertWithMessageTextNSAlert[] = {
+ &Ctx.Idents.get("alertWithMessageText"), &Ctx.Idents.get("defaultButton"),
+ &Ctx.Idents.get("alternateButton"), &Ctx.Idents.get("otherButton"),
+ &Ctx.Idents.get("informativeTextWithFormat")};
+ ADD_METHOD(NSAlert, alertWithMessageTextNSAlert, 5, 0)
+ ADD_UNARY_METHOD(NSAlert, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(NSAlert, setMessageText, 0)
+ ADD_UNARY_METHOD(NSAlert, setInformativeText, 0)
+ ADD_UNARY_METHOD(NSAlert, setHelpAnchor, 0)
+
+ NEW_RECEIVER(UIMutableApplicationShortcutItem)
+ ADD_UNARY_METHOD(UIMutableApplicationShortcutItem, setLocalizedTitle, 0)
+ ADD_UNARY_METHOD(UIMutableApplicationShortcutItem, setLocalizedSubtitle, 0)
+
+ NEW_RECEIVER(UIButton)
+ IdentifierInfo *setTitleUIButton[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("forState")};
+ ADD_METHOD(UIButton, setTitleUIButton, 2, 0)
+
+ NEW_RECEIVER(NSWindow)
+ ADD_UNARY_METHOD(NSWindow, setTitle, 0)
+ IdentifierInfo *minFrameWidthWithTitleNSWindow[] = {
+ &Ctx.Idents.get("minFrameWidthWithTitle"), &Ctx.Idents.get("styleMask")};
+ ADD_METHOD(NSWindow, minFrameWidthWithTitleNSWindow, 2, 0)
+ ADD_UNARY_METHOD(NSWindow, setMiniwindowTitle, 0)
+
+ NEW_RECEIVER(NSPathCell)
+ ADD_UNARY_METHOD(NSPathCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(UIDocumentMenuViewController)
+ IdentifierInfo *addOptionWithTitleUIDocumentMenuViewController[] = {
+ &Ctx.Idents.get("addOptionWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("order"), &Ctx.Idents.get("handler")};
+ ADD_METHOD(UIDocumentMenuViewController,
+ addOptionWithTitleUIDocumentMenuViewController, 4, 0)
+
+ NEW_RECEIVER(UINavigationItem)
+ ADD_UNARY_METHOD(UINavigationItem, initWithTitle, 0)
+ ADD_UNARY_METHOD(UINavigationItem, setTitle, 0)
+ ADD_UNARY_METHOD(UINavigationItem, setPrompt, 0)
+
+ NEW_RECEIVER(UIAlertView)
+ IdentifierInfo *initWithTitleUIAlertView[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("message"),
+ &Ctx.Idents.get("delegate"), &Ctx.Idents.get("cancelButtonTitle"),
+ &Ctx.Idents.get("otherButtonTitles")};
+ ADD_METHOD(UIAlertView, initWithTitleUIAlertView, 5, 0)
+ ADD_UNARY_METHOD(UIAlertView, addButtonWithTitle, 0)
+ ADD_UNARY_METHOD(UIAlertView, setTitle, 0)
+ ADD_UNARY_METHOD(UIAlertView, setMessage, 0)
+
+ NEW_RECEIVER(NSFormCell)
+ ADD_UNARY_METHOD(NSFormCell, initTextCell, 0)
+ ADD_UNARY_METHOD(NSFormCell, setTitle, 0)
+ ADD_UNARY_METHOD(NSFormCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSUserNotification)
+ ADD_UNARY_METHOD(NSUserNotification, setTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setSubtitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setInformativeText, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setActionButtonTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setOtherButtonTitle, 0)
+ ADD_UNARY_METHOD(NSUserNotification, setResponsePlaceholder, 0)
+
+ NEW_RECEIVER(NSToolbarItem)
+ ADD_UNARY_METHOD(NSToolbarItem, setLabel, 0)
+ ADD_UNARY_METHOD(NSToolbarItem, setPaletteLabel, 0)
+ ADD_UNARY_METHOD(NSToolbarItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSProgress)
+ ADD_UNARY_METHOD(NSProgress, setLocalizedDescription, 0)
+ ADD_UNARY_METHOD(NSProgress, setLocalizedAdditionalDescription, 0)
+
+ NEW_RECEIVER(NSSegmentedCell)
+ IdentifierInfo *setLabelNSSegmentedCell[] = {&Ctx.Idents.get("setLabel"),
+ &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedCell, setLabelNSSegmentedCell, 2, 0)
+ IdentifierInfo *setToolTipNSSegmentedCell[] = {&Ctx.Idents.get("setToolTip"),
+ &Ctx.Idents.get("forSegment")};
+ ADD_METHOD(NSSegmentedCell, setToolTipNSSegmentedCell, 2, 0)
+
+ NEW_RECEIVER(NSUndoManager)
+ ADD_UNARY_METHOD(NSUndoManager, setActionName, 0)
+ ADD_UNARY_METHOD(NSUndoManager, undoMenuTitleForUndoActionName, 0)
+ ADD_UNARY_METHOD(NSUndoManager, redoMenuTitleForUndoActionName, 0)
+
+ NEW_RECEIVER(NSMenuItem)
+ IdentifierInfo *initWithTitleNSMenuItem[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent")};
+ ADD_METHOD(NSMenuItem, initWithTitleNSMenuItem, 3, 0)
+ ADD_UNARY_METHOD(NSMenuItem, setTitle, 0)
+ ADD_UNARY_METHOD(NSMenuItem, setToolTip, 0)
+
+ NEW_RECEIVER(NSPopUpButtonCell)
+ IdentifierInfo *initTextCellNSPopUpButtonCell[] = {
+ &Ctx.Idents.get("initTextCell"), &Ctx.Idents.get("pullsDown")};
+ ADD_METHOD(NSPopUpButtonCell, initTextCellNSPopUpButtonCell, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, addItemWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSPopUpButtonCell[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSPopUpButtonCell, insertItemWithTitleNSPopUpButtonCell, 2, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, removeItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, selectItemWithTitle, 0)
+ ADD_UNARY_METHOD(NSPopUpButtonCell, setTitle, 0)
+
+ NEW_RECEIVER(NSViewController)
+ ADD_UNARY_METHOD(NSViewController, setTitle, 0)
+
+ NEW_RECEIVER(NSMenu)
+ ADD_UNARY_METHOD(NSMenu, initWithTitle, 0)
+ IdentifierInfo *insertItemWithTitleNSMenu[] = {
+ &Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent"), &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSMenu, insertItemWithTitleNSMenu, 4, 0)
+ IdentifierInfo *addItemWithTitleNSMenu[] = {
+ &Ctx.Idents.get("addItemWithTitle"), &Ctx.Idents.get("action"),
+ &Ctx.Idents.get("keyEquivalent")};
+ ADD_METHOD(NSMenu, addItemWithTitleNSMenu, 3, 0)
+ ADD_UNARY_METHOD(NSMenu, setTitle, 0)
+
+ NEW_RECEIVER(UIMutableUserNotificationAction)
+ ADD_UNARY_METHOD(UIMutableUserNotificationAction, setTitle, 0)
+
+ NEW_RECEIVER(NSForm)
+ ADD_UNARY_METHOD(NSForm, addEntry, 0)
+ IdentifierInfo *insertEntryNSForm[] = {&Ctx.Idents.get("insertEntry"),
+ &Ctx.Idents.get("atIndex")};
+ ADD_METHOD(NSForm, insertEntryNSForm, 2, 0)
+
+ NEW_RECEIVER(NSTextFieldCell)
+ ADD_UNARY_METHOD(NSTextFieldCell, setPlaceholderString, 0)
+
+ NEW_RECEIVER(NSUserNotificationAction)
+ IdentifierInfo *actionWithIdentifierNSUserNotificationAction[] = {
+ &Ctx.Idents.get("actionWithIdentifier"), &Ctx.Idents.get("title")};
+ ADD_METHOD(NSUserNotificationAction,
+ actionWithIdentifierNSUserNotificationAction, 2, 1)
+
+ NEW_RECEIVER(NSURLSession)
+ ADD_UNARY_METHOD(NSURLSession, setSessionDescription, 0)
+
+ NEW_RECEIVER(UITextField)
+ ADD_UNARY_METHOD(UITextField, setText, 0)
+ ADD_UNARY_METHOD(UITextField, setPlaceholder, 0)
+
+ NEW_RECEIVER(UIBarButtonItem)
+ IdentifierInfo *initWithTitleUIBarButtonItem[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("style"),
+ &Ctx.Idents.get("target"), &Ctx.Idents.get("action")};
+ ADD_METHOD(UIBarButtonItem, initWithTitleUIBarButtonItem, 4, 0)
+
+ NEW_RECEIVER(UIViewController)
+ ADD_UNARY_METHOD(UIViewController, setTitle, 0)
+
+ NEW_RECEIVER(UISegmentedControl)
+ IdentifierInfo *insertSegmentWithTitleUISegmentedControl[] = {
+ &Ctx.Idents.get("insertSegmentWithTitle"), &Ctx.Idents.get("atIndex"),
+ &Ctx.Idents.get("animated")};
+ ADD_METHOD(UISegmentedControl, insertSegmentWithTitleUISegmentedControl, 3, 0)
+ IdentifierInfo *setTitleUISegmentedControl[] = {
+ &Ctx.Idents.get("setTitle"), &Ctx.Idents.get("forSegmentAtIndex")};
+ ADD_METHOD(UISegmentedControl, setTitleUISegmentedControl, 2, 0)
+}
+
+#define LSF_INSERT(function_name) LSF.insert(&Ctx.Idents.get(function_name));
+#define LSM_INSERT_NULLARY(receiver, method_name) \
+ LSM.insert({&Ctx.Idents.get(receiver), Ctx.Selectors.getNullarySelector( \
+ &Ctx.Idents.get(method_name))});
+#define LSM_INSERT_UNARY(receiver, method_name) \
+ LSM.insert({&Ctx.Idents.get(receiver), \
+ Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(method_name))});
+#define LSM_INSERT_SELECTOR(receiver, method_list, arguments) \
+ LSM.insert({&Ctx.Idents.get(receiver), \
+ Ctx.Selectors.getSelector(arguments, method_list)});
+
+/// Initializes a list of methods and C functions that return a localized string
+void NonLocalizedStringChecker::initLocStringsMethods(ASTContext &Ctx) const {
+ if (!LSM.empty())
+ return;
+
+ IdentifierInfo *LocalizedStringMacro[] = {
+ &Ctx.Idents.get("localizedStringForKey"), &Ctx.Idents.get("value"),
+ &Ctx.Idents.get("table")};
+ LSM_INSERT_SELECTOR("NSBundle", LocalizedStringMacro, 3)
+ LSM_INSERT_UNARY("NSDateFormatter", "stringFromDate")
+ IdentifierInfo *LocalizedStringFromDate[] = {
+ &Ctx.Idents.get("localizedStringFromDate"), &Ctx.Idents.get("dateStyle"),
+ &Ctx.Idents.get("timeStyle")};
+ LSM_INSERT_SELECTOR("NSDateFormatter", LocalizedStringFromDate, 3)
+ LSM_INSERT_UNARY("NSNumberFormatter", "stringFromNumber")
+ LSM_INSERT_NULLARY("UITextField", "text")
+ LSM_INSERT_NULLARY("UITextView", "text")
+ LSM_INSERT_NULLARY("UILabel", "text")
+
+ LSF_INSERT("CFDateFormatterCreateStringWithDate");
+ LSF_INSERT("CFDateFormatterCreateStringWithAbsoluteTime");
+ LSF_INSERT("CFNumberFormatterCreateStringWithNumber");
+}
+
+/// Checks to see if the method / function declaration includes
+/// __attribute__((annotate("returns_localized_nsstring")))
+bool NonLocalizedStringChecker::isAnnotatedAsLocalized(const Decl *D) const {
+ if (!D)
+ return false;
+ return std::any_of(
+ D->specific_attr_begin<AnnotateAttr>(),
+ D->specific_attr_end<AnnotateAttr>(), [](const AnnotateAttr *Ann) {
+ return Ann->getAnnotation() == "returns_localized_nsstring";
+ });
+}
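+
+// For illustration, a sketch of user code that opts into the Localized state
+// via this annotation (the function itself is hypothetical):
+//   __attribute__((annotate("returns_localized_nsstring")))
+//   NSString *LocalizedProductName(void);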
+
+/// Returns true if the given SVal is marked as Localized in the program state
+bool NonLocalizedStringChecker::hasLocalizedState(SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ const LocalizedState *LS = C.getState()->get<LocalizedMemMap>(mt);
+ if (LS && LS->isLocalized())
+ return true;
+ }
+ return false;
+}
+
+/// Returns true if the given SVal is marked as NonLocalized in the program
+/// state
+bool NonLocalizedStringChecker::hasNonLocalizedState(SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ const LocalizedState *LS = C.getState()->get<LocalizedMemMap>(mt);
+ if (LS && LS->isNonLocalized())
+ return true;
+ }
+ return false;
+}
+
+/// Marks the given SVal as Localized in the program state
+void NonLocalizedStringChecker::setLocalizedState(const SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ ProgramStateRef State =
+ C.getState()->set<LocalizedMemMap>(mt, LocalizedState::getLocalized());
+ C.addTransition(State);
+ }
+}
+
+/// Marks the given SVal as NonLocalized in the program state
+void NonLocalizedStringChecker::setNonLocalizedState(const SVal S,
+ CheckerContext &C) const {
+ const MemRegion *mt = S.getAsRegion();
+ if (mt) {
+ ProgramStateRef State = C.getState()->set<LocalizedMemMap>(
+ mt, LocalizedState::getNonLocalized());
+ C.addTransition(State);
+ }
+}
+
+/// Reports a localization error for the passed in method call and SVal
+void NonLocalizedStringChecker::reportLocalizationError(
+ SVal S, const ObjCMethodCall &M, CheckerContext &C,
+ int argumentNumber) const {
+
+ ExplodedNode *ErrNode = C.getPredecessor();
+ static CheckerProgramPointTag Tag("NonLocalizedStringChecker",
+ "UnlocalizedString");
+ ErrNode = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ if (!ErrNode)
+ return;
+
+ // Generate the bug report.
+ std::unique_ptr<BugReport> R(new BugReport(
+ *BT, "User-facing text should use localized string macro", ErrNode));
+ if (argumentNumber) {
+ R->addRange(M.getArgExpr(argumentNumber - 1)->getSourceRange());
+ } else {
+ R->addRange(M.getSourceRange());
+ }
+ R->markInteresting(S);
+ C.emitReport(std::move(R));
+}
+
+/// Returns the argument number requiring a localized string if it exists;
+/// otherwise, returns -1.
+int NonLocalizedStringChecker::getLocalizedArgumentForSelector(
+ const IdentifierInfo *Receiver, Selector S) const {
+ auto method = UIMethods.find(Receiver);
+
+ if (method == UIMethods.end())
+ return -1;
+
+ auto argumentIterator = method->getSecond().find(S);
+
+ if (argumentIterator == method->getSecond().end())
+ return -1;
+
+ int argumentNumber = argumentIterator->getSecond();
+ return argumentNumber;
+}
+
+/// Check if the string being passed in has NonLocalized state
+void NonLocalizedStringChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ initUIMethods(C.getASTContext());
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+ if (!OD)
+ return;
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ Selector S = msg.getSelector();
+
+ std::string SelectorString = S.getAsString();
+ StringRef SelectorName = SelectorString;
+ assert(!SelectorName.empty());
+
+ if (odInfo->isStr("NSString")) {
+ // Handle the case where the receiver is an NSString
+ // These special NSString methods draw to the screen
+
+ if (!(SelectorName.startswith("drawAtPoint") ||
+ SelectorName.startswith("drawInRect") ||
+ SelectorName.startswith("drawWithRect")))
+ return;
+
+ SVal svTitle = msg.getReceiverSVal();
+
+ bool isNonLocalized = hasNonLocalizedState(svTitle, C);
+
+ if (isNonLocalized) {
+ reportLocalizationError(svTitle, msg, C);
+ }
+ }
+
+ int argumentNumber = getLocalizedArgumentForSelector(odInfo, S);
+  // Walk up the hierarchy of superclasses and their protocols.
+ while (argumentNumber < 0 && OD->getSuperClass() != nullptr) {
+ for (const auto *P : OD->all_referenced_protocols()) {
+ argumentNumber = getLocalizedArgumentForSelector(P->getIdentifier(), S);
+ if (argumentNumber >= 0)
+ break;
+ }
+ if (argumentNumber < 0) {
+ OD = OD->getSuperClass();
+ argumentNumber = getLocalizedArgumentForSelector(OD->getIdentifier(), S);
+ }
+ }
+
+ if (argumentNumber < 0) // There was no match in UIMethods
+ return;
+
+ SVal svTitle = msg.getArgSVal(argumentNumber);
+
+ if (const ObjCStringRegion *SR =
+ dyn_cast_or_null<ObjCStringRegion>(svTitle.getAsRegion())) {
+ StringRef stringValue =
+ SR->getObjCStringLiteral()->getString()->getString();
+ if ((stringValue.trim().size() == 0 && stringValue.size() > 0) ||
+ stringValue.empty())
+ return;
+ if (!IsAggressive && llvm::sys::unicode::columnWidthUTF8(stringValue) < 2)
+ return;
+ }
+
+ bool isNonLocalized = hasNonLocalizedState(svTitle, C);
+
+ if (isNonLocalized) {
+ reportLocalizationError(svTitle, msg, C, argumentNumber + 1);
+ }
+}
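+
+// An illustrative case this callback catches, given the NSAlert entries in
+// initUIMethods (the 'alert' variable is hypothetical):
+//   NSString *title = @"Order saved";  // literal, marked NonLocalized
+//   [alert setMessageText:title];      // warning: needs a localized string
+//   [alert setMessageText:NSLocalizedString(@"Order saved", @"Toast title")];
+//   // OK: localizedStringForKey:value:table: results are marked Localized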
+
+static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
+ if (!Cls)
+ return false;
+
+ IdentifierInfo *ClsName = Cls->getIdentifier();
+
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &Ctx.Idents.get("NSString") ||
+ ClsName == &Ctx.Idents.get("NSMutableString");
+}
+
+/// Marks a string being returned by any call as localized
+/// if it is in LocStringFunctions (LSF) or the function is annotated.
+/// Otherwise, in aggressive mode we mark it as NonLocalized; in
+/// non-aggressive mode we mark it as NonLocalized only if it is not backed
+/// by a SymRegion, which in practice leaves only string literals marked
+/// NonLocalized.
+void NonLocalizedStringChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ initLocStringsMethods(C.getASTContext());
+
+ if (!Call.getOriginExpr())
+ return;
+
+  // Anything that takes a localized NSString as an argument and returns an
+  // NSString is assumed to return a localized NSString. (A known
+  // counterexample: incorrectly combining two localized strings.)
+ const QualType RT = Call.getResultType();
+ if (isNSStringType(RT, C.getASTContext())) {
+ for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
+ SVal argValue = Call.getArgSVal(i);
+ if (hasLocalizedState(argValue, C)) {
+ SVal sv = Call.getReturnValue();
+ setLocalizedState(sv, C);
+ return;
+ }
+ }
+ }
+
+ const Decl *D = Call.getDecl();
+ if (!D)
+ return;
+
+ const IdentifierInfo *Identifier = Call.getCalleeIdentifier();
+
+ SVal sv = Call.getReturnValue();
+ if (isAnnotatedAsLocalized(D) || LSF.count(Identifier) != 0) {
+ setLocalizedState(sv, C);
+ } else if (isNSStringType(RT, C.getASTContext()) &&
+ !hasLocalizedState(sv, C)) {
+ if (IsAggressive) {
+ setNonLocalizedState(sv, C);
+ } else {
+ const SymbolicRegion *SymReg =
+ dyn_cast_or_null<SymbolicRegion>(sv.getAsRegion());
+ if (!SymReg)
+ setNonLocalizedState(sv, C);
+ }
+ }
+}
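+
+// A sketch of the intended propagation (variable names hypothetical):
+//   NSString *when = [formatter stringFromDate:date]; // in LSM -> Localized
+//   NSString *msg = [NSString stringWithFormat:fmt, when];
+//   // msg returns an NSString and takes a Localized argument, so the loop
+//   // above marks it Localized as well.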
+
+/// Marks a string being returned by an ObjC method as localized
+/// if it is in LocStringMethods or the method is annotated
+void NonLocalizedStringChecker::checkPostObjCMessage(const ObjCMethodCall &msg,
+ CheckerContext &C) const {
+ initLocStringsMethods(C.getASTContext());
+
+ if (!msg.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+ if (!OD)
+ return;
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ Selector S = msg.getSelector();
+ std::string SelectorName = S.getAsString();
+
+ std::pair<const IdentifierInfo *, Selector> MethodDescription = {odInfo, S};
+
+ if (LSM.count(MethodDescription) || isAnnotatedAsLocalized(msg.getDecl())) {
+ SVal sv = msg.getReturnValue();
+ setLocalizedState(sv, C);
+ }
+}
+
+/// Marks all ObjC string literals as non-localized.
+void NonLocalizedStringChecker::checkPostStmt(const ObjCStringLiteral *SL,
+ CheckerContext &C) const {
+ SVal sv = C.getSVal(SL);
+ setNonLocalizedState(sv, C);
+}
+
+namespace {
+class EmptyLocalizationContextChecker
+ : public Checker<check::ASTDecl<ObjCImplementationDecl>> {
+
+ // A helper class, which walks the AST
+ class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+ const ObjCMethodDecl *MD;
+ BugReporter &BR;
+ AnalysisManager &Mgr;
+ const CheckerBase *Checker;
+ LocationOrAnalysisDeclContext DCtx;
+
+ public:
+ MethodCrawler(const ObjCMethodDecl *InMD, BugReporter &InBR,
+ const CheckerBase *Checker, AnalysisManager &InMgr,
+ AnalysisDeclContext *InDCtx)
+ : MD(InMD), BR(InBR), Mgr(InMgr), Checker(Checker), DCtx(InDCtx) {}
+
+ void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+ void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ void reportEmptyContextError(const ObjCMessageExpr *M) const;
+
+ void VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child)
+ this->Visit(Child);
+ }
+ }
+ };
+
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+};
+} // end anonymous namespace
+
+void EmptyLocalizationContextChecker::checkASTDecl(
+ const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+
+ for (const ObjCMethodDecl *M : D->methods()) {
+ AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
+
+ const Stmt *Body = M->getBody();
+ assert(Body);
+
+ MethodCrawler MC(M->getCanonicalDecl(), BR, this, Mgr, DCtx);
+ MC.VisitStmt(Body);
+ }
+}
+
+/// This check attempts to match these macros, assuming they are defined as
+/// follows:
+///
+/// #define NSLocalizedString(key, comment) \
+/// [[NSBundle mainBundle] localizedStringForKey:(key) value:@"" table:nil]
+/// #define NSLocalizedStringFromTable(key, tbl, comment) \
+/// [[NSBundle mainBundle] localizedStringForKey:(key) value:@"" table:(tbl)]
+/// #define NSLocalizedStringFromTableInBundle(key, tbl, bundle, comment) \
+/// [bundle localizedStringForKey:(key) value:@"" table:(tbl)]
+/// #define NSLocalizedStringWithDefaultValue(key, tbl, bundle, val, comment)
+///
+/// We cannot use a path-sensitive check here because the macro argument we
+/// are checking (the comment) is unused and thus not present in the AST, so
+/// we run the Lexer over the original macro call and retrieve the value of
+/// the comment. If it is empty or nil, we raise a warning.
+void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+
+ const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
+ if (!OD)
+ return;
+
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ if (!(odInfo->isStr("NSBundle") &&
+ ME->getSelector().getAsString() ==
+ "localizedStringForKey:value:table:")) {
+ return;
+ }
+
+ SourceRange R = ME->getSourceRange();
+ if (!R.getBegin().isMacroID())
+ return;
+
+ // getImmediateMacroCallerLoc gets the location of the immediate macro
+ // caller, one level up the stack toward the initial macro typed into the
+ // source, so SL should point to the NSLocalizedString macro.
+ SourceLocation SL =
+ Mgr.getSourceManager().getImmediateMacroCallerLoc(R.getBegin());
+ std::pair<FileID, unsigned> SLInfo =
+ Mgr.getSourceManager().getDecomposedLoc(SL);
+
+ SrcMgr::SLocEntry SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
+
+  // If the NSLocalizedString macro is wrapped in another macro, we need to
+  // unwrap the expansion until we get to the NSLocalizedString macro itself.
+ while (SE.isExpansion()) {
+ SL = SE.getExpansion().getSpellingLoc();
+ SLInfo = Mgr.getSourceManager().getDecomposedLoc(SL);
+ SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
+ }
+
+ llvm::MemoryBuffer *BF = SE.getFile().getContentCache()->getRawBuffer();
+ Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
+ BF->getBufferStart() + SLInfo.second, BF->getBufferEnd());
+
+ Token I;
+ Token Result; // This will hold the token just before the last ')'
+ int p_count = 0; // This is for parenthesis matching
+ while (!TheLexer.LexFromRawLexer(I)) {
+ if (I.getKind() == tok::l_paren)
+ ++p_count;
+ if (I.getKind() == tok::r_paren) {
+ if (p_count == 1)
+ break;
+ --p_count;
+ }
+ Result = I;
+ }
+
+ if (isAnyIdentifier(Result.getKind())) {
+ if (Result.getRawIdentifier().equals("nil")) {
+ reportEmptyContextError(ME);
+ return;
+ }
+ }
+
+ if (!isStringLiteral(Result.getKind()))
+ return;
+
+ StringRef Comment =
+ StringRef(Result.getLiteralData(), Result.getLength()).trim("\"");
+
+ if ((Comment.trim().size() == 0 && Comment.size() > 0) || // Is Whitespace
+ Comment.empty()) {
+ reportEmptyContextError(ME);
+ }
+}
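+
+// Illustrative inputs for the raw-lexing walk above:
+//   NSLocalizedString(@"Hello", nil);    // flagged: nil comment
+//   NSLocalizedString(@"Hello", @"  ");  // flagged: whitespace-only comment
+//   NSLocalizedString(@"Hello", @"Greeting shown on launch"); // not flagged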
+
+void EmptyLocalizationContextChecker::MethodCrawler::reportEmptyContextError(
+ const ObjCMessageExpr *ME) const {
+ // Generate the bug report.
+ BR.EmitBasicReport(MD, Checker, "Context Missing",
+ "Localizability Issue (Apple)",
+ "Localized string macro should include a non-empty "
+ "comment for translators",
+ PathDiagnosticLocation(ME, BR.getSourceManager(), DCtx));
+}
+
+namespace {
+class PluralMisuseChecker : public Checker<check::ASTCodeBody> {
+
+ // A helper class, which walks the AST
+ class MethodCrawler : public RecursiveASTVisitor<MethodCrawler> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext *AC;
+
+    // This functions like a stack. We push any IfStmt or
+    // ConditionalOperator that matches the condition and pop it
+    // off when we leave that statement.
+ llvm::SmallVector<const clang::Stmt *, 8> MatchingStatements;
+    // This is true when we are the direct child of a
+    // matching statement.
+ bool InMatchingStatement = false;
+
+ public:
+ explicit MethodCrawler(BugReporter &InBR, const CheckerBase *Checker,
+ AnalysisDeclContext *InAC)
+ : BR(InBR), Checker(Checker), AC(InAC) {}
+
+ bool VisitIfStmt(const IfStmt *I);
+ bool EndVisitIfStmt(IfStmt *I);
+ bool TraverseIfStmt(IfStmt *x);
+ bool VisitConditionalOperator(const ConditionalOperator *C);
+ bool TraverseConditionalOperator(ConditionalOperator *C);
+ bool VisitCallExpr(const CallExpr *CE);
+ bool VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+ private:
+ void reportPluralMisuseError(const Stmt *S) const;
+ bool isCheckingPlurality(const Expr *E) const;
+ };
+
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ MethodCrawler Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
+ Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+} // end anonymous namespace
+
+// Checks the condition of the IfStmt and returns true if one
+// of the following heuristics is met:
+// 1) The condition is a variable with "singular" or "plural" in the name
+// 2) The condition is a binary operator with 1 or 2 on the right-hand side
+bool PluralMisuseChecker::MethodCrawler::isCheckingPlurality(
+ const Expr *Condition) const {
+ const BinaryOperator *BO = nullptr;
+ // Accounts for when a VarDecl represents a BinaryOperator
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Condition)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ const Expr *InitExpr = VD->getInit();
+ if (InitExpr) {
+ if (const BinaryOperator *B =
+ dyn_cast<BinaryOperator>(InitExpr->IgnoreParenImpCasts())) {
+ BO = B;
+ }
+ }
+ if (VD->getName().lower().find("plural") != StringRef::npos ||
+ VD->getName().lower().find("singular") != StringRef::npos) {
+ return true;
+ }
+ }
+ } else if (const BinaryOperator *B = dyn_cast<BinaryOperator>(Condition)) {
+ BO = B;
+ }
+
+ if (BO == nullptr)
+ return false;
+
+ if (IntegerLiteral *IL = dyn_cast_or_null<IntegerLiteral>(
+ BO->getRHS()->IgnoreParenImpCasts())) {
+ llvm::APInt Value = IL->getValue();
+ if (Value == 1 || Value == 2) {
+ return true;
+ }
+ }
+ return false;
+}
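+
+// Conditions satisfying these heuristics (illustrative):
+//   BOOL isSingular = (count == 1);
+//   if (isSingular) { ... }  // heuristic 1: "singular" in the variable name
+//   if (count == 2) { ... }  // heuristic 2: literal 1 or 2 on the RHS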
+
+// A CallExpr with "LOC" in its identifier that takes in a string literal
+// has been shown to almost always be a function that returns a localized
+// string. Raise a diagnostic when this is in a statement that matches
+// the condition.
+bool PluralMisuseChecker::MethodCrawler::VisitCallExpr(const CallExpr *CE) {
+ if (InMatchingStatement) {
+ if (const FunctionDecl *FD = CE->getDirectCallee()) {
+ std::string NormalizedName =
+ StringRef(FD->getNameInfo().getAsString()).lower();
+ if (NormalizedName.find("loc") != std::string::npos) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (isa<ObjCStringLiteral>(Arg))
+ reportPluralMisuseError(CE);
+ }
+ }
+ }
+ }
+ return true;
+}
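+
+// An illustrative pattern this flags (the callee name is hypothetical):
+//   if (count == 1)
+//     label.text = MYLocalizedString(@"1 item"); // "loc" in the callee name,
+//                                                // string literal argument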
+
+// The other case is for NSLocalizedString which also returns
+// a localized string. It's a macro for the ObjCMessageExpr
+// [NSBundle localizedStringForKey:value:table:]. Raise a
+// diagnostic when this is in a statement that matches
+// the condition.
+bool PluralMisuseChecker::MethodCrawler::VisitObjCMessageExpr(
+ const ObjCMessageExpr *ME) {
+ const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
+ if (!OD)
+ return true;
+
+ const IdentifierInfo *odInfo = OD->getIdentifier();
+
+ if (odInfo->isStr("NSBundle") &&
+ ME->getSelector().getAsString() == "localizedStringForKey:value:table:") {
+ if (InMatchingStatement) {
+ reportPluralMisuseError(ME);
+ }
+ }
+ return true;
+}
+
+/// Override TraverseIfStmt so we know when we are done traversing an IfStmt
+bool PluralMisuseChecker::MethodCrawler::TraverseIfStmt(IfStmt *I) {
+ RecursiveASTVisitor<MethodCrawler>::TraverseIfStmt(I);
+ return EndVisitIfStmt(I);
+}
+
+// EndVisit callbacks are not provided by the RecursiveASTVisitor
+// so we override TraverseIfStmt and make a call to EndVisitIfStmt
+// after traversing the IfStmt
+bool PluralMisuseChecker::MethodCrawler::EndVisitIfStmt(IfStmt *I) {
+ MatchingStatements.pop_back();
+ if (!MatchingStatements.empty()) {
+ if (MatchingStatements.back() != nullptr) {
+ InMatchingStatement = true;
+ return true;
+ }
+ }
+ InMatchingStatement = false;
+ return true;
+}
+
+bool PluralMisuseChecker::MethodCrawler::VisitIfStmt(const IfStmt *I) {
+ const Expr *Condition = I->getCond()->IgnoreParenImpCasts();
+ if (isCheckingPlurality(Condition)) {
+ MatchingStatements.push_back(I);
+ InMatchingStatement = true;
+ } else {
+ MatchingStatements.push_back(nullptr);
+ InMatchingStatement = false;
+ }
+
+ return true;
+}
+
+// Preliminary support for conditional operators.
+bool PluralMisuseChecker::MethodCrawler::TraverseConditionalOperator(
+ ConditionalOperator *C) {
+ RecursiveASTVisitor<MethodCrawler>::TraverseConditionalOperator(C);
+ MatchingStatements.pop_back();
+ if (!MatchingStatements.empty()) {
+ if (MatchingStatements.back() != nullptr)
+ InMatchingStatement = true;
+ else
+ InMatchingStatement = false;
+ } else {
+ InMatchingStatement = false;
+ }
+ return true;
+}
+
+bool PluralMisuseChecker::MethodCrawler::VisitConditionalOperator(
+ const ConditionalOperator *C) {
+ const Expr *Condition = C->getCond()->IgnoreParenImpCasts();
+ if (isCheckingPlurality(Condition)) {
+ MatchingStatements.push_back(C);
+ InMatchingStatement = true;
+ } else {
+ MatchingStatements.push_back(nullptr);
+ InMatchingStatement = false;
+ }
+ return true;
+}
+
+void PluralMisuseChecker::MethodCrawler::reportPluralMisuseError(
+ const Stmt *S) const {
+ // Generate the bug report.
+ BR.EmitBasicReport(AC->getDecl(), Checker, "Plural Misuse",
+ "Localizability Issue (Apple)",
+                       "Plural cases are not supported across all languages. "
+ "Use a .stringsdict file instead",
+ PathDiagnosticLocation(S, BR.getSourceManager(), AC));
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
+ NonLocalizedStringChecker *checker =
+ mgr.registerChecker<NonLocalizedStringChecker>();
+ checker->IsAggressive =
+ mgr.getAnalyzerOptions().getBooleanOption("AggressiveReport", false);
+}
+
+void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
+ mgr.registerChecker<EmptyLocalizationContextChecker>();
+}
+
+void ento::registerPluralMisuseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PluralMisuseChecker>();
+}
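+
+// Usage note (a sketch, based on the getBooleanOption call above): the flag
+// can be toggled on the analyzer command line via
+//   -analyzer-config AggressiveReport=true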
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 783890135ea3..1e56d709e4f9 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -118,7 +118,7 @@ private:
SValBuilder &Builder) const {
return definitelyReturnedError(RetSym, State, Builder, true);
}
-
+
/// Mark an AllocationPair interesting for diagnostic reporting.
void markInteresting(BugReport *R, const AllocationPair &AP) const {
R->markInteresting(AP.first);
@@ -136,7 +136,6 @@ private:
public:
SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
- ~SecKeychainBugVisitor() override {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int X = 0;
@@ -202,12 +201,8 @@ unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
static bool isBadDeallocationArgument(const MemRegion *Arg) {
if (!Arg)
return false;
- if (isa<AllocaRegion>(Arg) ||
- isa<BlockDataRegion>(Arg) ||
- isa<TypedRegion>(Arg)) {
- return true;
- }
- return false;
+ return isa<AllocaRegion>(Arg) || isa<BlockDataRegion>(Arg) ||
+ isa<TypedRegion>(Arg);
}
/// Given the address expression, retrieve the value it's pointing to. Assume
@@ -241,11 +236,7 @@ bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
nonloc::SymbolVal(RetSym));
ProgramStateRef ErrState = State->assume(NoErr, noError);
- if (ErrState == State) {
- return true;
- }
-
- return false;
+ return ErrState == State;
}
// Report deallocator mismatch. Remove the region from tracking - reporting a
@@ -256,7 +247,7 @@ void MacOSKeychainAPIChecker::
CheckerContext &C) const {
ProgramStateRef State = C.getState();
State = State->remove<AllocatedData>(AP.first);
- ExplodedNode *N = C.addTransition(State);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
@@ -283,7 +274,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return;
-
+
StringRef funName = C.getCalleeName(FD);
if (funName.empty())
return;
@@ -302,7 +293,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
// Remove the value from the state. The new symbol will be added for
// tracking when the second allocator is processed in checkPostStmt().
State = State->remove<AllocatedData>(V);
- ExplodedNode *N = C.addTransition(State);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
initBugType();
@@ -365,7 +356,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
if (isEnclosingFunctionParam(ArgExpr))
return;
- ExplodedNode *N = C.addTransition(State);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
initBugType();
@@ -431,7 +422,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
// report a bad call to free.
if (State->assume(ArgSVal.castAs<DefinedSVal>(), false) &&
!definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
- ExplodedNode *N = C.addTransition(State);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
initBugType();
@@ -585,10 +576,12 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
}
static CheckerProgramPointTag Tag(this, "DeadSymbolsLeak");
- ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+ ExplodedNode *N = C.generateNonFatalErrorNode(C.getState(), &Tag);
+ if (!N)
+ return;
// Generate the error reports.
- for (const auto P : Errors)
+ for (const auto &P : Errors)
C.emitReport(generateAllocatedDataNotReleasedReport(P, N, C));
// Generate the new, cleaned up state.
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 11ba6096e2dc..4cbe97b26075 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -62,7 +62,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
- ExplodedNode *N = C.generateSink(state);
+ ExplodedNode *N = C.generateErrorNode(state);
if (!N)
return;
@@ -79,7 +79,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (TrimmedFName != FName)
FName = TrimmedFName;
}
-
+
SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to '" << FName << "' uses";
diff --git a/lib/StaticAnalyzer/Checkers/Makefile b/lib/StaticAnalyzer/Checkers/Makefile
index 2582908b95d0..7c8f7bf1bc4e 100644
--- a/lib/StaticAnalyzer/Checkers/Makefile
+++ b/lib/StaticAnalyzer/Checkers/Makefile
@@ -1,13 +1,13 @@
##===- clang/lib/Checker/Makefile --------------------------*- Makefile -*-===##
-#
+#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
-#
+#
##===----------------------------------------------------------------------===##
#
-# This implements analyses built on top of source-level CFGs.
+# This implements analyses built on top of source-level CFGs.
#
##===----------------------------------------------------------------------===##
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index a9e08653b241..713d9fe285a5 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -65,10 +65,10 @@ class RefState {
const Stmt *S;
unsigned K : 3; // Kind enum, but stored as a bitfield.
- unsigned Family : 29; // Rest of 32-bit word, currently just an allocation
+ unsigned Family : 29; // Rest of 32-bit word, currently just an allocation
// family.
- RefState(Kind k, const Stmt *s, unsigned family)
+ RefState(Kind k, const Stmt *s, unsigned family)
: S(s), K(k), Family(family) {
assert(family != AF_None);
}
@@ -94,7 +94,7 @@ public:
return RefState(AllocatedOfSizeZero, RS->getStmt(),
RS->getAllocationFamily());
}
- static RefState getReleased(unsigned family, const Stmt *s) {
+ static RefState getReleased(unsigned family, const Stmt *s) {
return RefState(Released, s, family);
}
static RefState getRelinquished(unsigned family, const Stmt *s) {
@@ -169,9 +169,9 @@ class MallocChecker : public Checker<check::DeadSymbols,
{
public:
MallocChecker()
- : II_alloca(nullptr), II_malloc(nullptr), II_free(nullptr),
+ : II_alloca(nullptr), II_malloc(nullptr), II_free(nullptr),
II_realloc(nullptr), II_calloc(nullptr), II_valloc(nullptr),
- II_reallocf(nullptr), II_strndup(nullptr), II_strdup(nullptr),
+ II_reallocf(nullptr), II_strndup(nullptr), II_strdup(nullptr),
II_kmalloc(nullptr), II_if_nameindex(nullptr),
II_if_freenameindex(nullptr) {}
@@ -185,7 +185,7 @@ public:
CK_NumCheckKinds
};
- enum class MemoryOperationKind {
+ enum class MemoryOperationKind {
MOK_Allocate,
MOK_Free,
MOK_Any
@@ -245,19 +245,19 @@ private:
/// \brief Print names of allocators and deallocators.
///
/// \returns true on success.
- bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+ bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const;
/// \brief Print expected name of an allocator based on the deallocator's
/// family derived from the DeallocExpr.
- void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+ void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
const Expr *DeallocExpr) const;
- /// \brief Print expected name of a deallocator based on the allocator's
+ /// \brief Print expected name of a deallocator based on the allocator's
/// family.
void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
///@{
- /// Check if this is one of the functions which can allocate/reallocate memory
+ /// Check if this is one of the functions which can allocate/reallocate memory
/// pointed to by one of its arguments.
bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
bool isCMemFunction(const FunctionDecl *FD,
@@ -292,7 +292,7 @@ private:
const ProgramStateRef &State) const;
/// Update the RefState to reflect the new memory allocation.
- static ProgramStateRef
+ static ProgramStateRef
MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
AllocationFamily Family = AF_Malloc);
@@ -312,17 +312,17 @@ private:
bool ReturnsNullOnFailure = false) const;
ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
- bool FreesMemOnFailure,
+ bool FreesMemOnFailure,
ProgramStateRef State) const;
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State);
-
+
///\brief Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
- void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
+ void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const;
bool checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const;
@@ -330,7 +330,7 @@ private:
/// Check if the function is known free memory, or if it is
/// "interesting" and should be modeled explicitly.
///
- /// \param [out] EscapingSymbol A function might not free memory in general,
+ /// \param [out] EscapingSymbol A function might not free memory in general,
/// but could be known to free a particular symbol. In this case, false is
/// returned and the single escaping symbol is returned through the out
/// parameter.
@@ -357,20 +357,20 @@ private:
Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
const Stmt *AllocDeallocStmt,
bool IsALeakCheck = false) const;
- Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
+ Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck = false) const;
///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
- void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
const Expr *DeallocExpr) const;
void ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const;
void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
const Expr *DeallocExpr, const RefState *RS,
SymbolRef Sym, bool OwnershipTransferred) const;
- void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
- const Expr *DeallocExpr,
+ void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr,
const Expr *AllocExpr = nullptr) const;
void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const;
@@ -392,7 +392,8 @@ private:
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
- class MallocBugVisitor : public BugReporterVisitorImpl<MallocBugVisitor> {
+ class MallocBugVisitor final
+ : public BugReporterVisitorImpl<MallocBugVisitor> {
protected:
enum NotificationMode {
Normal,
@@ -414,8 +415,6 @@ private:
MallocBugVisitor(SymbolRef S, bool isLeak = false)
: Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), IsLeak(isLeak) {}
- ~MallocBugVisitor() override {}
-
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int X = 0;
ID.AddPointer(&X);
@@ -426,8 +425,8 @@ private:
const Stmt *Stmt) {
// Did not track -> allocated. Other state (released) -> allocated.
return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
- (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
- (!SPrev || !(SPrev->isAllocated() ||
+ (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
+ (!SPrev || !(SPrev->isAllocated() ||
SPrev->isAllocatedOfSizeZero())));
}
@@ -509,13 +508,14 @@ private:
REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState)
REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
+REGISTER_SET_WITH_PROGRAMSTATE(ReallocSizeZeroSymbols, SymbolRef)
-// A map from the freed symbol to the symbol representing the return value of
+// A map from the freed symbol to the symbol representing the return value of
// the free function.
REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
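As a reminder (a hedged sketch, not part of the patch), these REGISTER_* macros generate program-state trait accessors that the code below relies on:

    // Map traits bind a value per symbol:
    //   State = State->set<RegionState>(Sym, RefState);
    //   const RefState *RS = State->get<RegionState>(Sym);
    // Set traits track membership of a symbol:
    //   State = State->add<ReallocSizeZeroSymbols>(Sym);
    //   bool Seen = State->contains<ReallocSizeZeroSymbols>(Sym);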
namespace {
-class StopTrackingCallback : public SymbolVisitor {
+class StopTrackingCallback final : public SymbolVisitor {
ProgramStateRef state;
public:
StopTrackingCallback(ProgramStateRef st) : state(st) {}
@@ -634,7 +634,7 @@ bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
return false;
OverloadedOperatorKind Kind = FD->getOverloadedOperator();
- if (Kind != OO_New && Kind != OO_Array_New &&
+ if (Kind != OO_New && Kind != OO_Array_New &&
Kind != OO_Delete && Kind != OO_Array_Delete)
return false;
@@ -799,8 +799,8 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
State = ProcessZeroAllocation(C, CE, 0, State);
} else if (isStandardNewDelete(FD, C.getASTContext())) {
// Process direct calls to operator new/new[]/delete/delete[] functions
- // as distinct from new/new[]/delete/delete[] expressions that are
- // processed by the checkPostStmt callbacks for CXXNewExpr and
+ // as distinct from new/new[]/delete/delete[] expressions that are
+ // processed by the checkPostStmt callbacks for CXXNewExpr and
// CXXDeleteExpr.
OverloadedOperatorKind K = FD->getOverloadedOperator();
if (K == OO_New) {
@@ -870,7 +870,7 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
assert(Arg);
- Optional<DefinedSVal> DefArgVal =
+ Optional<DefinedSVal> DefArgVal =
State->getSVal(Arg, C.getLocationContext()).getAs<DefinedSVal>();
if (!DefArgVal)
@@ -882,7 +882,7 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
DefinedSVal Zero =
SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
- std::tie(TrueState, FalseState) =
+ std::tie(TrueState, FalseState) =
State->assume(SvalBuilder.evalEQ(State, *DefArgVal, Zero));
if (TrueState && !FalseState) {
@@ -892,15 +892,19 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
return State;
const RefState *RS = State->get<RegionState>(Sym);
- if (!RS)
- return State; // TODO: change to assert(RS); after realloc() will
- // guarantee have a RegionState attached.
-
- if (!RS->isAllocated())
- return State;
-
- return TrueState->set<RegionState>(Sym,
- RefState::getAllocatedOfSizeZero(RS));
+ if (RS) {
+ if (RS->isAllocated())
+ return TrueState->set<RegionState>(Sym,
+ RefState::getAllocatedOfSizeZero(RS));
+ else
+ return State;
+ } else {
+ // Case of zero-size realloc. Historically 'realloc(ptr, 0)' is treated as
+ // 'free(ptr)' and the returned value from 'realloc(ptr, 0)' is not
+ // tracked. Add zero-reallocated Sym to the state to catch references
+ // to zero-allocated memory.
+ return TrueState->add<ReallocSizeZeroSymbols>(Sym);
+ }
}
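For illustration only, a minimal sketch of the user-code pattern this new branch is designed to catch (the function name is hypothetical):

    #include <cstdlib>

    void useAfterZeroRealloc() {
      char *p = static_cast<char *>(malloc(8));
      // Treated like free(p); the value realloc returns gets no RegionState
      // and is instead remembered in ReallocSizeZeroSymbols.
      char *q = static_cast<char *>(realloc(p, 0));
      if (q)
        q[0] = 'x'; // use of zero-allocated memory; the checker warns here
      free(q);
    }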
// Assume the value is non-zero going forward.
@@ -944,7 +948,7 @@ static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
return false;
}
-void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
+void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
CheckerContext &C) const {
if (NE->getNumPlacementArgs())
@@ -961,17 +965,17 @@ void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
return;
ProgramStateRef State = C.getState();
- // The return value from operator new is bound to a specified initialization
- // value (if any) and we don't want to loose this value. So we call
- // MallocUpdateRefState() instead of MallocMemAux() which breakes the
+  // The return value from operator new is bound to a specified initialization
+  // value (if any) and we don't want to lose this value. So we call
+  // MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
- State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
+ State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
: AF_CXXNew);
State = ProcessZeroAllocation(C, NE, 0, State);
C.addTransition(State);
}
-void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
+void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
CheckerContext &C) const {
if (!ChecksEnabled[CK_NewDeleteChecker])
@@ -996,12 +1000,9 @@ static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// Ex: [NSData dataWithBytesNoCopy:bytes length:10];
// (...unless a 'freeWhenDone' parameter is false, but that's checked later.)
StringRef FirstSlot = Call.getSelector().getNameForSlot(0);
- if (FirstSlot == "dataWithBytesNoCopy" ||
- FirstSlot == "initWithBytesNoCopy" ||
- FirstSlot == "initWithCharactersNoCopy")
- return true;
-
- return false;
+ return FirstSlot == "dataWithBytesNoCopy" ||
+ FirstSlot == "initWithBytesNoCopy" ||
+ FirstSlot == "initWithCharactersNoCopy";
}
static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
@@ -1038,7 +1039,7 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
ProgramStateRef
MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr *Att,
+ const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
@@ -1105,7 +1106,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
State = State->assume(extentMatchesSize, true);
assert(State);
}
-
+
return MallocUpdateRefState(C, CE, State, Family);
}
@@ -1132,7 +1133,7 @@ ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
const CallExpr *CE,
- const OwnershipAttr *Att,
+ const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
@@ -1184,7 +1185,7 @@ static bool didPreviousFreeFail(ProgramStateRef State,
return false;
}
-AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
+AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
const Stmt *S) const {
if (!S)
return AF_None;
@@ -1229,14 +1230,14 @@ AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
return AF_None;
}
-bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
+bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const {
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
// FIXME: This doesn't handle indirect calls.
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return false;
-
+
os << *FD;
if (!FD->isOverloadedOperator())
os << "()";
@@ -1253,14 +1254,14 @@ bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
}
if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
- os << "'"
+ os << "'"
<< getOperatorSpelling(NE->getOperatorNew()->getOverloadedOperator())
<< "'";
return true;
}
if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(E)) {
- os << "'"
+ os << "'"
<< getOperatorSpelling(DE->getOperatorDelete()->getOverloadedOperator())
<< "'";
return true;
@@ -1283,7 +1284,7 @@ void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
}
}
-void MallocChecker::printExpectedDeallocName(raw_ostream &os,
+void MallocChecker::printExpectedDeallocName(raw_ostream &os,
AllocationFamily Family) const {
switch(Family) {
case AF_Malloc: os << "free()"; return;
@@ -1327,25 +1328,25 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
return nullptr;
const MemRegion *R = ArgVal.getAsRegion();
-
+
// Nonlocs can't be freed, of course.
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return nullptr;
}
-
+
R = R->StripCasts();
-
+
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return nullptr;
}
-
+
const MemSpaceRegion *MS = R->getMemorySpace();
-
- // Parameters, locals, statics, globals, and memory returned by
+
+ // Parameters, locals, statics, globals, and memory returned by
// __builtin_alloca() shouldn't be freed.
if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
// FIXME: at the time this code was written, malloc() regions were
@@ -1391,7 +1392,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// If the pointer is allocated or escaped, but we are now trying to free it,
// check that the call to free is proper.
- } else if (RsBase->isAllocated() || RsBase->isAllocatedOfSizeZero() ||
+ } else if (RsBase->isAllocated() || RsBase->isAllocatedOfSizeZero() ||
RsBase->isEscaped()) {
// Check if an expected deallocation function matches the real one.
@@ -1410,20 +1411,20 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
!Offset.hasSymbolicOffset() &&
Offset.getOffset() != 0) {
const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
- ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
AllocExpr);
return nullptr;
}
}
}
- ReleasedAllocated = (RsBase != nullptr) && (RsBase->isAllocated() ||
+ ReleasedAllocated = (RsBase != nullptr) && (RsBase->isAllocated() ||
RsBase->isAllocatedOfSizeZero());
// Clean out the info on previous call to free return info.
State = State->remove<FreeReturnValue>(SymBase);
- // Keep track of the return value. If it is NULL, we will know that free
+ // Keep track of the return value. If it is NULL, we will know that free
// failed.
if (ReturnsNullOnFailure) {
SVal RetVal = C.getSVal(ParentExpr);
@@ -1463,7 +1464,7 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
if (IsALeakCheck) {
if (ChecksEnabled[CK_NewDeleteLeaksChecker])
return CK_NewDeleteLeaksChecker;
- }
+ }
else {
if (ChecksEnabled[CK_NewDeleteChecker])
return CK_NewDeleteChecker;
@@ -1488,6 +1489,9 @@ MallocChecker::getCheckIfTracked(CheckerContext &C,
Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck) const {
+ if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym))
+ return CK_MallocChecker;
+
const RefState *RS = C.getState()->get<RegionState>(Sym);
assert(RS);
return getCheckIfTracked(RS->getAllocationFamily(), IsALeakCheck);
@@ -1502,7 +1506,7 @@ bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
os << "the address of the label '" << Label->getLabel()->getName() << "'";
else
return false;
-
+
return true;
}
@@ -1526,7 +1530,7 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
return true;
default: {
const MemSpaceRegion *MS = MR->getMemorySpace();
-
+
if (isa<StackLocalsSpaceRegion>(MS)) {
const VarRegion *VR = dyn_cast<VarRegion>(MR);
const VarDecl *VD;
@@ -1580,8 +1584,8 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
}
}
-void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range,
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
const Expr *DeallocExpr) const {
if (!ChecksEnabled[CK_MallocChecker] &&
@@ -1593,7 +1597,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
if (!CheckKind.hasValue())
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_BadFree[*CheckKind])
BT_BadFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
@@ -1610,7 +1614,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
os << "deallocator";
os << " is ";
- bool Summarized = MR ? SummarizeRegion(os, MR)
+ bool Summarized = MR ? SummarizeRegion(os, MR)
: SummarizeValue(os, ArgVal);
if (Summarized)
os << ", which is not memory allocated by ";
@@ -1626,7 +1630,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const {
Optional<MallocChecker::CheckKind> CheckKind;
@@ -1638,7 +1642,7 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
else
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_FreeAlloca[*CheckKind])
BT_FreeAlloca[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Free alloca()", "Memory Error"));
@@ -1652,17 +1656,17 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
+void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
SourceRange Range,
- const Expr *DeallocExpr,
+ const Expr *DeallocExpr,
const RefState *RS,
- SymbolRef Sym,
+ SymbolRef Sym,
bool OwnershipTransferred) const {
if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_MismatchedDealloc)
BT_MismatchedDealloc.reset(
new BugType(CheckNames[CK_MismatchedDeallocatorChecker],
@@ -1680,7 +1684,7 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
if (OwnershipTransferred) {
if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
os << DeallocOs.str() << " cannot";
- else
+ else
os << "Cannot";
os << " take ownership of memory";
@@ -1721,7 +1725,7 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
if (!CheckKind.hasValue())
return;
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
@@ -1775,7 +1779,7 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
if (!CheckKind.hasValue())
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_UseFree[*CheckKind])
BT_UseFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use-after-free", "Memory Error"));
@@ -1791,7 +1795,7 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
}
void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
- bool Released, SymbolRef Sym,
+ bool Released, SymbolRef Sym,
SymbolRef PrevSym) const {
if (!ChecksEnabled[CK_MallocChecker] &&
@@ -1802,7 +1806,7 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
if (!CheckKind.hasValue())
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_DoubleFree[*CheckKind])
BT_DoubleFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Double free", "Memory Error"));
@@ -1830,7 +1834,7 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
if (!CheckKind.hasValue())
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_DoubleDelete)
BT_DoubleDelete.reset(new BugType(CheckNames[CK_NewDeleteChecker],
"Double delete", "Memory Error"));
@@ -1857,7 +1861,7 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
if (!CheckKind.hasValue())
return;
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_UseZerroAllocated[*CheckKind])
BT_UseZerroAllocated[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use of zero allocated", "Memory Error"));
@@ -1921,7 +1925,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
bool PrtIsNull = StatePtrIsNull && !StatePtrNotNull;
bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
- // If the ptr is NULL and the size is not 0, the call is equivalent to
+ // If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
if ( PrtIsNull && !SizeIsZero) {
ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
@@ -1930,7 +1934,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
}
if (PrtIsNull && SizeIsZero)
- return nullptr;
+ return State;
// Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
assert(!PrtIsNull);
@@ -1979,7 +1983,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
return nullptr;
}
-ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State) {
if (!State)
return nullptr;
@@ -1992,7 +1996,7 @@ ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
SVal count = State->getSVal(CE->getArg(0), LCtx);
SVal elementSize = State->getSVal(CE->getArg(1), LCtx);
SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize,
- svalBuilder.getContext().getSizeType());
+ svalBuilder.getContext().getSizeType());
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
return MallocMemAux(C, CE, TotalSize, zeroVal, State);
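A hedged user-level illustration of what CallocMem models: calloc(n, size) behaves like malloc(n * size) with the contents bound to zero (the function name below is hypothetical):

    #include <cstdlib>

    int *makeZeroed(unsigned n) {
      int *p = static_cast<int *>(calloc(n, sizeof(int)));
      // The checker models the extent as n * sizeof(int) and the contents
      // as zero, so for n > 0 and non-null p, p[0] == 0 is known here.
      return p;
    }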
@@ -2079,7 +2083,7 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
const ExplodedNode *AllocNode = nullptr;
const MemRegion *Region = nullptr;
std::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
-
+
ProgramPoint P = AllocNode->getLocation();
const Stmt *AllocationStmt = nullptr;
if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
@@ -2128,7 +2132,7 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
}
}
-
+
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
@@ -2151,10 +2155,12 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ExplodedNode *N = C.getPredecessor();
if (!Errors.empty()) {
static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
- N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
- for (SmallVectorImpl<SymbolRef>::iterator
+ N = C.generateNonFatalErrorNode(C.getState(), &Tag);
+ if (N) {
+ for (SmallVectorImpl<SymbolRef>::iterator
I = Errors.begin(), E = Errors.end(); I != E; ++I) {
- reportLeak(*I, N, C);
+ reportLeak(*I, N, C);
+ }
}
}
@@ -2233,7 +2239,7 @@ void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
}
// TODO: Blocks should be either inlined or should call invalidate regions
-// upon invocation. After that's in place, special casing here will not be
+// upon invocation. After that's in place, special casing here will not be
// needed.
void MallocChecker::checkPostStmt(const BlockExpr *BE,
CheckerContext &C) const {
@@ -2292,10 +2298,14 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
assert(Sym);
- const RefState *RS = C.getState()->get<RegionState>(Sym);
- if (RS && RS->isAllocatedOfSizeZero())
- ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
+ if (const RefState *RS = C.getState()->get<RegionState>(Sym)) {
+ if (RS->isAllocatedOfSizeZero())
+ ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
+ }
+ else if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym)) {
+ ReportUseZeroAllocated(C, S->getSourceRange(), Sym);
+ }
}
bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
@@ -2377,7 +2387,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
// If it's not a framework call, or if it takes a callback, assume it
// can free memory.
- if (!Call->isInSystemHeader() || Call->hasNonZeroCallbackArg())
+ if (!Call->isInSystemHeader() || Call->argumentsMayEscape())
return true;
// If it's a method we know about, handle it explicitly post-call.
@@ -2447,7 +2457,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
StringRef FName = II->getName();
// White list the 'XXXNoCopy' CoreFoundation functions.
- // We specifically check these before
+ // We specifically check these before
if (FName.endswith("NoCopy")) {
// Look for the deallocator argument. We know that the memory ownership
// is not transferred only if the deallocator argument is
@@ -2556,7 +2566,7 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
if (EscapingSymbol && EscapingSymbol != sym)
continue;
-
+
if (const RefState *RS = State->get<RegionState>(sym)) {
if ((RS->isAllocated() || RS->isAllocatedOfSizeZero()) &&
CheckRefState(RS)) {
@@ -2703,7 +2713,7 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
mgr.getCurrentCheckName();
- // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
+ // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
// checker.
if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
diff --git a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index e91347999dc1..99ba90d7a2d9 100644
--- a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -23,19 +23,22 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace ento;
+using llvm::APInt;
+using llvm::APSInt;
namespace {
struct MallocOverflowCheck {
const BinaryOperator *mulop;
const Expr *variable;
+ APSInt maxVal;
- MallocOverflowCheck (const BinaryOperator *m, const Expr *v)
- : mulop(m), variable (v)
- {}
+ MallocOverflowCheck(const BinaryOperator *m, const Expr *v, APSInt val)
+ : mulop(m), variable(v), maxVal(val) {}
};
class MallocOverflowSecurityChecker : public Checker<check::ASTCodeBody> {
@@ -54,6 +57,11 @@ public:
};
} // end anonymous namespace
+// Return true for computations which evaluate to zero: e.g., mult by 0.
+static inline bool EvaluatesToZero(APSInt &Val, BinaryOperatorKind op) {
+ return (op == BO_Mul) && (Val == 0);
+}
+
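A hypothetical caller showing why multiply-by-zero candidates are rejected up front:

    #include <cstdlib>

    void *neverOverflows(unsigned n) {
      // n * 0 always evaluates to 0, so the product cannot overflow;
      // CheckMallocArgument returns early instead of recording a report.
      return malloc(n * 0);
    }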
void MallocOverflowSecurityChecker::CheckMallocArgument(
SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
const Expr *TheArgument,
@@ -64,13 +72,14 @@ void MallocOverflowSecurityChecker::CheckMallocArgument(
Reject anything that applies to the variable: an explicit cast,
conditional expression, an operation that could reduce the range
of the result, or anything too complicated :-). */
- const Expr * e = TheArgument;
+ const Expr *e = TheArgument;
const BinaryOperator * mulop = nullptr;
+ APSInt maxVal;
for (;;) {
+ maxVal = 0;
e = e->IgnoreParenImpCasts();
- if (isa<BinaryOperator>(e)) {
- const BinaryOperator * binop = dyn_cast<BinaryOperator>(e);
+ if (const BinaryOperator *binop = dyn_cast<BinaryOperator>(e)) {
BinaryOperatorKind opc = binop->getOpcode();
// TODO: ignore multiplications by 1, reject if multiplied by 0.
if (mulop == nullptr && opc == BO_Mul)
@@ -80,12 +89,18 @@ void MallocOverflowSecurityChecker::CheckMallocArgument(
const Expr *lhs = binop->getLHS();
const Expr *rhs = binop->getRHS();
- if (rhs->isEvaluatable(Context))
+ if (rhs->isEvaluatable(Context)) {
e = lhs;
- else if ((opc == BO_Add || opc == BO_Mul)
- && lhs->isEvaluatable(Context))
+ maxVal = rhs->EvaluateKnownConstInt(Context);
+ if (EvaluatesToZero(maxVal, opc))
+ return;
+ } else if ((opc == BO_Add || opc == BO_Mul) &&
+ lhs->isEvaluatable(Context)) {
+ maxVal = lhs->EvaluateKnownConstInt(Context);
+ if (EvaluatesToZero(maxVal, opc))
+ return;
e = rhs;
- else
+ } else
return;
}
else if (isa<DeclRefExpr>(e) || isa<MemberExpr>(e))
@@ -103,7 +118,7 @@ void MallocOverflowSecurityChecker::CheckMallocArgument(
// TODO: Could push this into the innermost scope where 'e' is
// defined, rather than the whole function.
- PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e));
+ PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e, maxVal));
}
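For context, a hedged example of a call this records as a possible overflow for the later body scan (illustrative user code):

    #include <cstdlib>

    void *makeBuffer(unsigned count) {
      // 'count * 4' may wrap around; 'count' and the constant multiplier
      // are recorded so the visitor below can discard the report if it
      // later sees a guarding comparison or a safe assignment to 'count'.
      return malloc(count * 4);
    }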
namespace {
@@ -126,33 +141,84 @@ private:
return false;
}
- void CheckExpr(const Expr *E_p) {
- const Expr *E = E_p->IgnoreParenImpCasts();
+ const Decl *getDecl(const DeclRefExpr *DR) { return DR->getDecl(); }
+
+ const Decl *getDecl(const MemberExpr *ME) { return ME->getMemberDecl(); }
+ template <typename T1>
+ void Erase(const T1 *DR, std::function<bool(theVecType::iterator)> pred) {
theVecType::iterator i = toScanFor.end();
theVecType::iterator e = toScanFor.begin();
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
- const Decl * EdreD = DR->getDecl();
- while (i != e) {
- --i;
- if (const DeclRefExpr *DR_i = dyn_cast<DeclRefExpr>(i->variable)) {
- if (DR_i->getDecl() == EdreD)
- i = toScanFor.erase(i);
- }
+ while (i != e) {
+ --i;
+ if (const T1 *DR_i = dyn_cast<T1>(i->variable)) {
+ if ((getDecl(DR_i) == getDecl(DR)) && pred(i))
+ i = toScanFor.erase(i);
}
}
+ }
+
+ void CheckExpr(const Expr *E_p) {
+ auto PredTrue = [](theVecType::iterator) -> bool { return true; };
+ const Expr *E = E_p->IgnoreParenImpCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ Erase<DeclRefExpr>(DR, PredTrue);
else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
- // No points-to analysis, just look at the member
- const Decl *EmeMD = ME->getMemberDecl();
- while (i != e) {
- --i;
- if (const auto *ME_i = dyn_cast<MemberExpr>(i->variable)) {
- if (ME_i->getMemberDecl() == EmeMD)
- i = toScanFor.erase (i);
- }
+ Erase<MemberExpr>(ME, PredTrue);
+ }
+ }
+
+ // Check if the argument to malloc is assigned a value
+ // which cannot cause an overflow.
+  // e.g., for malloc(mul * x):
+ // case 1: mul = <constant value>
+ // case 2: mul = a/b, where b > x
+ void CheckAssignmentExpr(BinaryOperator *AssignEx) {
+ bool assignKnown = false;
+ bool numeratorKnown = false, denomKnown = false;
+ APSInt denomVal;
+ denomVal = 0;
+
+ // Erase if the multiplicand was assigned a constant value.
+ const Expr *rhs = AssignEx->getRHS();
+ if (rhs->isEvaluatable(Context))
+ assignKnown = true;
+
+    // Discard the report if the multiplicand was assigned a value that can
+    // never overflow after multiplication, e.g., when the assignment is a
+    // division and the denominator is greater than the other multiplicand.
+ const Expr *rhse = rhs->IgnoreParenImpCasts();
+ if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(rhse)) {
+ if (BOp->getOpcode() == BO_Div) {
+ const Expr *denom = BOp->getRHS()->IgnoreParenImpCasts();
+ if (denom->EvaluateAsInt(denomVal, Context))
+ denomKnown = true;
+ const Expr *numerator = BOp->getLHS()->IgnoreParenImpCasts();
+ if (numerator->isEvaluatable(Context))
+ numeratorKnown = true;
}
}
+ if (!assignKnown && !denomKnown)
+ return;
+ auto denomExtVal = denomVal.getExtValue();
+
+ // Ignore negative denominator.
+ if (denomExtVal < 0)
+ return;
+
+ const Expr *lhs = AssignEx->getLHS();
+ const Expr *E = lhs->IgnoreParenImpCasts();
+
+ auto pred = [assignKnown, numeratorKnown,
+ denomExtVal](theVecType::iterator i) {
+ return assignKnown ||
+ (numeratorKnown && (denomExtVal >= i->maxVal.getExtValue()));
+ };
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ Erase<DeclRefExpr>(DR, pred);
+ else if (const auto *ME = dyn_cast<MemberExpr>(E))
+ Erase<MemberExpr>(ME, pred);
}
public:
@@ -162,11 +228,13 @@ private:
const Expr * rhs = E->getRHS();
// Ignore comparisons against zero, since they generally don't
// protect against an overflow.
- if (!isIntZeroExpr(lhs) && ! isIntZeroExpr(rhs)) {
+ if (!isIntZeroExpr(lhs) && !isIntZeroExpr(rhs)) {
CheckExpr(lhs);
CheckExpr(rhs);
}
}
+ if (E->isAssignmentOp())
+ CheckAssignmentExpr(E);
EvaluatedExprVisitor<CheckOverflowOps>::VisitBinaryOperator(E);
}
@@ -243,12 +311,12 @@ void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
const FunctionDecl *FD = TheCall->getDirectCallee();
if (!FD)
- return;
+ continue;
// Get the name of the callee. If it's a builtin, strip off the prefix.
IdentifierInfo *FnInfo = FD->getIdentifier();
if (!FnInfo)
- return;
+ continue;
if (FnInfo->isStr ("malloc") || FnInfo->isStr ("_MALLOC")) {
if (TheCall->getNumArgs() == 1)
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index fb07484bfcd9..80a3fbe1a409 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -143,20 +143,20 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
while (true) {
A = A.getCanonicalType();
B = B.getCanonicalType();
-
+
if (A.getTypePtr() == B.getTypePtr())
return true;
-
+
if (const PointerType *ptrA = A->getAs<PointerType>())
if (const PointerType *ptrB = B->getAs<PointerType>()) {
A = ptrA->getPointeeType();
B = ptrB->getPointeeType();
continue;
}
-
+
break;
}
-
+
return false;
}
diff --git a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index d23708ecbd97..0e7894788c87 100644
--- a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -11,7 +11,7 @@
// about subpar uses of NSAutoreleasePool. Note that while the check itself
// (in its current form) could be written as a flow-insensitive check, it
// can be potentially enhanced in the future with flow-sensitive information.
-// It is also a good example of the CheckerVisitor interface.
+// It is also a good example of the CheckerVisitor interface.
//
//===----------------------------------------------------------------------===//
@@ -48,7 +48,7 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
if (!OD)
- return;
+ return;
if (!OD->getIdentifier()->isStr("NSAutoreleasePool"))
return;
@@ -62,7 +62,7 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
BT.reset(new BugType(this, "Use -drain instead of -release",
"API Upgrade (Apple)"));
- ExplodedNode *N = C.addTransition();
+ ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N) {
assert(0);
return;
diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index c351c6e9e08b..dab068b27e80 100644
--- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -58,7 +58,7 @@ void NSErrorMethodChecker::checkASTDecl(const ObjCMethodDecl *D,
return;
if (!II)
- II = &D->getASTContext().Idents.get("NSError");
+ II = &D->getASTContext().Idents.get("NSError");
bool hasNSError = false;
for (const auto *I : D->params()) {
@@ -105,7 +105,7 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
return;
if (!II)
- II = &D->getASTContext().Idents.get("CFErrorRef");
+ II = &D->getASTContext().Idents.get("CFErrorRef");
bool hasCFError = false;
for (auto I : D->params()) {
diff --git a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index ba82d1d1d41f..c1deadef4202 100644
--- a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -66,6 +66,7 @@ void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
.Case("assfail", true)
.Case("db_error", true)
.Case("__assert", true)
+ .Case("__assert2", true)
// For the purpose of static analysis, we do not care that
// this MSVC function will return if the user decides to continue.
.Case("_wassert", true)
@@ -81,7 +82,7 @@ void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
}
if (BuildSinks)
- C.generateSink();
+ C.generateSink(C.getState(), C.getPredecessor());
}
void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
@@ -90,7 +91,7 @@ void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
if (const ObjCMethodDecl *MD = Msg.getDecl()) {
MD = MD->getCanonicalDecl();
if (MD->hasAttr<AnalyzerNoReturnAttr>()) {
- C.generateSink();
+ C.generateSink(C.getState(), C.getPredecessor());
return;
}
}
@@ -136,7 +137,7 @@ void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
}
// If we got here, it's one of the messages we care about.
- C.generateSink();
+ C.generateSink(C.getState(), C.getPredecessor());
}
void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 73f8087fd3c0..1f82ab94af82 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -28,7 +28,7 @@ using namespace ento;
namespace {
class NonNullParamChecker
- : public Checker< check::PreCall > {
+ : public Checker< check::PreCall, EventDispatcher<ImplicitNullDerefEvent> > {
mutable std::unique_ptr<BugType> BTAttrNonNull;
mutable std::unique_ptr<BugType> BTNullRefArg;
@@ -139,26 +139,34 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
- if (stateNull && !stateNotNull) {
- // Generate an error node. Check for a null node in case
- // we cache out.
- if (ExplodedNode *errorNode = C.generateSink(stateNull)) {
+ if (stateNull) {
+ if (!stateNotNull) {
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
- std::unique_ptr<BugReport> R;
- if (haveAttrNonNull)
- R = genReportNullAttrNonNull(errorNode, ArgE);
- else if (haveRefTypeParam)
- R = genReportReferenceToNullPointer(errorNode, ArgE);
+ std::unique_ptr<BugReport> R;
+ if (haveAttrNonNull)
+ R = genReportNullAttrNonNull(errorNode, ArgE);
+ else if (haveRefTypeParam)
+ R = genReportReferenceToNullPointer(errorNode, ArgE);
- // Highlight the range of the argument that was null.
- R->addRange(Call.getArgSourceRange(idx));
+ // Highlight the range of the argument that was null.
+ R->addRange(Call.getArgSourceRange(idx));
- // Emit the bug report.
- C.emitReport(std::move(R));
- }
+ // Emit the bug report.
+ C.emitReport(std::move(R));
+ }
- // Always return. Either we cached out or we just emitted an error.
- return;
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+ if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
+ ImplicitNullDerefEvent event = {
+ V, false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/haveRefTypeParam};
+ dispatchEvent(event);
+ }
}
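For orientation, a hedged sketch of the handshake this enables: the event is consumed by any checker registered with check::Event<ImplicitNullDerefEvent>, such as the new NullabilityChecker added below:

    // Producer (this checker):
    //   ImplicitNullDerefEvent event = {V, false, N, &C.getBugReporter(),
    //                                   /*IsDirectDereference=*/haveRefTypeParam};
    //   dispatchEvent(event);
    // Consumer (e.g. NullabilityChecker::checkEvent below):
    //   void checkEvent(ImplicitNullDerefEvent Event) const;
    //   // warns when the dereferenced value was tracked as nullable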
// If a pointer value passed the check we should assume that it is
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
new file mode 100644
index 000000000000..bb86ea401df5
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -0,0 +1,1066 @@
+//== NullabilityChecker.cpp - Nullability checker ---------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker tries to find nullability violations. There are several kinds of
+// possible violations:
+// * Null pointer is passed to a pointer which has a _Nonnull type.
+// * Null pointer is returned from a function which has a _Nonnull return type.
+// * Nullable pointer is passed to a pointer which has a _Nonnull type.
+// * Nullable pointer is returned from a function which has a _Nonnull return
+// type.
+// * Nullable pointer is dereferenced.
+//
+// This checker propagates the nullability information of the pointers and looks
+// for the patterns that are described above. Explicit casts are trusted and are
+// considered a way to suppress false positives for this checker. The other way
+// to suppress warnings would be to add asserts or guarding if statements to the
+// code. In addition to the nullability propagation this checker also uses some
+// heuristics to suppress potential false positives.
+//
+//===----------------------------------------------------------------------===//
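An illustrative snippet (hypothetical user code) showing two of the violations listed above, written with clang's nullability qualifiers:

    int * _Nonnull forward(int * _Nullable p) {
      *p = 1;         // nullable pointer is dereferenced
      return nullptr; // null returned from a function with a _Nonnull return
    }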
+
+#include "ClangSACheckers.h"
+#include "llvm/Support/Path.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+// Do not reorder! The getMostNullable method relies on the order.
+// Optimization: Most pointers are expected to be unspecified. When a symbol
+// has an unspecified or nonnull type, none of the rules would indicate any
+// problem for that symbol. For this reason only nullable and contradicted
+// nullability are stored for a symbol. When a symbol is already contradicted,
+// it cannot be cast back to nullable.
+enum class Nullability : char {
+ Contradicted, // Tracked nullability is contradicted by an explicit cast. Do
+ // not report any nullability related issue for this symbol.
+                // This nullability is propagated aggressively to avoid false
+ // positive results. See the comment on getMostNullable method.
+ Nullable,
+ Unspecified,
+ Nonnull
+};
+
+/// Returns the most nullable nullability. This is used for message expressions
+/// like [receiver method], where the nullability of this expression is either
+/// the nullability of the receiver or the nullability of the return type of the
+/// method, depending on which is more nullable. Contradicted is considered to
+/// be the most nullable, to avoid false positive results.
+Nullability getMostNullable(Nullability Lhs, Nullability Rhs) {
+ return static_cast<Nullability>(
+ std::min(static_cast<char>(Lhs), static_cast<char>(Rhs)));
+}
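An illustrative check of the ordering dependence, relying only on the declarations above:

    #include <cassert>

    void testMostNullable() {
      // Contradicted < Nullable < Unspecified < Nonnull, so std::min picks
      // the "most nullable" side of each pair.
      assert(getMostNullable(Nullability::Nonnull, Nullability::Nullable) ==
             Nullability::Nullable);
      assert(getMostNullable(Nullability::Contradicted, Nullability::Nullable) ==
             Nullability::Contradicted); // contradicted wins, so reports stay off
    }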
+
+const char *getNullabilityString(Nullability Nullab) {
+ switch (Nullab) {
+ case Nullability::Contradicted:
+ return "contradicted";
+ case Nullability::Nullable:
+ return "nullable";
+ case Nullability::Unspecified:
+ return "unspecified";
+ case Nullability::Nonnull:
+ return "nonnull";
+ }
+ llvm_unreachable("Unexpected enumeration.");
+ return "";
+}
+
+// These enums are used as indexes into the ErrorMessages array.
+enum class ErrorKind : int {
+ NilAssignedToNonnull,
+ NilPassedToNonnull,
+ NilReturnedToNonnull,
+ NullableAssignedToNonnull,
+ NullableReturnedToNonnull,
+ NullableDereferenced,
+ NullablePassedToNonnull
+};
+
+const char *const ErrorMessages[] = {
+ "Null is assigned to a pointer which is expected to have non-null value",
+ "Null passed to a callee that requires a non-null argument",
+ "Null is returned from a function that is expected to return a non-null "
+ "value",
+ "Nullable pointer is assigned to a pointer which is expected to have "
+ "non-null value",
+ "Nullable pointer is returned from a function that is expected to return a "
+ "non-null value",
+ "Nullable pointer is dereferenced",
+ "Nullable pointer is passed to a callee that requires a non-null argument"};
+
+class NullabilityChecker
+ : public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
+ check::PostCall, check::PostStmt<ExplicitCastExpr>,
+ check::PostObjCMessage, check::DeadSymbols,
+ check::Event<ImplicitNullDerefEvent>> {
+ mutable std::unique_ptr<BugType> BT;
+
+public:
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ void checkEvent(ImplicitNullDerefEvent Event) const;
+
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+ struct NullabilityChecksFilter {
+ DefaultBool CheckNullPassedToNonnull;
+ DefaultBool CheckNullReturnedFromNonnull;
+ DefaultBool CheckNullableDereferenced;
+ DefaultBool CheckNullablePassedToNonnull;
+ DefaultBool CheckNullableReturnedFromNonnull;
+
+ CheckName CheckNameNullPassedToNonnull;
+ CheckName CheckNameNullReturnedFromNonnull;
+ CheckName CheckNameNullableDereferenced;
+ CheckName CheckNameNullablePassedToNonnull;
+ CheckName CheckNameNullableReturnedFromNonnull;
+ };
+
+ NullabilityChecksFilter Filter;
+  // When set to false, no nullability information will be tracked in
+  // NullabilityMap. It is possible to catch errors like passing a null pointer
+  // to a callee that expects a nonnull argument without the information that
+  // is stored in the NullabilityMap. This is an optimization.
+ DefaultBool NeedTracking;
+
+private:
+ class NullabilityBugVisitor
+ : public BugReporterVisitorImpl<NullabilityBugVisitor> {
+ public:
+ NullabilityBugVisitor(const MemRegion *M) : Region(M) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Region);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ // The tracked region.
+ const MemRegion *Region;
+ };
+
+ /// When any of the nonnull arguments of the analyzed function is null, do not
+ /// report anything and turn off the check.
+ ///
+ /// When \p SuppressPath is set to true, no more bugs will be reported on this
+ /// path by this checker.
+ void reportBugIfPreconditionHolds(ErrorKind Error, ExplodedNode *N,
+ const MemRegion *Region, CheckerContext &C,
+ const Stmt *ValueExpr = nullptr,
+ bool SuppressPath = false) const;
+
+ void reportBug(ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
+ BugReporter &BR, const Stmt *ValueExpr = nullptr) const {
+ if (!BT)
+ BT.reset(new BugType(this, "Nullability", "Memory error"));
+ const char *Msg = ErrorMessages[static_cast<int>(Error)];
+ std::unique_ptr<BugReport> R(new BugReport(*BT, Msg, N));
+ if (Region) {
+ R->markInteresting(Region);
+ R->addVisitor(llvm::make_unique<NullabilityBugVisitor>(Region));
+ }
+ if (ValueExpr) {
+ R->addRange(ValueExpr->getSourceRange());
+ if (Error == ErrorKind::NilAssignedToNonnull ||
+ Error == ErrorKind::NilPassedToNonnull ||
+ Error == ErrorKind::NilReturnedToNonnull)
+ bugreporter::trackNullOrUndefValue(N, ValueExpr, *R);
+ }
+ BR.emitReport(std::move(R));
+ }
+
+ /// If an SVal wraps a region that should be tracked, it will return a pointer
+ /// to the wrapped region. Otherwise it will return a nullptr.
+ const SymbolicRegion *getTrackRegion(SVal Val,
+ bool CheckSuperRegion = false) const;
+};
+
+class NullabilityState {
+public:
+ NullabilityState(Nullability Nullab, const Stmt *Source = nullptr)
+ : Nullab(Nullab), Source(Source) {}
+
+ const Stmt *getNullabilitySource() const { return Source; }
+
+ Nullability getValue() const { return Nullab; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(static_cast<char>(Nullab));
+ ID.AddPointer(Source);
+ }
+
+ void print(raw_ostream &Out) const {
+ Out << getNullabilityString(Nullab) << "\n";
+ }
+
+private:
+ Nullability Nullab;
+  // Source is the expression which determined the nullability. For example, a
+  // message like [nullable nonnull_returning] has nullable nullability, because
+  // the receiver is nullable. Here the receiver is the source of the
+  // nullability. This is useful information when the diagnostics are generated.
+ const Stmt *Source;
+};
+
+bool operator==(NullabilityState Lhs, NullabilityState Rhs) {
+ return Lhs.getValue() == Rhs.getValue() &&
+ Lhs.getNullabilitySource() == Rhs.getNullabilitySource();
+}
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(NullabilityMap, const MemRegion *,
+ NullabilityState)
+
+// If the nullability precondition of a function is violated, we should not
+// report nullability related issues on that path. For this reason once a
+// precondition is not met on a path, this checker will be essentially turned
+// off for the rest of the analysis. We do not want to generate a sink node,
+// however, so that this checker does not lead to reduced coverage.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreconditionViolated, bool)
+
+enum class NullConstraint { IsNull, IsNotNull, Unknown };
+
+static NullConstraint getNullConstraint(DefinedOrUnknownSVal Val,
+ ProgramStateRef State) {
+ ConditionTruthVal Nullness = State->isNull(Val);
+ if (Nullness.isConstrainedFalse())
+ return NullConstraint::IsNotNull;
+ if (Nullness.isConstrainedTrue())
+ return NullConstraint::IsNull;
+ return NullConstraint::Unknown;
+}
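A hedged sketch of typical use from a checker callback:

    // if (auto DV = Val.getAs<DefinedOrUnknownSVal>())
    //   switch (getNullConstraint(*DV, State)) {
    //   case NullConstraint::IsNull:    /* known null on this path */   break;
    //   case NullConstraint::IsNotNull: /* proven non-null */           break;
    //   case NullConstraint::Unknown:   /* no constraint recorded */    break;
    //   }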
+
+const SymbolicRegion *
+NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
+ if (!NeedTracking)
+ return nullptr;
+
+ auto RegionSVal = Val.getAs<loc::MemRegionVal>();
+ if (!RegionSVal)
+ return nullptr;
+
+ const MemRegion *Region = RegionSVal->getRegion();
+
+ if (CheckSuperRegion) {
+ if (auto FieldReg = Region->getAs<FieldRegion>())
+ return dyn_cast<SymbolicRegion>(FieldReg->getSuperRegion());
+ if (auto ElementReg = Region->getAs<ElementRegion>())
+ return dyn_cast<SymbolicRegion>(ElementReg->getSuperRegion());
+ }
+
+ return dyn_cast<SymbolicRegion>(Region);
+}
+
+PathDiagnosticPiece *NullabilityChecker::NullabilityBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = PrevN->getState();
+
+ const NullabilityState *TrackedNullab = State->get<NullabilityMap>(Region);
+ const NullabilityState *TrackedNullabPrev =
+ StatePrev->get<NullabilityMap>(Region);
+ if (!TrackedNullab)
+ return nullptr;
+
+ if (TrackedNullabPrev &&
+ TrackedNullabPrev->getValue() == TrackedNullab->getValue())
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = TrackedNullab->getNullabilitySource();
+ if (!S) {
+ ProgramPoint ProgLoc = N->getLocation();
+ if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+ S = SP->getStmt();
+ }
+ }
+
+ if (!S)
+ return nullptr;
+
+ std::string InfoText =
+ (llvm::Twine("Nullability '") +
+       getNullabilityString(TrackedNullab->getValue()) + "' is inferred")
+ .str();
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, InfoText, true, nullptr);
+}
+
+static Nullability getNullabilityAnnotation(QualType Type) {
+ const auto *AttrType = Type->getAs<AttributedType>();
+ if (!AttrType)
+ return Nullability::Unspecified;
+ if (AttrType->getAttrKind() == AttributedType::attr_nullable)
+ return Nullability::Nullable;
+ else if (AttrType->getAttrKind() == AttributedType::attr_nonnull)
+ return Nullability::Nonnull;
+ return Nullability::Unspecified;
+}
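A hedged mapping of what this helper returns for the common annotations:

    //   int * _Nullable p;  ->  Nullability::Nullable
    //   int * _Nonnull q;   ->  Nullability::Nonnull
    //   int *r;             ->  Nullability::Unspecified (no AttributedType)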
+
+template <typename ParamVarDeclRange>
+static bool
+checkParamsForPreconditionViolation(const ParamVarDeclRange &Params,
+ ProgramStateRef State,
+ const LocationContext *LocCtxt) {
+ for (const auto *ParamDecl : Params) {
+ if (ParamDecl->isParameterPack())
+ break;
+
+ if (getNullabilityAnnotation(ParamDecl->getType()) != Nullability::Nonnull)
+ continue;
+
+ auto RegVal = State->getLValue(ParamDecl, LocCtxt)
+ .template getAs<loc::MemRegionVal>();
+ if (!RegVal)
+ continue;
+
+ auto ParamValue = State->getSVal(RegVal->getRegion())
+ .template getAs<DefinedOrUnknownSVal>();
+ if (!ParamValue)
+ continue;
+
+ if (getNullConstraint(*ParamValue, State) == NullConstraint::IsNull) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool checkPreconditionViolation(ProgramStateRef State, ExplodedNode *N,
+ CheckerContext &C) {
+ if (State->get<PreconditionViolated>())
+ return true;
+
+ const LocationContext *LocCtxt = C.getLocationContext();
+ const Decl *D = LocCtxt->getDecl();
+ if (!D)
+ return false;
+
+ if (const auto *BlockD = dyn_cast<BlockDecl>(D)) {
+ if (checkParamsForPreconditionViolation(BlockD->parameters(), State,
+ LocCtxt)) {
+ if (!N->isSink())
+ C.addTransition(State->set<PreconditionViolated>(true), N);
+ return true;
+ }
+ return false;
+ }
+
+ if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
+ if (checkParamsForPreconditionViolation(FuncDecl->parameters(), State,
+ LocCtxt)) {
+ if (!N->isSink())
+ C.addTransition(State->set<PreconditionViolated>(true), N);
+ return true;
+ }
+ return false;
+ }
+ return false;
+}
+
+void NullabilityChecker::reportBugIfPreconditionHolds(
+ ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
+ CheckerContext &C, const Stmt *ValueExpr, bool SuppressPath) const {
+ ProgramStateRef OriginalState = N->getState();
+
+ if (checkPreconditionViolation(OriginalState, N, C))
+ return;
+ if (SuppressPath) {
+ OriginalState = OriginalState->set<PreconditionViolated>(true);
+ N = C.addTransition(OriginalState, N);
+ }
+
+ reportBug(Error, N, Region, C.getBugReporter(), ValueExpr);
+}
+
+/// Cleaning up the program state.
+void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ if (!SR.hasDeadSymbols())
+ return;
+
+ ProgramStateRef State = C.getState();
+ NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
+ for (NullabilityMapTy::iterator I = Nullabilities.begin(),
+ E = Nullabilities.end();
+ I != E; ++I) {
+ const auto *Region = I->first->getAs<SymbolicRegion>();
+ assert(Region && "Non-symbolic region is tracked.");
+ if (SR.isDead(Region->getSymbol())) {
+ State = State->remove<NullabilityMap>(I->first);
+ }
+ }
+  // When one of the nonnull arguments is constrained to be null, nullability
+ // preconditions are violated. It is not enough to check this only when we
+ // actually report an error, because at that time interesting symbols might be
+ // reaped.
+ if (checkPreconditionViolation(State, C.getPredecessor(), C))
+ return;
+ C.addTransition(State);
+}
+
+/// This callback triggers when a pointer is dereferenced and the analyzer does
+/// not know anything about the value of that pointer. When that pointer is
+/// nullable, this code emits a warning.
+void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
+ if (Event.SinkNode->getState()->get<PreconditionViolated>())
+ return;
+
+ const MemRegion *Region =
+      getTrackRegion(Event.Location, /*CheckSuperRegion=*/true);
+ if (!Region)
+ return;
+
+ ProgramStateRef State = Event.SinkNode->getState();
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability)
+ return;
+
+ if (Filter.CheckNullableDereferenced &&
+ TrackedNullability->getValue() == Nullability::Nullable) {
+ BugReporter &BR = *Event.BR;
+ // Do not suppress errors on defensive code paths, because dereferencing
+ // a nullable pointer is always an error.
+ if (Event.IsDirectDereference)
+ reportBug(ErrorKind::NullableDereferenced, Event.SinkNode, Region, BR);
+ else
+ reportBug(ErrorKind::NullablePassedToNonnull, Event.SinkNode, Region, BR);
+ }
+}
+
+/// This method checks whether a nullable pointer or a null value is returned
+/// from a function that has a nonnull return type.
+///
+/// TODO: when nullability preconditions are violated, it is ok to violate the
+/// nullability postconditions (i.e.: when one of the nonnull parameters is
+/// null, this check should not report any nullability related issue).
+void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ auto RetExpr = S->getRetValue();
+ if (!RetExpr)
+ return;
+
+ if (!RetExpr->getType()->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ auto RetSVal =
+ State->getSVal(S, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+ if (!RetSVal)
+ return;
+
+ AnalysisDeclContext *DeclCtxt =
+ C.getLocationContext()->getAnalysisDeclContext();
+ const FunctionType *FuncType = DeclCtxt->getDecl()->getFunctionType();
+ if (!FuncType)
+ return;
+
+ NullConstraint Nullness = getNullConstraint(*RetSVal, State);
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(FuncType->getReturnType());
+
+ // If the returned value is null but the type of the expression
+ // generating it is nonnull then we will suppress the diagnostic.
+ // This enables explicit suppression when returning a nil literal in a
+ // function with a _Nonnull return type:
+ // return (NSString * _Nonnull)0;
+ Nullability RetExprTypeLevelNullability =
+ getNullabilityAnnotation(RetExpr->getType());
+
+ if (Filter.CheckNullReturnedFromNonnull &&
+ Nullness == NullConstraint::IsNull &&
+ RetExprTypeLevelNullability != Nullability::Nonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
+ ExplodedNode *N = C.generateErrorNode(State, &Tag);
+ if (!N)
+ return;
+ reportBugIfPreconditionHolds(ErrorKind::NilReturnedToNonnull, N, nullptr, C,
+ RetExpr);
+ return;
+ }
+
+ const MemRegion *Region = getTrackRegion(*RetSVal);
+ if (!Region)
+ return;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+ if (TrackedNullability) {
+ Nullability TrackedNullabValue = TrackedNullability->getValue();
+ if (Filter.CheckNullableReturnedFromNonnull &&
+ Nullness != NullConstraint::IsNotNull &&
+ TrackedNullabValue == Nullability::Nullable &&
+ RequiredNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullableReturnedFromNonnull");
+ ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
+ reportBugIfPreconditionHolds(ErrorKind::NullableReturnedToNonnull, N,
+ Region, C);
+ }
+ return;
+ }
+ if (RequiredNullability == Nullability::Nullable) {
+ State = State->set<NullabilityMap>(Region,
+ NullabilityState(RequiredNullability,
+ S));
+ C.addTransition(State);
+ }
+}
+
+/// This callback warns when a nullable pointer or a null value is passed to a
+/// function that expects its argument to be nonnull.
+void NullabilityChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.getDecl())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ ProgramStateRef OrigState = State;
+
+ unsigned Idx = 0;
+ for (const ParmVarDecl *Param : Call.parameters()) {
+ if (Param->isParameterPack())
+ break;
+
+ const Expr *ArgExpr = nullptr;
+ if (Idx < Call.getNumArgs())
+ ArgExpr = Call.getArgExpr(Idx);
+ auto ArgSVal = Call.getArgSVal(Idx++).getAs<DefinedOrUnknownSVal>();
+ if (!ArgSVal)
+ continue;
+
+ if (!Param->getType()->isAnyPointerType() &&
+ !Param->getType()->isReferenceType())
+ continue;
+
+ NullConstraint Nullness = getNullConstraint(*ArgSVal, State);
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(Param->getType());
+ Nullability ArgExprTypeLevelNullability =
+ getNullabilityAnnotation(ArgExpr->getType());
+
+ if (Filter.CheckNullPassedToNonnull && Nullness == NullConstraint::IsNull &&
+ ArgExprTypeLevelNullability != Nullability::Nonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+ reportBugIfPreconditionHolds(ErrorKind::NilPassedToNonnull, N, nullptr, C,
+ ArgExpr);
+ return;
+ }
+
+ const MemRegion *Region = getTrackRegion(*ArgSVal);
+ if (!Region)
+ continue;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (TrackedNullability) {
+ if (Nullness == NullConstraint::IsNotNull ||
+ TrackedNullability->getValue() != Nullability::Nullable)
+ continue;
+
+ if (Filter.CheckNullablePassedToNonnull &&
+ RequiredNullability == Nullability::Nonnull) {
+ ExplodedNode *N = C.addTransition(State);
+ reportBugIfPreconditionHolds(ErrorKind::NullablePassedToNonnull, N,
+ Region, C, ArgExpr, /*SuppressPath=*/true);
+ return;
+ }
+ if (Filter.CheckNullableDereferenced &&
+ Param->getType()->isReferenceType()) {
+ ExplodedNode *N = C.addTransition(State);
+ reportBugIfPreconditionHolds(ErrorKind::NullableDereferenced, N, Region,
+ C, ArgExpr, /*SuppressPath=*/true);
+ return;
+ }
+ continue;
+ }
+ // No tracked nullability yet.
+ if (ArgExprTypeLevelNullability != Nullability::Nullable)
+ continue;
+ State = State->set<NullabilityMap>(
+ Region, NullabilityState(ArgExprTypeLevelNullability, ArgExpr));
+ }
+ if (State != OrigState)
+ C.addTransition(State);
+}
+
+/// Suppress the nullability warnings for some functions.
+void NullabilityChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ auto Decl = Call.getDecl();
+ if (!Decl)
+ return;
+  // ObjC messages are handled in a different callback.
+ if (Call.getKind() == CE_ObjCMessage)
+ return;
+ const FunctionType *FuncType = Decl->getFunctionType();
+ if (!FuncType)
+ return;
+ QualType ReturnType = FuncType->getReturnType();
+ if (!ReturnType->isAnyPointerType())
+ return;
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ const MemRegion *Region = getTrackRegion(Call.getReturnValue());
+ if (!Region)
+ return;
+
+ // CG headers are misannotated. Do not warn for symbols that are the results
+ // of CG calls.
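+  // (For example, the result of a hypothetical CGFooCreate() declared in a
+  // "CG"-prefixed header is marked Contradicted below, which silences any
+  // later nullability warnings on it.)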
+ const SourceManager &SM = C.getSourceManager();
+ StringRef FilePath = SM.getFilename(SM.getSpellingLoc(Decl->getLocStart()));
+ if (llvm::sys::path::filename(FilePath).startswith("CG")) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability &&
+ getNullabilityAnnotation(ReturnType) == Nullability::Nullable) {
+ State = State->set<NullabilityMap>(Region, Nullability::Nullable);
+ C.addTransition(State);
+ }
+}
+
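+/// Compute the nullability of a message's receiver, preferring path-sensitive
+/// constraints and tracked state over the receiver's static type.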
+static Nullability getReceiverNullability(const ObjCMethodCall &M,
+ ProgramStateRef State) {
+ if (M.isReceiverSelfOrSuper()) {
+    // For self and super receivers we assume that the receiver is nonnull.
+ return Nullability::Nonnull;
+ }
+ // Otherwise look up nullability in the state.
+ SVal Receiver = M.getReceiverSVal();
+ if (auto DefOrUnknown = Receiver.getAs<DefinedOrUnknownSVal>()) {
+ // If the receiver is constrained to be nonnull, assume that it is nonnull
+ // regardless of its type.
+ NullConstraint Nullness = getNullConstraint(*DefOrUnknown, State);
+ if (Nullness == NullConstraint::IsNotNull)
+ return Nullability::Nonnull;
+ }
+ auto ValueRegionSVal = Receiver.getAs<loc::MemRegionVal>();
+ if (ValueRegionSVal) {
+ const MemRegion *SelfRegion = ValueRegionSVal->getRegion();
+ assert(SelfRegion);
+
+ const NullabilityState *TrackedSelfNullability =
+ State->get<NullabilityMap>(SelfRegion);
+ if (TrackedSelfNullability)
+ return TrackedSelfNullability->getValue();
+ }
+ return Nullability::Unspecified;
+}
+
+/// Calculate the nullability of the result of a message expr based on the
+/// nullability of the receiver, the nullability of the return value, and the
+/// constraints.
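+///
+/// Illustrative sketch: sending a message with a _Nonnull return type to a
+/// receiver tracked as _Nullable yields a result treated as _Nullable, the
+/// most nullable of the two.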
+void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+ CheckerContext &C) const {
+ auto Decl = M.getDecl();
+ if (!Decl)
+ return;
+ QualType RetType = Decl->getReturnType();
+ if (!RetType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ const MemRegion *ReturnRegion = getTrackRegion(M.getReturnValue());
+ if (!ReturnRegion)
+ return;
+
+ auto Interface = Decl->getClassInterface();
+ auto Name = Interface ? Interface->getName() : "";
+  // To reduce the noise in the diagnostics generated by this checker, some
+  // framework- and coding-style-based heuristics are used. These heuristics
+  // apply to Cocoa APIs, which have the NS prefix.
+ if (Name.startswith("NS")) {
+    // Developers often rely on dynamic invariants that no static analysis
+    // tool can infer, such as an item being present in a collection or a
+    // collection not being empty. To avoid bothering users with too many
+    // false positives, item retrieval methods on collections are ignored.
+    // The instance methods of Cocoa dictionaries are either item-retrieval
+    // related or uninteresting nullability-wise, so to keep the code simple
+    // we ignore the return value of every instance method of dictionaries.
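+    // Illustrative sketch (hypothetical code, return value not tracked):
+    //   NSDictionary *d = ...;
+    //   NSString *v = [d objectForKey:@"k"]; // ignored by this checker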
+ if (M.isInstanceMessage() && Name.find("Dictionary") != StringRef::npos) {
+ State =
+ State->set<NullabilityMap>(ReturnRegion, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ // For similar reasons ignore some methods of Cocoa arrays.
+ StringRef FirstSelectorSlot = M.getSelector().getNameForSlot(0);
+ if (Name.find("Array") != StringRef::npos &&
+ (FirstSelectorSlot == "firstObject" ||
+ FirstSelectorSlot == "lastObject")) {
+ State =
+ State->set<NullabilityMap>(ReturnRegion, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+
+    // Encoding-related methods of NSString should not fail when lossless
+    // encodings are used. Lossless encodings are so common that ignoring
+    // this class of methods reduced the emitted diagnostics by about 30% on
+    // some projects (and all of that was false positives).
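+    // Illustrative sketch: the result of NSString's dataUsingEncoding:
+    // (whose parameter is named 'encoding') is marked Contradicted below.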
+ if (Name.find("String") != StringRef::npos) {
+ for (auto Param : M.parameters()) {
+ if (Param->getName() == "encoding") {
+ State = State->set<NullabilityMap>(ReturnRegion,
+ Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ }
+
+ const ObjCMessageExpr *Message = M.getOriginExpr();
+ Nullability SelfNullability = getReceiverNullability(M, State);
+
+ const NullabilityState *NullabilityOfReturn =
+ State->get<NullabilityMap>(ReturnRegion);
+
+ if (NullabilityOfReturn) {
+ // When we have a nullability tracked for the return value, the nullability
+ // of the expression will be the most nullable of the receiver and the
+ // return value.
+ Nullability RetValTracked = NullabilityOfReturn->getValue();
+ Nullability ComputedNullab =
+ getMostNullable(RetValTracked, SelfNullability);
+ if (ComputedNullab != RetValTracked &&
+ ComputedNullab != Nullability::Unspecified) {
+ const Stmt *NullabilitySource =
+ ComputedNullab == RetValTracked
+ ? NullabilityOfReturn->getNullabilitySource()
+ : Message->getInstanceReceiver();
+ State = State->set<NullabilityMap>(
+ ReturnRegion, NullabilityState(ComputedNullab, NullabilitySource));
+ C.addTransition(State);
+ }
+ return;
+ }
+
+ // No tracked information. Use static type information for return value.
+ Nullability RetNullability = getNullabilityAnnotation(RetType);
+
+  // Properties might be computed. For this reason the static analyzer creates
+  // a new symbol each time an unknown property is read. To avoid false
+  // positives, do not treat unknown properties as nullable, even when they
+  // are explicitly marked nullable.
+ if (M.getMessageKind() == OCM_PropertyAccess && !C.wasInlined)
+ RetNullability = Nullability::Nonnull;
+
+ Nullability ComputedNullab = getMostNullable(RetNullability, SelfNullability);
+ if (ComputedNullab == Nullability::Nullable) {
+ const Stmt *NullabilitySource = ComputedNullab == RetNullability
+ ? Message
+ : Message->getInstanceReceiver();
+ State = State->set<NullabilityMap>(
+ ReturnRegion, NullabilityState(ComputedNullab, NullabilitySource));
+ C.addTransition(State);
+ }
+}
+
+/// Explicit casts are trusted. If the nullability annotations of the
+/// destination and the source disagree, or '0' is cast to nonnull, track the
+/// value as having contradictory nullability. This allows users to suppress
+/// warnings.
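+///
+/// Illustrative sketch (hypothetical code):
+///   p = (NSString * _Nonnull)nil; // tracked as Contradicted, no warning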
+void NullabilityChecker::checkPostStmt(const ExplicitCastExpr *CE,
+ CheckerContext &C) const {
+ QualType OriginType = CE->getSubExpr()->getType();
+ QualType DestType = CE->getType();
+ if (!OriginType->isAnyPointerType())
+ return;
+ if (!DestType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ Nullability DestNullability = getNullabilityAnnotation(DestType);
+
+ // No explicit nullability in the destination type, so this cast does not
+ // change the nullability.
+ if (DestNullability == Nullability::Unspecified)
+ return;
+
+  auto RegionSVal =
+      State->getSVal(CE, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+  if (!RegionSVal)
+    return;
+  const MemRegion *Region = getTrackRegion(*RegionSVal);
+ if (!Region)
+ return;
+
+  // When 0 is converted to nonnull, mark it as contradicted.
+ if (DestNullability == Nullability::Nonnull) {
+ NullConstraint Nullness = getNullConstraint(*RegionSVal, State);
+ if (Nullness == NullConstraint::IsNull) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ return;
+ }
+ }
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(Region);
+
+ if (!TrackedNullability) {
+ if (DestNullability != Nullability::Nullable)
+ return;
+ State = State->set<NullabilityMap>(Region,
+ NullabilityState(DestNullability, CE));
+ C.addTransition(State);
+ return;
+ }
+
+ if (TrackedNullability->getValue() != DestNullability &&
+ TrackedNullability->getValue() != Nullability::Contradicted) {
+ State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
+ C.addTransition(State);
+ }
+}
+
+/// For a given statement performing a bind, attempt to syntactically
+/// match the expression resulting in the bound value.
+static const Expr *matchValueExprForBind(const Stmt *S) {
+ // For `x = e` the value expression is the right-hand side.
+ if (auto *BinOp = dyn_cast<BinaryOperator>(S)) {
+ if (BinOp->getOpcode() == BO_Assign)
+ return BinOp->getRHS();
+ }
+
+ // For `int x = e` the value expression is the initializer.
+ if (auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (DS->isSingleDecl()) {
+ auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return nullptr;
+
+ if (const Expr *Init = VD->getInit())
+ return Init;
+ }
+ }
+
+ return nullptr;
+}
+
+/// Returns true if \p S is a DeclStmt for a local variable that Objective-C
+/// automated reference counting (ARC) zero-initialized.
+static bool isARCNilInitializedLocal(CheckerContext &C, const Stmt *S) {
+ // We suppress diagnostics for ARC zero-initialized _Nonnull locals. This
+ // prevents false positives when a _Nonnull local variable cannot be
+ // initialized with an initialization expression:
+ // NSString * _Nonnull s; // no-warning
+ // @autoreleasepool {
+ // s = ...
+ // }
+ //
+ // FIXME: We should treat implicitly zero-initialized _Nonnull locals as
+ // uninitialized in Sema's UninitializedValues analysis to warn when a use of
+ // the zero-initialized definition will unexpectedly yield nil.
+
+ // Locals are only zero-initialized when automated reference counting
+ // is turned on.
+ if (!C.getASTContext().getLangOpts().ObjCAutoRefCount)
+ return false;
+
+ auto *DS = dyn_cast<DeclStmt>(S);
+ if (!DS || !DS->isSingleDecl())
+ return false;
+
+ auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return false;
+
+ // Sema only zero-initializes locals with ObjCLifetimes.
+  if (!VD->getType().getQualifiers().hasObjCLifetime())
+ return false;
+
+ const Expr *Init = VD->getInit();
+ assert(Init && "ObjC local under ARC without initializer");
+
+ // Return false if the local is explicitly initialized (e.g., with '= nil').
+ if (!isa<ImplicitValueInitExpr>(Init))
+ return false;
+
+ return true;
+}
+
+/// Propagate the nullability information through binds and warn when a
+/// nullable pointer or null value is assigned to a pointer with a nonnull
+/// type.
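+///
+/// Illustrative sketch (hypothetical code):
+///   NSString * _Nonnull s = nil; // NilAssignedToNonnull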
+void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ const TypedValueRegion *TVR =
+ dyn_cast_or_null<TypedValueRegion>(L.getAsRegion());
+ if (!TVR)
+ return;
+
+ QualType LocType = TVR->getValueType();
+ if (!LocType->isAnyPointerType())
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (State->get<PreconditionViolated>())
+ return;
+
+ auto ValDefOrUnknown = V.getAs<DefinedOrUnknownSVal>();
+ if (!ValDefOrUnknown)
+ return;
+
+ NullConstraint RhsNullness = getNullConstraint(*ValDefOrUnknown, State);
+
+ Nullability ValNullability = Nullability::Unspecified;
+ if (SymbolRef Sym = ValDefOrUnknown->getAsSymbol())
+ ValNullability = getNullabilityAnnotation(Sym->getType());
+
+ Nullability LocNullability = getNullabilityAnnotation(LocType);
+ if (Filter.CheckNullPassedToNonnull &&
+ RhsNullness == NullConstraint::IsNull &&
+ ValNullability != Nullability::Nonnull &&
+ LocNullability == Nullability::Nonnull &&
+ !isARCNilInitializedLocal(C, S)) {
+ static CheckerProgramPointTag Tag(this, "NullPassedToNonnull");
+ ExplodedNode *N = C.generateErrorNode(State, &Tag);
+ if (!N)
+ return;
+
+ const Stmt *ValueExpr = matchValueExprForBind(S);
+ if (!ValueExpr)
+ ValueExpr = S;
+
+ reportBugIfPreconditionHolds(ErrorKind::NilAssignedToNonnull, N, nullptr, C,
+ ValueExpr);
+ return;
+ }
+ // Intentionally missing case: '0' is bound to a reference. It is handled by
+ // the DereferenceChecker.
+
+ const MemRegion *ValueRegion = getTrackRegion(*ValDefOrUnknown);
+ if (!ValueRegion)
+ return;
+
+ const NullabilityState *TrackedNullability =
+ State->get<NullabilityMap>(ValueRegion);
+
+ if (TrackedNullability) {
+ if (RhsNullness == NullConstraint::IsNotNull ||
+ TrackedNullability->getValue() != Nullability::Nullable)
+ return;
+ if (Filter.CheckNullablePassedToNonnull &&
+ LocNullability == Nullability::Nonnull) {
+ static CheckerProgramPointTag Tag(this, "NullablePassedToNonnull");
+ ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
+ reportBugIfPreconditionHolds(ErrorKind::NullableAssignedToNonnull, N,
+ ValueRegion, C);
+ }
+ return;
+ }
+
+ const auto *BinOp = dyn_cast<BinaryOperator>(S);
+
+ if (ValNullability == Nullability::Nullable) {
+ // Trust the static information of the value more than the static
+ // information on the location.
+ const Stmt *NullabilitySource = BinOp ? BinOp->getRHS() : S;
+ State = State->set<NullabilityMap>(
+ ValueRegion, NullabilityState(ValNullability, NullabilitySource));
+ C.addTransition(State);
+ return;
+ }
+
+ if (LocNullability == Nullability::Nullable) {
+ const Stmt *NullabilitySource = BinOp ? BinOp->getLHS() : S;
+ State = State->set<NullabilityMap>(
+ ValueRegion, NullabilityState(LocNullability, NullabilitySource));
+ C.addTransition(State);
+ }
+}
+
+void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ NullabilityMapTy B = State->get<NullabilityMap>();
+
+ if (B.isEmpty())
+ return;
+
+ Out << Sep << NL;
+
+ for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ Out << I->first << " : ";
+ I->second.print(Out);
+ Out << NL;
+ }
+}
+
+#define REGISTER_CHECKER(name, trackingRequired) \
+ void ento::register##name##Checker(CheckerManager &mgr) { \
+ NullabilityChecker *checker = mgr.registerChecker<NullabilityChecker>(); \
+ checker->Filter.Check##name = true; \
+ checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
+ checker->NeedTracking = checker->NeedTracking || trackingRequired; \
+ }
+
+// These checks are likely to be turned on by default, and they can be
+// performed without tracking any nullability-related information. As an
+// optimization, no nullability information is tracked when only these two
+// checks are enabled.
+REGISTER_CHECKER(NullPassedToNonnull, false)
+REGISTER_CHECKER(NullReturnedFromNonnull, false)
+
+REGISTER_CHECKER(NullableDereferenced, true)
+REGISTER_CHECKER(NullablePassedToNonnull, true)
+REGISTER_CHECKER(NullableReturnedFromNonnull, true)
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index a7b92b4c67f2..cbaa5c23592d 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -43,7 +43,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
// Uninitialized value used for the mutex?
if (V.getAs<UndefinedVal>()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_undef)
BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
"for @synchronized"));
@@ -66,7 +66,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
if (!notNullState) {
// Generate an error node. This isn't a sink since
// a null mutex just means no synchronization occurs.
- if (ExplodedNode *N = C.addTransition(nullState)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(nullState)) {
if (!BT_null)
BT_null.reset(new BuiltinBug(
this, "Nil value used as mutex for @synchronized() "
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 224251beb09a..b10ec848ee46 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -66,9 +66,8 @@ class WalkAST : public StmtVisitor<WalkAST> {
// The type must be an array/pointer type.
// This could be a null constant, which is allowed.
- if (E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull))
- return true;
- return false;
+ return static_cast<bool>(
+ E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull));
}
public:
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 53e159879eb2..0203d79cd00e 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -133,13 +133,13 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
if (IdxVal.isUnknownOrUndef())
return;
DefinedSVal Idx = IdxVal.castAs<DefinedSVal>();
-
+
// Now, check if 'Idx in [0, Size-1]'.
const QualType T = IdxExpr->getType();
ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.generateSink(StOutBound);
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
initBugType();
diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 016cb146f84e..32a1adb587bf 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -49,7 +49,7 @@ public:
DoesCallSuper = true;
// Recurse if we didn't find the super call yet.
- return !DoesCallSuper;
+ return !DoesCallSuper;
}
bool DoesCallSuper;
@@ -59,7 +59,7 @@ private:
};
//===----------------------------------------------------------------------===//
-// ObjCSuperCallChecker
+// ObjCSuperCallChecker
//===----------------------------------------------------------------------===//
class ObjCSuperCallChecker : public Checker<
@@ -88,7 +88,7 @@ private:
/// \param[out] SuperclassName On return, the found superclass name.
bool ObjCSuperCallChecker::isCheckableClass(const ObjCImplementationDecl *D,
StringRef &SuperclassName) const {
- const ObjCInterfaceDecl *ID = D->getClassInterface();
+ const ObjCInterfaceDecl *ID = D->getClassInterface()->getSuperClass();
for ( ; ID ; ID = ID->getSuperClass())
{
SuperclassName = ID->getIdentifier()->getName();
@@ -202,7 +202,7 @@ void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D,
SmallString<320> Buf;
llvm::raw_svector_ostream os(Buf);
- os << "The '" << S.getAsString()
+ os << "The '" << S.getAsString()
<< "' instance method in " << SuperclassName.str() << " subclass '"
<< *D << "' is missing a [super " << S.getAsString() << "] call";
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 93b0553b3b72..ffa3a2700616 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -145,15 +145,15 @@ void ObjCSelfInitChecker::checkForInvalidSelf(const Expr *E, CheckerContext &C,
const char *errorStr) const {
if (!E)
return;
-
+
if (!C.getState()->get<CalledInit>())
return;
-
+
if (!isInvalidSelf(E, C))
return;
-
+
// Generate an error node.
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
@@ -177,12 +177,12 @@ void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
if (isInitMessage(Msg)) {
// Tag the return value as the result of an initializer.
ProgramStateRef state = C.getState();
-
+
// FIXME this really should be context sensitive, where we record
// the current stack frame (for IPA). Also, we need to clean this
// value out when we return from this method.
state = state->set<CalledInit>(true);
-
+
SVal V = state->getSVal(Msg.getOriginExpr(), C.getLocationContext());
addSelfFlag(state, V, SelfFlag_InitRes, C);
return;
@@ -318,7 +318,7 @@ void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
CheckerContext &C) const {
// Allow assignment of anything to self. Self is a local variable in the
// initializer, so it is legal to assign anything to it, like results of
- // static functions/method calls. After self is assigned something we cannot
+ // static functions/method calls. After self is assigned something we cannot
// reason about, stop enforcing the rules.
// (Only continue checking if the assigned value should be treated as self.)
if ((isSelfVar(loc, C)) &&
@@ -404,15 +404,12 @@ static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
if (II == NSObjectII)
break;
}
- if (!ID)
- return false;
-
- return true;
+ return ID != nullptr;
}
/// \brief Returns true if the location is 'self'.
static bool isSelfVar(SVal location, CheckerContext &C) {
- AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
+ AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
if (!analCtx->getSelfDecl())
return false;
if (!location.getAs<loc::MemRegionVal>())
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
new file mode 100644
index 000000000000..8ce37357fe1f
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -0,0 +1,314 @@
+//=======- PaddingChecker.cpp ------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks for padding that could be
+// removed by re-ordering members.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <numeric>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ mutable std::unique_ptr<BugType> PaddingBug;
+ mutable int64_t AllowedPad;
+ mutable BugReporter *BR;
+
+public:
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
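+    // Read the padding threshold in bytes; it defaults to 24 when the
+    // AllowedPad analyzer option is not set.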
+ AllowedPad =
+ MGR.getAnalyzerOptions().getOptionAsInteger("AllowedPad", 24, this);
+ assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const PaddingChecker *Checker;
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+ explicit LocalVisitor(const PaddingChecker *Checker) : Checker(Checker) {}
+ bool VisitRecordDecl(const RecordDecl *RD) {
+ Checker->visitRecord(RD);
+ return true;
+ }
+ bool VisitVarDecl(const VarDecl *VD) {
+ Checker->visitVariable(VD);
+ return true;
+ }
+ // TODO: Visit array new and mallocs for arrays.
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ /// \brief Look for records of overly padded types. If padding *
+ /// PadMultiplier exceeds AllowedPad, then generate a report.
+ /// PadMultiplier is used to share code with the array padding
+ /// checker.
+ void visitRecord(const RecordDecl *RD, uint64_t PadMultiplier = 1) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ auto &ASTContext = RD->getASTContext();
+ const ASTRecordLayout &RL = ASTContext.getASTRecordLayout(RD);
+ assert(llvm::isPowerOf2_64(RL.getAlignment().getQuantity()));
+
+ CharUnits BaselinePad = calculateBaselinePad(RD, ASTContext, RL);
+ if (BaselinePad.isZero())
+ return;
+ CharUnits OptimalPad = calculateOptimalPad(RD, ASTContext, RL);
+
+ CharUnits DiffPad = PadMultiplier * (BaselinePad - OptimalPad);
+ if (DiffPad.getQuantity() <= AllowedPad) {
+ assert(!DiffPad.isNegative() && "DiffPad should not be negative");
+ // There is not enough excess padding to trigger a warning.
+ return;
+ }
+ reportRecord(RD, BaselinePad, OptimalPad);
+ }
+
+ /// \brief Look for arrays of overly padded types. If the padding of the
+ /// array type exceeds AllowedPad, then generate a report.
+ void visitVariable(const VarDecl *VD) const {
+ const ArrayType *ArrTy = VD->getType()->getAsArrayTypeUnsafe();
+ if (ArrTy == nullptr)
+ return;
+ uint64_t Elts = 0;
+ if (const ConstantArrayType *CArrTy = dyn_cast<ConstantArrayType>(ArrTy))
+ Elts = CArrTy->getSize().getZExtValue();
+ if (Elts == 0)
+ return;
+ const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
+ if (RT == nullptr)
+ return;
+
+ // TODO: Recurse into the fields and base classes to see if any
+ // of those have excess padding.
+ visitRecord(RT->getDecl(), Elts);
+ }
+
+ bool shouldSkipDecl(const RecordDecl *RD) const {
+ auto Location = RD->getLocation();
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ if (!Location.isValid())
+ return true;
+ SrcMgr::CharacteristicKind Kind =
+ BR->getSourceManager().getFileCharacteristic(Location);
+ // Throw out all records that come from system headers.
+ if (Kind != SrcMgr::C_User)
+ return true;
+
+ // Not going to attempt to optimize unions.
+ if (RD->isUnion())
+ return true;
+ // How do you reorder fields if you haven't got any?
+ if (RD->field_empty())
+ return true;
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Tail padding with base classes ends up being very complicated.
+ // We will skip objects with base classes for now.
+ if (CXXRD->getNumBases() != 0)
+ return true;
+ // Virtual bases are complicated, skipping those for now.
+ if (CXXRD->getNumVBases() != 0)
+ return true;
+      // We can't lay out a template, so skip it. We do still lay out the
+      // instantiations though.
+ if (CXXRD->getTypeForDecl()->isDependentType())
+ return true;
+ if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
+ return true;
+ }
+ auto IsTrickyField = [](const FieldDecl *FD) -> bool {
+ // Bitfield layout is hard.
+ if (FD->isBitField())
+ return true;
+
+ // Variable length arrays are tricky too.
+ QualType Ty = FD->getType();
+ if (Ty->isIncompleteArrayType())
+ return true;
+ return false;
+ };
+
+ if (std::any_of(RD->field_begin(), RD->field_end(), IsTrickyField))
+ return true;
+ return false;
+ }
+
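+  /// Compute the padding (in chars) present in the record's actual layout:
+  /// the sum of all inter-field gaps plus the tail padding.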
+ static CharUnits calculateBaselinePad(const RecordDecl *RD,
+ const ASTContext &ASTContext,
+ const ASTRecordLayout &RL) {
+ CharUnits PaddingSum;
+ CharUnits Offset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
+ for (const auto &FD : RD->fields()) {
+ // This checker only cares about the padded size of the
+ // field, and not the data size. If the field is a record
+ // with tail padding, then we won't put that number in our
+ // total because reordering fields won't fix that problem.
+ CharUnits FieldSize = ASTContext.getTypeSizeInChars(FD->getType());
+ auto FieldOffsetBits = RL.getFieldOffset(FD->getFieldIndex());
+ CharUnits FieldOffset = ASTContext.toCharUnitsFromBits(FieldOffsetBits);
+ PaddingSum += (FieldOffset - Offset);
+ Offset = FieldOffset + FieldSize;
+ }
+ PaddingSum += RL.getSize() - Offset;
+ return PaddingSum;
+ }
+
+ /// Optimal padding overview:
+ /// 1. Find a close approximation to where we can place our first field.
+ /// This will usually be at offset 0.
+ /// 2. Try to find the best field that can legally be placed at the current
+ /// offset.
+ /// a. "Best" is the largest alignment that is legal, but smallest size.
+ /// This is to account for overly aligned types.
+ /// 3. If no fields can fit, pad by rounding the current offset up to the
+ /// smallest alignment requirement of our fields. Measure and track the
+  /// amount of padding added. Go back to 2.
+ /// 4. Increment the current offset by the size of the chosen field.
+ /// 5. Remove the chosen field from the set of future possibilities.
+ /// 6. Go back to 2 if there are still unplaced fields.
+ /// 7. Add tail padding by rounding the current offset up to the structure
+ /// alignment. Track the amount of padding added.
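+  ///
+  /// Illustrative sketch (assuming a 4-byte, 4-aligned int): the layout of
+  /// 'struct S { char c; int i; char d; };' carries 6 bytes of padding
+  /// (3 after 'c', 3 of tail), while the optimal order 'i, c, d' needs only
+  /// 2 bytes of tail padding.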
+
+ static CharUnits calculateOptimalPad(const RecordDecl *RD,
+ const ASTContext &ASTContext,
+ const ASTRecordLayout &RL) {
+ struct CharUnitPair {
+ CharUnits Align;
+ CharUnits Size;
+ bool operator<(const CharUnitPair &RHS) const {
+ // Order from small alignments to large alignments,
+ // then large sizes to small sizes.
+ return std::make_pair(Align, -Size) <
+ std::make_pair(RHS.Align, -RHS.Size);
+ }
+ };
+ SmallVector<CharUnitPair, 20> Fields;
+ auto GatherSizesAndAlignments = [](const FieldDecl *FD) {
+ CharUnitPair RetVal;
+ auto &Ctx = FD->getASTContext();
+ std::tie(RetVal.Size, RetVal.Align) =
+ Ctx.getTypeInfoInChars(FD->getType());
+ assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
+ if (auto Max = FD->getMaxAlignment())
+ RetVal.Align = std::max(Ctx.toCharUnitsFromBits(Max), RetVal.Align);
+ return RetVal;
+ };
+ std::transform(RD->field_begin(), RD->field_end(),
+ std::back_inserter(Fields), GatherSizesAndAlignments);
+ std::sort(Fields.begin(), Fields.end());
+
+ // This lets us skip over vptrs and non-virtual bases,
+ // so that we can just worry about the fields in our object.
+    // Note that this does cause us to miss some cases where we could pack
+    // more bytes into a base class's tail padding.
+ CharUnits NewOffset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
+ CharUnits NewPad;
+
+ while (!Fields.empty()) {
+ unsigned TrailingZeros =
+ llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
+ // If NewOffset is zero, then countTrailingZeros will be 64. Shifting
+ // 64 will overflow our unsigned long long. Shifting 63 will turn
+ // our long long (and CharUnits internal type) negative. So shift 62.
+ long long CurAlignmentBits = 1ull << (std::min)(TrailingZeros, 62u);
+ CharUnits CurAlignment = CharUnits::fromQuantity(CurAlignmentBits);
+ CharUnitPair InsertPoint = {CurAlignment, CharUnits::Zero()};
+ auto CurBegin = Fields.begin();
+ auto CurEnd = Fields.end();
+
+ // In the typical case, this will find the last element
+ // of the vector. We won't find a middle element unless
+ // we started on a poorly aligned address or have an overly
+ // aligned field.
+ auto Iter = std::upper_bound(CurBegin, CurEnd, InsertPoint);
+ if (Iter != CurBegin) {
+ // We found a field that we can layout with the current alignment.
+ --Iter;
+ NewOffset += Iter->Size;
+ Fields.erase(Iter);
+ } else {
+        // We are poorly aligned, and we need to pad in order to lay out
+        // another field. Round up to at least the smallest field alignment
+        // that we currently have.
+ CharUnits NextOffset = NewOffset.RoundUpToAlignment(Fields[0].Align);
+ NewPad += NextOffset - NewOffset;
+ NewOffset = NextOffset;
+ }
+ }
+ // Calculate tail padding.
+ CharUnits NewSize = NewOffset.RoundUpToAlignment(RL.getAlignment());
+ NewPad += NewSize - NewOffset;
+ return NewPad;
+ }
+
+ void reportRecord(const RecordDecl *RD, CharUnits BaselinePad,
+ CharUnits TargetPad) const {
+ if (!PaddingBug)
+ PaddingBug =
+ llvm::make_unique<BugType>(this, "Excessive Padding", "Performance");
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << "Excessive padding in '";
+ Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers()) << "'";
+
+ if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // TODO: make this show up better in the console output and in
+ // the HTML. Maybe just make it show up in HTML like the path
+ // diagnostics show.
+ SourceLocation ILoc = TSD->getPointOfInstantiation();
+ if (ILoc.isValid())
+ Os << " instantiated here: "
+ << ILoc.printToString(BR->getSourceManager());
+ }
+
+ Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
+ << TargetPad.getQuantity() << " is optimal). Consider reordering "
+ << "the fields or adding explicit padding members.";
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::create(RD, BR->getSourceManager());
+
+ auto Report = llvm::make_unique<BugReport>(*PaddingBug, Os.str(), CELoc);
+ Report->setDeclWithIssue(RD);
+ Report->addRange(RD->getSourceRange());
+
+ BR->emitReport(std::move(Report));
+ }
+};
+}
+
+void ento::registerPaddingChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<PaddingChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 806312468beb..e3369677af72 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -22,7 +22,7 @@ using namespace clang;
using namespace ento;
namespace {
-class PointerArithChecker
+class PointerArithChecker
: public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
@@ -48,10 +48,10 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
// If pointer arithmetic is done on variables of non-array type, this often
// means behavior rely on memory organization, which is dangerous.
- if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
+ if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
isa<CompoundLiteralRegion>(LR)) {
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Dangerous pointer arithmetic",
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index cf1f88a2851b..2d33ebc2610d 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This files defines PointerSubChecker, a builtin checker that checks for
-// pointer subtractions on two pointers pointing to different memory chunks.
+// pointer subtractions on two pointers pointing to different memory chunks.
// This check corresponds to CWE-469.
//
//===----------------------------------------------------------------------===//
@@ -23,7 +23,7 @@ using namespace clang;
using namespace ento;
namespace {
-class PointerSubChecker
+class PointerSubChecker
: public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
@@ -60,7 +60,7 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
return;
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Pointer subtraction",
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 4209017a58d5..28a4a083ea3c 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -32,9 +32,9 @@ private:
LockState(Kind K) : K(K) {}
public:
- static LockState getLocked(void) { return LockState(Locked); }
- static LockState getUnlocked(void) { return LockState(Unlocked); }
- static LockState getDestroyed(void) { return LockState(Destroyed); }
+ static LockState getLocked() { return LockState(Locked); }
+ static LockState getUnlocked() { return LockState(Unlocked); }
+ static LockState getDestroyed() { return LockState(Destroyed); }
bool operator==(const LockState &X) const {
return K == X.K;
@@ -62,10 +62,10 @@ class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
};
public:
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
-
+
void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
bool isTryLock, enum LockingSemantics semantics) const;
-
+
void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
void DestroyLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
void InitLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
@@ -96,7 +96,7 @@ void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
false, PthreadSemantics);
else if (FName == "lck_mtx_lock" ||
FName == "lck_rw_lock_exclusive" ||
- FName == "lck_rw_lock_shared")
+ FName == "lck_rw_lock_shared")
AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
false, XNUSemantics);
else if (FName == "pthread_mutex_trylock" ||
@@ -124,17 +124,17 @@ void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
SVal lock, bool isTryLock,
enum LockingSemantics semantics) const {
-
+
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
return;
-
+
ProgramStateRef state = C.getState();
-
+
SVal X = state->getSVal(CE, C.getLocationContext());
if (X.isUnknownOrUndef())
return;
-
+
DefinedSVal retVal = X.castAs<DefinedSVal>();
if (const LockState *LState = state->get<LockMap>(lockR)) {
@@ -142,7 +142,7 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (!BT_doublelock)
BT_doublelock.reset(new BugType(this, "Double locking",
"Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto report = llvm::make_unique<BugReport>(
@@ -183,8 +183,8 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
assert((semantics == XNUSemantics) && "Unknown locking semantics");
lockSucc = state;
}
-
- // Record that the lock was acquired.
+
+ // Record that the lock was acquired.
lockSucc = lockSucc->add<LockSet>(lockR);
lockSucc = lockSucc->set<LockMap>(lockR, LockState::getLocked());
C.addTransition(lockSucc);
@@ -196,7 +196,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
return;
-
+
ProgramStateRef state = C.getState();
if (const LockState *LState = state->get<LockMap>(lockR)) {
@@ -204,7 +204,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (!BT_doubleunlock)
BT_doubleunlock.reset(new BugType(this, "Double unlocking",
"Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto Report = llvm::make_unique<BugReport>(
@@ -227,7 +227,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (firstLockR != lockR) {
if (!BT_lor)
BT_lor.reset(new BugType(this, "Lock order reversal", "Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto report = llvm::make_unique<BugReport>(
@@ -272,7 +272,7 @@ void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
if (!BT_destroylock)
BT_destroylock.reset(new BugType(this, "Destroy invalid lock",
"Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto Report = llvm::make_unique<BugReport>(*BT_destroylock, Message, N);
@@ -307,7 +307,7 @@ void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
if (!BT_initlock)
BT_initlock.reset(new BugType(this, "Init invalid lock",
"Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto Report = llvm::make_unique<BugReport>(*BT_initlock, Message, N);
@@ -320,7 +320,7 @@ void PthreadLockChecker::reportUseDestroyedBug(CheckerContext &C,
if (!BT_destroylock)
BT_destroylock.reset(new BugType(this, "Use destroyed lock",
"Lock checker"));
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
auto Report = llvm::make_unique<BugReport>(
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index 6ee87a561e02..f983c3085635 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -234,6 +234,7 @@ public:
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
getType(), IvarAccessHistory::AccessedDirectly);
}
+
RefVal releaseViaIvar() const {
assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly);
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
@@ -250,7 +251,7 @@ public:
bool operator==(const RefVal& X) const {
return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind();
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.Add(T);
ID.AddInteger(RawKind);
@@ -426,16 +427,16 @@ public:
/// setRetEffect - Set the effect of the return value of the call.
void setRetEffect(RetEffect E) { Ret = E; }
-
+
/// Sets the effect on the receiver of the message.
void setReceiverEffect(ArgEffect e) { Receiver = e; }
-
+
/// getReceiverEffect - Returns the effect on the receiver of the call.
/// This is only meaningful if the summary applies to an ObjCMessageExpr*.
ArgEffect getReceiverEffect() const { return Receiver; }
/// Test if two retain summaries are identical. Note that merely equivalent
- /// summaries are not necessarily identical (for example, if an explicit
+ /// summaries are not necessarily identical (for example, if an explicit
/// argument effect matches the default effect).
bool operator==(const RetainSummary &Other) const {
return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
@@ -484,7 +485,7 @@ public:
IdentifierInfo *getIdentifier() const { return II; }
Selector getSelector() const { return S; }
};
-}
+} // end anonymous namespace
namespace llvm {
template <> struct DenseMapInfo<ObjCSummaryKey> {
@@ -621,7 +622,7 @@ class RetainSummaryManager {
ArgEffects::Factory AF;
/// ScratchArgs - A holding buffer for construct ArgEffects.
- ArgEffects ScratchArgs;
+ ArgEffects ScratchArgs;
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
@@ -644,7 +645,7 @@ class RetainSummaryManager {
ArgEffects getArgEffects();
enum UnaryFuncKind { cfretain, cfrelease, cfautorelease, cfmakecollectable };
-
+
const RetainSummary *getUnarySummary(const FunctionType* FT,
UnaryFuncKind func);
@@ -664,7 +665,7 @@ class RetainSummaryManager {
const RetainSummary *getDoNothingSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
-
+
const RetainSummary *getDefaultSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, MayEscape);
@@ -689,7 +690,7 @@ private:
void addClassMethSummary(const char* Cls, const char* name,
const RetainSummary *Summ, bool isNullary = true) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
- Selector S = isNullary ? GetNullarySelector(name, Ctx)
+ Selector S = isNullary ? GetNullarySelector(name, Ctx)
: GetUnarySelector(name, Ctx);
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
@@ -739,7 +740,7 @@ public:
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
: RetEffect::MakeOwned(RetEffect::ObjC, true))),
- ObjCInitRetE(gcenabled
+ ObjCInitRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
: RetEffect::MakeOwnedWhenTrackedReceiver())) {
@@ -803,7 +804,7 @@ public:
bool isGCEnabled() const { return GCEnabled; }
bool isARCEnabled() const { return ARCEnabled; }
-
+
bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
@@ -966,7 +967,7 @@ void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
// Additionally, our Self Init checker already warns about it. To avoid
// overwhelming the user with messages from both checkers, we model the case
// of '[super init]' in cases when it is not consumed by another expression
- // as if the call preserves the value of 'self'; essentially, assuming it can
+ // as if the call preserves the value of 'self'; essentially, assuming it can
// never fail and return 'nil'.
// Note, we don't want to just stop tracking the value since we want the
// RetainCount checker to report leaks and use-after-free if SelfInit checker
@@ -985,7 +986,6 @@ void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
}
}
-
}
}
@@ -1150,7 +1150,7 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
if (S)
break;
- if (RetTy->isPointerType()) {
+ if (RetTy->isPointerType()) {
// For CoreFoundation ('CF') types.
if (cocoa::isRefType(RetTy, "CF", FName)) {
if (isRetain(FD, FName)) {
@@ -1278,14 +1278,14 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
-const RetainSummary *
+const RetainSummary *
RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
assert (ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
}
-const RetainSummary *
+const RetainSummary *
RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
assert (ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
@@ -1331,7 +1331,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
// Effects on the parameters.
unsigned parm_idx = 0;
- for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+ for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
if (pd->hasAttr<NSConsumedAttr>())
@@ -1367,8 +1367,8 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
// Effects on the receiver.
if (MD->hasAttr<NSConsumesSelfAttr>())
- Template->setReceiverEffect(DecRefMsg);
-
+ Template->setReceiverEffect(DecRefMsg);
+
// Effects on the parameters.
unsigned parm_idx = 0;
for (ObjCMethodDecl::param_const_iterator
@@ -1376,9 +1376,9 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
if (pd->hasAttr<NSConsumedAttr>())
- Template->addArg(AF, parm_idx, DecRefMsg);
+ Template->addArg(AF, parm_idx, DecRefMsg);
else if (pd->hasAttr<CFConsumedAttr>()) {
- Template->addArg(AF, parm_idx, DecRef);
+ Template->addArg(AF, parm_idx, DecRef);
} else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
if (!PointeeTy.isNull())
@@ -1415,7 +1415,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
if (cocoa::isCocoaObjectRef(RetTy))
ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
else if (coreFoundation::isCFObjectRef(RetTy)) {
- // ObjCMethodDecl currently doesn't consider CF objects as valid return
+ // ObjCMethodDecl currently doesn't consider CF objects as valid return
// values for alloc, new, copy, or mutableCopy, so we have to
// double-check with the selector. This is ugly, but there aren't that
// many Objective-C methods that return CF objects, right?
@@ -1428,11 +1428,11 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
break;
default:
- ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
break;
}
} else {
- ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
}
}
break;
@@ -1749,7 +1749,7 @@ namespace {
SymbolRef Sym;
const SummaryLogTy &SummaryLog;
bool GCEnabled;
-
+
public:
CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
: Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
@@ -1869,7 +1869,7 @@ void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
- return isa<IntegerLiteral>(E) ||
+ return isa<IntegerLiteral>(E) ||
isa<CharacterLiteral>(E) ||
isa<FloatingLiteral>(E) ||
isa<ObjCBoolLiteralExpr>(E) ||
@@ -1948,7 +1948,7 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
else if (isa<ObjCIvarRefExpr>(S)) {
os << "Object loaded from instance variable";
}
- else {
+ else {
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Get the name of the callee (if it is available).
SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
@@ -2192,6 +2192,7 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
return P;
}
+namespace {
// Find the first node in the current function context that referred to the
// tracked symbol and the memory location that value was stored to. Note, the
// value is only reported if the allocation occurred in the same function as
@@ -2206,6 +2207,7 @@ struct AllocationInfo {
const LocationContext *InInterestingMethodContext) :
N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
};
+} // end anonymous namespace
static AllocationInfo
GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
@@ -2228,7 +2230,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
StoreManager::FindUniqueBinding FB(Sym);
StateMgr.iterBindings(St, FB);
-
+
if (FB) {
const MemRegion *R = FB.getRegion();
const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
@@ -2345,10 +2347,10 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// objects. Only "copy", "alloc", "retain" and "new" transfer ownership
// to the caller for NS objects.
const Decl *D = &EndN->getCodeDecl();
-
+
os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
: " is returned from a function ");
-
+
if (D->hasAttr<CFReturnsNotRetainedAttr>())
os << "that is annotated as CF_RETURNS_NOT_RETAINED";
else if (D->hasAttr<NSReturnsNotRetainedAttr>())
@@ -2385,7 +2387,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
- bool GCEnabled, const SummaryLogTy &Log,
+ bool GCEnabled, const SummaryLogTy &Log,
ExplodedNode *n, SymbolRef sym,
CheckerContext &Ctx,
bool IncludeAllocationLine)
@@ -2414,7 +2416,7 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
// FIXME: This will crash the analyzer if an allocation comes from an
// implicit call (ex: a destructor call).
// (Currently there are no such allocations in Cocoa, though.)
- const Stmt *AllocStmt = 0;
+ const Stmt *AllocStmt = nullptr;
ProgramPoint P = AllocNode->getLocation();
if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
AllocStmt = Exit->getCalleeContext()->getCallSite();
@@ -2492,7 +2494,7 @@ class RetainCountChecker
/// the allocation line.
mutable bool IncludeAllocationLine;
-public:
+public:
RetainCountChecker(AnalyzerOptions &AO)
: ShouldResetSummaryLog(false),
IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
@@ -2617,7 +2619,7 @@ public:
void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
-
+
void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
CheckerContext &C) const;
@@ -2630,13 +2632,13 @@ public:
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
- ProgramStateRef
+ ProgramStateRef
checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const;
-
+
bool wantsRegionChangeUpdate(ProgramStateRef state) const {
return true;
}
@@ -2645,7 +2647,7 @@ public:
void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
ExplodedNode *Pred, RetEffect RE, RefVal X,
SymbolRef Sym, ProgramStateRef state) const;
-
+
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndFunction(CheckerContext &C) const;
@@ -2656,7 +2658,7 @@ public:
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
-
+
void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
@@ -2678,7 +2680,7 @@ public:
} // end anonymous namespace
namespace {
-class StopTrackingCallback : public SymbolVisitor {
+class StopTrackingCallback final : public SymbolVisitor {
ProgramStateRef state;
public:
StopTrackingCallback(ProgramStateRef st) : state(st) {}
@@ -2740,21 +2742,21 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
if (!BE)
return;
-
+
ArgEffect AE = IncRef;
-
+
switch (BE->getBridgeKind()) {
case clang::OBC_Bridge:
// Do nothing.
return;
case clang::OBC_BridgeRetained:
AE = IncRef;
- break;
+ break;
case clang::OBC_BridgeTransfer:
AE = DecRefBridgedTransferred;
break;
}
-
+
ProgramStateRef state = C.getState();
SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
@@ -2765,7 +2767,7 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
RefVal::Kind hasErr = (RefVal::Kind) 0;
state = updateSymbol(state, Sym, *T, AE, hasErr, C);
-
+
if (hasErr) {
// FIXME: If we get an error during a bridge cast, should we report it?
return;
@@ -2777,7 +2779,7 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
void RetainCountChecker::processObjCLiterals(CheckerContext &C,
const Expr *Ex) const {
ProgramStateRef state = C.getState();
- const ExplodedNode *pred = C.getPredecessor();
+ const ExplodedNode *pred = C.getPredecessor();
for (const Stmt *Child : Ex->children()) {
SVal V = state->getSVal(Child, pred->getLocationContext());
if (SymbolRef sym = V.getAsSymbol())
@@ -2790,17 +2792,17 @@ void RetainCountChecker::processObjCLiterals(CheckerContext &C,
}
}
}
-
+
// Return the object as autoreleased.
// RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
- if (SymbolRef sym =
+ if (SymbolRef sym =
state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
QualType ResultTy = Ex->getType();
state = setRefBinding(state, sym,
RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
}
-
- C.addTransition(state);
+
+ C.addTransition(state);
}
void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
@@ -2817,7 +2819,7 @@ void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
CheckerContext &C) const {
- const ExplodedNode *Pred = C.getPredecessor();
+ const ExplodedNode *Pred = C.getPredecessor();
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
@@ -2966,7 +2968,7 @@ void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
if (Sym)
state = removeRefBinding(state, Sym);
}
-
+
C.addTransition(state);
}
@@ -3062,7 +3064,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
if (ReceiverIsTracked)
- RE = getSummaryManager(C).getObjAllocRetEffect();
+ RE = getSummaryManager(C).getObjAllocRetEffect();
else
RE = RetEffect::MakeNoRet();
}
@@ -3129,8 +3131,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
}
}
-
-ProgramStateRef
+ProgramStateRef
RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const {
@@ -3306,7 +3307,7 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St,
if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
return;
- ExplodedNode *N = C.generateSink(St);
+ ExplodedNode *N = C.generateErrorNode(St);
if (!N)
return;
@@ -3388,7 +3389,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
isMakeCollectable(FD, FName);
}
}
-
+
if (!canEval)
return false;
@@ -3531,7 +3532,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *Pred,
RetEffect RE, RefVal X,
SymbolRef Sym,
- ProgramStateRef state) const {
+ ProgramStateRef state) const {
// HACK: Ignore retain-count issues on values accessed through ivars,
// because of cases like this:
// [_contentView retain];
@@ -3669,7 +3670,6 @@ void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
-
// FIXME: We may add to the interface of evalAssume the list of symbols
// whose assumptions have changed. For now we just iterate through the
// bindings and check if any of the tracked symbols are NULL. This isn't
@@ -3700,7 +3700,7 @@ ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
return state;
}
-ProgramStateRef
+ProgramStateRef
RetainCountChecker::checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
@@ -3810,7 +3810,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
return nullptr;
}
-ProgramStateRef
+ProgramStateRef
RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
SymbolRef sid, RefVal V,
SmallVectorImpl<SymbolRef> &Leaked) const {
@@ -3890,7 +3890,7 @@ void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
// and suggest annotations.
if (LCtx->getParent())
return;
-
+
B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
@@ -3910,7 +3910,7 @@ RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
sym->dumpToStream(out);
tag = new CheckerProgramPointTag(this, out.str());
}
- return tag;
+ return tag;
}
void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
@@ -3993,7 +3993,9 @@ void ento::registerRetainCountChecker(CheckerManager &Mgr) {
// Implementation of the CallEffects API.
//===----------------------------------------------------------------------===//
-namespace clang { namespace ento { namespace objc_retain {
+namespace clang {
+namespace ento {
+namespace objc_retain {
// This is a bit gross, but it allows us to populate CallEffects without
// creating a bunch of accessors. This kind is very localized, so the
@@ -4022,4 +4024,6 @@ CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
#undef createCallEffect
-}}}
+} // end namespace objc_retain
+} // end namespace ento
+} // end namespace clang
diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index acbd0d95d07b..19fa0fb193cc 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -23,7 +23,7 @@ using namespace clang;
using namespace ento;
namespace {
-class ReturnPointerRangeChecker :
+class ReturnPointerRangeChecker :
public Checker< check::PreStmt<ReturnStmt> > {
mutable std::unique_ptr<BuiltinBug> BT;
@@ -39,7 +39,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
-
+
SVal V = state->getSVal(RetE, C.getLocationContext());
const MemRegion *R = V.getAsRegion();
@@ -62,11 +62,11 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.generateSink(StOutBound);
+ ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
-
+
// FIXME: This bug corresponds to CWE-466. Eventually we should have bug
// types explicitly reference such exploit categories (when applicable).
if (!BT)
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 2668ac1e1eca..c5e826a84b84 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -80,7 +80,7 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
const Expr *TrackingE = nullptr) {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index c22e78b7eb62..7026a2ec16a1 100644
--- a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -92,7 +92,7 @@ public:
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
namespace {
-class StopTrackingCallback : public SymbolVisitor {
+class StopTrackingCallback final : public SymbolVisitor {
ProgramStateRef state;
public:
StopTrackingCallback(ProgramStateRef st) : state(st) {}
@@ -200,7 +200,9 @@ void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
State = State->remove<StreamMap>(Sym);
}
- ExplodedNode *N = C.addTransition(State);
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
reportLeaks(LeakedStreams, C, N);
}
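
The hunk above swaps a plain addTransition() for generateNonFatalErrorNode(), which tags the node with the checker and returns null when the same tagged node was already reached on another path. A minimal sketch of the resulting reporting pattern follows; the function and message names are illustrative, not part of this patch.

// Sketch only: report leaks from a non-fatal (warning) error node so the
// analysis keeps exploring the path past the report.
static void reportLeaksSketch(ArrayRef<SymbolRef> Leaked, CheckerContext &C,
                              ProgramStateRef State,
                              std::unique_ptr<BuiltinBug> &BT,
                              const CheckerBase *Checker) {
  ExplodedNode *N = C.generateNonFatalErrorNode(State);
  if (!N)  // Node already visited on another path; nothing to report.
    return;
  if (!BT)
    BT.reset(new BuiltinBug(Checker, "Resource leak",
                            "Opened stream never closed."));
  for (SymbolRef Sym : Leaked) {
    (void)Sym;  // A real checker would mention Sym in the report text.
    C.emitReport(llvm::make_unique<BugReport>(*BT, BT->getDescription(), N));
  }
}
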
@@ -208,7 +210,7 @@ void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
const CallEvent &Call,
CheckerContext &C) const {
// We reached a bug, stop exploring the path here by generating a sink.
- ExplodedNode *ErrNode = C.generateSink();
+ ExplodedNode *ErrNode = C.generateErrorNode();
// If we've already reached this node on another path, return.
if (!ErrNode)
return;
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 813c811ef15f..79fc701d6d58 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines stack address leak checker, which checks if an invalid
+// This file defines stack address leak checker, which checks if an invalid
// stack address is stored into a global or heap location. See CERT DCL30-C.
//
//===----------------------------------------------------------------------===//
@@ -49,20 +49,20 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
SourceManager &SM = Ctx.getSourceManager();
SourceRange range;
os << "Address of ";
-
+
// Check if the region is a compound literal.
- if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
const CompoundLiteralExpr *CL = CR->getLiteralExpr();
os << "stack memory associated with a compound literal "
"declared on line "
<< SM.getExpansionLineNumber(CL->getLocStart())
- << " returned to caller";
+ << " returned to caller";
range = CL->getSourceRange();
}
else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
const Expr *ARE = AR->getExpr();
SourceLocation L = ARE->getLocStart();
- range = ARE->getSourceRange();
+ range = ARE->getSourceRange();
os << "stack memory allocated by call to alloca() on line "
<< SM.getExpansionLineNumber(L);
}
@@ -87,14 +87,14 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
}
else {
llvm_unreachable("Invalid region in ReturnStackAddressChecker.");
- }
-
+ }
+
return range;
}
void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
const Expr *RetE) const {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
@@ -118,7 +118,7 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *
void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
-
+
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
@@ -130,10 +130,10 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
if (!R)
return;
-
+
const StackSpaceRegion *SS =
dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
-
+
if (!SS)
return;
@@ -156,6 +156,15 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
if (isa<CXXConstructExpr>(RetE) && RetE->getType()->isRecordType())
return;
+ // The CK_CopyAndAutoreleaseBlockObject cast causes the block to be copied
+ // so the stack address is not escaping here.
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
+ if (isa<BlockDataRegion>(R) &&
+ ICE->getCastKind() == CK_CopyAndAutoreleaseBlockObject) {
+ return;
+ }
+ }
+
EmitStackError(C, R, RetE);
}
@@ -175,35 +184,35 @@ void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
Ctx(CC),
CurSFC(CC.getLocationContext()->getCurrentStackFrame())
{}
-
+
bool HandleBinding(StoreManager &SMgr, Store store,
const MemRegion *region, SVal val) override {
if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
return true;
-
+
const MemRegion *vR = val.getAsRegion();
if (!vR)
return true;
-
+
// Under automated retain release, it is okay to assign a block
// directly to a global variable.
if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
isa<BlockDataRegion>(vR))
return true;
- if (const StackSpaceRegion *SSR =
+ if (const StackSpaceRegion *SSR =
dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
// If the global variable holds a location in the current stack frame,
// record the binding to emit a warning.
if (SSR->getStackFrame() == CurSFC)
V.push_back(std::make_pair(region, vR));
}
-
+
return true;
}
};
-
+
CallBack cb(Ctx);
state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
@@ -211,7 +220,7 @@ void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
return;
// Generate an error node.
- ExplodedNode *N = Ctx.addTransition(state);
+ ExplodedNode *N = Ctx.generateNonFatalErrorNode(state);
if (!N)
return;
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 2109a75b1fb6..82b01fe814da 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -43,8 +43,8 @@ struct StreamState {
static StreamState getOpened(const Stmt *s) { return StreamState(Opened, s); }
static StreamState getClosed(const Stmt *s) { return StreamState(Closed, s); }
- static StreamState getOpenFailed(const Stmt *s) {
- return StreamState(OpenFailed, s);
+ static StreamState getOpenFailed(const Stmt *s) {
+ return StreamState(OpenFailed, s);
}
static StreamState getEscaped(const Stmt *s) {
return StreamState(Escaped, s);
@@ -59,14 +59,14 @@ struct StreamState {
class StreamChecker : public Checker<eval::Call,
check::DeadSymbols > {
mutable IdentifierInfo *II_fopen, *II_tmpfile, *II_fclose, *II_fread,
- *II_fwrite,
- *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
+ *II_fwrite,
+ *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
*II_clearerr, *II_feof, *II_ferror, *II_fileno;
mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
BT_doubleclose, BT_ResourceLeak;
public:
- StreamChecker()
+ StreamChecker()
: II_fopen(nullptr), II_tmpfile(nullptr), II_fclose(nullptr),
II_fread(nullptr), II_fwrite(nullptr), II_fseek(nullptr),
II_ftell(nullptr), II_rewind(nullptr), II_fgetpos(nullptr),
@@ -93,10 +93,10 @@ private:
void Fileno(CheckerContext &C, const CallExpr *CE) const;
void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
-
- ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
+
+ ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
CheckerContext &C) const;
- ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
+ ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
CheckerContext &C) const;
};
@@ -216,13 +216,13 @@ void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
C.blockCount())
.castAs<DefinedSVal>();
state = state->BindExpr(CE, C.getLocationContext(), RetVal);
-
+
ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
-
+
if (SymbolRef Sym = RetVal.getAsSymbol()) {
// if RetVal is not NULL, set the symbol's state to Opened.
stateNotNull =
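
For context, assumeDual() is what splits one symbolic state into the two constrained worlds bound above. A compressed sketch of the idiom, under the assumption that RetVal is the DefinedSVal conjured for fopen's return value:

// Sketch of the bifurcation idiom: one successor state per feasible
// constraint on the conjured return value.
static void bifurcateOnReturn(CheckerContext &C, ProgramStateRef State,
                              DefinedSVal RetVal) {
  ConstraintManager &CM = C.getConstraintManager();
  ProgramStateRef StateNotNull, StateNull;
  std::tie(StateNotNull, StateNull) = CM.assumeDual(State, RetVal);
  if (StateNotNull)
    C.addTransition(StateNotNull);  // FILE* is non-null: open succeeded
  if (StateNull)
    C.addTransition(StateNull);     // FILE* is null: open failed
}
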
@@ -271,7 +271,7 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
if (x >= 0 && x <= 2)
return;
- if (ExplodedNode *N = C.addTransition(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
if (!BT_illegalwhence)
BT_illegalwhence.reset(
new BuiltinBug(this, "Illegal whence argument",
@@ -349,7 +349,7 @@ ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (!stateNotNull && stateNull) {
- if (ExplodedNode *N = C.generateSink(stateNull)) {
+ if (ExplodedNode *N = C.generateErrorNode(stateNull)) {
if (!BT_nullfp)
BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
"Stream pointer might be NULL."));
@@ -368,17 +368,17 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
if (!Sym)
return state;
-
+
const StreamState *SS = state->get<StreamMap>(Sym);
// If the file stream is not tracked, return.
if (!SS)
return state;
-
+
// Check: double-closing a file descriptor could cause undefined behaviour,
// according to the man pages.
if (SS->isClosed()) {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (N) {
if (!BT_doubleclose)
BT_doubleclose.reset(new BuiltinBug(
@@ -389,7 +389,7 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
}
return nullptr;
}
-
+
// Close the File Descriptor.
return state->set<StreamMap>(Sym, StreamState::getClosed(CE));
}
@@ -406,7 +406,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
continue;
if (SS->isOpened()) {
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (N) {
if (!BT_ResourceLeak)
BT_ResourceLeak.reset(new BuiltinBug(
diff --git a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 6e2477579f55..2e0529015ca6 100644
--- a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -48,7 +48,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
return;
if (State->isTainted(E, C.getLocationContext())) {
- if (ExplodedNode *N = C.addTransition()) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
initBugType();
auto report = llvm::make_unique<BugReport>(*BT, "tainted",N);
report->addRange(E->getSourceRange());
diff --git a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 638701da8a01..b794d2f86bbe 100644
--- a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -167,7 +167,7 @@ bool TestAfterDivZeroChecker::hasDivZeroMap(SVal Var,
}
void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
- if (ExplodedNode *N = C.generateSink(C.getState())) {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
if (!DivZeroBug)
DivZeroBug.reset(new BuiltinBug(this, "Division by zero"));
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 1d8ef9947175..ed17610e4116 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -30,7 +30,7 @@ class UndefBranchChecker : public Checker<check::BranchCondition> {
ProgramStateRef St;
const LocationContext *LCtx;
- FindUndefExpr(ProgramStateRef S, const LocationContext *L)
+ FindUndefExpr(ProgramStateRef S, const LocationContext *L)
: St(S), LCtx(L) {}
const Expr *FindExpr(const Expr *Ex) {
@@ -45,7 +45,7 @@ class UndefBranchChecker : public Checker<check::BranchCondition> {
return Ex;
}
- bool MatchesCriteria(const Expr *Ex) {
+ bool MatchesCriteria(const Expr *Ex) {
return St->getSVal(Ex, LCtx).isUndef();
}
};
@@ -62,7 +62,7 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
if (X.isUndef()) {
// Generate a sink node, which implicitly marks both outgoing branches as
// infeasible.
- ExplodedNode *N = Ctx.generateSink();
+ ExplodedNode *N = Ctx.generateErrorNode();
if (N) {
if (!BT)
BT.reset(new BuiltinBug(
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 53fd069bf150..17fe8610da06 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -74,7 +74,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
// Get the VarRegion associated with VD in the local stack frame.
if (Optional<UndefinedVal> V =
state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
- if (ExplodedNode *N = C.generateSink()) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "uninitialized variable captured by block"));
@@ -83,7 +83,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
- os << "Variable '" << VD->getName()
+ os << "Variable '" << VD->getName()
<< "' is uninitialized when captured by block";
auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 5353310e6d5a..38d2aa6d8f9d 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines UndefResultChecker, a builtin check in ExprEngine that
+// This defines UndefResultChecker, a builtin check in ExprEngine that
// performs checks for undefined results of non-assignment binary operators.
//
//===----------------------------------------------------------------------===//
@@ -25,7 +25,7 @@ using namespace clang;
using namespace ento;
namespace {
-class UndefResultChecker
+class UndefResultChecker
: public Checker< check::PostStmt<BinaryOperator> > {
mutable std::unique_ptr<BugType> BT;
@@ -50,10 +50,10 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
return;
// Generate an error node.
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
-
+
if (!BT)
BT.reset(
new BuiltinBug(this, "Result of operation is garbage or undefined"));
@@ -62,7 +62,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
llvm::raw_svector_ostream OS(sbuf);
const Expr *Ex = nullptr;
bool isLeft = true;
-
+
if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
Ex = B->getLHS()->IgnoreParenCasts();
isLeft = true;
@@ -71,13 +71,13 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
Ex = B->getRHS()->IgnoreParenCasts();
isLeft = false;
}
-
+
if (Ex) {
OS << "The " << (isLeft ? "left" : "right")
<< " operand of '"
<< BinaryOperator::getOpcodeStr(B->getOpcode())
<< "' is a garbage value";
- }
+ }
else {
// Neither operand was undefined, but the result is undefined.
OS << "The result of the '"
@@ -91,7 +91,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
}
else
bugreporter::trackNullOrUndefValue(N, B, *report);
-
+
C.emitReport(std::move(report));
}
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index ba4daf835148..fe07eafd281f 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -32,7 +32,7 @@ public:
};
} // end anonymous namespace
-void
+void
UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
CheckerContext &C) const {
const Expr *Index = A->getIdx();
@@ -46,7 +46,7 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
if (Ctor->isDefaulted())
return;
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
if (!BT)
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 81c96c4860bc..7a31efc8cef8 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -46,7 +46,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
return;
- ExplodedNode *N = C.generateSink();
+ ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index a799b4c21982..4b78c2058341 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -77,7 +77,7 @@ void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
ProgramStateRef State,
const char *Msg,
SourceRange SR) const {
- ExplodedNode *N = C.generateSink(State);
+ ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
@@ -114,7 +114,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
if (!Val_O_CREAT.hasValue()) {
- if (C.getASTContext().getTargetInfo().getTriple().getVendor()
+ if (C.getASTContext().getTargetInfo().getTriple().getVendor()
== llvm::Triple::Apple)
Val_O_CREAT = 0x0200;
else {
@@ -182,7 +182,7 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
- ExplodedNode *N = C.generateSink(state);
+ ExplodedNode *N = C.generateErrorNode(state);
if (!N)
return;
@@ -220,7 +220,7 @@ static bool IsZeroByteAllocation(ProgramStateRef state,
ProgramStateRef *falseState) {
std::tie(*trueState, *falseState) =
state->assume(argVal.castAs<DefinedSVal>());
-
+
return (*falseState && !*trueState);
}
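
The helper above encodes "perfectly constrained to zero": assuming the value non-zero must be infeasible while assuming it zero stays feasible. A self-contained restatement, assuming the size has already been obtained as a DefinedSVal:

// Sketch: returns true only when the allocation size cannot be anything
// but zero in the given state.
static bool isDefinitelyZero(ProgramStateRef State, DefinedSVal Size) {
  ProgramStateRef TrueSt, FalseSt;  // Size != 0 world / Size == 0 world
  std::tie(TrueSt, FalseSt) = State->assume(Size);
  return FalseSt && !TrueSt;
}
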
@@ -231,7 +231,7 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
ProgramStateRef falseState,
const Expr *arg,
const char *fn_name) const {
- ExplodedNode *N = C.generateSink(falseState);
+ ExplodedNode *N = C.generateErrorNode(falseState);
if (!N)
return false;
@@ -239,7 +239,7 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
"Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
SmallString<256> S;
- llvm::raw_svector_ostream os(S);
+ llvm::raw_svector_ostream os(S);
os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
auto report = llvm::make_unique<BugReport>(*BT_mallocZero, os.str(), N);
@@ -272,13 +272,13 @@ void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
// Is the value perfectly constrained to zero?
if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
- (void) ReportZeroByteAllocation(C, falseState, arg, fn);
+ (void) ReportZeroByteAllocation(C, falseState, arg, fn);
return;
}
// Assume the value is non-zero going forward.
assert(trueState);
if (trueState != state)
- C.addTransition(trueState);
+ C.addTransition(trueState);
}
void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index d78de3c6f3a8..a03abce9626b 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -54,7 +54,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
BugReporter &B,
ExprEngine &Eng) const {
CFGBlocksSet reachable, visited;
-
+
if (Eng.hasWorkRemaining())
return;
@@ -88,7 +88,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
// Bail out if we didn't get the CFG or the ParentMap.
if (!D || !C || !PM)
return;
-
+
// Don't do anything for template instantiations. Proving that code
// in a template instantiation is unreachable means proving that it is
// unreachable in all instantiations.
@@ -235,12 +235,9 @@ bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
return false;
// Run each of the checks on the conditions
- if (containsMacro(cond) || containsEnum(cond)
- || containsStaticLocal(cond) || containsBuiltinOffsetOf(cond)
- || containsStmt<UnaryExprOrTypeTraitExpr>(cond))
- return true;
-
- return false;
+ return containsMacro(cond) || containsEnum(cond) ||
+ containsStaticLocal(cond) || containsBuiltinOffsetOf(cond) ||
+ containsStmt<UnaryExprOrTypeTraitExpr>(cond);
}
// Returns true if the given CFGBlock is empty
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 80384bbfdb30..e3b2ed222363 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines VLASizeChecker, a builtin check in ExprEngine that
+// This defines VLASizeChecker, a builtin check in ExprEngine that
// performs checks for declaration of VLA of undefined or zero size.
// In addition, VLASizeChecker is responsible for defining the extent
// of the MemRegion that represents a VLA.
@@ -46,7 +46,7 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
ProgramStateRef State,
CheckerContext &C) const {
// Generate an error node.
- ExplodedNode *N = C.generateSink(State);
+ ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
@@ -82,7 +82,7 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!DS->isSingleDecl())
return;
-
+
const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
if (!VD)
return;
@@ -106,7 +106,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// warned about that already.
if (sizeV.isUnknown())
return;
-
+
// Check if the size is tainted.
if (state->isTainted(sizeV)) {
reportBug(VLA_Tainted, SE, nullptr, C);
@@ -123,7 +123,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
reportBug(VLA_Zero, SE, stateZero, C);
return;
}
-
+
// From this point on, assume that the size is not zero.
state = stateNotZero;
diff --git a/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
new file mode 100644
index 000000000000..26ffee827cff
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -0,0 +1,218 @@
+//===- VforkChecker.cpp -------- Vfork usage checks --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the vfork checker, which checks for dangerous uses of
+// vfork. A vforked process shares memory (including stack) with its parent,
+// so its range of actions is significantly limited: it can't write to
+// variables, can't call functions outside a small whitelist, etc. For more
+// details, see http://man7.org/linux/man-pages/man2/vfork.2.html
+//
+// This checker checks for prohibited constructs in vforked process.
+// The state transition diagram:
+// PARENT ---(vfork() == 0)--> CHILD
+// |
+// --(*p = ...)--> bug
+// |
+// --foo()--> bug
+// |
+// --return--> bug
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class VforkChecker : public Checker<check::PreCall, check::PostCall,
+ check::Bind, check::PreStmt<ReturnStmt>> {
+ mutable std::unique_ptr<BuiltinBug> BT;
+ mutable llvm::SmallSet<const IdentifierInfo *, 10> VforkWhitelist;
+ mutable const IdentifierInfo *II_vfork;
+
+ static bool isChildProcess(const ProgramStateRef State);
+
+ bool isVforkCall(const Decl *D, CheckerContext &C) const;
+ bool isCallWhitelisted(const IdentifierInfo *II, CheckerContext &C) const;
+
+ void reportBug(const char *What, CheckerContext &C,
+ const char *Details = 0) const;
+
+public:
+ VforkChecker() : II_vfork(0) {}
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+// This trait holds the region of the variable that is assigned vfork's
+// return value (the only region the child is allowed to write).
+// VFORK_RESULT_INVALID means that we are in the parent process.
+// VFORK_RESULT_NONE means that vfork's return value hasn't been assigned.
+// Other values point to valid regions.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(VforkResultRegion, const void *)
+#define VFORK_RESULT_INVALID 0
+#define VFORK_RESULT_NONE ((void *)(uintptr_t)1)
+
+bool VforkChecker::isChildProcess(const ProgramStateRef State) {
+ return State->get<VforkResultRegion>() != VFORK_RESULT_INVALID;
+}
+
+bool VforkChecker::isVforkCall(const Decl *D, CheckerContext &C) const {
+ auto FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD || !C.isCLibraryFunction(FD))
+ return false;
+
+ if (!II_vfork) {
+ ASTContext &AC = C.getASTContext();
+ II_vfork = &AC.Idents.get("vfork");
+ }
+
+ return FD->getIdentifier() == II_vfork;
+}
+
+// Returns true iff it is ok to call the function after a successful vfork.
+bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
+ CheckerContext &C) const {
+ if (VforkWhitelist.empty()) {
+ // According to manpage.
+ const char *ids[] = {
+ "_exit",
+ "_Exit",
+ "execl",
+ "execlp",
+ "execle",
+ "execv",
+ "execvp",
+ "execvpe",
+ 0,
+ };
+
+ ASTContext &AC = C.getASTContext();
+ for (const char **id = ids; *id; ++id)
+ VforkWhitelist.insert(&AC.Idents.get(*id));
+ }
+
+ return VforkWhitelist.count(II);
+}
+
+void VforkChecker::reportBug(const char *What, CheckerContext &C,
+ const char *Details) const {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ if (!BT)
+ BT.reset(new BuiltinBug(this,
+ "Dangerous construct in a vforked process"));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << What << " is prohibited after a successful vfork";
+
+ if (Details)
+ os << "; " << Details;
+
+ auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ // TODO: mark vfork call in BugReportVisitor
+ C.emitReport(std::move(Report));
+ }
+}
+
+// Detect calls to vfork and split execution appropriately.
+void VforkChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // We can't call vfork in the child, so don't bother
+ // (the corresponding warning has already been emitted in checkPreCall).
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State))
+ return;
+
+ if (!isVforkCall(Call.getDecl(), C))
+ return;
+
+ // Get return value of vfork.
+ SVal VforkRetVal = Call.getReturnValue();
+ Optional<DefinedOrUnknownSVal> DVal =
+ VforkRetVal.getAs<DefinedOrUnknownSVal>();
+ if (!DVal)
+ return;
+
+ // Get assigned variable.
+ const ParentMap &PM = C.getLocationContext()->getParentMap();
+ const Stmt *P = PM.getParentIgnoreParenCasts(Call.getOriginExpr());
+ const VarDecl *LhsDecl;
+ std::tie(LhsDecl, std::ignore) = parseAssignment(P);
+
+ // Get assigned memory region.
+ MemRegionManager &M = C.getStoreManager().getRegionManager();
+ const MemRegion *LhsDeclReg =
+ LhsDecl
+ ? M.getVarRegion(LhsDecl, C.getLocationContext())
+ : (const MemRegion *)VFORK_RESULT_NONE;
+
+ // Parent branch gets nonzero return value (according to manpage).
+ ProgramStateRef ParentState, ChildState;
+ std::tie(ParentState, ChildState) = C.getState()->assume(*DVal);
+ C.addTransition(ParentState);
+ ChildState = ChildState->set<VforkResultRegion>(LhsDeclReg);
+ C.addTransition(ChildState);
+}
+
+// Prohibit calls to non-whitelist functions in child process.
+void VforkChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State)
+ && !isCallWhitelisted(Call.getCalleeIdentifier(), C))
+ reportBug("This function call", C);
+}
+
+// Prohibit writes in child process (except for vfork's lhs).
+void VforkChecker::checkBind(SVal L, SVal V, const Stmt *S,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (!isChildProcess(State))
+ return;
+
+ const MemRegion *VforkLhs =
+ static_cast<const MemRegion *>(State->get<VforkResultRegion>());
+ const MemRegion *MR = L.getAsRegion();
+
+ // Child is allowed to modify only vfork's lhs.
+ if (!MR || MR == VforkLhs)
+ return;
+
+ reportBug("This assignment", C);
+}
+
+// Prohibit return from function in child process.
+void VforkChecker::checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (isChildProcess(State))
+ reportBug("Return", C, "call _exit() instead");
+}
+
+void ento::registerVforkChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VforkChecker>();
+}
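
To make the new checker's diagnostics concrete, here is a hypothetical translation unit of the kind it is designed to flag; the function and variable names are illustrative only, not taken from the patch or its tests.

#include <sys/types.h>
#include <unistd.h>

int global;

void spawn(void) {
  pid_t pid = vfork();
  if (pid == 0) {
    // Child shares the parent's address space until exec/_exit.
    global = 1;                             // flagged: "This assignment"
    execl("/bin/true", "true", (char *)0);  // allowed: on the whitelist
    _exit(1);                               // allowed: on the whitelist
  }
  // Parent branch (pid != 0): writes and calls here are unrestricted.
}

Note that a plain return from the child branch would also be flagged, with the hint to call _exit() instead.
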
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index f6ef4aef5c78..550250302611 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a checker that checks virtual function calls during
+// This file defines a checker that checks virtual function calls during
// construction or destruction of C++ objects.
//
//===----------------------------------------------------------------------===//
@@ -37,13 +37,13 @@ class WalkAST : public StmtVisitor<WalkAST> {
/// A vector representing the worklist which has a chain of CallExprs.
DFSWorkList WList;
-
+
// PreVisited : A CallExpr to this FunctionDecl is in the worklist, but the
// body has not been visited yet.
// PostVisited : A CallExpr to this FunctionDecl is in the worklist, and the
// body has been visited.
enum Kind { NotVisited,
- PreVisited, /**< A CallExpr to this FunctionDecl is in the
+ PreVisited, /**< A CallExpr to this FunctionDecl is in the
worklist, but the body has not yet been
visited. */
PostVisited /**< A CallExpr to this FunctionDecl is in the
@@ -57,7 +57,7 @@ class WalkAST : public StmtVisitor<WalkAST> {
/// generating bug reports. This is null while visiting the body of a
/// constructor or destructor.
const CallExpr *visitingCallExpr;
-
+
public:
WalkAST(const CheckerBase *checker, BugReporter &br,
AnalysisDeclContext *ac)
@@ -70,7 +70,7 @@ public:
void Enqueue(WorkListUnit WLUnit) {
const FunctionDecl *FD = WLUnit->getDirectCallee();
if (!FD || !FD->getBody())
- return;
+ return;
Kind &K = VisitedFunctions[FD];
if (K != NotVisited)
return;
@@ -81,9 +81,9 @@ public:
/// This method returns an item from the worklist without removing it.
WorkListUnit Dequeue() {
assert(!WList.empty());
- return WList.back();
+ return WList.back();
}
-
+
void Execute() {
while (hasWork()) {
WorkListUnit WLUnit = Dequeue();
@@ -95,7 +95,7 @@ public:
// Visit the body.
SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
Visit(FD->getBody());
-
+
// Mark the function as being PostVisited to indicate we have
// scanned the body.
VisitedFunctions[FD] = PostVisited;
@@ -114,7 +114,7 @@ public:
void VisitCXXMemberCallExpr(CallExpr *CE);
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitChildren(Stmt *S);
-
+
void ReportVirtualCall(const CallExpr *CE, bool isPure);
};
@@ -138,7 +138,7 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
VisitChildren(CE);
bool callIsNonVirtual = false;
-
+
// Several situations to elide for checking.
if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
// If the member access is fully qualified (i.e., X::F), then treat
@@ -170,7 +170,7 @@ void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
-
+
os << "Call Path : ";
// Name of current visiting CallExpr.
os << *CE->getDirectCallee();
@@ -190,7 +190,7 @@ void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
SourceRange R = CE->getCallee()->getSourceRange();
-
+
if (isPure) {
os << "\n" << "Calling pure virtual functions during construction or "
<< "destruction may lead to undefined behaviour";
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 5798f01370de..54634fdffeb5 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -18,7 +18,7 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
const LangOptions &lang,
const PathDiagnosticConsumers &PDC,
StoreManagerCreator storemgr,
- ConstraintManagerCreator constraintmgr,
+ ConstraintManagerCreator constraintmgr,
CheckerManager *checkerMgr,
AnalyzerOptions &Options,
CodeInjector *injector)
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 1696bcfac9c1..54c668cd2d6f 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -65,7 +65,7 @@ IPAKind AnalyzerOptions::getIPAMode() {
// Set the member variable.
IPAMode = IPAConfig;
}
-
+
return IPAMode;
}
@@ -295,6 +295,13 @@ unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
return MaxTimesInlineLarge.getValue();
}
+unsigned AnalyzerOptions::getMinCFGSizeTreatFunctionsAsLarge() {
+ if (!MinCFGSizeTreatFunctionsAsLarge.hasValue())
+ MinCFGSizeTreatFunctionsAsLarge = getOptionAsInteger(
+ "min-cfg-size-treat-functions-as-large", 14);
+ return MinCFGSizeTreatFunctionsAsLarge.getValue();
+}
+
unsigned AnalyzerOptions::getMaxNodesPerTopLevelFunction() {
if (!MaxNodesPerTopLevelFunction.hasValue()) {
int DefaultValue = 0;
@@ -325,3 +332,15 @@ bool AnalyzerOptions::shouldPrunePaths() {
bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
return getBooleanOption("cfg-conditional-static-initializers", true);
}
+
+bool AnalyzerOptions::shouldInlineLambdas() {
+ if (!InlineLambdas.hasValue())
+ InlineLambdas = getBooleanOption("inline-lambdas", /*Default=*/true);
+ return InlineLambdas.getValue();
+}
+
+bool AnalyzerOptions::shouldWidenLoops() {
+ if (!WidenLoops.hasValue())
+ WidenLoops = getBooleanOption("widen-loops", /*Default=*/false);
+ return WidenLoops.getValue();
+}
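
All three additions follow AnalyzerOptions' memoized getter pattern: an Optional<...> member is filled from the -analyzer-config table on first use and cached. A sketch of the pattern with a hypothetical option; "my-hypothetical-option" and the MyOption member are assumptions for illustration only.

// Sketch of the memoized option-getter pattern used above.
bool AnalyzerOptions::shouldDoMyOption() {
  if (!MyOption.hasValue())
    MyOption = getBooleanOption("my-hypothetical-option", /*Default=*/false);
  return MyOption.getValue();
}
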
diff --git a/lib/StaticAnalyzer/Core/BlockCounter.cpp b/lib/StaticAnalyzer/Core/BlockCounter.cpp
index c1ac03d5ab97..8c99bd808494 100644
--- a/lib/StaticAnalyzer/Core/BlockCounter.cpp
+++ b/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -26,7 +26,7 @@ class CountKey {
unsigned BlockID;
public:
- CountKey(const StackFrameContext *CS, unsigned ID)
+ CountKey(const StackFrameContext *CS, unsigned ID)
: CallSite(CS), BlockID(ID) {}
bool operator==(const CountKey &RHS) const {
@@ -55,7 +55,7 @@ static inline CountMap::Factory& GetFactory(void *F) {
return *static_cast<CountMap::Factory*>(F);
}
-unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite,
+unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite,
unsigned BlockID) const {
CountMap M = GetMap(Data);
CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
@@ -71,10 +71,10 @@ BlockCounter::Factory::~Factory() {
}
BlockCounter
-BlockCounter::Factory::IncrementCount(BlockCounter BC,
+BlockCounter::Factory::IncrementCount(BlockCounter BC,
const StackFrameContext *CallSite,
unsigned BlockID) {
- return BlockCounter(GetFactory(F).add(GetMap(BC.Data),
+ return BlockCounter(GetFactory(F).add(GetMap(BC.Data),
CountKey(CallSite, BlockID),
BC.getNumVisited(CallSite, BlockID)+1).getRoot());
}
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index e4db64fe34e0..11be764633cf 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -81,13 +81,13 @@ eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
// those that came from TrackConstraintBRVisitor.
const void *tagPreferred = ConditionBRVisitor::getTag();
const void *tagLesser = TrackConstraintBRVisitor::getTag();
-
+
if (X->getLocation() != Y->getLocation())
return nullptr;
if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
return X;
-
+
if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
return Y;
@@ -110,7 +110,7 @@ static void removeRedundantMsgs(PathPieces &path) {
for (unsigned i = 0; i < N; ++i) {
IntrusiveRefCntPtr<PathDiagnosticPiece> piece(path.front());
path.pop_front();
-
+
switch (piece->getKind()) {
case clang::ento::PathDiagnosticPiece::Call:
removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
@@ -123,7 +123,7 @@ static void removeRedundantMsgs(PathPieces &path) {
case clang::ento::PathDiagnosticPiece::Event: {
if (i == N-1)
break;
-
+
if (PathDiagnosticEventPiece *nextEvent =
dyn_cast<PathDiagnosticEventPiece>(path.front().get())) {
PathDiagnosticEventPiece *event =
@@ -157,13 +157,13 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
LocationContextMap &LCM) {
bool containsSomethingInteresting = false;
const unsigned N = pieces.size();
-
+
for (unsigned i = 0 ; i < N ; ++i) {
// Remove the front piece from the path. If it is still something we
// want to keep once we are done, we will push it back on the end.
IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
pieces.pop_front();
-
+
switch (piece->getKind()) {
case PathDiagnosticPiece::Call: {
PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
@@ -176,7 +176,7 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
if (!removeUnneededCalls(call->path, R, LCM))
continue;
-
+
containsSomethingInteresting = true;
break;
}
@@ -189,7 +189,7 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
}
case PathDiagnosticPiece::Event: {
PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
-
+
// We never throw away an event, but we do throw it away wholesale
// as part of a path if we throw the entire path away.
containsSomethingInteresting |= !event->isPrunable();
@@ -198,10 +198,10 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
case PathDiagnosticPiece::ControlFlow:
break;
}
-
+
pieces.push_back(piece);
}
-
+
return containsSomethingInteresting;
}
@@ -213,7 +213,7 @@ static bool hasImplicitBody(const Decl *D) {
}
/// Recursively scan through a path and make sure that all call pieces have
-/// valid locations.
+/// valid locations.
static void
adjustCallLocations(PathPieces &Pieces,
PathDiagnosticLocation *LastCallLocation = nullptr) {
@@ -323,7 +323,7 @@ class PathDiagnosticBuilder : public BugReporterContext {
NodeMapClosure NMC;
public:
const LocationContext *LC;
-
+
PathDiagnosticBuilder(GRBugReporter &br,
BugReport *r, InterExplodedGraphMap &Backmap,
PathDiagnosticConsumer *pdc)
@@ -339,7 +339,7 @@ public:
BugReport *getBugReport() { return R; }
Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
-
+
ParentMap& getParentMap() { return LC->getParentMap(); }
const Stmt *getParent(const Stmt *S) {
@@ -957,7 +957,7 @@ static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
if (firstCharOnly)
L = PathDiagnosticLocation::createSingleLocation(L);
-
+
return L;
}
@@ -1001,7 +1001,7 @@ public:
~EdgeBuilder() {
while (!CLocs.empty()) popLocation();
-
+
// Finally, add an initial edge from the start location of the first
// statement (if it doesn't already exist).
PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
@@ -1016,7 +1016,7 @@ public:
popLocation();
PrevLoc = PathDiagnosticLocation();
}
-
+
void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
bool IsPostJump = false);
@@ -1101,7 +1101,7 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
PrevLoc = NewLoc;
return;
}
-
+
if (NewLocClean.asLocation() == PrevLocClean.asLocation())
return;
@@ -1242,7 +1242,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
SVal V = State->getSVal(Ex, LCtx);
if (!(R.isInteresting(V) || IE.count(Ex)))
return;
-
+
switch (Ex->getStmtClass()) {
default:
if (!isa<CastExpr>(Ex))
@@ -1260,7 +1260,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
break;
}
}
-
+
R.markInteresting(V);
}
@@ -1275,7 +1275,7 @@ static void reversePropagateInterestingSymbols(BugReport &R,
const Stmt *CallSite = Callee->getCallSite();
if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
- FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ FunctionDecl::param_const_iterator PI = FD->param_begin(),
PE = FD->param_end();
CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
for (; AI != AE && PI != PE; ++AI, ++PI) {
@@ -1406,7 +1406,7 @@ static bool GenerateExtensivePathDiagnostic(
N->getState().get(), Ex,
N->getLocationContext());
}
-
+
if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
const Stmt *S = CE->getCalleeContext()->getCallSite();
if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
@@ -1414,7 +1414,7 @@ static bool GenerateExtensivePathDiagnostic(
N->getState().get(), Ex,
N->getLocationContext());
}
-
+
PathDiagnosticCallPiece *C =
PathDiagnosticCallPiece::construct(N, *CE, SM);
LCM[&C->path] = CE->getCalleeContext();
@@ -1427,7 +1427,7 @@ static bool GenerateExtensivePathDiagnostic(
CallStack.push_back(StackDiagPair(C, N));
break;
}
-
+
// Pop the call hierarchy if we are done walking the contents
// of a function call.
if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
@@ -1436,7 +1436,7 @@ static bool GenerateExtensivePathDiagnostic(
PathDiagnosticLocation pos =
PathDiagnosticLocation::createBegin(D, SM);
EB.addEdge(pos);
-
+
// Flush all locations, and pop the active path.
bool VisitedEntireCall = PD.isWithinCall();
EB.flushLocations();
@@ -1466,7 +1466,7 @@ static bool GenerateExtensivePathDiagnostic(
}
break;
}
-
+
// Note that it is important that we update the LocationContext
// after looking at CallExits. CallExit basically adds an
// edge in the *caller*, so we don't want to update the LocationContext
@@ -1486,7 +1486,7 @@ static bool GenerateExtensivePathDiagnostic(
CalleeCtx, CallerCtx);
}
}
-
+
// Are we jumping to the head of a loop? Add a special diagnostic.
if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
PathDiagnosticLocation L(Loop, SM, PDB.LC);
@@ -1552,11 +1552,11 @@ static bool GenerateExtensivePathDiagnostic(
else
EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
}
-
+
break;
}
-
-
+
+
} while (0);
if (!NextNode)
@@ -2410,7 +2410,7 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
// Trim edges on expressions that are consumed by
// the parent expression.
if (isa<Expr>(s1End) && PM.isConsumedExpr(cast<Expr>(s1End))) {
- removeEdge = true;
+ removeEdge = true;
}
// Trim edges where a lexical containment doesn't exist.
// For example:
@@ -2557,7 +2557,7 @@ BugReport::~BugReport() {
const Decl *BugReport::getDeclWithIssue() const {
if (DeclWithIssue)
return DeclWithIssue;
-
+
const ExplodedNode *N = getErrorNode();
if (!N)
return nullptr;
@@ -2579,9 +2579,7 @@ void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
hash.AddPointer(GetCurrentOrPreviousStmt(ErrorNode));
}
- for (SmallVectorImpl<SourceRange>::const_iterator I =
- Ranges.begin(), E = Ranges.end(); I != E; ++I) {
- const SourceRange range = *I;
+ for (SourceRange range : Ranges) {
if (!range.isValid())
continue;
hash.AddInteger(range.getBegin().getRawEncoding());
@@ -2714,8 +2712,7 @@ llvm::iterator_range<BugReport::ranges_iterator> BugReport::getRanges() {
if (Ranges.size() == 1 && !Ranges.begin()->isValid())
return llvm::make_range(ranges_iterator(), ranges_iterator());
- return llvm::iterator_range<BugReport::ranges_iterator>(Ranges.begin(),
- Ranges.end());
+ return llvm::make_range(Ranges.begin(), Ranges.end());
}
PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
@@ -2973,14 +2970,14 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
for (PathPieces::const_iterator I = path.begin(), E = path.end();
I!=E; ++I) {
-
+
PathDiagnosticPiece *piece = I->get();
// Recursively compact calls.
if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){
CompactPathDiagnostic(call->path, SM);
}
-
+
// Get the location of the PathDiagnosticPiece.
const FullSourceLoc Loc = piece->getLocation().asLocation();
@@ -3126,7 +3123,7 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
PD.resetPath();
origReportConfigToken = R->getConfigurationChangeToken();
- // Generate the very last diagnostic piece - the piece is visible before
+ // Generate the very last diagnostic piece - the piece is visible before
// the trace is expanded.
std::unique_ptr<PathDiagnosticPiece> LastPiece;
for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
@@ -3224,6 +3221,11 @@ void BugReporter::Register(BugType *BT) {
void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
if (const ExplodedNode *E = R->getErrorNode()) {
+ // An error node must either be a sink or have a tag, otherwise
+ // it could get reclaimed before the path diagnostic is created.
+ assert((E->isSink() || E->getLocation().getTag()) &&
+ "Error node must either be a sink or have a tag");
+
const AnalysisDeclContext *DeclCtx =
E->getLocationContext()->getAnalysisDeclContext();
// The source of autosynthesized body can be handcrafted AST or a model
@@ -3234,7 +3236,7 @@ void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
!DeclCtx->isBodyAutosynthesizedFromModelFile())
return;
}
-
+
bool ValidSourceLoc = R->getLocation(getSourceManager()).isValid();
assert(ValidSourceLoc);
// If we mess up in a release build, we'd still prefer to just drop the bug
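
The new assert states the invariant behind the generateSink() -> generateErrorNode() renames throughout this patch: a report's node must be a sink or carry a tag, or node reclamation may collect it before the path diagnostic is built. A sketch contrasting the two checker-facing helpers; the surrounding function is illustrative only.

// Sketch of the two error-node flavors; both tag the node with the checker.
static void demonstrateErrorNodes(CheckerContext &C) {
  // Fatal: the node is a sink, so the path is not explored further.
  if (ExplodedNode *N = C.generateErrorNode()) {
    (void)N;  // emit a fatal BugReport hanging off N here
  }
  // Non-fatal: tagged but not a sink; exploration continues past it.
  if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
    (void)N;  // emit a warning BugReport hanging off N here
  }
}
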
@@ -3269,10 +3271,10 @@ namespace {
struct FRIEC_WLItem {
const ExplodedNode *N;
ExplodedNode::const_succ_iterator I, E;
-
+
FRIEC_WLItem(const ExplodedNode *n)
: N(n), I(N->succ_begin()), E(N->succ_end()) {}
-};
+};
}
static BugReport *
@@ -3287,11 +3289,11 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
// post-dominated by a sink, simply add all the nodes in the equivalence class
// to 'Nodes'. Any of the reports will serve as a "representative" report.
if (!BT.isSuppressOnSink()) {
- BugReport *R = I;
+ BugReport *R = &*I;
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
const ExplodedNode *N = I->getErrorNode();
if (N) {
- R = I;
+ R = &*I;
bugReports.push_back(R);
}
}
@@ -3317,35 +3319,35 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
}
// No successors? By definition this node isn't post-dominated by a sink.
if (errorNode->succ_empty()) {
- bugReports.push_back(I);
+ bugReports.push_back(&*I);
if (!exampleReport)
- exampleReport = I;
+ exampleReport = &*I;
continue;
}
// At this point we know that 'N' is not a sink and it has at least one
- // successor. Use a DFS worklist to find a non-sink end-of-path node.
+ // successor. Use a DFS worklist to find a non-sink end-of-path node.
typedef FRIEC_WLItem WLItem;
typedef SmallVector<WLItem, 10> DFSWorkList;
llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
-
+
DFSWorkList WL;
WL.push_back(errorNode);
Visited[errorNode] = 1;
-
+
while (!WL.empty()) {
WLItem &WI = WL.back();
assert(!WI.N->succ_empty());
-
+
for (; WI.I != WI.E; ++WI.I) {
- const ExplodedNode *Succ = *WI.I;
+ const ExplodedNode *Succ = *WI.I;
// End-of-path node?
if (Succ->succ_empty()) {
// If we found an end-of-path node that is not a sink.
if (!Succ->isSink()) {
- bugReports.push_back(I);
+ bugReports.push_back(&*I);
if (!exampleReport)
- exampleReport = I;
+ exampleReport = &*I;
WL.clear();
break;
}
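
Stripped of the report bookkeeping, the walk above is a plain DFS over ExplodedNode successors asking whether any end-of-path node is not a sink. A compact restatement with illustrative names, assuming only the ExplodedNode successor API:

// Sketch: true iff some path from Root ends at a non-sink node, i.e. Root
// is not post-dominated by sinks.
static bool reachesNonSinkEnd(const ExplodedNode *Root) {
  llvm::SmallVector<const ExplodedNode *, 10> WL;
  llvm::DenseSet<const ExplodedNode *> Visited;
  WL.push_back(Root);
  Visited.insert(Root);
  while (!WL.empty()) {
    const ExplodedNode *N = WL.pop_back_val();
    if (N->succ_empty()) {
      if (!N->isSink())
        return true;   // found a live end-of-path node
      continue;        // this branch died in a sink; keep looking
    }
    for (auto I = N->succ_begin(), E = N->succ_end(); I != E; ++I)
      if (Visited.insert(*I).second)
        WL.push_back(*I);
  }
  return false;        // every path ends in a sink
}
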
@@ -3426,7 +3428,7 @@ void BugReporter::FlushReport(BugReport *exampleReport,
PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
auto piece = llvm::make_unique<PathDiagnosticEventPiece>(
L, exampleReport->getDescription());
- for (const SourceRange &Range : exampleReport->getRanges())
+ for (SourceRange Range : exampleReport->getRanges())
piece->addRange(Range);
D->setEndOfPath(std::move(piece));
}
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index f2915ed818b2..ec1310d91814 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -117,7 +117,7 @@ std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
// special ranges for this report.
auto P = llvm::make_unique<PathDiagnosticEventPiece>(
L, BR.getDescription(), Ranges.begin() == Ranges.end());
- for (const SourceRange &Range : Ranges)
+ for (SourceRange Range : Ranges)
P->addRange(Range);
return std::move(P);
@@ -169,7 +169,7 @@ public:
bool InEnableNullFPSuppression) {
if (!CallEvent::isCallStmt(S))
return;
-
+
// First, find when we processed the statement.
do {
if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
@@ -192,11 +192,11 @@ public:
Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>();
if (!CEE)
return;
-
+
const StackFrameContext *CalleeContext = CEE->getCalleeContext();
if (CalleeContext->getCallSite() != S)
return;
-
+
// Check the return value.
ProgramStateRef State = Node->getState();
SVal RetVal = State->getSVal(S, Node->getLocationContext());
@@ -281,7 +281,7 @@ public:
EnableNullFPSuppression);
return nullptr;
}
-
+
// If we're returning 0, we should track where that 0 came from.
bugreporter::trackNullOrUndefValue(N, RetE, BR, /*IsArg*/ false,
EnableNullFPSuppression);
@@ -472,7 +472,7 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
InitE = PIP->getInitializer()->getInit();
}
}
-
+
// Otherwise, see if this is the store site:
// (1) Succ has this binding and Pred does not, i.e. this is
// where the binding first occurred.
@@ -504,7 +504,7 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
-
+
ProgramStateManager &StateMgr = BRC.getStateManager();
CallEventManager &CallMgr = StateMgr.getCallEventManager();
@@ -681,7 +681,7 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
else
os << "Assigning value";
}
-
+
if (R->canPrintPretty()) {
os << " to ";
R->printPretty(os);
@@ -931,7 +931,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
if (!N)
return false;
}
-
+
ProgramStateRef state = N->getState();
// The message send could be nil due to the receiver being nil.
@@ -959,7 +959,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
assert(LVNode && "Unable to find the lvalue node.");
ProgramStateRef LVState = LVNode->getState();
SVal LVal = LVState->getSVal(Inner, LVNode->getLocationContext());
-
+
if (LVState->isNull(LVal).isConstrainedTrue()) {
// In case of C++ references, we want to differentiate between a null
// reference and reference to null pointer.
@@ -1162,11 +1162,11 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
const ExplodedNode *Prev,
BugReporterContext &BRC,
BugReport &BR) {
-
+
ProgramPoint progPoint = N->getLocation();
ProgramStateRef CurrentState = N->getState();
ProgramStateRef PrevState = Prev->getState();
-
+
// Compare the GDMs of the state, because that is where constraints
// are managed. Note that we ensure we only look at nodes that
// were generated by the analyzer engine proper, not checkers.
@@ -1177,16 +1177,16 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
// If an assumption was made on a branch, it should be caught
// here by looking at the state transition.
if (Optional<BlockEdge> BE = progPoint.getAs<BlockEdge>()) {
- const CFGBlock *srcBlk = BE->getSrc();
+ const CFGBlock *srcBlk = BE->getSrc();
if (const Stmt *term = srcBlk->getTerminator())
return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
return nullptr;
}
-
+
if (Optional<PostStmt> PS = progPoint.getAs<PostStmt>()) {
// FIXME: Assuming that BugReporter is a GRBugReporter is a layering
// violation.
- const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
+ const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
cast<GRBugReporter>(BRC.getBugReporter()).
getEngine().geteagerlyAssumeBinOpBifurcationTags();
@@ -1222,7 +1222,7 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
case Stmt::ConditionalOperatorClass:
Cond = cast<ConditionalOperator>(Term)->getCond();
break;
- }
+ }
assert(Cond);
assert(srcBlk->succ_size() == 2);
@@ -1236,9 +1236,9 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
BugReporterContext &BRC,
BugReport &R,
const ExplodedNode *N) {
-
+
const Expr *Ex = Cond;
-
+
while (true) {
Ex = Ex->IgnoreParenCasts();
switch (Ex->getStmtClass()) {
@@ -1294,7 +1294,7 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
Out << '\'';
return quotes;
}
-
+
if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Ex)) {
QualType OriginalTy = OriginalExpr->getType();
if (OriginalTy->isPointerType()) {
@@ -1309,11 +1309,11 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
return false;
}
}
-
+
Out << IL->getValue();
return false;
}
-
+
return false;
}
@@ -1324,10 +1324,10 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
BugReporterContext &BRC,
BugReport &R,
const ExplodedNode *N) {
-
+
bool shouldInvert = false;
Optional<bool> shouldPrune;
-
+
SmallString<128> LhsString, RhsString;
{
llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
@@ -1335,10 +1335,10 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
shouldPrune);
const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
shouldPrune);
-
- shouldInvert = !isVarLHS && isVarRHS;
+
+ shouldInvert = !isVarLHS && isVarRHS;
}
-
+
BinaryOperator::Opcode Op = BExpr->getOpcode();
if (BinaryOperator::isAssignmentOp(Op)) {
@@ -1380,7 +1380,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
default:
return nullptr;
}
-
+
switch (Op) {
case BO_EQ:
Out << "equal to ";
@@ -1392,7 +1392,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
Out << BinaryOperator::getOpcodeStr(Op) << ' ';
break;
}
-
+
Out << (shouldInvert ? LhsString : RhsString);
const LocationContext *LCtx = N->getLocationContext();
PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
@@ -1416,7 +1416,7 @@ ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
SmallString<256> buf;
llvm::raw_svector_ostream Out(buf);
Out << "Assuming " << LhsString << " is ";
-
+
QualType Ty = CondVarExpr->getType();
if (Ty->isPointerType())
@@ -1444,10 +1444,10 @@ ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
}
}
}
-
+
return event;
}
-
+
PathDiagnosticPiece *
ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
const DeclRefExpr *DR,
@@ -1462,11 +1462,11 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
SmallString<256> Buf;
llvm::raw_svector_ostream Out(Buf);
-
+
Out << "Assuming '" << VD->getDeclName() << "' is ";
-
+
QualType VDTy = VD->getType();
-
+
if (VDTy->isPointerType())
Out << (tookTrue ? "non-null" : "null");
else if (VDTy->isObjCObjectPointerType())
@@ -1480,7 +1480,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
PathDiagnosticEventPiece *event =
new PathDiagnosticEventPiece(Loc, Out.str());
-
+
const ProgramState *state = N->getState().get();
if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
if (report.isInteresting(R))
@@ -1615,13 +1615,13 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
// Function can only change the value passed in by address.
continue;
}
-
+
// If it is a const pointer value, the function does not intend to
// change the value.
if (T->getPointeeType().isConstQualified())
continue;
- // Mark the call site (LocationContext) as interesting if the value of the
+ // Mark the call site (LocationContext) as interesting if the value of the
// argument is undefined or '0'/'NULL'.
SVal BoundVal = State->getSVal(R);
if (BoundVal.isUndef() || BoundVal.isZeroConstant()) {
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index 59a6b6fbc595..aaffb0b82ce0 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -6,6 +6,7 @@ add_clang_library(clangStaticAnalyzerCore
AnalyzerOptions.cpp
BasicValueFactory.cpp
BlockCounter.cpp
+ IssueHash.cpp
BugReporter.cpp
BugReporterVisitors.cpp
CallEvent.cpp
@@ -17,6 +18,7 @@ add_clang_library(clangStaticAnalyzerCore
CommonBugCategories.cpp
ConstraintManager.cpp
CoreEngine.cpp
+ DynamicTypeMap.cpp
Environment.cpp
ExplodedGraph.cpp
ExprEngine.cpp
@@ -26,6 +28,7 @@ add_clang_library(clangStaticAnalyzerCore
ExprEngineObjC.cpp
FunctionSummary.cpp
HTMLDiagnostics.cpp
+ LoopWidening.cpp
MemRegion.cpp
PathDiagnostic.cpp
PlistDiagnostics.cpp
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 8542f7d6a86b..69af09b25b6e 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -49,11 +50,7 @@ QualType CallEvent::getResultType() const {
return ResultTy;
}
-static bool isCallbackArg(SVal V, QualType T) {
- // If the parameter is 0, it's harmless.
- if (V.isZeroConstant())
- return false;
-
+static bool isCallback(QualType T) {
// If a parameter is a block or a callback, assume it can modify pointer.
if (T->isBlockPointerType() ||
T->isFunctionPointerType() ||
@@ -74,32 +71,53 @@ static bool isCallbackArg(SVal V, QualType T) {
return true;
}
}
-
return false;
}
-bool CallEvent::hasNonZeroCallbackArg() const {
+static bool isVoidPointerToNonConst(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ QualType PointeeTy = PT->getPointeeType();
+ if (PointeeTy.isConstQualified())
+ return false;
+ return PointeeTy->isVoidType();
+ }
+ return false;
+}
+
+bool CallEvent::hasNonNullArgumentsWithType(bool (*Condition)(QualType)) const {
unsigned NumOfArgs = getNumArgs();
// If calling using a function pointer, assume the function does not
- // have a callback. TODO: We could check the types of the arguments here.
+ // have any arguments satisfying the given condition.
+ // TODO: We could check the types of the arguments here.
if (!getDecl())
return false;
unsigned Idx = 0;
for (CallEvent::param_type_iterator I = param_type_begin(),
- E = param_type_end();
+ E = param_type_end();
I != E && Idx < NumOfArgs; ++I, ++Idx) {
if (NumOfArgs <= Idx)
break;
- if (isCallbackArg(getArgSVal(Idx), *I))
+ // If the parameter is 0, it's harmless.
+ if (getArgSVal(Idx).isZeroConstant())
+ continue;
+
+ if (Condition(*I))
return true;
}
-
return false;
}
+bool CallEvent::hasNonZeroCallbackArg() const {
+ return hasNonNullArgumentsWithType(isCallback);
+}
+
+bool CallEvent::hasVoidPointerToNonConstArg() const {
+ return hasNonNullArgumentsWithType(isVoidPointerToNonConst);
+}
+
bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
if (!FD)
@@ -147,7 +165,7 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
SmallVector<SVal, 8> ValuesToInvalidate;
RegionAndSymbolInvalidationTraits ETraits;
- getExtraInvalidatedValues(ValuesToInvalidate);
+ getExtraInvalidatedValues(ValuesToInvalidate, &ETraits);
// Indexes of arguments whose values will be preserved by the call.
llvm::SmallSet<unsigned, 4> PreserveArgs;
@@ -159,7 +177,7 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
// below for efficiency.
if (PreserveArgs.count(Idx))
if (const MemRegion *MR = getArgSVal(Idx).getAsRegion())
- ETraits.setTrait(MR->StripCasts(),
+ ETraits.setTrait(MR->StripCasts(),
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
// TODO: Factor this out + handle the lower level const pointers.
@@ -184,7 +202,7 @@ ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
}
const Decl *D = getDecl();
- assert(D && "Cannot get a program point without a statement or decl");
+ assert(D && "Cannot get a program point without a statement or decl");
SourceLocation Loc = getSourceRange().getBegin();
if (IsPreVisit)
@@ -265,7 +283,7 @@ QualType CallEvent::getDeclaredResultType(const Decl *D) {
return QualType();
}
-
+
llvm_unreachable("unknown callable kind");
}
@@ -325,7 +343,7 @@ void AnyFunctionCall::getInitialStackFrameContents(
}
bool AnyFunctionCall::argumentsMayEscape() const {
- if (hasNonZeroCallbackArg())
+ if (CallEvent::argumentsMayEscape() || hasVoidPointerToNonConstArg())
return true;
const FunctionDecl *D = getDecl();
@@ -336,7 +354,7 @@ bool AnyFunctionCall::argumentsMayEscape() const {
if (!II)
return false;
- // This set of "escaping" APIs is
+ // This set of "escaping" APIs is:
// - 'int pthread_setspecific(pthread_key k, const void *)' stores a
// value into thread local storage. The value can later be retrieved with
@@ -402,8 +420,30 @@ const FunctionDecl *CXXInstanceCall::getDecl() const {
return getSVal(CE->getCallee()).getAsFunctionDecl();
}
-void CXXInstanceCall::getExtraInvalidatedValues(ValueList &Values) const {
- Values.push_back(getCXXThisVal());
+void CXXInstanceCall::getExtraInvalidatedValues(
+ ValueList &Values, RegionAndSymbolInvalidationTraits *ETraits) const {
+ SVal ThisVal = getCXXThisVal();
+ Values.push_back(ThisVal);
+
+ // Don't invalidate if the method is const and there are no mutable fields.
+ if (const CXXMethodDecl *D = cast_or_null<CXXMethodDecl>(getDecl())) {
+ if (!D->isConst())
+ return;
+ // Get the record decl for the class of 'This'. D->getParent() may return a
+ // base class decl, rather than the class of the instance which needs to be
+ // checked for mutable fields.
+ const Expr *Ex = getCXXThisExpr()->ignoreParenBaseCasts();
+ const CXXRecordDecl *ParentRecord = Ex->getType()->getAsCXXRecordDecl();
+ if (!ParentRecord || ParentRecord->hasMutableFields())
+ return;
+ // Preserve CXXThis.
+ const MemRegion *ThisRegion = ThisVal.getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ ETraits->setTrait(ThisRegion->getBaseRegion(),
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ }
}
SVal CXXInstanceCall::getCXXThisVal() const {
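The hunk above makes invalidation of 'this' conditional on the method being const and the class having no mutable fields. A minimal sketch of the precision this buys; the names below are illustrative and not part of the patch:

    // Hypothetical example: the const call no longer wipes out what the
    // analyzer knows about 'c', even though log() has no visible body.
    struct Counter {
      int val;
      void log() const;   // definition not available to the analyzer
    };

    int test() {
      Counter c;
      c.val = 0;
      c.log();            // const method, no mutable fields: *this is
                          // preserved, so c.val is still known to be 0
      return 10 / c.val;  // the division by zero can now be diagnosed
    }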
@@ -435,7 +475,7 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
return RuntimeDefinition();
// Do we know anything about the type of 'this'?
- DynamicTypeInfo DynType = getState()->getDynamicTypeInfo(R);
+ DynamicTypeInfo DynType = getDynamicTypeInfo(getState(), R);
if (!DynType.isValid())
return RuntimeDefinition();
@@ -455,7 +495,7 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// However, we should at least be able to search up and down our own class
// hierarchy, and some real bugs have been caught by checking this.
assert(!RD->isDerivedFrom(MD->getParent()) && "Couldn't find known method");
-
+
// FIXME: This is checking that our DynamicTypeInfo is at least as good as
// the static type. However, because we currently don't update
// DynamicTypeInfo when an object is cast, we can't actually be sure the
@@ -525,7 +565,7 @@ RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const {
if (const MemberExpr *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
if (ME->hasQualifier())
return AnyFunctionCall::getRuntimeDefinition();
-
+
return CXXInstanceCall::getRuntimeDefinition();
}
@@ -549,7 +589,8 @@ ArrayRef<ParmVarDecl*> BlockCall::parameters() const {
return D->parameters();
}
-void BlockCall::getExtraInvalidatedValues(ValueList &Values) const {
+void BlockCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
// FIXME: This also needs to invalidate captured globals.
if (const MemRegion *R = getBlockRegion())
Values.push_back(loc::MemRegionVal(R));
@@ -557,10 +598,25 @@ void BlockCall::getExtraInvalidatedValues(ValueList &Values) const {
void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const {
- const BlockDecl *D = cast<BlockDecl>(CalleeCtx->getDecl());
SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ ArrayRef<ParmVarDecl*> Params;
+ if (isConversionFromLambda()) {
+ auto *LambdaOperatorDecl = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Params = LambdaOperatorDecl->parameters();
+
+ // For blocks converted from a C++ lambda, the callee declaration is the
+ // operator() method on the lambda so we bind "this" to
+ // the lambda captured by the block.
+ const VarRegion *CapturedLambdaRegion = getRegionStoringCapturedLambda();
+ SVal ThisVal = loc::MemRegionVal(CapturedLambdaRegion);
+ Loc ThisLoc = SVB.getCXXThis(LambdaOperatorDecl, CalleeCtx);
+ Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+ } else {
+ Params = cast<BlockDecl>(CalleeCtx->getDecl())->parameters();
+ }
+
addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
- D->parameters());
+ Params);
}
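The bindings logic above covers blocks created by converting a C++ lambda. A sketch of the source pattern being modeled, assuming lambda inlining is enabled and blocks support is on (-fblocks); everything here is illustrative:

    void use(int);
    typedef void (^Block)();

    void test() {
      int x = 3;
      Block b = [x]() { use(x); };  // lambda implicitly converted to a block
      b();  // the callee is the lambda's operator(), so "this" is bound
            // to the lambda object stored by the block
    }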
@@ -570,7 +626,8 @@ SVal CXXConstructorCall::getCXXThisVal() const {
return UnknownVal();
}
-void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values) const {
+void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
if (Data)
Values.push_back(loc::MemRegionVal(static_cast<const MemRegion *>(Data)));
}
@@ -612,7 +669,8 @@ ArrayRef<ParmVarDecl*> ObjCMethodCall::parameters() const {
}
void
-ObjCMethodCall::getExtraInvalidatedValues(ValueList &Values) const {
+ObjCMethodCall::getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {
Values.push_back(getReceiverSVal());
}
@@ -628,7 +686,7 @@ SVal ObjCMethodCall::getReceiverSVal() const {
// FIXME: Is this the best way to handle class receivers?
if (!isInstanceMessage())
return UnknownVal();
-
+
if (const Expr *RecE = getOriginExpr()->getInstanceReceiver())
return getSVal(RecE);
@@ -709,7 +767,7 @@ ObjCMessageKind ObjCMethodCall::getMessageKind() const {
return K;
}
}
-
+
const_cast<ObjCMethodCall *>(this)->Data
= ObjCMessageDataTy(nullptr, 1).getOpaqueValue();
assert(getMessageKind() == OCM_Message);
@@ -730,7 +788,7 @@ bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
getState()->getStateManager().getContext().getSourceManager();
// If the class interface is declared inside the main file, assume it is not
- // subcassed.
+ // subclassed.
// TODO: It could actually be subclassed if the subclass is private as well.
// This is probably very rare.
SourceLocation InterfLoc = IDecl->getEndOfDefinitionLoc();
@@ -800,7 +858,7 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
if (!Receiver)
return RuntimeDefinition();
- DynamicTypeInfo DTI = getState()->getDynamicTypeInfo(Receiver);
+ DynamicTypeInfo DTI = getDynamicTypeInfo(getState(), Receiver);
QualType DynType = DTI.getType();
CanBeSubClassed = DTI.canBeASubClass();
ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);
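Taken together, the CallEvent.cpp changes above generalize the old callback check into a predicate walk over non-null arguments and add a void-pointer escape heuristic. A sketch of what that heuristic means at a call the analyzer cannot see into; the declarations below are illustrative, not from the patch:

    // Hypothetical illustration of AnyFunctionCall::argumentsMayEscape():
    void stash(void *p);         // non-const void*: the pointee may be
                                 // stored away, so the argument escapes
    void inspect(const void *p); // const void*: no escape is assumed

    void test(int *q) {
      stash(q);   // checkers should stop trusting the contents of *q
      inspect(q); // *q may still be assumed unchanged
    }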
diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp
index 22352111d14a..b422a8871983 100644
--- a/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/lib/StaticAnalyzer/Core/Checker.cpp
@@ -23,7 +23,7 @@ StringRef CheckerBase::getTagDescription() const {
CheckName CheckerBase::getCheckName() const { return Name; }
-CheckerProgramPointTag::CheckerProgramPointTag(StringRef CheckerName,
+CheckerProgramPointTag::CheckerProgramPointTag(StringRef CheckerName,
StringRef Msg)
: SimpleProgramPointTag(CheckerName, Msg) {}
diff --git a/lib/StaticAnalyzer/Core/CheckerContext.cpp b/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 6b22bf411c29..5ec8bfa80074 100644
--- a/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -45,7 +45,7 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (BId != 0) {
if (Name.empty())
return true;
- StringRef BName = FD->getASTContext().BuiltinInfo.GetName(BId);
+ StringRef BName = FD->getASTContext().BuiltinInfo.getName(BId);
if (BName.find(Name) != StringRef::npos)
return true;
}
@@ -57,12 +57,8 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
return false;
// Look through 'extern "C"' and anything similar invented in the future.
- const DeclContext *DC = FD->getDeclContext();
- while (DC->isTransparentContext())
- DC = DC->getParent();
-
- // If this function is in a namespace, it is not a C library function.
- if (!DC->isTranslationUnit())
+ // If this function is not in TU directly, it is not a C library function.
+ if (!FD->getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
// If this function is not externally visible, it is not a C library function.
diff --git a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 3d9a81581549..d6aeceb1457d 100644
--- a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
// Recursively find any substatements containing macros
@@ -70,3 +71,26 @@ bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
return false;
}
+
+// Extract the assigned variable (LHS) and the expression assigned to it
+// (RHS) from an assignment or single-variable declaration statement.
+std::pair<const clang::VarDecl *, const clang::Expr *>
+clang::ento::parseAssignment(const Stmt *S) {
+ const VarDecl *VD = nullptr;
+ const Expr *RHS = nullptr;
+
+ if (auto Assign = dyn_cast_or_null<BinaryOperator>(S)) {
+ if (Assign->isAssignmentOp()) {
+ // Ordinary assignment
+ RHS = Assign->getRHS();
+ if (auto DE = dyn_cast_or_null<DeclRefExpr>(Assign->getLHS()))
+ VD = dyn_cast_or_null<VarDecl>(DE->getDecl());
+ }
+ } else if (auto PD = dyn_cast_or_null<DeclStmt>(S)) {
+ // Initialization
+ assert(PD->isSingleDecl() && "We process decls one by one");
+ VD = dyn_cast_or_null<VarDecl>(PD->getSingleDecl());
+ // The single decl may not be a VarDecl (e.g. a typedef); guard VD.
+ RHS = VD ? VD->getAnyInitializer() : nullptr;
+ }
+
+ return std::make_pair(VD, RHS);
+}
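parseAssignment returns the variable and its value expression for both forms of write. A minimal usage sketch; the calling function is hypothetical and not part of this commit (std::tie requires <tuple>):

    // Hypothetical use inside a checker callback:
    void inspectWrite(const Stmt *S) {
      const VarDecl *VD;
      const Expr *RHS;
      std::tie(VD, RHS) = clang::ento::parseAssignment(S);
      if (VD && RHS) {
        // Handles both 'x = expr;' and 'int x = expr;' uniformly:
        // 'VD' is the variable, 'RHS' the expression assigned to it.
      }
    }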
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 2684cc78be75..008e8ef31cda 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -177,7 +177,9 @@ void CheckerManager::runCheckersForStmt(bool isPreVisit,
namespace {
struct CheckObjCMessageContext {
typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
- bool IsPreVisit, WasInlined;
+
+ ObjCMessageVisitKind Kind;
+ bool WasInlined;
const CheckersTy &Checkers;
const ObjCMethodCall &Msg;
ExprEngine &Eng;
@@ -185,14 +187,28 @@ namespace {
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
- CheckObjCMessageContext(bool isPreVisit, const CheckersTy &checkers,
+ CheckObjCMessageContext(ObjCMessageVisitKind visitKind,
+ const CheckersTy &checkers,
const ObjCMethodCall &msg, ExprEngine &eng,
bool wasInlined)
- : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+ : Kind(visitKind), WasInlined(wasInlined), Checkers(checkers),
Msg(msg), Eng(eng) { }
void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
+
+ bool IsPreVisit;
+
+ switch (Kind) {
+ case ObjCMessageVisitKind::Pre:
+ IsPreVisit = true;
+ break;
+ case ObjCMessageVisitKind::MessageNil:
+ case ObjCMessageVisitKind::Post:
+ IsPreVisit = false;
+ break;
+ }
+
const ProgramPoint &L = Msg.getProgramPoint(IsPreVisit,checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
@@ -202,19 +218,30 @@ namespace {
}
/// \brief Run checkers for visiting obj-c messages.
-void CheckerManager::runCheckersForObjCMessage(bool isPreVisit,
+void CheckerManager::runCheckersForObjCMessage(ObjCMessageVisitKind visitKind,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const ObjCMethodCall &msg,
ExprEngine &Eng,
bool WasInlined) {
- CheckObjCMessageContext C(isPreVisit,
- isPreVisit ? PreObjCMessageCheckers
- : PostObjCMessageCheckers,
- msg, Eng, WasInlined);
+ auto &checkers = getObjCMessageCheckers(visitKind);
+ CheckObjCMessageContext C(visitKind, checkers, msg, Eng, WasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
+const std::vector<CheckerManager::CheckObjCMessageFunc> &
+CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) {
+ switch (Kind) {
+ case ObjCMessageVisitKind::Pre:
+ return PreObjCMessageCheckers;
+ case ObjCMessageVisitKind::Post:
+ return PostObjCMessageCheckers;
+ case ObjCMessageVisitKind::MessageNil:
+ return ObjCMessageNilCheckers;
+ }
+ llvm_unreachable("Unknown Kind");
+}
namespace {
// FIXME: This has all the same signatures as CheckObjCMessageContext.
// Is there a way we can merge the two?
@@ -357,9 +384,9 @@ void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
ExplodedNodeSet &Dst,
ExplodedNode *Pred,
ExprEngine &Eng) {
-
+
// We define the builder outside of the loop because if at least one checker
- // creates a sucsessor for Pred, we do not need to generate an 
+ // creates a successor for Pred, we do not need to generate an
// autotransition for it.
NodeBuilder Bldr(Pred, Dst, BC);
for (unsigned i = 0, e = EndFunctionCheckers.size(); i != e; ++i) {
@@ -467,7 +494,7 @@ bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
}
/// \brief Run checkers for region changes.
-ProgramStateRef
+ProgramStateRef
CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
@@ -478,7 +505,7 @@ CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
// bail out.
if (!state)
return nullptr;
- state = RegionChangesCheckers[i].CheckFn(state, invalidated,
+ state = RegionChangesCheckers[i].CheckFn(state, invalidated,
ExplicitRegions, Regions, Call);
}
return state;
@@ -506,7 +533,7 @@ CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
}
/// \brief Run checkers for handling assumptions on symbolic values.
-ProgramStateRef
+ProgramStateRef
CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
SVal Cond, bool Assumption) {
for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
@@ -558,7 +585,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
#endif
}
}
-
+
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!anyEvaluated) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
@@ -616,6 +643,11 @@ void CheckerManager::_registerForPostStmt(CheckStmtFunc checkfn,
void CheckerManager::_registerForPreObjCMessage(CheckObjCMessageFunc checkfn) {
PreObjCMessageCheckers.push_back(checkfn);
}
+
+void CheckerManager::_registerForObjCMessageNil(CheckObjCMessageFunc checkfn) {
+ ObjCMessageNilCheckers.push_back(checkfn);
+}
+
void CheckerManager::_registerForPostObjCMessage(CheckObjCMessageFunc checkfn) {
PostObjCMessageCheckers.push_back(checkfn);
}
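With the new MessageNil visit kind registered above, a checker can subscribe specifically to message sends whose receiver is known to be nil. A minimal sketch of the subscription side, assuming the usual Checker<> template plumbing; the checker name and body are illustrative:

    // Hypothetical checker using the new callback:
    class NilReceiverModeller final
        : public Checker<check::ObjCMessageNil> {
    public:
      void checkObjCMessageNil(const ObjCMethodCall &M,
                               CheckerContext &C) const {
        // The send is a no-op at runtime; model its result here.
      }
    };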
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index 6ba64f52d183..a15e1573e228 100644
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -94,7 +94,7 @@ void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
}
}
-void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
+void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
SmallVectorImpl<CheckerOptInfo> &opts) const {
// Sort checkers for efficient collection.
std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
diff --git a/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
index 4dec52600518..b7db8333aaac 100644
--- a/lib/StaticAnalyzer/Core/ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -26,7 +26,7 @@ static DefinedSVal getLocFromSymbol(const ProgramStateRef &State,
}
ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
- SymbolRef Sym) {
+ SymbolRef Sym) {
QualType Ty = Sym->getType();
DefinedSVal V = Loc::isLocType(Ty) ? getLocFromSymbol(State, Sym)
: nonloc::SymbolVal(Sym);
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 7844ad4a9c04..39cf7e771755 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -271,7 +271,7 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
unsigned Steps,
- ProgramStateRef InitState,
+ ProgramStateRef InitState,
ExplodedNodeSet &Dst) {
bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
@@ -386,7 +386,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
}
return;
}
-
+
case Stmt::DoStmtClass:
HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
return;
@@ -456,7 +456,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
Pred->State, Pred);
}
-void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
+void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
const CFGBlock * B, ExplodedNode *Pred) {
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
@@ -491,7 +491,7 @@ void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
}
-void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
+void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
ExplodedNode *Pred) {
assert(B);
assert(!B->empty());
diff --git a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
new file mode 100644
index 000000000000..fd35b664a912
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -0,0 +1,51 @@
+//==- DynamicTypeMap.cpp - Dynamic Type Info related APIs ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic type information. This
+// information can be used to devirtualize calls during the symbolic exection
+// or do type checking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+
+namespace clang {
+namespace ento {
+
+DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State,
+ const MemRegion *Reg) {
+ Reg = Reg->StripCasts();
+
+ // Look up the dynamic type in the GDM.
+ const DynamicTypeInfo *GDMType = State->get<DynamicTypeMap>(Reg);
+ if (GDMType)
+ return *GDMType;
+
+ // Otherwise, fall back to what we know about the region.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
+ return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ SymbolRef Sym = SR->getSymbol();
+ return DynamicTypeInfo(Sym->getType());
+ }
+
+ return DynamicTypeInfo();
+}
+
+ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *Reg,
+ DynamicTypeInfo NewTy) {
+ Reg = Reg->StripCasts();
+ ProgramStateRef NewState = State->set<DynamicTypeMap>(Reg, NewTy);
+ assert(NewState);
+ return NewState;
+}
+
+} // namespace ento
+} // namespace clang
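The new file exposes dynamic type information through free functions rather than ProgramState methods; the call-site updates in CallEvent.cpp above migrate to them. A minimal before/after sketch, assuming 'State', 'Reg', and 'Ty' are a ProgramStateRef, a MemRegion pointer, and a QualType already in hand:

    // Old style, removed by this patch:
    //   DynamicTypeInfo DTI = State->getDynamicTypeInfo(Reg);
    // New style, via DynamicTypeMap.h:
    DynamicTypeInfo DTI = getDynamicTypeInfo(State, Reg);
    ProgramStateRef NewState = setDynamicTypeInfo(
        State, Reg, DynamicTypeInfo(Ty, /*CanBeSubclass=*/true));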
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index ae5a4cc8b4aa..e2cb52cb417e 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -90,6 +90,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::CXXNullPtrLiteralExprClass:
case Stmt::ObjCStringLiteralClass:
case Stmt::StringLiteralClass:
+ case Stmt::TypeTraitExprClass:
// Known constants; defer to SValBuilder.
return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
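With TypeTraitExprClass added to the known-constants bucket here (and moved into the evaluated-expression bucket in the ExprEngine.cpp diff below), instantiated type traits fold to concrete values instead of aborting the path. A small illustrative example:

    // Hypothetical example: the trait folds to the constant 'true', so
    // the analyzer follows the branch rather than conjuring an unknown.
    void test() {
      if (__is_trivially_copyable(int)) {
        // known to be taken
      }
    }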
@@ -97,9 +98,9 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
const ReturnStmt *RS = cast<ReturnStmt>(S);
if (const Expr *RE = RS->getRetValue())
return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder);
- return UndefinedVal();
+ return UndefinedVal();
}
-
+
// Handle all other Stmt* using a lookup.
default:
return lookupExpr(EnvironmentEntry(S, LCtx));
@@ -120,7 +121,7 @@ Environment EnvironmentManager::bindExpr(Environment Env,
}
namespace {
-class MarkLiveCallback : public SymbolVisitor {
+class MarkLiveCallback final : public SymbolVisitor {
SymbolReaper &SymReaper;
public:
MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
@@ -170,10 +171,6 @@ EnvironmentManager::removeDeadBindings(Environment Env,
// Copy the binding to the new map.
EBMapRef = EBMapRef.add(BlkExpr, X);
- // If the block expr's value is a memory region, then mark that region.
- if (Optional<loc::MemRegionVal> R = X.getAs<loc::MemRegionVal>())
- SymReaper.markLive(R->getRegion());
-
// Mark all symbols in the block expr's value live.
RSScaner.scan(X);
continue;
@@ -194,16 +191,16 @@ void Environment::print(raw_ostream &Out, const char *NL,
for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
const EnvironmentEntry &En = I.getKey();
-
+
if (isFirst) {
Out << NL << NL
<< "Expressions:"
- << NL;
+ << NL;
isFirst = false;
} else {
Out << NL;
}
-
+
const Stmt *S = En.getStmt();
assert(S != nullptr && "Expected non-null Stmt");
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 010d26e48e1d..8a09720b2a19 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -90,8 +90,8 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// (7) The LocationContext is the same as the predecessor.
// (8) Expressions that are *not* lvalue expressions.
// (9) The PostStmt isn't for a non-consumed Stmt or Expr.
- // (10) The successor is neither a CallExpr StmtPoint nor a CallEnter or
- // PreImplicitCall (so that we would be able to find it when retrying a
+ // (10) The successor is neither a CallExpr StmtPoint nor a CallEnter or
+ // PreImplicitCall (so that we would be able to find it when retrying a
// call with no inlining).
// FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
@@ -102,7 +102,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
const ExplodedNode *pred = *(node->pred_begin());
if (pred->succ_size() != 1)
return false;
-
+
const ExplodedNode *succ = *(node->succ_begin());
if (succ->pred_size() != 1)
return false;
@@ -123,7 +123,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// Conditions 5, 6, and 7.
ProgramStateRef state = node->getState();
- ProgramStateRef pred_state = pred->getState();
+ ProgramStateRef pred_state = pred->getState();
if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
progPoint.getLocationContext() != pred->getLocationContext())
return false;
@@ -174,7 +174,7 @@ void ExplodedGraph::collectNode(ExplodedNode *node) {
FreeNodes.push_back(node);
Nodes.RemoveNode(node);
--NumNodes;
- node->~ExplodedNode();
+ node->~ExplodedNode();
}
void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index a3239f591a38..662b0a2dd798 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -26,6 +26,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
@@ -174,7 +175,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
}
}
}
-
+
return state;
}
@@ -265,7 +266,7 @@ bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
return getCheckerManager().wantsRegionChangeUpdate(state);
}
-ProgramStateRef
+ProgramStateRef
ExprEngine::processRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> Explicits,
@@ -315,7 +316,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
const CFGStmt S,
const ExplodedNode *Pred,
const LocationContext *LC) {
-
+
// Are we never purging state values?
if (AMgr.options.AnalysisPurgeOpt == PurgeNone)
return false;
@@ -327,7 +328,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
// Is this on a non-expression?
if (!isa<Expr>(S.getStmt()))
return true;
-
+
// Run before processing a call.
if (CallEvent::isCallStmt(S.getStmt()))
return true;
@@ -475,8 +476,12 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
if (BMI->isAnyMemberInitializer()) {
// Constructors build the object directly in the field,
// but non-objects must be copied in from the initializer.
- const Expr *Init = BMI->getInit()->IgnoreImplicit();
- if (!isa<CXXConstructExpr>(Init)) {
+ if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
+ assert(BMI->getInit()->IgnoreImplicit() == CtorExpr);
+ (void)CtorExpr;
+ // The field was directly constructed, so there is no need to bind.
+ } else {
+ const Expr *Init = BMI->getInit()->IgnoreImplicit();
const ValueDecl *Field;
if (BMI->isIndirectMemberInitializer()) {
Field = BMI->getIndirectMember();
@@ -512,7 +517,7 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
assert(Tmp.size() == 1 && "have not generated any new nodes yet");
assert(*Tmp.begin() == Pred && "have not generated any new nodes yet");
Tmp.clear();
-
+
PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
}
@@ -754,9 +759,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXUuidofExprClass:
case Stmt::CXXFoldExprClass:
case Stmt::MSPropertyRefExprClass:
+ case Stmt::MSPropertySubscriptExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
case Stmt::DependentScopeDeclRefExprClass:
- case Stmt::TypeTraitExprClass:
case Stmt::ArrayTypeTraitExprClass:
case Stmt::ExpressionTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
@@ -766,16 +771,19 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::PackExpansionExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
case Stmt::FunctionParmPackExprClass:
+ case Stmt::CoroutineBodyStmtClass:
+ case Stmt::CoawaitExprClass:
+ case Stmt::CoreturnStmtClass:
+ case Stmt::CoyieldExprClass:
case Stmt::SEHTryStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHLeaveStmtClass:
- case Stmt::LambdaExprClass:
case Stmt::SEHFinallyStmtClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
Engine.addAbortedBlock(node, currBldrCtx->getBlock());
break;
}
-
+
case Stmt::ParenExprClass:
llvm_unreachable("ParenExprs already handled.");
case Stmt::GenericSelectionExprClass:
@@ -821,9 +829,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPOrderedDirectiveClass:
case Stmt::OMPAtomicDirectiveClass:
case Stmt::OMPTargetDirectiveClass:
+ case Stmt::OMPTargetDataDirectiveClass:
case Stmt::OMPTeamsDirectiveClass:
case Stmt::OMPCancellationPointDirectiveClass:
case Stmt::OMPCancelDirectiveClass:
+ case Stmt::OMPTaskLoopDirectiveClass:
+ case Stmt::OMPTaskLoopSimdDirectiveClass:
+ case Stmt::OMPDistributeDirectiveClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
case Stmt::ObjCSubscriptRefExprClass:
@@ -901,7 +913,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ObjCStringLiteralClass:
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
- case Stmt::CXXNullPtrLiteralExprClass: {
+ case Stmt::CXXNullPtrLiteralExprClass:
+ case Stmt::OMPArraySectionExprClass:
+ case Stmt::TypeTraitExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
@@ -964,7 +978,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet preVisit;
getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
-
+
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx);
@@ -972,7 +986,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
QualType resultType = Ex->getType();
for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
- it != et; ++it) {
+ it != et; ++it) {
ExplodedNode *N = *it;
const LocationContext *LCtx = N->getLocationContext();
SVal result = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
@@ -981,10 +995,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
Bldr2.generateNode(S, N, state);
}
-
+
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
Bldr.addNodes(Dst);
- break;
+ break;
}
case Stmt::ArraySubscriptExprClass:
@@ -1011,6 +1025,17 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::LambdaExprClass:
+ if (AMgr.options.shouldInlineLambdas()) {
+ Bldr.takeNodes(Pred);
+ VisitLambdaExpr(cast<LambdaExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ } else {
+ const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
+ Engine.addAbortedBlock(node, currBldrCtx->getBlock());
+ }
+ break;
+
case Stmt::BinaryOperatorClass: {
const BinaryOperator* B = cast<BinaryOperator>(S);
if (B->isLogicalOp()) {
@@ -1029,7 +1054,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
Bldr.takeNodes(Pred);
-
+
if (AMgr.options.eagerlyAssumeBinOpBifurcation &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
@@ -1074,7 +1099,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
}
-
+
case Stmt::CXXCatchStmtClass: {
Bldr.takeNodes(Pred);
VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
@@ -1083,7 +1108,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
case Stmt::CXXTemporaryObjectExprClass:
- case Stmt::CXXConstructExprClass: {
+ case Stmt::CXXConstructExprClass: {
Bldr.takeNodes(Pred);
VisitCXXConstructExpr(cast<CXXConstructExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
@@ -1105,7 +1130,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
- for (ExplodedNodeSet::iterator i = PreVisit.begin(),
+ for (ExplodedNodeSet::iterator i = PreVisit.begin(),
e = PreVisit.end(); i != e ; ++i)
VisitCXXDeleteExpr(CDE, *i, Dst);
@@ -1171,18 +1196,18 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXDynamicCastExprClass:
case Stmt::CXXReinterpretCastExprClass:
case Stmt::CXXConstCastExprClass:
- case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
case Stmt::ObjCBridgedCastExprClass: {
Bldr.takeNodes(Pred);
const CastExpr *C = cast<CastExpr>(S);
// Handle the previsit checks.
ExplodedNodeSet dstPrevisit;
getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
-
+
// Handle the expression itself.
ExplodedNodeSet dstExpr;
for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
- e = dstPrevisit.end(); i != e ; ++i) {
+ e = dstPrevisit.end(); i != e ; ++i) {
VisitCast(C, C->getSubExpr(), *i, dstExpr);
}
@@ -1199,7 +1224,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
}
-
+
case Stmt::InitListExprClass:
Bldr.takeNodes(Pred);
VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
@@ -1294,7 +1319,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.takeNodes(Pred);
ProgramStateRef state = Pred->getState();
const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
- if (const Expr *Result = PE->getResultExpr()) {
+ if (const Expr *Result = PE->getResultExpr()) {
SVal V = state->getSVal(Result, Pred->getLocationContext());
Bldr.generateNode(S, Pred,
state->BindExpr(S, Pred->getLocationContext(), V));
@@ -1375,12 +1400,29 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
/// Block entrance. (Update counters).
void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
- NodeBuilderWithSinks &nodeBuilder,
+ NodeBuilderWithSinks &nodeBuilder,
ExplodedNode *Pred) {
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
+ // If this block is terminated by a loop and it has already been visited the
+ // maximum number of times, widen the loop.
+ unsigned int BlockCount = nodeBuilder.getContext().blockCount();
+ if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
+ AMgr.options.shouldWidenLoops()) {
+ const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
+ if (!(Term &&
+ (isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
+ return;
+ // Widen.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef WidenedState =
+ getWidenedLoopState(Pred->getState(), LCtx, BlockCount, Term);
+ nodeBuilder.generateNode(WidenedState, Pred);
+ return;
+ }
+
// FIXME: Refactor this into a checker.
- if (nodeBuilder.getContext().blockCount() >= AMgr.options.maxBlockVisitOnPath) {
+ if (BlockCount >= AMgr.options.maxBlockVisitOnPath) {
static SimpleProgramPointTag tag(TagProviderName, "Block count exceeded");
const ExplodedNode *Sink =
nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
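A sketch of the coverage gap the widening above addresses, assuming the analyzer option wired up here is enabled (widen-loops, via shouldWidenLoops()); the code is illustrative:

    // Hypothetical example: without widening, the path is dropped once
    // the loop has been visited maxBlockVisitOnPath times, and nothing
    // after the loop is analyzed. With widening, the last allowed visit
    // conservatively invalidates loop-modified state and continues.
    int test() {
      int sum = 0;
      for (int i = 0; i < 1000000; ++i)
        sum += i;
      int *p = nullptr;
      return *p;  // reachable after widening, so it can be diagnosed
    }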
@@ -1537,7 +1579,6 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
return;
}
-
if (const Expr *Ex = dyn_cast<Expr>(Condition))
Condition = Ex->IgnoreParens();
@@ -1583,7 +1624,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
}
}
}
-
+
// If the condition is still unknown, give up.
if (X.isUnknownOrUndef()) {
builder.generateNode(PrevState, true, PredI);
@@ -1750,7 +1791,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
DefinedOrUnknownSVal CondV = CondV_untested.castAs<DefinedOrUnknownSVal>();
ProgramStateRef DefaultSt = state;
-
+
iterator I = builder.begin(), EI = builder.end();
bool defaultIsFeasible = I == EI;
@@ -1758,7 +1799,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
// Successor may be pruned out during CFG construction.
if (!I.getBlock())
continue;
-
+
const CaseStmt *Case = I.getCase();
// Evaluate the LHS of the case value.
@@ -1772,47 +1813,24 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
else
V2 = V1;
- // FIXME: Eventually we should replace the logic below with a range
- // comparison, rather than concretize the values within the range.
- // This should be easy once we have "ranges" for NonLVals.
-
- do {
- nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1));
- DefinedOrUnknownSVal Res = svalBuilder.evalEQ(DefaultSt ? DefaultSt : state,
- CondV, CaseVal);
-
- // Now "assume" that the case matches.
- if (ProgramStateRef stateNew = state->assume(Res, true)) {
- builder.generateCaseStmtNode(I, stateNew);
-
- // If CondV evaluates to a constant, then we know that this
- // is the *only* case that we can take, so stop evaluating the
- // others.
- if (CondV.getAs<nonloc::ConcreteInt>())
- return;
- }
-
- // Now "assume" that the case doesn't match. Add this state
- // to the default state (if it is feasible).
- if (DefaultSt) {
- if (ProgramStateRef stateNew = DefaultSt->assume(Res, false)) {
- defaultIsFeasible = true;
- DefaultSt = stateNew;
- }
- else {
- defaultIsFeasible = false;
- DefaultSt = nullptr;
- }
- }
-
- // Concretize the next value in the range.
- if (V1 == V2)
- break;
-
- ++V1;
- assert (V1 <= V2);
-
- } while (true);
+ ProgramStateRef StateCase;
+ if (Optional<NonLoc> NL = CondV.getAs<NonLoc>())
+ std::tie(StateCase, DefaultSt) =
+ DefaultSt->assumeWithinInclusiveRange(*NL, V1, V2);
+ else // UnknownVal
+ StateCase = DefaultSt;
+
+ if (StateCase)
+ builder.generateCaseStmtNode(I, StateCase);
+
+ // Now "assume" that the case doesn't match. Add this state
+ // to the default state (if it is feasible).
+ if (DefaultSt)
+ defaultIsFeasible = true;
+ else {
+ defaultIsFeasible = false;
+ break;
+ }
}
if (!defaultIsFeasible)
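The rewrite above replaces per-value concretization with a single range assumption. A sketch of the GNU case-range input that previously forced up to 256 separate evalEQ calls and now takes one assumeWithinInclusiveRange split; the helper functions are illustrative:

    void handle_byte(int);
    void handle_other(int);

    void test(int x) {
      switch (x) {
      case 0 ... 255:     // GNU case range: one in-range/out-of-range
        handle_byte(x);   // state split instead of 256 comparisons
        break;
      default:
        handle_other(x);
      }
    }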
@@ -1849,13 +1867,44 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// C permits "extern void v", and if you cast the address to a valid type,
- // you can even do things with it. We simply pretend
+ // you can even do things with it. We simply pretend
assert(Ex->isGLValue() || VD->getType()->isVoidType());
- SVal V = state->getLValue(VD, Pred->getLocationContext());
+ const LocationContext *LocCtxt = Pred->getLocationContext();
+ const Decl *D = LocCtxt->getDecl();
+ const auto *MD = D ? dyn_cast<CXXMethodDecl>(D) : nullptr;
+ const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
+ SVal V;
+ bool IsReference;
+ if (AMgr.options.shouldInlineLambdas() && DeclRefEx &&
+ DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
+ MD->getParent()->isLambda()) {
+ // Lookup the field of the lambda.
+ const CXXRecordDecl *CXXRec = MD->getParent();
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+ CXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
+ const FieldDecl *FD = LambdaCaptureFields[VD];
+ if (!FD) {
+ // When a constant is captured, sometimes no corresponding field is
+ // created in the lambda object.
+ assert(VD->getType().isConstQualified());
+ V = state->getLValue(VD, LocCtxt);
+ IsReference = false;
+ } else {
+ Loc CXXThis =
+ svalBuilder.getCXXThis(MD, LocCtxt->getCurrentStackFrame());
+ SVal CXXThisVal = state->getSVal(CXXThis);
+ V = state->getLValue(FD, CXXThisVal);
+ IsReference = FD->getType()->isReferenceType();
+ }
+ } else {
+ V = state->getLValue(VD, LocCtxt);
+ IsReference = VD->getType()->isReferenceType();
+ }
// For references, the 'lvalue' is the pointer address stored in the
// reference region.
- if (VD->getType()->isReferenceType()) {
+ if (IsReference) {
if (const MemRegion *R = V.getAsRegion())
V = state->getSVal(R);
else
@@ -1900,7 +1949,6 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
const Expr *Base = A->getBase()->IgnoreParens();
const Expr *Idx = A->getIdx()->IgnoreParens();
-
ExplodedNodeSet checkerPreStmt;
getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
@@ -2005,8 +2053,9 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
}
namespace {
-class CollectReachableSymbolsCallback : public SymbolVisitor {
+class CollectReachableSymbolsCallback final : public SymbolVisitor {
InvalidatedSymbols Symbols;
+
public:
CollectReachableSymbolsCallback(ProgramStateRef State) {}
const InvalidatedSymbols &getSymbols() const { return Symbols; }
@@ -2064,14 +2113,14 @@ ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
return State;
}
-ProgramStateRef
+ProgramStateRef
ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call,
RegionAndSymbolInvalidationTraits &ITraits) {
-
+
if (!Invalidated || Invalidated->empty())
return State;
@@ -2082,7 +2131,7 @@ ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
PSK_EscapeOther,
&ITraits);
- // If the symbols were invalidated by a call, we want to find out which ones
+ // If the symbols were invalidated by a call, we want to find out which ones
// were invalidated directly due to being arguments to the call.
InvalidatedSymbols SymbolsDirectlyInvalidated;
for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
@@ -2129,7 +2178,6 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
StoreE, *this, *PP);
-
StmtNodeBuilder Bldr(CheckedSet, Dst, *currBldrCtx);
// If the location is not a 'Loc', it will already be handled by
@@ -2142,13 +2190,12 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
Bldr.generateNode(L, state, Pred);
return;
}
-
for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
I!=E; ++I) {
ExplodedNode *PredI = *I;
ProgramStateRef state = PredI->getState();
-
+
state = processPointerEscapedOnBind(state, location, Val);
// When binding the value, pass on the hint that this is a initialization.
@@ -2301,7 +2348,7 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
// "p = 0" is not noted as "Null pointer value stored to 'p'" but
// instead "int *p" is noted as
// "Variable 'p' initialized to a null pointer value"
-
+
static SimpleProgramPointTag tag(TagProviderName, "Location");
Bldr.generateNode(NodeEx, Pred, state, &tag);
}
@@ -2326,7 +2373,7 @@ void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
ExplodedNodeSet &Src,
const Expr *Ex) {
StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx);
-
+
for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
ExplodedNode *Pred = *I;
// Test if the previous node was as the same expression. This can happen
@@ -2349,7 +2396,7 @@ void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
// First assume that the condition is true.
if (StateTrue) {
- SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
+ SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
Bldr.generateNode(Ex, Pred, StateTrue, tags.first);
}
@@ -2641,10 +2688,10 @@ struct DOTGraphTraits<ExplodedNode*> :
<< " NodeID: " << (const void*) N << "\\|";
state->printDOT(Out);
- Out << "\\l";
+ Out << "\\l";
if (const ProgramPointTag *tag = Loc.getTag()) {
- Out << "\\|Tag: " << tag->getTagDescription();
+ Out << "\\|Tag: " << tag->getTagDescription();
Out << "\\l";
}
return Out.str();
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 1777ea97a402..a5b58710b215 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -25,23 +25,23 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
Expr *LHS = B->getLHS()->IgnoreParens();
Expr *RHS = B->getRHS()->IgnoreParens();
-
+
// FIXME: Prechecks eventually go in ::Visit().
ExplodedNodeSet CheckedSet;
ExplodedNodeSet Tmp2;
getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, B, *this);
-
+
// With both the LHS and RHS evaluated, process the operation itself.
for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end();
it != ei; ++it) {
-
+
ProgramStateRef state = (*it)->getState();
const LocationContext *LCtx = (*it)->getLocationContext();
SVal LeftV = state->getSVal(LHS, LCtx);
SVal RightV = state->getSVal(RHS, LCtx);
-
+
BinaryOperator::Opcode Op = B->getOpcode();
-
+
if (Op == BO_Assign) {
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
@@ -57,7 +57,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
LeftV, RightV);
continue;
}
-
+
if (!B->isAssignmentOp()) {
StmtNodeBuilder Bldr(*it, Tmp2, *currBldrCtx);
@@ -90,19 +90,19 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// Process non-assignments except commas or short-circuited
// logical expressions (LAnd and LOr).
- SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
+ SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
if (Result.isUnknown()) {
Bldr.generateNode(B, *it, state);
continue;
- }
+ }
- state = state->BindExpr(B, LCtx, Result);
+ state = state->BindExpr(B, LCtx, Result);
Bldr.generateNode(B, *it, state);
continue;
}
-
+
assert (B->isCompoundAssignmentOp());
-
+
switch (Op) {
default:
llvm_unreachable("Invalid opcode for compound assignment.");
@@ -117,43 +117,43 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
case BO_XorAssign: Op = BO_Xor; break;
case BO_OrAssign: Op = BO_Or; break;
}
-
+
// Perform a load (the LHS). This performs the checks for
// null dereferences, and so on.
ExplodedNodeSet Tmp;
SVal location = LeftV;
evalLoad(Tmp, B, LHS, *it, state, location);
-
+
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
++I) {
state = (*I)->getState();
const LocationContext *LCtx = (*I)->getLocationContext();
SVal V = state->getSVal(LHS, LCtx);
-
+
// Get the computation type.
QualType CTy =
cast<CompoundAssignOperator>(B)->getComputationResultType();
CTy = getContext().getCanonicalType(CTy);
-
+
QualType CLHSTy =
cast<CompoundAssignOperator>(B)->getComputationLHSType();
CLHSTy = getContext().getCanonicalType(CLHSTy);
-
+
QualType LTy = getContext().getCanonicalType(LHS->getType());
-
+
// Promote LHS.
V = svalBuilder.evalCast(V, CLHSTy, LTy);
-
+
// Compute the result of the operation.
SVal Result = svalBuilder.evalCast(evalBinOp(state, Op, V, RightV, CTy),
B->getType(), CTy);
-
+
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
-
+
SVal LHSVal;
-
+
if (Result.isUnknown()) {
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
@@ -168,52 +168,74 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// computation type.
LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
}
-
- // In C++, assignment and compound assignment operators return an
+
+ // In C++, assignment and compound assignment operators return an
// lvalue.
if (B->isGLValue())
state = state->BindExpr(B, LCtx, location);
else
state = state->BindExpr(B, LCtx, Result);
-
+
evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
}
}
-
+
// FIXME: postvisits eventually go in ::Visit()
getCheckerManager().runCheckersForPostStmt(Dst, Tmp2, B, *this);
}
void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
+
CanQualType T = getContext().getCanonicalType(BE->getType());
+ const BlockDecl *BD = BE->getBlockDecl();
// Get the value of the block itself.
- SVal V = svalBuilder.getBlockPointer(BE->getBlockDecl(), T,
+ SVal V = svalBuilder.getBlockPointer(BD, T,
Pred->getLocationContext(),
currBldrCtx->blockCount());
-
+
ProgramStateRef State = Pred->getState();
-
+
// If we created a new MemRegion for the block, we should explicitly bind
// the captured variables.
if (const BlockDataRegion *BDR =
dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
-
+
BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
E = BDR->referenced_vars_end();
-
+
+ auto CI = BD->capture_begin();
+ auto CE = BD->capture_end();
for (; I != E; ++I) {
- const MemRegion *capturedR = I.getCapturedRegion();
- const MemRegion *originalR = I.getOriginalRegion();
+ const VarRegion *capturedR = I.getCapturedRegion();
+ const VarRegion *originalR = I.getOriginalRegion();
+
+ // If the capture had a copy expression, use the result of evaluating
+ // that expression, otherwise use the original value.
+ // We rely on the invariant that the block declaration's capture variables
+ // are a prefix of the BlockDataRegion's referenced vars (which may include
+ // referenced globals, etc.) to enable fast lookup of the capture for a
+ // given referenced var.
+ const Expr *copyExpr = nullptr;
+ if (CI != CE) {
+ assert(CI->getVariable() == capturedR->getDecl());
+ copyExpr = CI->getCopyExpr();
+ CI++;
+ }
+
if (capturedR != originalR) {
- SVal originalV = State->getSVal(loc::MemRegionVal(originalR));
+ SVal originalV;
+ if (copyExpr) {
+ originalV = State->getSVal(copyExpr, Pred->getLocationContext());
+ } else {
+ originalV = State->getSVal(loc::MemRegionVal(originalR));
+ }
State = State->bindLoc(loc::MemRegionVal(capturedR), originalV);
}
}
}
-
+
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
Bldr.generateNode(BE, Pred,
@@ -224,12 +246,12 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
}
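The copy-expression handling above comes into play when a block captures a C++ object by copy. A sketch of the kind of code involved, assuming blocks support is enabled (illustrative, not taken from the patch):

    struct S { S(const S &); int x; };
    void test(S s) {
      // The BlockDecl's capture of 's' records a copy expression (the
      // copy construction), so the captured region is bound to the value
      // of that expression rather than to the original region's value.
      void (^b)(void) = ^{ (void)s.x; };
      b();
    }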
-void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
+void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
-
+
ExplodedNodeSet dstPreStmt;
getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
-
+
if (CastE->getCastKind() == CK_LValueToRValue) {
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I!=E; ++I) {
@@ -240,18 +262,18 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
}
return;
}
-
- // All other casts.
+
+ // All other casts.
QualType T = CastE->getType();
QualType ExTy = Ex->getType();
-
+
if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
T = ExCast->getTypeAsWritten();
-
+
StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I != E; ++I) {
-
+
Pred = *I;
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -316,8 +338,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntegralComplexToFloatingComplex:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
- case CK_AnyPointerToBlockPointerCast:
- case CK_ObjCObjectLValueCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
case CK_ZeroToOCLEvent:
case CK_LValueBitCast: {
// Delegate to SValBuilder to process.
@@ -371,7 +393,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
currBldrCtx->blockCount());
state = state->BindExpr(CastE, LCtx, NewSym);
- } else
+ } else
// Else, bind to the derived region value.
state = state->BindExpr(CastE, LCtx, val);
}
@@ -417,7 +439,7 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
const Expr *Init = CL->getInitializer();
SVal V = State->getSVal(CL->getInitializer(), LCtx);
-
+
if (isa<CXXConstructExpr>(Init)) {
// No work needed. Just pass the value up to this expression.
} else {
@@ -450,11 +472,11 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
Dst.insert(Pred);
return;
}
-
+
// FIXME: all pre/post visits should eventually be handled by ::Visit().
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
-
+
ExplodedNodeSet dstEvaluated;
StmtNodeBuilder B(dstPreVisit, dstEvaluated, *currBldrCtx);
for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
@@ -470,7 +492,10 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNode *UpdatedN = N;
SVal InitVal = state->getSVal(InitEx, LC);
- if (isa<CXXConstructExpr>(InitEx->IgnoreImplicit())) {
+ assert(DS->isSingleDecl());
+ if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
+ assert(InitEx->IgnoreImplicit() == CtorExpr);
+ (void)CtorExpr;
// We constructed the object directly in the variable.
// No need to bind anything.
B.generateNode(DS, UpdatedN, state);
@@ -485,7 +510,7 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
assert(InitVal.getAs<nonloc::LazyCompoundVal>());
}
}
-
+
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
if (InitVal.isUnknown()) {
@@ -596,7 +621,7 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
(T->isArrayType() || T->isRecordType() || T->isVectorType() ||
T->isAnyComplexType())) {
llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
-
+
// Handle base case where the initializer has no elements.
    // e.g.: static int* myArray[] = {};
if (NumInitElements == 0) {
@@ -604,13 +629,13 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
return;
}
-
+
for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
ei = IE->rend(); it != ei; ++it) {
SVal V = state->getSVal(cast<Expr>(*it), LCtx);
vals = getBasicVals().consVals(V, vals);
}
-
+
B.generateNode(IE, Pred,
state->BindExpr(IE, LCtx,
svalBuilder.makeCompoundVal(T, vals)));
@@ -632,7 +657,7 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
}
void ExprEngine::VisitGuardedExpr(const Expr *Ex,
- const Expr *L,
+ const Expr *L,
const Expr *R,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -663,9 +688,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
bool hasValue = false;
SVal V;
- for (CFGBlock::const_reverse_iterator I = SrcBlock->rbegin(),
- E = SrcBlock->rend(); I != E; ++I) {
- CFGElement CE = *I;
+ for (CFGElement CE : llvm::reverse(*SrcBlock)) {
if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
const Expr *ValEx = cast<Expr>(CS->getStmt());
ValEx = ValEx->IgnoreParens();
@@ -694,7 +717,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
}
void ExprEngine::
-VisitOffsetOfExpr(const OffsetOfExpr *OOE,
+VisitOffsetOfExpr(const OffsetOfExpr *OOE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
APSInt IV;
@@ -730,7 +753,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
if (Ex->getKind() == UETT_SizeOf) {
if (!T->isIncompleteType() && !T->isConstantSizeType()) {
assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
-
+
// FIXME: Add support for VLA type arguments and VLA expressions.
// When that happens, we should probably refactor VLASizeChecker's code.
continue;
@@ -741,10 +764,10 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
continue;
}
}
-
+
APSInt Value = Ex->EvaluateKnownConstInt(getContext());
CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
-
+
ProgramStateRef state = (*I)->getState();
state = state->BindExpr(Ex, (*I)->getLocationContext(),
svalBuilder.makeIntVal(amt.getQuantity(),
@@ -755,7 +778,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
-void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
// FIXME: Prechecks eventually go in ::Visit().
@@ -777,13 +800,13 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
}
case UO_Real: {
const Expr *Ex = U->getSubExpr()->IgnoreParens();
-
+
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
// Just report "Unknown."
break;
}
-
+
// For all other types, UO_Real is an identity operation.
assert (U->getType() == Ex->getType());
ProgramStateRef state = (*I)->getState();
@@ -792,8 +815,8 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
state->getSVal(Ex, LCtx)));
break;
}
-
- case UO_Imag: {
+
+ case UO_Imag: {
const Expr *Ex = U->getSubExpr()->IgnoreParens();
// FIXME: We don't have complex SValues yet.
if (Ex->getType()->isAnyComplexType()) {
@@ -807,7 +830,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, X));
break;
}
-
+
case UO_Plus:
assert(!U->isGLValue());
// FALL-THROUGH.
@@ -820,7 +843,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
// Unary "+" is a no-op, similar to a parentheses. We still have places
// where it may be a block-level expression, so we need to
// generate an extra node that just propagates the value of the
- // subexpression.
+ // subexpression.
const Expr *Ex = U->getSubExpr()->IgnoreParens();
ProgramStateRef state = (*I)->getState();
const LocationContext *LCtx = (*I)->getLocationContext();
@@ -828,7 +851,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
state->getSVal(Ex, LCtx)));
break;
}
-
+
case UO_LNot:
case UO_Minus:
case UO_Not: {
@@ -836,15 +859,15 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
const Expr *Ex = U->getSubExpr()->IgnoreParens();
ProgramStateRef state = (*I)->getState();
const LocationContext *LCtx = (*I)->getLocationContext();
-
+
// Get the value of the subexpression.
SVal V = state->getSVal(Ex, LCtx);
-
+
if (V.isUnknownOrUndef()) {
Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V));
break;
}
-
+
switch (U->getOpcode()) {
default:
llvm_unreachable("Invalid Opcode.");
@@ -861,7 +884,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
//
// Note: technically we do "E == 0", but this is the same in the
// transfer functions as "0 == E".
- SVal Result;
+ SVal Result;
if (Optional<Loc> LV = V.getAs<Loc>()) {
Loc X = svalBuilder.makeNull();
Result = evalBinOp(state, BO_EQ, *LV, X, U->getType());
@@ -874,8 +897,8 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
Result = evalBinOp(state, BO_EQ, V.castAs<NonLoc>(), X,
U->getType());
}
-
- state = state->BindExpr(U, LCtx, Result);
+
+ state = state->BindExpr(U, LCtx, Result);
break;
}
Bldr.generateNode(U, *I, state);
@@ -893,81 +916,81 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// Handle ++ and -- (both pre- and post-increment).
assert (U->isIncrementDecrementOp());
const Expr *Ex = U->getSubExpr()->IgnoreParens();
-
+
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef state = Pred->getState();
SVal loc = state->getSVal(Ex, LCtx);
-
+
// Perform a load.
ExplodedNodeSet Tmp;
evalLoad(Tmp, U, Ex, Pred, state, loc);
-
+
ExplodedNodeSet Dst2;
StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
-
+
state = (*I)->getState();
assert(LCtx == (*I)->getLocationContext());
SVal V2_untested = state->getSVal(Ex, LCtx);
-
+
// Propagate unknown and undefined values.
if (V2_untested.isUnknownOrUndef()) {
Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V2_untested));
continue;
}
DefinedSVal V2 = V2_untested.castAs<DefinedSVal>();
-
+
// Handle all other values.
BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add : BO_Sub;
-
+
// If the UnaryOperator has non-location type, use its type to create the
// constant value. If the UnaryOperator has location type, create the
// constant with int type and pointer width.
SVal RHS;
-
+
if (U->getType()->isAnyPointerType())
RHS = svalBuilder.makeArrayIndex(1);
else if (U->getType()->isIntegralOrEnumerationType())
RHS = svalBuilder.makeIntVal(1, U->getType());
else
RHS = UnknownVal();
-
+
SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
-
+
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown()){
DefinedOrUnknownSVal SymVal =
svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
currBldrCtx->blockCount());
Result = SymVal;
-
+
// If the value is a location, ++/-- should always preserve
// non-nullness. Check if the original value was non-null, and if so
// propagate that constraint.
if (Loc::isLocType(U->getType())) {
DefinedOrUnknownSVal Constraint =
svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
-
+
if (!state->assume(Constraint, true)) {
// It isn't feasible for the original value to be null.
// Propagate this constraint.
Constraint = svalBuilder.evalEQ(state, SymVal,
svalBuilder.makeZeroVal(U->getType()));
-
-
+
+
state = state->assume(Constraint, false);
assert(state);
}
}
}
-
+
// Since the lvalue-to-rvalue conversion is explicit in the AST,
// we bind an l-value if the operator is prefix and an lvalue (in C++).
if (U->isGLValue())
state = state->BindExpr(U, LCtx, loc);
else
state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
-
+
// Perform the store.
Bldr.takeNodes(*I);
ExplodedNodeSet Dst3;
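A small case the non-nullness propagation above is meant to handle; illustrative only, and note the conjured-symbol path is taken only when the arithmetic result cannot be modeled precisely:

    void walk(int *p) {
      if (!p) return; // 'p' is now constrained non-null
      ++p;            // if the result is a conjured symbol, the original
                      // non-null constraint is propagated to it, so the
                      // dereference below is not flagged as a null deref
      (void)*p;
    }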
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 2a766218aaeb..556e2239abfb 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -103,49 +103,32 @@ static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
}
-static const MemRegion *getRegionForConstructedObject(
- const CXXConstructExpr *CE, ExplodedNode *Pred, ExprEngine &Eng,
- unsigned int CurrStmtIdx) {
+const MemRegion *
+ExprEngine::getRegionForConstructedObject(const CXXConstructExpr *CE,
+ ExplodedNode *Pred) {
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
- const NodeBuilderContext &CurrBldrCtx = Eng.getBuilderContext();
// See if we're constructing an existing region by looking at the next
// element in the CFG.
- const CFGBlock *B = CurrBldrCtx.getBlock();
- unsigned int NextStmtIdx = CurrStmtIdx + 1;
- if (NextStmtIdx < B->size()) {
- CFGElement Next = (*B)[NextStmtIdx];
-
- // Is this a destructor? If so, we might be in the middle of an assignment
- // to a local or member: look ahead one more element to see what we find.
- while (Next.getAs<CFGImplicitDtor>() && NextStmtIdx + 1 < B->size()) {
- ++NextStmtIdx;
- Next = (*B)[NextStmtIdx];
- }
- // Is this a constructor for a local variable?
- if (Optional<CFGStmt> StmtElem = Next.getAs<CFGStmt>()) {
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
- if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
- if (Var->getInit() && Var->getInit()->IgnoreImplicit() == CE) {
- SVal LValue = State->getLValue(Var, LCtx);
- QualType Ty = Var->getType();
- LValue = makeZeroElementRegion(State, LValue, Ty);
- return LValue.getAsRegion();
- }
+ if (auto Elem = findElementDirectlyInitializedByCurrentConstructor()) {
+ if (Optional<CFGStmt> StmtElem = Elem->getAs<CFGStmt>()) {
+ auto *DS = cast<DeclStmt>(StmtElem->getStmt());
+ if (const auto *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+ if (Var->getInit() && Var->getInit()->IgnoreImplicit() == CE) {
+ SVal LValue = State->getLValue(Var, LCtx);
+ QualType Ty = Var->getType();
+ LValue = makeZeroElementRegion(State, LValue, Ty);
+ return LValue.getAsRegion();
}
}
- }
-
- // Is this a constructor for a member?
- if (Optional<CFGInitializer> InitElem = Next.getAs<CFGInitializer>()) {
+ } else if (Optional<CFGInitializer> InitElem = Elem->getAs<CFGInitializer>()) {
const CXXCtorInitializer *Init = InitElem->getInitializer();
assert(Init->isAnyMemberInitializer());
-
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
- Loc ThisPtr = Eng.getSValBuilder().getCXXThis(CurCtor,
- LCtx->getCurrentStackFrame());
+ Loc ThisPtr =
+ getSValBuilder().getCXXThis(CurCtor, LCtx->getCurrentStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
const ValueDecl *Field;
@@ -167,13 +150,86 @@ static const MemRegion *getRegionForConstructedObject(
// Don't forget to update the pre-constructor initialization code in
// ExprEngine::VisitCXXConstructExpr.
}
-
// If we couldn't find an existing region to construct into, assume we're
// constructing a temporary.
- MemRegionManager &MRMgr = Eng.getSValBuilder().getRegionManager();
+ MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
return MRMgr.getCXXTempObjectRegion(CE, LCtx);
}
+/// Returns true if the initializer for \p Elem can be a direct
+/// constructor.
+static bool canHaveDirectConstructor(CFGElement Elem) {
+ // DeclStmts and CXXCtorInitializers for fields can be directly constructed.
+
+ if (Optional<CFGStmt> StmtElem = Elem.getAs<CFGStmt>()) {
+ if (isa<DeclStmt>(StmtElem->getStmt())) {
+ return true;
+ }
+ }
+
+ if (Elem.getKind() == CFGElement::Initializer) {
+ return true;
+ }
+
+ return false;
+}
+
+Optional<CFGElement>
+ExprEngine::findElementDirectlyInitializedByCurrentConstructor() {
+ const NodeBuilderContext &CurrBldrCtx = getBuilderContext();
+ // See if we're constructing an existing region by looking at the next
+ // element in the CFG.
+ const CFGBlock *B = CurrBldrCtx.getBlock();
+ assert(isa<CXXConstructExpr>(((*B)[currStmtIdx]).castAs<CFGStmt>().getStmt()));
+ unsigned int NextStmtIdx = currStmtIdx + 1;
+ if (NextStmtIdx >= B->size())
+ return None;
+
+ CFGElement Next = (*B)[NextStmtIdx];
+
+ // Is this a destructor? If so, we might be in the middle of an assignment
+ // to a local or member: look ahead one more element to see what we find.
+ while (Next.getAs<CFGImplicitDtor>() && NextStmtIdx + 1 < B->size()) {
+ ++NextStmtIdx;
+ Next = (*B)[NextStmtIdx];
+ }
+
+ if (canHaveDirectConstructor(Next))
+ return Next;
+
+ return None;
+}
+
+const CXXConstructExpr *
+ExprEngine::findDirectConstructorForCurrentCFGElement() {
+ // Go backward in the CFG to see if the previous element (ignoring
+  // destructors) was a CXXConstructExpr. If so, that constructor
+  // constructed its object directly into an existing region.
+ // This process is essentially the inverse of that performed in
+ // findElementDirectlyInitializedByCurrentConstructor().
+ if (currStmtIdx == 0)
+ return nullptr;
+
+ const CFGBlock *B = getBuilderContext().getBlock();
+ assert(canHaveDirectConstructor((*B)[currStmtIdx]));
+
+ unsigned int PreviousStmtIdx = currStmtIdx - 1;
+ CFGElement Previous = (*B)[PreviousStmtIdx];
+
+ while (Previous.getAs<CFGImplicitDtor>() && PreviousStmtIdx > 0) {
+ --PreviousStmtIdx;
+ Previous = (*B)[PreviousStmtIdx];
+ }
+
+ if (Optional<CFGStmt> PrevStmtElem = Previous.getAs<CFGStmt>()) {
+ if (auto *CtorExpr = dyn_cast<CXXConstructExpr>(PrevStmtElem->getStmt())) {
+ return CtorExpr;
+ }
+ }
+
+ return nullptr;
+}
+
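The two helpers are inverses over the same CFG layout. For a simple local declaration the element order is roughly the following (a sketch; exact CFG dumps vary between Clang versions):

    // void f() { Foo x = Foo(1); }
    //
    // CFG block elements (simplified):
    //   [B1.1] Foo(1)           <- CXXConstructExpr
    //   [B1.2] Foo x = Foo(1);  <- DeclStmt
    //
    // While visiting B1.1, findElementDirectlyInitializedByCurrentConstructor()
    // looks ahead to B1.2; while visiting B1.2 (see VisitDeclStmt above),
    // findDirectConstructorForCurrentCFGElement() looks back to B1.1.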
void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
@@ -188,7 +244,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
switch (CE->getConstructionKind()) {
case CXXConstructExpr::CK_Complete: {
- Target = getRegionForConstructedObject(CE, Pred, *this, currStmtIdx);
+ Target = getRegionForConstructedObject(CE, Pred);
break;
}
case CXXConstructExpr::CK_VirtualBase:
@@ -300,7 +356,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
const MemRegion *Dest,
const Stmt *S,
bool IsBaseDtor,
- ExplodedNode *Pred,
+ ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
@@ -373,7 +429,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// Also, we need to decide how allocators actually work -- they're not
// really part of the CXXNewExpr because they happen BEFORE the
// CXXConstructExpr subexpression. See PR12014 for some discussion.
-
+
unsigned blockCount = currBldrCtx->blockCount();
const LocationContext *LCtx = Pred->getLocationContext();
DefinedOrUnknownSVal symVal = UnknownVal();
@@ -392,8 +448,8 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
}
- // We assume all standard global 'operator new' functions allocate memory in
- // heap. We realize this is an approximation that might not correctly model
+ // We assume all standard global 'operator new' functions allocate memory in
+ // heap. We realize this is an approximation that might not correctly model
// a custom global allocator.
if (IsStandardGlobalOpNewFunction)
symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
@@ -472,7 +528,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
}
}
-void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
+void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef state = Pred->getState();
@@ -513,3 +569,55 @@ void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
SVal V = state->getSVal(loc::MemRegionVal(R));
Bldr.generateNode(TE, Pred, state->BindExpr(TE, LCtx, V));
}
+
+void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const LocationContext *LocCtxt = Pred->getLocationContext();
+
+ // Get the region of the lambda itself.
+ const MemRegion *R = svalBuilder.getRegionManager().getCXXTempObjectRegion(
+ LE, LocCtxt);
+ SVal V = loc::MemRegionVal(R);
+
+ ProgramStateRef State = Pred->getState();
+
+ // If we created a new MemRegion for the lambda, we should explicitly bind
+ // the captures.
+ CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
+ for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
+ e = LE->capture_init_end();
+ i != e; ++i, ++CurField) {
+ FieldDecl *FieldForCapture = *CurField;
+ SVal FieldLoc = State->getLValue(FieldForCapture, V);
+
+ SVal InitVal;
+ if (!FieldForCapture->hasCapturedVLAType()) {
+ Expr *InitExpr = *i;
+ assert(InitExpr && "Capture missing initialization expression");
+ InitVal = State->getSVal(InitExpr, LocCtxt);
+ } else {
+ // The field stores the length of a captured variable-length array.
+ // These captures don't have initialization expressions; instead we
+ // get the length from the VLAType size expression.
+ Expr *SizeExpr = FieldForCapture->getCapturedVLAType()->getSizeExpr();
+ InitVal = State->getSVal(SizeExpr, LocCtxt);
+ }
+
+ State = State->bindLoc(FieldLoc, InitVal);
+ }
+
+ // Decay the Loc into an RValue, because there might be a
+ // MaterializeTemporaryExpr node above this one which expects the bound value
+ // to be an RValue.
+ SVal LambdaRVal = State->getSVal(R);
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+ // FIXME: is this the right program point kind?
+ Bldr.generateNode(LE, Pred,
+ State->BindExpr(LE, LocCtxt, LambdaRVal),
+ nullptr, ProgramPoint::PostLValueKind);
+
+ // FIXME: Move all post/pre visits to ::Visit().
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, LE, *this);
+}
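A sketch of the code VisitLambdaExpr models: the closure class gets one field per capture, plus a field holding the bound of any captured variable-length array. Illustrative, and the VLA capture assumes Clang's VLA extension:

    void f(int n) {
      int x = 1;
      int vla[n];
      // The field for 'x' is bound from its capture initializer; the
      // field recording vla's length has a captured VLA type, so its
      // value is taken from the VLAType's size expression ('n').
      auto l = [x, &vla] { return x + vla[0]; };
      (void)l;
    }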
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 3f608ba79ebc..74cc8d2ccbc5 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -44,19 +44,19 @@ void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
const CFG *CalleeCFG = calleeCtx->getCFG();
const CFGBlock *Entry = &(CalleeCFG->getEntry());
-
+
// Validate the CFG.
assert(Entry->empty());
assert(Entry->succ_size() == 1);
-
+
// Get the solitary successor.
const CFGBlock *Succ = *(Entry->succ_begin());
-
+
// Construct an edge representing the starting location in the callee.
BlockEdge Loc(Entry, Succ, calleeCtx);
ProgramStateRef state = Pred->getState();
-
+
// Construct a new node and add it to the worklist.
bool isNew;
ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
@@ -207,8 +207,8 @@ static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
return isa<CXXTempObjectRegion>(MR);
}
-/// The call exit is simulated with a sequence of nodes, which occur between
-/// CallExitBegin and CallExitEnd. The following operations occur between the
+/// The call exit is simulated with a sequence of nodes, which occur between
+/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
@@ -220,12 +220,12 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
const StackFrameContext *calleeCtx =
CEBNode->getLocationContext()->getCurrentStackFrame();
-
+
// The parent context might not be a stack frame, so make sure we
// look up the first enclosing stack frame.
const StackFrameContext *callerCtx =
calleeCtx->getParent()->getCurrentStackFrame();
-
+
const Stmt *CE = calleeCtx->getCallSite();
ProgramStateRef state = CEBNode->getState();
// Find the last statement in the function and the corresponding basic block.
@@ -421,7 +421,8 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
const LocationContext *CurLC = Pred->getLocationContext();
const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
const LocationContext *ParentOfCallee = CallerSFC;
- if (Call.getKind() == CE_Block) {
+ if (Call.getKind() == CE_Block &&
+ !cast<BlockCall>(Call).isConversionFromLambda()) {
const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
assert(BR && "If we have the block definition we should have its region");
AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
@@ -429,7 +430,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
cast<BlockDecl>(D),
BR);
}
-
+
// This may be NULL, but that's fine.
const Expr *CallE = Call.getOriginExpr();
@@ -439,8 +440,8 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
CalleeADC->getStackFrame(ParentOfCallee, CallE,
currBldrCtx->getBlock(),
currStmtIdx);
-
-
+
+
CallEnter Loc(CallE, CalleeSFC, CurLC);
// Construct a new state which contains the mapping from actual to
@@ -690,9 +691,11 @@ static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
return true;
CXXBasePaths Paths(false, false, false);
- if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
- DeclName.getAsOpaquePtr(),
- Paths))
+ if (RD->lookupInBases(
+ [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
+ },
+ Paths))
return true;
return false;
@@ -767,7 +770,7 @@ static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
if (isContainerMethod(Ctx, FD))
return false;
-
+
// Conditionally control the inlining of the destructor of C++ shared_ptr.
// We don't currently do a good job modeling shared_ptr because we can't
// see the reference count, so treating as opaque is probably the best
@@ -868,7 +871,8 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
// Do not inline large functions too many times.
if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
Opts.getMaxTimesInlineLarge()) &&
- CalleeCFG->getNumBlockIDs() > 13) {
+ CalleeCFG->getNumBlockIDs() >=
+ Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
NumReachedInlineCountMax++;
return false;
}
@@ -990,12 +994,12 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
+
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
-
+
if (RS->getRetValue()) {
for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
ei = dstPreVisit.end(); it != ei; ++it) {
diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index a6611e050dc9..92c5fe6b6f1a 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -19,18 +19,18 @@
using namespace clang;
using namespace ento;
-void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
+void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
SVal baseVal = state->getSVal(Ex->getBase(), LCtx);
SVal location = state->getLValue(Ex->getDecl(), baseVal);
-
+
ExplodedNodeSet dstIvar;
StmtNodeBuilder Bldr(Pred, dstIvar, *currBldrCtx);
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
-
+
// Perform the post-condition check of the ObjCIvarRefExpr and store
// the created nodes in 'Dst'.
getCheckerManager().runCheckersForPostStmt(Dst, dstIvar, Ex, *this);
@@ -45,7 +45,7 @@ void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
+
// ObjCForCollectionStmts are processed in two places. This method
// handles the case where an ObjCForCollectionStmt* occurs as one of the
// statements within a basic block. This transfer function does two things:
@@ -74,7 +74,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
const Stmt *elem = S->getElement();
ProgramStateRef state = Pred->getState();
SVal elementV;
-
+
if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
assert(elemD->getInit() == nullptr);
@@ -83,7 +83,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
else {
elementV = state->getSVal(elem, Pred->getLocationContext());
}
-
+
ExplodedNodeSet dstLocation;
evalLocation(dstLocation, S, elem, Pred, state, elementV, nullptr, false);
@@ -95,17 +95,17 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
Pred = *NI;
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
-
+
// Handle the case where the container still has elements.
SVal TrueV = svalBuilder.makeTruthVal(1);
ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
-
+
// Handle the case where the container has no elements.
SVal FalseV = svalBuilder.makeTruthVal(0);
ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
if (Optional<loc::MemRegionVal> MV = elementV.getAs<loc::MemRegionVal>())
- if (const TypedValueRegion *R =
+ if (const TypedValueRegion *R =
dyn_cast<TypedValueRegion>(MV->getRegion())) {
// FIXME: The proper thing to do is to really iterate over the
// container. We will do this with dispatch logic to the store.
@@ -116,12 +116,12 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
currBldrCtx->blockCount());
SVal V = svalBuilder.makeLoc(Sym);
hasElems = hasElems->bindLoc(elementV, V);
-
+
// Bind the location to 'nil' on the false branch.
SVal nilV = svalBuilder.makeIntVal(0, T);
noElems = noElems->bindLoc(elementV, nilV);
}
-
+
// Create the new nodes.
Bldr.generateNode(S, Pred, hasElems);
Bldr.generateNode(S, Pred, noElems);
@@ -139,6 +139,76 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
CallEventRef<ObjCMethodCall> Msg =
CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+ // There are three cases for the receiver:
+ // (1) it is definitely nil,
+ // (2) it is definitely non-nil, and
+ // (3) we don't know.
+ //
+ // If the receiver is definitely nil, we skip the pre/post callbacks and
+ // instead call the ObjCMessageNil callbacks and return.
+ //
+ // If the receiver is definitely non-nil, we call the pre- callbacks,
+ // evaluate the call, and call the post- callbacks.
+ //
+ // If we don't know, we drop the potential nil flow and instead
+ // continue from the assumed non-nil state as in (2). This approach
+ // intentionally drops coverage in order to prevent false alarms
+ // in the following scenario:
+ //
+  //   id result = [o someMethod];
+ // if (result) {
+ // if (!o) {
+ // // <-- This program point should be unreachable because if o is nil
+  //       // it must be the case that result is nil as well.
+ // }
+ // }
+ //
+ // We could avoid dropping coverage by performing an explicit case split
+ // on each method call -- but this would get very expensive. An alternative
+ // would be to introduce lazy constraints.
+ // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
+ // Revisit once we have lazier constraints.
+ if (Msg->isInstanceMessage()) {
+ SVal recVal = Msg->getReceiverSVal();
+ if (!recVal.isUndef()) {
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal =
+ recVal.castAs<DefinedOrUnknownSVal>();
+ ProgramStateRef State = Pred->getState();
+
+ ProgramStateRef notNilState, nilState;
+ std::tie(notNilState, nilState) = State->assume(receiverVal);
+
+ // Receiver is definitely nil, so run ObjCMessageNil callbacks and return.
+ if (nilState && !notNilState) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ bool HasTag = Pred->getLocation().getTag();
+ Pred = Bldr.generateNode(ME, Pred, nilState, nullptr,
+ ProgramPoint::PreStmtKind);
+ assert((Pred || HasTag) && "Should have cached out already!");
+ (void)HasTag;
+ if (!Pred)
+ return;
+ getCheckerManager().runCheckersForObjCMessageNil(Dst, Pred,
+ *Msg, *this);
+ return;
+ }
+
+ ExplodedNodeSet dstNonNil;
+ StmtNodeBuilder Bldr(Pred, dstNonNil, *currBldrCtx);
+ // Generate a transition to the non-nil state, dropping any potential
+ // nil flow.
+ if (notNilState != State) {
+ bool HasTag = Pred->getLocation().getTag();
+ Pred = Bldr.generateNode(ME, Pred, notNilState);
+ assert((Pred || HasTag) && "Should have cached out already!");
+ (void)HasTag;
+ if (!Pred)
+ return;
+ }
+ }
+ }
+
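Objective-C guarantees that a message to nil evaluates to a zero value, which is why the definitely-nil case gets its own callback rather than the usual pre/post call pair. A minimal example of code taking that path (illustrative):

    NSString *name = nil;
    NSUInteger len = [name length]; // receiver is provably nil: checkers
                                    // see the ObjCMessageNil event and
                                    // 'len' is modeled as 0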
// Handle the previsits checks.
ExplodedNodeSet dstPrevisit;
getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
@@ -156,39 +226,16 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNode *Pred = *DI;
ProgramStateRef State = Pred->getState();
CallEventRef<ObjCMethodCall> UpdatedMsg = Msg.cloneWithState(State);
-
+
if (UpdatedMsg->isInstanceMessage()) {
SVal recVal = UpdatedMsg->getReceiverSVal();
if (!recVal.isUndef()) {
- // Bifurcate the state into nil and non-nil ones.
- DefinedOrUnknownSVal receiverVal =
- recVal.castAs<DefinedOrUnknownSVal>();
-
- ProgramStateRef notNilState, nilState;
- std::tie(notNilState, nilState) = State->assume(receiverVal);
-
- // There are three cases: can be nil or non-nil, must be nil, must be
- // non-nil. We ignore must be nil, and merge the rest two into non-nil.
- // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
- // Revisit once we have lazier constraints.
- if (nilState && !notNilState) {
- continue;
- }
-
- // Check if the "raise" message was sent.
- assert(notNilState);
if (ObjCNoRet.isImplicitNoReturn(ME)) {
// If we raise an exception, for now treat it as a sink.
// Eventually we will want to handle exceptions properly.
Bldr.generateSink(ME, Pred, State);
continue;
}
-
- // Generate a transition to non-Nil state.
- if (notNilState != State) {
- Pred = Bldr.generateNode(ME, Pred, notNilState);
- assert(Pred && "Should have cached out already!");
- }
}
} else {
// Check for special class methods that are known to not return
@@ -203,7 +250,7 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
defaultEvalCall(Bldr, Pred, *UpdatedMsg);
}
-
+
ExplodedNodeSet dstPostvisit;
getCheckerManager().runCheckersForPostCall(dstPostvisit, dstEval,
*Msg, *this);
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index cfcf7c6a990b..b3edb8569bd6 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/FileManager.h"
@@ -22,6 +21,8 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -122,11 +123,11 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
  // The path has already been prechecked: all parts of the path are
  // from the same file, and it is non-empty.
- const SourceManager &SMgr = (*path.begin())->getLocation().getManager();
+ const SourceManager &SMgr = path.front()->getLocation().getManager();
assert(!path.empty());
FileID FID =
- (*path.begin())->getLocation().asLocation().getExpansionLoc().getFileID();
- assert(!FID.isInvalid());
+ path.front()->getLocation().asLocation().getExpansionLoc().getFileID();
+ assert(FID.isValid());
// Create a new rewriter to generate HTML.
Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
@@ -143,7 +144,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
// Retrieve the relative position of the declaration which will be used
// for the file name
FullSourceLoc L(
- SMgr.getExpansionLoc((*path.rbegin())->getLocation().asLocation()),
+ SMgr.getExpansionLoc(path.back()->getLocation().asLocation()),
SMgr);
FullSourceLoc FunL(SMgr.getExpansionLoc(Body->getLocStart()), SMgr);
offsetDecl = L.getExpansionLineNumber() - FunL.getExpansionLineNumber();
@@ -187,8 +188,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
DirName += '/';
}
- int LineNumber = (*path.rbegin())->getLocation().asLocation().getExpansionLineNumber();
- int ColumnNumber = (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber();
+ int LineNumber = path.back()->getLocation().asLocation().getExpansionLineNumber();
+ int ColumnNumber = path.back()->getLocation().asLocation().getExpansionColumnNumber();
// Add the name of the file as an <h1> tag.
@@ -236,6 +237,13 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
if (!BugType.empty())
os << "\n<!-- BUGTYPE " << BugType << " -->\n";
+ PathDiagnosticLocation UPDLoc = D.getUniqueingLoc();
+ FullSourceLoc L(SMgr.getExpansionLoc(UPDLoc.isValid()
+ ? UPDLoc.asLocation()
+ : D.getLocation().asLocation()),
+ SMgr);
+ const Decl *DeclWithIssue = D.getDeclWithIssue();
+
StringRef BugCategory = D.getCategory();
if (!BugCategory.empty())
os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
@@ -246,6 +254,10 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
os << "\n<!-- FUNCTIONNAME " << declName << " -->\n";
+ os << "\n<!-- ISSUEHASHCONTENTOFLINEINCONTEXT "
+ << GetIssueHash(SMgr, L, D.getCheckName(), D.getBugType(), DeclWithIssue,
+ PP.getLangOpts()) << " -->\n";
+
os << "\n<!-- BUGLINE "
<< LineNumber
<< " -->\n";
@@ -281,7 +293,12 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
if (!AnalyzerOpts.shouldWriteStableReportFilename()) {
llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
-
+ if (std::error_code EC =
+ llvm::sys::fs::make_absolute(Model)) {
+ llvm::errs() << "warning: could not make '" << Model
+ << "' absolute: " << EC.message() << '\n';
+ return;
+ }
if (std::error_code EC =
llvm::sys::fs::createUniqueFile(Model, FD, ResultPath)) {
llvm::errs() << "warning: could not create file in '" << Directory
diff --git a/lib/StaticAnalyzer/Core/IssueHash.cpp b/lib/StaticAnalyzer/Core/IssueHash.cpp
new file mode 100644
index 000000000000..0a3af3dcc7e9
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/IssueHash.cpp
@@ -0,0 +1,196 @@
+//===---------- IssueHash.cpp - Generate identification hashes --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/Path.h"
+
+#include <functional>
+#include <sstream>
+#include <string>
+
+using namespace clang;
+
+// Get a string representation of the parts of the signature that can be
+// overloaded on.
+static std::string GetSignature(const FunctionDecl *Target) {
+ if (!Target)
+ return "";
+ std::string Signature;
+
+ if (!isa<CXXConstructorDecl>(Target) && !isa<CXXDestructorDecl>(Target) &&
+ !isa<CXXConversionDecl>(Target))
+ Signature.append(Target->getReturnType().getAsString()).append(" ");
+ Signature.append(Target->getQualifiedNameAsString()).append("(");
+
+ for (int i = 0, paramsCount = Target->getNumParams(); i < paramsCount; ++i) {
+ if (i)
+ Signature.append(", ");
+ Signature.append(Target->getParamDecl(i)->getType().getAsString());
+ }
+
+ if (Target->isVariadic())
+ Signature.append(", ...");
+ Signature.append(")");
+
+ const auto *TargetT =
+ llvm::dyn_cast_or_null<FunctionType>(Target->getType().getTypePtr());
+
+ if (!TargetT || !isa<CXXMethodDecl>(Target))
+ return Signature;
+
+ if (TargetT->isConst())
+ Signature.append(" const");
+ if (TargetT->isVolatile())
+ Signature.append(" volatile");
+ if (TargetT->isRestrict())
+ Signature.append(" restrict");
+
+ if (const auto *TargetPT =
+ dyn_cast_or_null<FunctionProtoType>(Target->getType().getTypePtr())) {
+ switch (TargetPT->getRefQualifier()) {
+ case RQ_LValue:
+ Signature.append(" &");
+ break;
+ case RQ_RValue:
+ Signature.append(" &&");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return Signature;
+}
+
+static std::string GetEnclosingDeclContextSignature(const Decl *D) {
+ if (!D)
+ return "";
+
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ std::string DeclName;
+
+ switch (ND->getKind()) {
+ case Decl::Namespace:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::Enum:
+ DeclName = ND->getQualifiedNameAsString();
+ break;
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ DeclName = GetSignature(dyn_cast_or_null<FunctionDecl>(ND));
+ break;
+ case Decl::ObjCMethod:
+      // ObjC methods cannot be overloaded; the qualified name uniquely
+      // identifies the method.
+ DeclName = ND->getQualifiedNameAsString();
+ break;
+ default:
+ break;
+ }
+
+ return DeclName;
+ }
+
+ return "";
+}
+
+static StringRef GetNthLineOfFile(llvm::MemoryBuffer *Buffer, int Line) {
+ if (!Buffer)
+ return "";
+
+ llvm::line_iterator LI(*Buffer, false);
+ for (; !LI.is_at_eof() && LI.line_number() != Line; ++LI)
+ ;
+
+ return *LI;
+}
+
+static std::string NormalizeLine(const SourceManager &SM, FullSourceLoc &L,
+ const LangOptions &LangOpts) {
+ static StringRef Whitespaces = " \t\n";
+
+ StringRef Str = GetNthLineOfFile(SM.getBuffer(L.getFileID(), L),
+ L.getExpansionLineNumber());
+ unsigned col = Str.find_first_not_of(Whitespaces);
+ col++;
+ SourceLocation StartOfLine =
+ SM.translateLineCol(SM.getFileID(L), L.getExpansionLineNumber(), col);
+ llvm::MemoryBuffer *Buffer =
+ SM.getBuffer(SM.getFileID(StartOfLine), StartOfLine);
+ if (!Buffer)
+ return {};
+
+ const char *BufferPos = SM.getCharacterData(StartOfLine);
+
+ Token Token;
+ Lexer Lexer(SM.getLocForStartOfFile(SM.getFileID(StartOfLine)), LangOpts,
+ Buffer->getBufferStart(), BufferPos, Buffer->getBufferEnd());
+
+ size_t NextStart = 0;
+ std::ostringstream LineBuff;
+ while (!Lexer.LexFromRawLexer(Token) && NextStart < 2) {
+ if (Token.isAtStartOfLine() && NextStart++ > 0)
+ continue;
+ LineBuff << std::string(SM.getCharacterData(Token.getLocation()),
+ Token.getLength());
+ }
+
+ return LineBuff.str();
+}
+
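The normalization re-lexes the line starting at its first non-whitespace column and concatenates the raw token spellings, so the resulting hash ignores indentation and internal spacing. Roughly, assuming the lexing proceeds as written above:

    // input line:  "    return  x +  1 ;"
    // normalized:  "returnx+1;"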
+static llvm::SmallString<32> GetHashOfContent(StringRef Content) {
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result MD5Res;
+ SmallString<32> Res;
+
+ Hash.update(Content);
+ Hash.final(MD5Res);
+ llvm::MD5::stringifyResult(MD5Res, Res);
+
+ return Res;
+}
+
+std::string clang::GetIssueString(const SourceManager &SM,
+ FullSourceLoc &IssueLoc,
+ StringRef CheckerName, StringRef BugType,
+ const Decl *D,
+ const LangOptions &LangOpts) {
+ static StringRef Delimiter = "$";
+
+ return (llvm::Twine(CheckerName) + Delimiter +
+ GetEnclosingDeclContextSignature(D) + Delimiter +
+ llvm::utostr(IssueLoc.getExpansionColumnNumber()) + Delimiter +
+ NormalizeLine(SM, IssueLoc, LangOpts) + Delimiter + BugType)
+ .str();
+}
+
+SmallString<32> clang::GetIssueHash(const SourceManager &SM,
+ FullSourceLoc &IssueLoc,
+ StringRef CheckerName, StringRef BugType,
+ const Decl *D,
+ const LangOptions &LangOpts) {
+
+ return GetHashOfContent(
+ GetIssueString(SM, IssueLoc, CheckerName, BugType, D, LangOpts));
+}
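GetIssueString builds a '$'-delimited record that GetIssueHash then reduces to an MD5 digest. A hypothetical instance of the format; every field value below is made up for illustration:

    // <checker name>$<enclosing decl signature>$<column>$<normalized line>$<bug type>
    //
    // core.NullDereference$int f(int *)$10$return*p;$Dereference of null pointer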
diff --git a/lib/StaticAnalyzer/Core/LoopWidening.cpp b/lib/StaticAnalyzer/Core/LoopWidening.cpp
new file mode 100644
index 000000000000..05865c294cb7
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -0,0 +1,68 @@
+//===--- LoopWidening.cpp - Widen loops -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains functions which are used to widen loops. A loop may be
+/// widened to approximate the exit state(s), without analyzing every
+/// iteration. The widening is done by invalidating anything which might be
+/// modified by the body of the loop.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
+
+using namespace clang;
+using namespace ento;
+
+/// Return the loop's condition Stmt, or null if LoopStmt is not a loop.
+static const Expr *getLoopCondition(const Stmt *LoopStmt) {
+ switch (LoopStmt->getStmtClass()) {
+ default:
+ return nullptr;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(LoopStmt)->getCond();
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(LoopStmt)->getCond();
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(LoopStmt)->getCond();
+ }
+}
+
+namespace clang {
+namespace ento {
+
+ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
+ const LocationContext *LCtx,
+ unsigned BlockCount, const Stmt *LoopStmt) {
+
+ assert(isa<ForStmt>(LoopStmt) || isa<WhileStmt>(LoopStmt) ||
+ isa<DoStmt>(LoopStmt));
+
+ // Invalidate values in the current state.
+ // TODO Make this more conservative by only invalidating values that might
+ // be modified by the body of the loop.
+ // TODO Nested loops are currently widened as a result of the invalidation
+  //      being so imprecise. When the invalidation is improved, the handling
+ // of nested loops will also need to be improved.
+ const StackFrameContext *STC = LCtx->getCurrentStackFrame();
+ MemRegionManager &MRMgr = PrevState->getStateManager().getRegionManager();
+ const MemRegion *Regions[] = {MRMgr.getStackLocalsRegion(STC),
+ MRMgr.getStackArgumentsRegion(STC),
+ MRMgr.getGlobalsRegion()};
+ RegionAndSymbolInvalidationTraits ITraits;
+ for (auto *Region : Regions) {
+ ITraits.setTrait(Region,
+ RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
+ }
+ return PrevState->invalidateRegions(Regions, getLoopCondition(LoopStmt),
+ BlockCount, LCtx, true, nullptr, nullptr,
+ &ITraits);
+}
+
+} // end namespace ento
+} // end namespace clang
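A sketch of the effect on analyzed code: once the analyzer stops unrolling a loop, widening conservatively forgets everything in the invalidated memory spaces so the exit path can still be explored (illustrative):

    void g(int n) {
      int i = 0, sum = 0;
      while (i < n) { // when the unrolling budget runs out, the state is
        sum += i;     // widened: stack locals ('i', 'sum'), stack
        ++i;          // arguments ('n'), and globals become unknown,
      }               // and the loop-exit branch becomes feasible
      // 'i' and 'sum' are unconstrained here after widening
    }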
diff --git a/lib/StaticAnalyzer/Core/Makefile b/lib/StaticAnalyzer/Core/Makefile
index 4aebc163dddc..c3e00fa36825 100644
--- a/lib/StaticAnalyzer/Core/Makefile
+++ b/lib/StaticAnalyzer/Core/Makefile
@@ -1,13 +1,13 @@
##===- clang/lib/StaticAnalyzer/Core/Makefile --------------*- Makefile -*-===##
-#
+#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
-#
+#
##===----------------------------------------------------------------------===##
#
-# This implements analyses built on top of source-level CFGs.
+# This implements analyses built on top of source-level CFGs.
#
##===----------------------------------------------------------------------===##
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index 5ac845825c8d..ad3f396e39a1 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -756,7 +756,7 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
return cast<VarRegion>(I.getCapturedRegion());
}
}
-
+
LC = LC->getParent();
}
return (const StackFrameContext *)nullptr;
@@ -788,18 +788,18 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
else
sReg = getGlobalsRegion();
}
-
- // Finally handle static locals.
+
+ // Finally handle static locals.
} else {
// FIXME: Once we implement scope handling, we will need to properly lookup
// 'D' to the proper LocationContext.
const DeclContext *DC = D->getDeclContext();
llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
getStackOrCaptureRegionForDeclContext(LC, DC, D);
-
+
if (V.is<const VarRegion*>())
return V.get<const VarRegion*>();
-
+
const StackFrameContext *STC = V.get<const StackFrameContext*>();
if (!STC)
@@ -1013,10 +1013,22 @@ MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
const CXXThisRegion*
MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
const LocationContext *LC) {
- const StackFrameContext *STC = LC->getCurrentStackFrame();
- assert(STC);
const PointerType *PT = thisPointerTy->getAs<PointerType>();
assert(PT);
+  // Inside the body of a lambda's operator(), a 'this' expression might
+  // refer to an object in one of the parent location contexts.
+ const auto *D = dyn_cast<CXXMethodDecl>(LC->getDecl());
+  // FIXME: when a lambda's operator() is analyzed as a top-level function and
+  // 'this' refers to the 'this' of the enclosing scope, there is no right
+  // region to return.
+ while (!LC->inTopFrame() &&
+ (!D || D->isStatic() ||
+ PT != D->getThisType(getContext())->getAs<PointerType>())) {
+ LC = LC->getParent();
+ D = dyn_cast<CXXMethodDecl>(LC->getDecl());
+ }
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
}
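The parent-context walk above is aimed at 'this' expressions inside a lambda body. A case it covers (illustrative):

    struct A {
      int m;
      void run() {
        // Inside the lambda's operator(), the enclosing decl is the
        // closure's call operator, whose 'this' type is not A*; the
        // loop climbs to run()'s frame to find the matching region.
        auto l = [this] { return m + 1; };
        (void)l();
      }
    };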
@@ -1165,6 +1177,7 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
/// Returns true if \p Base is an immediate base class of \p Child
static bool isImmediateBase(const CXXRecordDecl *Child,
const CXXRecordDecl *Base) {
+ assert(Child && "Child must not be null");
// Note that we do NOT canonicalize the base class here, because
// ASTRecordLayout doesn't either. If that leads us down the wrong path,
// so be it; at least we won't crash.
@@ -1239,23 +1252,23 @@ RegionOffset MemRegion::getAsOffset() const {
Ty = SR->getSymbol()->getType()->getPointeeType();
RootIsSymbolic = true;
}
-
+
const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
if (!Child) {
// We cannot compute the offset of the base class.
SymbolicOffsetBase = R;
- }
-
- if (RootIsSymbolic) {
- // Base layers on symbolic regions may not be type-correct.
- // Double-check the inheritance here, and revert to a symbolic offset
- // if it's invalid (e.g. due to a reinterpret_cast).
- if (BOR->isVirtual()) {
- if (!Child->isVirtuallyDerivedFrom(BOR->getDecl()))
- SymbolicOffsetBase = R;
- } else {
- if (!isImmediateBase(Child, BOR->getDecl()))
- SymbolicOffsetBase = R;
+ } else {
+ if (RootIsSymbolic) {
+ // Base layers on symbolic regions may not be type-correct.
+ // Double-check the inheritance here, and revert to a symbolic offset
+ // if it's invalid (e.g. due to a reinterpret_cast).
+ if (BOR->isVirtual()) {
+ if (!Child->isVirtuallyDerivedFrom(BOR->getDecl()))
+ SymbolicOffsetBase = R;
+ } else {
+ if (!isImmediateBase(Child, BOR->getDecl()))
+ SymbolicOffsetBase = R;
+ }
}
}
@@ -1290,7 +1303,7 @@ RegionOffset MemRegion::getAsOffset() const {
if (Optional<nonloc::ConcreteInt> CI =
Index.getAs<nonloc::ConcreteInt>()) {
// Don't bother calculating precise offsets if we already have a
- // symbolic offset somewhere in the chain.
+ // symbolic offset somewhere in the chain.
if (SymbolicOffsetBase)
continue;
@@ -1324,7 +1337,7 @@ RegionOffset MemRegion::getAsOffset() const {
// Get the field number.
unsigned idx = 0;
- for (RecordDecl::field_iterator FI = RD->field_begin(),
+ for (RecordDecl::field_iterator FI = RD->field_begin(),
FE = RD->field_end(); FI != FE; ++FI, ++idx)
if (FR->getDecl() == *FI)
break;
@@ -1420,7 +1433,7 @@ BlockDataRegion::referenced_vars_begin() const {
BumpVector<const MemRegion*> *VecOriginal =
static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
-
+
return BlockDataRegion::referenced_vars_iterator(Vec->begin(),
VecOriginal->begin());
}
@@ -1456,12 +1469,12 @@ const VarRegion *BlockDataRegion::getOriginalRegion(const VarRegion *R) const {
// RegionAndSymbolInvalidationTraits
//===----------------------------------------------------------------------===//
-void RegionAndSymbolInvalidationTraits::setTrait(SymbolRef Sym,
+void RegionAndSymbolInvalidationTraits::setTrait(SymbolRef Sym,
InvalidationKinds IK) {
SymTraitsMap[Sym] |= IK;
}
-void RegionAndSymbolInvalidationTraits::setTrait(const MemRegion *MR,
+void RegionAndSymbolInvalidationTraits::setTrait(const MemRegion *MR,
InvalidationKinds IK) {
assert(MR);
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
@@ -1470,13 +1483,13 @@ void RegionAndSymbolInvalidationTraits::setTrait(const MemRegion *MR,
MRTraitsMap[MR] |= IK;
}
-bool RegionAndSymbolInvalidationTraits::hasTrait(SymbolRef Sym,
+bool RegionAndSymbolInvalidationTraits::hasTrait(SymbolRef Sym,
InvalidationKinds IK) {
const_symbol_iterator I = SymTraitsMap.find(Sym);
if (I != SymTraitsMap.end())
return I->second & IK;
- return false;
+ return false;
}
bool RegionAndSymbolInvalidationTraits::hasTrait(const MemRegion *MR,
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index c4900313cad4..504df30de834 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -62,8 +62,6 @@ PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
-PathPieces::~PathPieces() {}
-
void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
bool ShouldFlattenMacros) const {
for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
@@ -181,7 +179,7 @@ void PathDiagnostic::resetDiagnosticLocationToMainFile() {
// Reset the report containing declaration and location.
DeclWithIssue = CP->getCaller();
Loc = CP->getLocation();
-
+
return;
}
}
@@ -201,7 +199,7 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
std::unique_ptr<PathDiagnostic> D) {
if (!D || D->path.empty())
return;
-
+
// We need to flatten the locations (convert Stmt* to locations) because
// the referenced statements may be freed by the time the diagnostics
// are emitted.
@@ -223,12 +221,12 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
++I) {
const PathDiagnosticPiece *piece = I->get();
FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
-
+
if (FID.isInvalid()) {
FID = SMgr.getFileID(L);
} else if (SMgr.getFileID(L) != FID)
return; // FIXME: Emit a warning?
-
+
// Check the source ranges.
ArrayRef<SourceRange> Ranges = piece->getRanges();
for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
@@ -240,7 +238,7 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
if (!L.isFileID() || SMgr.getFileID(L) != FID)
return; // FIXME: Emit a warning?
}
-
+
if (const PathDiagnosticCallPiece *call =
dyn_cast<PathDiagnosticCallPiece>(piece)) {
WorkList.push_back(&call->path);
@@ -251,10 +249,10 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
}
}
}
-
+
if (FID.isInvalid())
return; // FIXME: Emit a warning?
- }
+ }
// Profile the node to see if we already have something matching it
llvm::FoldingSetNodeID profile;
@@ -320,7 +318,7 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
const PathDiagnosticPiece &Y) {
if (X.getKind() != Y.getKind())
return X.getKind() < Y.getKind();
-
+
FullSourceLoc XL = X.getLocation().asLocation();
FullSourceLoc YL = Y.getLocation().asLocation();
if (XL != YL)
@@ -333,7 +331,7 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
return X.getRanges().size() < Y.getRanges().size();
const SourceManager &SM = XL.getManager();
-
+
for (unsigned i = 0, n = X.getRanges().size(); i < n; ++i) {
SourceRange XR = X.getRanges()[i];
SourceRange YR = Y.getRanges()[i];
@@ -343,7 +341,7 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
return SM.isBeforeInTranslationUnit(XR.getEnd(), YR.getEnd());
}
}
-
+
switch (X.getKind()) {
case clang::ento::PathDiagnosticPiece::ControlFlow:
return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
@@ -420,9 +418,9 @@ void PathDiagnosticConsumer::FlushDiagnostics(
PathDiagnosticConsumer::FilesMade *Files) {
if (flushed)
return;
-
+
flushed = true;
-
+
std::vector<const PathDiagnostic *> BatchDiags;
for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
et = Diags.end(); it != et; ++it) {
@@ -450,7 +448,7 @@ void PathDiagnosticConsumer::FlushDiagnostics(
const PathDiagnostic *D = *it;
delete D;
}
-
+
// Clear out the FoldingSet.
Diags.clear();
}
@@ -472,7 +470,7 @@ void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
Entry = new (Entry) PDFileEntry(NodeID);
Set.InsertNode(Entry, InsertPos);
}
-
+
// Allocate persistent storage for the file name.
char *FileName_cstr = (char*) Alloc.Allocate(FileName.size(), 1);
memcpy(FileName_cstr, FileName.data(), FileName.size());
@@ -847,7 +845,7 @@ PathDiagnosticRange
SourceRange R = S->getSourceRange();
if (R.isValid())
return R;
- break;
+ break;
}
case DeclK:
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
@@ -949,7 +947,7 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
Out << "constructor";
describeClass(Out, MD->getParent(), " for ");
-
+
} else if (isa<CXXDestructorDecl>(MD)) {
if (!MD->isUserProvided()) {
Out << "destructor";
@@ -1041,7 +1039,7 @@ static void compute_path_size(const PathPieces &pieces, unsigned &size) {
for (PathPieces::const_iterator it = pieces.begin(),
et = pieces.end(); it != et; ++it) {
const PathDiagnosticPiece *piece = it->get();
- if (const PathDiagnosticCallPiece *cp =
+ if (const PathDiagnosticCallPiece *cp =
dyn_cast<PathDiagnosticCallPiece>(piece)) {
compute_path_size(cp->path, size);
}
@@ -1077,12 +1075,12 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
I != E; ++I) {
ID.AddInteger(I->getBegin().getRawEncoding());
ID.AddInteger(I->getEnd().getRawEncoding());
- }
+ }
}
void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticPiece::Profile(ID);
- for (PathPieces::const_iterator it = path.begin(),
+ for (PathPieces::const_iterator it = path.begin(),
et = path.end(); it != et; ++it) {
ID.Add(**it);
}
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index e0aff589e053..55e1222e0ac6 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/PlistSupport.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -171,7 +171,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
--indent;
Indent(o, indent) << "</array>\n";
}
-
+
// Output the call depth.
Indent(o, indent) << "<key>depth</key>";
EmitInteger(o, depth) << '\n';
@@ -187,7 +187,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
Indent(o, indent) << "<key>message</key>\n";
Indent(o, indent);
EmitString(o, P.getString()) << '\n';
-
+
// Finish up.
--indent;
Indent(o, indent); o << "</dict>\n";
@@ -208,9 +208,9 @@ static void ReportCall(raw_ostream &o,
const LangOptions &LangOpts,
unsigned indent,
unsigned depth) {
-
+
IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
- P.getCallEnterEvent();
+ P.getCallEnterEvent();
if (callEnter)
ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true,
@@ -218,18 +218,18 @@ static void ReportCall(raw_ostream &o,
IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithinCaller =
P.getCallEnterWithinCallerEvent();
-
+
++depth;
-
+
if (callEnterWithinCaller)
ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
indent, depth, true);
-
+
for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
--depth;
-
+
IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
P.getCallExitEvent();
@@ -295,9 +295,9 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
const SourceManager* SM = nullptr;
if (!Diags.empty())
-    SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();
-
+    SM = &Diags.front()->path.front()->getLocation().getManager();
+
for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
DE = Diags.end(); DI != DE; ++DI) {
@@ -374,7 +374,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <array>\n";
- for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
I != E; ++I)
ReportDiag(o, **I, FM, *SM, LangOpts);
@@ -389,7 +389,19 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
EmitString(o, D->getBugType()) << '\n';
o << " <key>check_name</key>";
EmitString(o, D->getCheckName()) << '\n';
-
+
+ o << " <!-- This hash is experimental and going to change! -->\n";
+ o << " <key>issue_hash_content_of_line_in_context</key>";
+ PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
+ FullSourceLoc L(SM->getExpansionLoc(UPDLoc.isValid()
+ ? UPDLoc.asLocation()
+ : D->getLocation().asLocation()),
+ *SM);
+ const Decl *DeclWithIssue = D->getDeclWithIssue();
+ EmitString(o, GetIssueHash(*SM, L, D->getCheckName(), D->getBugType(),
+ DeclWithIssue, LangOpts))
+ << '\n';
+
// Output information about the semantic context where
// the issue occurred.
if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
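A note on the new plist key above: issue_hash_content_of_line_in_context comes from the separate GetIssueHash utility (declared in the newly included IssueHash.h), fed the expansion location, checker name, bug type, and enclosing declaration. The sketch below only illustrates combining those same inputs into one stable token; it is a hypothetical stand-in, not the real algorithm, which the emitted comment itself marks as experimental.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>

    // Illustrative only: fold the inputs the patch passes to GetIssueHash
    // into one stable value. The real hash lives in IssueHash.cpp and is
    // computed differently.
    uint64_t toyIssueHash(const std::string &File, unsigned Line,
                          const std::string &CheckName,
                          const std::string &BugType,
                          const std::string &DeclName) {
      std::hash<std::string> H;
      uint64_t Seed = H(File) ^ (Line * 0x9e3779b97f4a7c15ULL);
      for (const std::string *S : {&CheckName, &BugType, &DeclName})
        Seed = Seed * 31 + H(*S);
      return Seed;
    }

    int main() {
      std::cout << std::hex
                << toyIssueHash("test.c", 42, "unix.Malloc", "Memory leak", "f")
                << '\n';
    }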
@@ -423,28 +435,23 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Output the bug hash for issue unique-ing. Currently, it's just an
// offset from the beginning of the function.
if (const Stmt *Body = DeclWithIssue->getBody()) {
-
+
// If the bug uniqueing location exists, use it for the hash.
// For example, this ensures that two leaks reported on the same line
// will have different issue_hashes and that the hash will identify
// the leak location even after code is added between the allocation
// site and the end of scope (leak report location).
- PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
if (UPDLoc.isValid()) {
- FullSourceLoc UL(SM->getExpansionLoc(UPDLoc.asLocation()),
- *SM);
FullSourceLoc UFunL(SM->getExpansionLoc(
D->getUniqueingDecl()->getBody()->getLocStart()), *SM);
- o << " <key>issue_hash</key><string>"
- << UL.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
+ o << " <key>issue_hash_function_offset</key><string>"
+ << L.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
<< "</string>\n";
// Otherwise, use the location on which the bug is reported.
} else {
- FullSourceLoc L(SM->getExpansionLoc(D->getLocation().asLocation()),
- *SM);
FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
- o << " <key>issue_hash</key><string>"
+ o << " <key>issue_hash_function_offset</key><string>"
<< L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
<< "</string>\n";
}
@@ -486,5 +493,5 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " </array>\n";
// Finish.
- o << "</dict>\n</plist>";
+ o << "</dict>\n</plist>";
}
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 60b32c722ebf..4f9ad9ebccd9 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -36,7 +36,7 @@ void ProgramStateRelease(const ProgramState *state) {
if (--s->refCount == 0) {
ProgramStateManager &Mgr = s->getStateManager();
Mgr.StateSet.RemoveNode(s);
- s->~ProgramState();
+ s->~ProgramState();
Mgr.freeStates.push_back(s);
}
}
@@ -86,7 +86,7 @@ ProgramStateManager::~ProgramStateManager() {
I->second.second(I->second.first);
}
-ProgramStateRef
+ProgramStateRef
ProgramStateManager::removeDeadBindings(ProgramStateRef state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
@@ -113,7 +113,7 @@ ProgramStateManager::removeDeadBindings(ProgramStateRef state,
ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const {
ProgramStateManager &Mgr = getStateManager();
- ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+ ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
LV, V));
const MemRegion *MR = LV.getAsRegion();
if (MR && Mgr.getOwningEngine() && notifyChanges)
@@ -127,15 +127,15 @@ ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
ProgramStateRef new_state = makeWithStore(newStore);
- return Mgr.getOwningEngine() ?
- Mgr.getOwningEngine()->processRegionChange(new_state, R) :
+ return Mgr.getOwningEngine() ?
+ Mgr.getOwningEngine()->processRegionChange(new_state, R) :
new_state;
}
typedef ArrayRef<const MemRegion *> RegionList;
typedef ArrayRef<SVal> ValueList;
-ProgramStateRef
+ProgramStateRef
ProgramState::invalidateRegions(RegionList Regions,
const Expr *E, unsigned Count,
const LocationContext *LCtx,
@@ -197,11 +197,11 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
if (CausedByPointerEscape) {
newState = Eng->notifyCheckersOfPointerEscape(newState, IS,
TopLevelInvalidated,
- Invalidated, Call,
+ Invalidated, Call,
*ITraits);
}
- return Eng->processRegionChanges(newState, IS, TopLevelInvalidated,
+ return Eng->processRegionChanges(newState, IS, TopLevelInvalidated,
Invalidated, Call);
}
@@ -224,7 +224,7 @@ ProgramStateRef ProgramState::killBinding(Loc LV) const {
return makeWithStore(newStore);
}
-ProgramStateRef
+ProgramStateRef
ProgramState::enterStackFrame(const CallEvent &Call,
const StackFrameContext *CalleeCtx) const {
const StoreRef &NewStore =
@@ -275,7 +275,7 @@ SVal ProgramState::getSVal(Loc location, QualType T) const {
// symbol for the call to foo(); the type of that symbol is 'char',
// not unsigned.
const llvm::APSInt &NewV = getBasicVals().Convert(T, *Int);
-
+
if (V.getAs<Loc>())
return loc::ConcreteInt(NewV);
else
@@ -283,7 +283,7 @@ SVal ProgramState::getSVal(Loc location, QualType T) const {
}
}
}
-
+
return V;
}
@@ -353,11 +353,11 @@ ConditionTruthVal ProgramState::isNull(SVal V) const {
if (V.isConstant())
return false;
-
+
SymbolRef Sym = V.getAsSymbol(/* IncludeBaseRegion */ true);
if (!Sym)
return ConditionTruthVal();
-
+
return getStateManager().ConstraintMgr->isNull(this, Sym);
}
@@ -390,7 +390,7 @@ ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
ProgramState *newState = nullptr;
if (!freeStates.empty()) {
newState = freeStates.back();
- freeStates.pop_back();
+ freeStates.pop_back();
}
else {
newState = (ProgramState*) Alloc.Allocate<ProgramState>();
@@ -530,10 +530,10 @@ bool ScanReachableSymbols::scan(const SymExpr *sym) {
bool wasVisited = !visited.insert(sym).second;
if (wasVisited)
return true;
-
+
if (!visitor.VisitSymbol(sym))
return false;
-
+
// TODO: should be rewritten using SymExpr::symbol_iterator.
switch (sym->getKind()) {
case SymExpr::RegionValueKind:
@@ -582,11 +582,11 @@ bool ScanReachableSymbols::scan(SVal val) {
bool ScanReachableSymbols::scan(const MemRegion *R) {
if (isa<MemSpaceRegion>(R))
return true;
-
+
bool wasVisited = !visited.insert(R).second;
if (wasVisited)
return true;
-
+
if (!visitor.VisitMemRegion(R))
return false;
@@ -722,14 +722,14 @@ bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
if (!Sym)
return false;
-
+
// Traverse all the symbols this symbol depends on to see if any are tainted.
bool Tainted = false;
for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
SI != SE; ++SI) {
if (!isa<SymbolData>(*SI))
continue;
-
+
const TaintTagType *Tag = get<TaintMap>(*SI);
Tainted = (Tag && *Tag == Kind);
@@ -748,40 +748,7 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
if (Tainted)
return true;
}
-
- return Tainted;
-}
-
-/// The GDM component containing the dynamic type info. This is a map from a
-/// symbol to its most likely type.
-REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicTypeMap,
- CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
- DynamicTypeInfo))
-
-DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const {
- Reg = Reg->StripCasts();
-
- // Look up the dynamic type in the GDM.
- const DynamicTypeInfo *GDMType = get<DynamicTypeMap>(Reg);
- if (GDMType)
- return *GDMType;
-
- // Otherwise, fall back to what we know about the region.
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
- return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
- SymbolRef Sym = SR->getSymbol();
- return DynamicTypeInfo(Sym->getType());
- }
-
- return DynamicTypeInfo();
+ return Tainted;
}
-ProgramStateRef ProgramState::setDynamicTypeInfo(const MemRegion *Reg,
- DynamicTypeInfo NewTy) const {
- Reg = Reg->StripCasts();
- ProgramStateRef NewState = set<DynamicTypeMap>(Reg, NewTy);
- assert(NewState);
- return NewState;
-}
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 170f7c02b882..0a2b2e64a142 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -81,6 +81,15 @@ public:
RangeSet(PrimRangeSet RS) : ranges(RS) {}
+ /// Create a new set with all ranges of this set and RS.
+ /// Possible intersections are not checked here.
+ RangeSet addRange(Factory &F, const RangeSet &RS) {
+ PrimRangeSet Ranges(RS.ranges);
+ for (const auto &range : ranges)
+ Ranges = F.add(Ranges, range);
+ return RangeSet(Ranges);
+ }
+
iterator begin() const { return ranges.begin(); }
iterator end() const { return ranges.end(); }
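For readers of the hunk above: addRange is a plain union of two interval sets with no overlap coalescing, because its caller feeds it the disjoint "below From" and "above To" halves of a split range. A minimal standalone sketch of the same operation, using std::set instead of llvm::ImmutableSet and hypothetical names:

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <utility>

    // Union of two sets of inclusive [lo, hi] intervals, mirroring
    // RangeSet::addRange: start from one set and fold in the other.
    // Overlaps are deliberately not merged; callers pass disjoint sets.
    using Range = std::pair<int64_t, int64_t>;
    using Ranges = std::set<Range>;

    Ranges addRange(const Ranges &LHS, const Ranges &RHS) {
      Ranges Result(RHS);            // like PrimRangeSet Ranges(RS.ranges)
      for (const Range &R : LHS)     // like the F.add() loop over this->ranges
        Result.insert(R);
      return Result;
    }

    int main() {
      Ranges Below{{INT64_MIN, 9}};  // e.g. the "sym < 10" half
      Ranges Above{{21, INT64_MAX}}; // e.g. the "sym > 20" half
      for (const Range &R : addRange(Below, Above))
        std::cout << '[' << R.first << ", " << R.second << "]\n";
    }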
@@ -312,6 +321,14 @@ public:
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
const llvm::APSInt* getSymVal(ProgramStateRef St,
SymbolRef sym) const override;
ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
@@ -324,6 +341,20 @@ public:
private:
RangeSet::Factory F;
+ RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(const RangeSet &RS, const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
};
} // end anonymous namespace
@@ -365,7 +396,7 @@ ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
-ProgramStateRef
+ProgramStateRef
RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper) {
@@ -415,7 +446,7 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
// UINT_MAX, 0, 1, and 2.
-ProgramStateRef
+ProgramStateRef
RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
@@ -435,7 +466,7 @@ RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-ProgramStateRef
+ProgramStateRef
RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
@@ -450,122 +481,199 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
+ SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return nullptr;
+ return F.getEmptySet();
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return St;
+ return GetRange(St, Sym);
}
// Special case for Int == Min. This is always false.
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Min = AdjustmentType.getMinValue();
if (ComparisonVal == Min)
- return nullptr;
+ return F.getEmptySet();
- llvm::APSInt Lower = Min-Adjustment;
- llvm::APSInt Upper = ComparisonVal-Adjustment;
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
--Upper;
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymLTRange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return St;
+ return GetRange(St, Sym);
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return nullptr;
+ return F.getEmptySet();
}
// Special case for Int == Max. This is always false.
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Max = AdjustmentType.getMaxValue();
if (ComparisonVal == Max)
- return nullptr;
+ return F.getEmptySet();
- llvm::APSInt Lower = ComparisonVal-Adjustment;
- llvm::APSInt Upper = Max-Adjustment;
+ llvm::APSInt Lower = ComparisonVal - Adjustment;
+ llvm::APSInt Upper = Max - Adjustment;
++Lower;
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGTRange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return St;
+ return GetRange(St, Sym);
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return nullptr;
+ return F.getEmptySet();
}
// Special case for Int == Min. This is always feasible.
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Min = AdjustmentType.getMinValue();
if (ComparisonVal == Min)
- return St;
+ return GetRange(St, Sym);
llvm::APSInt Max = AdjustmentType.getMaxValue();
- llvm::APSInt Lower = ComparisonVal-Adjustment;
- llvm::APSInt Upper = Max-Adjustment;
+ llvm::APSInt Lower = ComparisonVal - Adjustment;
+ llvm::APSInt Upper = Max - Adjustment;
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
- return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGERange(St, Sym, Int, Adjustment);
+ return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
+}
+
+RangeSet
+RangeConstraintManager::getSymLERange(const RangeSet &RS,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return nullptr;
+ return F.getEmptySet();
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return St;
+ return RS;
}
// Special case for Int == Max. This is always feasible.
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Max = AdjustmentType.getMaxValue();
if (ComparisonVal == Max)
- return St;
+ return RS;
+
+ llvm::APSInt Min = AdjustmentType.getMinValue();
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
+
+ return RS.Intersect(getBasicVals(), F, Lower, Upper);
+}
+
+RangeSet
+RangeConstraintManager::getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ // Before we do any real work, see if the value can even show up.
+ APSIntType AdjustmentType(Adjustment);
+ switch (AdjustmentType.testInRange(Int, true)) {
+ case APSIntType::RTR_Below:
+ return F.getEmptySet();
+ case APSIntType::RTR_Within:
+ break;
+ case APSIntType::RTR_Above:
+ return GetRange(St, Sym);
+ }
+
+ // Special case for Int == Max. This is always feasible.
+ llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+ llvm::APSInt Max = AdjustmentType.getMaxValue();
+ if (ComparisonVal == Max)
+ return GetRange(St, Sym);
llvm::APSInt Min = AdjustmentType.getMinValue();
- llvm::APSInt Lower = Min-Adjustment;
- llvm::APSInt Upper = ComparisonVal-Adjustment;
+ llvm::APSInt Lower = Min - Adjustment;
+ llvm::APSInt Upper = ComparisonVal - Adjustment;
+
+ return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+}
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ProgramStateRef
+RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymLERange(St, Sym, Int, Adjustment);
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
+ProgramStateRef
+RangeConstraintManager::assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
+ RangeSet New = getSymGERange(State, Sym, From, Adjustment);
+ if (New.isEmpty())
+ return nullptr;
+ New = getSymLERange(New, To, Adjustment);
+ return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
+ RangeSet RangeLT = getSymLTRange(State, Sym, From, Adjustment);
+ RangeSet RangeGT = getSymGTRange(State, Sym, To, Adjustment);
+ RangeSet New(RangeLT.addRange(F, RangeGT));
+ return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
+}
+
//===------------------------------------------------------------------------===
// Pretty-printing.
//===------------------------------------------------------------------------===/
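Taken together, the two new entry points reduce inclusive-range assumptions to existing range algebra: "within [From, To]" is the intersection built from the GE and LE halves, and "outside [From, To]" is the union of the LT(From) and GT(To) ranges via addRange. A standalone sketch of the feasibility arithmetic on a single concrete interval (a real RangeSet holds several):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Feasibility arithmetic behind the two callbacks, shrunk to one
    // interval. "Within" clips the current range to [From, To]; an empty
    // clip means the assumption is infeasible, which the callers turn
    // into a null ProgramStateRef.
    struct Interval { int64_t Lo, Hi; };  // inclusive bounds

    std::optional<Interval> assumeWithin(Interval Cur, int64_t From,
                                         int64_t To) {
      Interval R{std::max(Cur.Lo, From), std::min(Cur.Hi, To)};
      if (R.Lo > R.Hi)
        return std::nullopt;              // empty: state is pruned
      return R;
    }

    int main() {
      Interval Cur{0, 100};               // current constraint on the symbol
      if (auto R = assumeWithin(Cur, 10, 20))
        std::cout << '[' << R->Lo << ", " << R->Hi << "]\n";  // [10, 20]
    }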
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 6d41fc2146fe..a63f6e496272 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -149,7 +149,8 @@ typedef llvm::ImmutableMap<const MemRegion *, ClusterBindings>
namespace {
class RegionBindingsRef : public llvm::ImmutableMapRef<const MemRegion *,
ClusterBindings> {
- ClusterBindings::Factory &CBFactory;
+ ClusterBindings::Factory *CBFactory;
+
public:
typedef llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>
ParentTy;
@@ -157,21 +158,21 @@ public:
RegionBindingsRef(ClusterBindings::Factory &CBFactory,
const RegionBindings::TreeTy *T,
RegionBindings::TreeTy::Factory *F)
- : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(T, F),
- CBFactory(CBFactory) {}
+ : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(T, F),
+ CBFactory(&CBFactory) {}
RegionBindingsRef(const ParentTy &P, ClusterBindings::Factory &CBFactory)
- : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(P),
- CBFactory(CBFactory) {}
+ : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(P),
+ CBFactory(&CBFactory) {}
RegionBindingsRef add(key_type_ref K, data_type_ref D) const {
- return RegionBindingsRef(static_cast<const ParentTy*>(this)->add(K, D),
- CBFactory);
+ return RegionBindingsRef(static_cast<const ParentTy *>(this)->add(K, D),
+ *CBFactory);
}
RegionBindingsRef remove(key_type_ref K) const {
- return RegionBindingsRef(static_cast<const ParentTy*>(this)->remove(K),
- CBFactory);
+ return RegionBindingsRef(static_cast<const ParentTy *>(this)->remove(K),
+ *CBFactory);
}
RegionBindingsRef addBinding(BindingKey K, SVal V) const;
@@ -179,16 +180,9 @@ public:
RegionBindingsRef addBinding(const MemRegion *R,
BindingKey::Kind k, SVal V) const;
- RegionBindingsRef &operator=(const RegionBindingsRef &X) {
- *static_cast<ParentTy*>(this) = X;
- return *this;
- }
-
const SVal *lookup(BindingKey K) const;
const SVal *lookup(const MemRegion *R, BindingKey::Kind k) const;
- const ClusterBindings *lookup(const MemRegion *R) const {
- return static_cast<const ParentTy*>(this)->lookup(R);
- }
+ using llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>::lookup;
RegionBindingsRef removeBinding(BindingKey K);
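The CBFactory change above is what allows deleting the hand-written operator=: a class with a reference member has its implicit copy-assignment operator deleted (references cannot be rebound), while a pointer member leaves the default assignment available with the same always-non-null usage. A self-contained sketch of that language rule, with hypothetical types:

    #include <cassert>

    struct Factory { int id; };

    struct PtrMember {
      Factory *F;        // implicit operator= works
    };

    struct RefMember {
      Factory &F;        // implicit operator= is deleted
    };

    int main() {
      Factory A{1}, B{2};
      PtrMember X{&A}, Y{&B};
      X = Y;             // default member-wise assignment
      assert(X.F->id == 2);
      // RefMember R{A}, S{B}; R = S;  // would not compile
    }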
@@ -245,10 +239,10 @@ RegionBindingsRef RegionBindingsRef::addBinding(BindingKey K, SVal V) const {
const MemRegion *Base = K.getBaseRegion();
const ClusterBindings *ExistingCluster = lookup(Base);
- ClusterBindings Cluster = (ExistingCluster ? *ExistingCluster
- : CBFactory.getEmptyMap());
+ ClusterBindings Cluster =
+ (ExistingCluster ? *ExistingCluster : CBFactory->getEmptyMap());
- ClusterBindings NewCluster = CBFactory.add(Cluster, K, V);
+ ClusterBindings NewCluster = CBFactory->add(Cluster, K, V);
return add(Base, NewCluster);
}
@@ -277,7 +271,7 @@ RegionBindingsRef RegionBindingsRef::removeBinding(BindingKey K) {
if (!Cluster)
return *this;
- ClusterBindings NewCluster = CBFactory.remove(*Cluster, K);
+ ClusterBindings NewCluster = CBFactory->remove(*Cluster, K);
if (NewCluster.isEmpty())
return remove(Base);
return add(Base, NewCluster);
@@ -470,9 +464,9 @@ public: // Part of public interface to class.
StoreRef killBinding(Store ST, Loc L) override;
void incrementReferenceCount(Store store) override {
- getRegionBindings(store).manualRetain();
+ getRegionBindings(store).manualRetain();
}
-
+
/// If the StoreManager supports it, decrement the reference count of
/// the specified Store object. If the reference count hits 0, the memory
/// associated with the object is recycled.
@@ -514,7 +508,7 @@ public: // Part of public interface to class.
SVal getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
const TypedValueRegion *R,
QualType Ty);
-
+
SVal getLazyBinding(const SubRegion *LazyBindingRegion,
RegionBindingsRef LazyBinding);
@@ -656,35 +650,25 @@ protected:
RegionBindingsRef B;
-private:
- GlobalsFilterKind GlobalsFilter;
protected:
const ClusterBindings *getCluster(const MemRegion *R) {
return B.lookup(R);
}
- /// Returns true if the memory space of the given region is one of the global
- /// regions specially included at the start of analysis.
- bool isInitiallyIncludedGlobalRegion(const MemRegion *R) {
- switch (GlobalsFilter) {
- case GFK_None:
- return false;
- case GFK_SystemOnly:
- return isa<GlobalSystemSpaceRegion>(R->getMemorySpace());
- case GFK_All:
- return isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace());
- }
-
- llvm_unreachable("unknown globals filter");
+ /// Returns true if all clusters in the given memspace should be initially
+ /// included in the cluster analysis. Subclasses may provide their
+ /// own implementation.
+ bool includeEntireMemorySpace(const MemRegion *Base) {
+ return false;
}
public:
ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
- RegionBindingsRef b, GlobalsFilterKind GFK)
+ RegionBindingsRef b )
: RM(rm), Ctx(StateMgr.getContext()),
svalBuilder(StateMgr.getSValBuilder()),
- B(b), GlobalsFilter(GFK) {}
+ B(b) {}
RegionBindingsRef getRegionBindings() const { return B; }
@@ -702,8 +686,9 @@ public:
assert(!Cluster.isEmpty() && "Empty clusters should be removed");
static_cast<DERIVED*>(this)->VisitAddedToCluster(Base, Cluster);
- // If this is an interesting global region, add it the work list up front.
- if (isInitiallyIncludedGlobalRegion(Base))
+      // If the base's memspace should be entirely invalidated, add the cluster
+      // to the work list up front.
+ if (static_cast<DERIVED*>(this)->includeEntireMemorySpace(Base))
AddToWorkList(WorkListElement(Base), &Cluster);
}
}
@@ -716,8 +701,7 @@ public:
}
bool AddToWorkList(const MemRegion *R) {
- const MemRegion *BaseR = R->getBaseRegion();
- return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
+ return static_cast<DERIVED*>(this)->AddToWorkList(R);
}
void RunWorkList() {
@@ -947,6 +931,7 @@ class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
InvalidatedSymbols &IS;
RegionAndSymbolInvalidationTraits &ITraits;
StoreManager::InvalidatedRegions *Regions;
+ GlobalsFilterKind GlobalsFilter;
public:
invalidateRegionsWorker(RegionStoreManager &rm,
ProgramStateManager &stateMgr,
@@ -957,14 +942,34 @@ public:
RegionAndSymbolInvalidationTraits &ITraitsIn,
StoreManager::InvalidatedRegions *r,
GlobalsFilterKind GFK)
- : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, GFK),
- Ex(ex), Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r){}
+ : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b),
+ Ex(ex), Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
+ GlobalsFilter(GFK) {}
void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
void VisitBinding(SVal V);
+
+ using ClusterAnalysis::AddToWorkList;
+
+ bool AddToWorkList(const MemRegion *R);
+
+  /// Returns true if all clusters in the memory space for \p Base should
+  /// be invalidated.
+ bool includeEntireMemorySpace(const MemRegion *Base);
+
+ /// Returns true if the memory space of the given region is one of the global
+ /// regions specially included at the start of invalidation.
+ bool isInitiallyIncludedGlobalRegion(const MemRegion *R);
};
}
+bool invalidateRegionsWorker::AddToWorkList(const MemRegion *R) {
+ bool doNotInvalidateSuperRegion = ITraits.hasTrait(
+ R, RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ const MemRegion *BaseR = doNotInvalidateSuperRegion ? R : R->getBaseRegion();
+ return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
+}
+
void invalidateRegionsWorker::VisitBinding(SVal V) {
// A symbol? Mark it touched by the invalidation.
if (SymbolRef Sym = V.getAsSymbol())
@@ -993,8 +998,8 @@ void invalidateRegionsWorker::VisitBinding(SVal V) {
void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
const ClusterBindings *C) {
- bool PreserveRegionsContents =
- ITraits.hasTrait(baseR,
+ bool PreserveRegionsContents =
+ ITraits.hasTrait(baseR,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
if (C) {
@@ -1077,6 +1082,70 @@ void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
}
if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+ bool doNotInvalidateSuperRegion = ITraits.hasTrait(
+ baseR,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+
+ if (doNotInvalidateSuperRegion) {
+ // We are not doing blank invalidation of the whole array region so we
+      // have to manually invalidate each element.
+ Optional<uint64_t> NumElements;
+
+ // Compute lower and upper offsets for region within array.
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ NumElements = CAT->getSize().getZExtValue();
+ if (!NumElements) // We are not dealing with a constant size array
+ goto conjure_default;
+ QualType ElementTy = AT->getElementType();
+ uint64_t ElemSize = Ctx.getTypeSize(ElementTy);
+ const RegionOffset &RO = baseR->getAsOffset();
+ const MemRegion *SuperR = baseR->getBaseRegion();
+ if (RO.hasSymbolicOffset()) {
+ // If base region has a symbolic offset,
+ // we revert to invalidating the super region.
+ if (SuperR)
+ AddToWorkList(SuperR);
+ goto conjure_default;
+ }
+
+ uint64_t LowerOffset = RO.getOffset();
+ uint64_t UpperOffset = LowerOffset + *NumElements * ElemSize;
+ bool UpperOverflow = UpperOffset < LowerOffset;
+
+ // Invalidate regions which are within array boundaries,
+ // or have a symbolic offset.
+ if (!SuperR)
+ goto conjure_default;
+
+ const ClusterBindings *C = B.lookup(SuperR);
+ if (!C)
+ goto conjure_default;
+
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E;
+ ++I) {
+ const BindingKey &BK = I.getKey();
+ Optional<uint64_t> ROffset =
+ BK.hasSymbolicOffset() ? Optional<uint64_t>() : BK.getOffset();
+
+ // Check offset is not symbolic and within array's boundaries.
+ // Handles arrays of 0 elements and of 0-sized elements as well.
+ if (!ROffset ||
+ (ROffset &&
+ ((*ROffset >= LowerOffset && *ROffset < UpperOffset) ||
+ (UpperOverflow &&
+ (*ROffset >= LowerOffset || *ROffset < UpperOffset)) ||
+ (LowerOffset == UpperOffset && *ROffset == LowerOffset)))) {
+ B = B.removeBinding(I.getKey());
+ // Bound symbolic regions need to be invalidated for dead symbol
+ // detection.
+ SVal V = I.getData();
+ const MemRegion *R = V.getAsRegion();
+ if (R && isa<SymbolicRegion>(R))
+ VisitBinding(V);
+ }
+ }
+ }
+ conjure_default:
// Set the default value of the array to conjured symbol.
DefinedOrUnknownSVal V =
svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
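The heart of the element-wise invalidation added above is the offset-window test: a binding is dropped when its concrete byte offset lies within [LowerOffset, UpperOffset), where UpperOffset may have wrapped around the unsigned range. A standalone sketch of that predicate, including the wraparound and zero-sized cases:

    #include <cstdint>
    #include <iostream>

    // A binding at byte offset Off is inside the array window
    // [Lower, Upper) even when the unsigned Upper computation wrapped
    // past Lower; a zero-sized window still matches Off == Lower.
    bool offsetInArray(uint64_t Off, uint64_t Lower, uint64_t Upper) {
      bool UpperOverflow = Upper < Lower;                  // wrapped window
      return (Off >= Lower && Off < Upper) ||
             (UpperOverflow && (Off >= Lower || Off < Upper)) ||
             (Lower == Upper && Off == Lower);             // empty window
    }

    int main() {
      // Window starting near UINT64_MAX that wraps around to 8.
      uint64_t Lower = ~0ULL - 3, Upper = Lower + 12;      // Upper wraps
      std::cout << offsetInArray(4, Lower, Upper) << '\n'; // 1: inside, post-wrap
      std::cout << offsetInArray(64, Lower, Upper) << '\n';// 0: outside
    }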
@@ -1091,6 +1160,29 @@ void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
B = B.addBinding(baseR, BindingKey::Direct, V);
}
+bool invalidateRegionsWorker::isInitiallyIncludedGlobalRegion(
+ const MemRegion *R) {
+ switch (GlobalsFilter) {
+ case GFK_None:
+ return false;
+ case GFK_SystemOnly:
+ return isa<GlobalSystemSpaceRegion>(R->getMemorySpace());
+ case GFK_All:
+ return isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace());
+ }
+
+ llvm_unreachable("unknown globals filter");
+}
+
+bool invalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
+ if (isInitiallyIncludedGlobalRegion(Base))
+ return true;
+
+ const MemSpaceRegion *MemSpace = Base->getMemorySpace();
+ return ITraits.hasTrait(MemSpace,
+ RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
+}
+
RegionBindingsRef
RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
const Expr *Ex,
@@ -1273,6 +1365,10 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
const MemRegion *MR = L.castAs<loc::MemRegionVal>().getRegion();
+ if (isa<BlockDataRegion>(MR)) {
+ return UnknownVal();
+ }
+
if (isa<AllocaRegion>(MR) ||
isa<SymbolicRegion>(MR) ||
isa<CodeTextRegion>(MR)) {
@@ -1462,7 +1558,7 @@ RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
// through to look for lazy compound value. It is like a field region.
Result = findLazyBinding(B, cast<SubRegion>(BaseReg->getSuperRegion()),
originalRegion);
-
+
if (Result.second)
Result.second = MRMgr.getCXXBaseObjectRegionWithSuper(BaseReg,
Result.second);
@@ -1508,7 +1604,7 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
return svalBuilder.makeIntVal(c, T);
}
}
-
+
// Check for loads from a code text region. For such loads, just give up.
if (isa<CodeTextRegion>(superR))
return UnknownVal();
@@ -1520,12 +1616,12 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
// return *y;
// FIXME: This is a hack, and doesn't do anything really intelligent yet.
const RegionRawOffset &O = R->getAsArrayOffset();
-
+
// If we cannot reason about the offset, return an unknown value.
if (!O.getRegion())
return UnknownVal();
-
- if (const TypedValueRegion *baseR =
+
+ if (const TypedValueRegion *baseR =
dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
QualType baseT = baseR->getValueType();
if (baseT->isScalarType()) {
@@ -1616,7 +1712,7 @@ SVal RegionStoreManager::getLazyBinding(const SubRegion *LazyBindingRegion,
return Result;
}
-
+
SVal
RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
const TypedValueRegion *R,
@@ -1670,7 +1766,7 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
if (!index.isConstant())
hasSymbolicIndex = true;
}
-
+
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
SR = dyn_cast<SubRegion>(Base);
@@ -1680,7 +1776,7 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
if (isa<ElementRegion>(R)) {
// Currently we don't reason specially about Clang-style vectors. Check
// if superR is a vector and if so return Unknown.
- if (const TypedValueRegion *typedSuperR =
+ if (const TypedValueRegion *typedSuperR =
dyn_cast<TypedValueRegion>(R->getSuperRegion())) {
if (typedSuperR->getValueType()->isVectorType())
return UnknownVal();
@@ -1807,7 +1903,7 @@ RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
List.insert(List.end(), InnerList.begin(), InnerList.end());
continue;
}
-
+
List.push_back(V);
}
@@ -1844,7 +1940,7 @@ SVal RegionStoreManager::getBindingForArray(RegionBindingsConstRef B,
const TypedValueRegion *R) {
assert(Ctx.getAsConstantArrayType(R->getValueType()) &&
"Only constant array types can have compound bindings.");
-
+
return createLazyBinding(B, R);
}
@@ -2018,11 +2114,11 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
QualType T = R->getValueType();
assert(T->isVectorType());
const VectorType *VT = T->getAs<VectorType>(); // Use getAs for typedefs.
-
+
// Handle lazy compound values and symbolic values.
if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
return bindAggregate(B, R, V);
-
+
// We may get non-CompoundVal accidentally due to imprecise cast logic or
// that we are binding symbolic struct value. Kill the field values, and if
// the value is symbolic go and bind it as a "default" binding.
@@ -2039,7 +2135,7 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
for ( ; index != numElements ; ++index) {
if (VI == VE)
break;
-
+
NonLoc Idx = svalBuilder.makeArrayIndex(index);
const ElementRegion *ER = MRMgr.getElementRegion(ElemType, Idx, R, Ctx);
@@ -2081,7 +2177,7 @@ RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
}
RegionBindingsRef NewB = B;
-
+
for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
@@ -2185,7 +2281,7 @@ public:
ProgramStateManager &stateMgr,
RegionBindingsRef b, SymbolReaper &symReaper,
const StackFrameContext *LCtx)
- : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b, GFK_None),
+ : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b),
SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
@@ -2193,11 +2289,20 @@ public:
void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
using ClusterAnalysis<removeDeadBindingsWorker>::VisitCluster;
+ using ClusterAnalysis::AddToWorkList;
+
+ bool AddToWorkList(const MemRegion *R);
+
bool UpdatePostponed();
void VisitBinding(SVal V);
};
}
+bool removeDeadBindingsWorker::AddToWorkList(const MemRegion *R) {
+ const MemRegion *BaseR = R->getBaseRegion();
+ return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
+}
+
void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
const ClusterBindings &C) {
@@ -2243,8 +2348,12 @@ void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
SymReaper.markLive(SymR->getSymbol());
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
+ for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ // Element index of a binding key is live.
+ SymReaper.markElementIndicesLive(I.getKey().getRegion());
+
VisitBinding(I.getData());
+ }
}
void removeDeadBindingsWorker::VisitBinding(SVal V) {
@@ -2265,7 +2374,8 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
// If V is a region, then add it to the worklist.
if (const MemRegion *R = V.getAsRegion()) {
AddToWorkList(R);
-
+ SymReaper.markLive(R);
+
// All regions captured by a block are also live.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
@@ -2274,7 +2384,7 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
AddToWorkList(I.getCapturedRegion());
}
}
-
+
// Update the set of live symbols.
for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 3ed2bde1e4f8..cdae04068e1d 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -91,10 +91,13 @@ nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
return makeTruthVal(boolean->getValue());
}
-DefinedOrUnknownSVal
+DefinedOrUnknownSVal
SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
QualType T = region->getValueType();
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
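Context for the isNullPtrType checks added throughout this file: std::nullptr_t has exactly one value, so conjuring a fresh symbol for it would model spurious uncertainty; a concrete zero is both sound and more precise. A trivial illustration:

    #include <cstddef>
    #include <iostream>

    // std::nullptr_t is single-valued: every expression of that type
    // compares equal to nullptr, so modeling it as a concrete zero loses
    // no information.
    std::nullptr_t getNull() { return nullptr; }

    int main() {
      std::nullptr_t N = getNull();
      std::cout << (N == nullptr) << '\n';  // always 1; no symbol needed
    }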
@@ -112,6 +115,9 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
unsigned Count) {
QualType T = Ex->getType();
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
// Compute the type of the result. If the expression is not an R-value, the
// result should be a location.
QualType ExType = Ex->getType();
@@ -126,6 +132,9 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
const LocationContext *LCtx,
QualType type,
unsigned count) {
+ if (type->isNullPtrType())
+ return makeZeroVal(type);
+
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
@@ -142,14 +151,17 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
const LocationContext *LCtx,
QualType type,
unsigned visitCount) {
+ if (type->isNullPtrType())
+ return makeZeroVal(type);
+
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
SymbolRef sym = SymMgr.conjureSymbol(stmt, LCtx, type, visitCount);
-
+
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
+
return nonloc::SymbolVal(sym);
}
@@ -160,6 +172,8 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
QualType T = E->getType();
assert(Loc::isLocType(T));
assert(SymbolManager::canSymbolicate(T));
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount);
return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
@@ -185,6 +199,9 @@ SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
const TypedValueRegion *region) {
QualType T = region->getValueType();
+ if (T->isNullPtrType())
+ return makeZeroVal(T);
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
@@ -259,6 +276,11 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
case Stmt::CXXBoolLiteralExprClass:
return makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+ case Stmt::TypeTraitExprClass: {
+ const TypeTraitExpr *TE = cast<TypeTraitExpr>(E);
+ return makeTruthVal(TE->getValue(), TE->getType());
+ }
+
case Stmt::IntegerLiteralClass:
return makeIntVal(cast<IntegerLiteral>(E));
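The TypeTraitExpr case is safe to fold because a type trait's value is fixed at compile time; Sema already stores the evaluated result on the expression, so getConstantVal can hand it back as a truth value. Conceptually (library traits are typically implemented via compiler builtins that surface as TypeTraitExpr; the std:: wrapper below is just the familiar spelling):

    #include <iostream>
    #include <type_traits>

    int main() {
      constexpr bool B = std::is_integral<int>::value;
      static_assert(B, "known before any path-sensitive analysis runs");
      std::cout << B << '\n';  // prints 1
    }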
@@ -270,11 +292,17 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
case Stmt::ImplicitCastExprClass: {
const CastExpr *CE = cast<CastExpr>(E);
- if (CE->getCastKind() == CK_ArrayToPointerDecay) {
- Optional<SVal> ArrayVal = getConstantVal(CE->getSubExpr());
- if (!ArrayVal)
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_ArrayToPointerDecay:
+ case CK_BitCast: {
+ const Expr *SE = CE->getSubExpr();
+ Optional<SVal> Val = getConstantVal(SE);
+ if (!Val)
return None;
- return evalCast(*ArrayVal, CE->getType(), CE->getSubExpr()->getType());
+ return evalCast(*Val, CE->getType(), SE->getType());
+ }
}
// FALLTHROUGH
}
@@ -307,7 +335,7 @@ SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
QualType ResultTy) {
if (!State->isTainted(RHS) && !State->isTainted(LHS))
return UnknownVal();
-
+
const SymExpr *symLHS = LHS.getAsSymExpr();
const SymExpr *symRHS = RHS.getAsSymExpr();
// TODO: When the Max Complexity is reached, we should conjure a symbol
@@ -430,7 +458,7 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
Context.getPointerType(originalTy)))
return val;
-
+
// Check for casts from pointers to integers.
if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
return evalCastFromLoc(val.castAs<Loc>(), castTy);
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 35930e47f82a..4051242434ec 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -190,6 +190,42 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
} // end switch
}
+ProgramStateRef SimpleConstraintManager::assumeWithinInclusiveRange(
+ ProgramStateRef State, NonLoc Value, const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InRange) {
+
+ assert(From.isUnsigned() == To.isUnsigned() &&
+ From.getBitWidth() == To.getBitWidth() &&
+ "Values should have same types!");
+
+ if (!canReasonAbout(Value)) {
+ // Just add the constraint to the expression without trying to simplify.
+ SymbolRef Sym = Value.getAsSymExpr();
+ assert(Sym);
+ return assumeSymWithinInclusiveRange(State, Sym, From, To, InRange);
+ }
+
+ switch (Value.getSubKind()) {
+ default:
+ llvm_unreachable("'assumeWithinInclusiveRange' is not implemented"
+ "for this NonLoc");
+
+ case nonloc::LocAsIntegerKind:
+ case nonloc::SymbolValKind: {
+ if (SymbolRef Sym = Value.getAsSymbol())
+ return assumeSymWithinInclusiveRange(State, Sym, From, To, InRange);
+ return State;
+  }
+
+ case nonloc::ConcreteIntKind: {
+ const llvm::APSInt &IntVal = Value.castAs<nonloc::ConcreteInt>().getValue();
+ bool IsInRange = IntVal >= From && IntVal <= To;
+ bool isFeasible = (IsInRange == InRange);
+ return isFeasible ? State : nullptr;
+ }
+ } // end switch
+}
+
static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment) {
// Is it a "($sym+constant1)" expression?
if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(Sym)) {
@@ -262,6 +298,37 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
} // end switch
}
+ProgramStateRef
+SimpleConstraintManager::assumeSymWithinInclusiveRange(ProgramStateRef State,
+ SymbolRef Sym,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) {
+ // Get the type used for calculating wraparound.
+ BasicValueFactory &BVF = getBasicVals();
+ APSIntType WraparoundType = BVF.getAPSIntType(Sym->getType());
+
+ llvm::APSInt Adjustment = WraparoundType.getZeroValue();
+ SymbolRef AdjustedSym = Sym;
+ computeAdjustment(AdjustedSym, Adjustment);
+
+ // Convert the right-hand side integer as necessary.
+ APSIntType ComparisonType = std::max(WraparoundType, APSIntType(From));
+ llvm::APSInt ConvertedFrom = ComparisonType.convert(From);
+ llvm::APSInt ConvertedTo = ComparisonType.convert(To);
+
+ // Prefer unsigned comparisons.
+ if (ComparisonType.getBitWidth() == WraparoundType.getBitWidth() &&
+ ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
+ Adjustment.setIsSigned(false);
+
+ if (InRange)
+ return assumeSymbolWithinInclusiveRange(State, AdjustedSym, ConvertedFrom,
+ ConvertedTo, Adjustment);
+ return assumeSymbolOutOfInclusiveRange(State, AdjustedSym, ConvertedFrom,
+ ConvertedTo, Adjustment);
+}
+
} // end of namespace ento
} // end of namespace clang
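The adjustment step in assumeSymWithinInclusiveRange rewrites a constraint on $sym + C as one on $sym by shifting both bounds into the symbol's wraparound type, where the shifted interval may legitimately wrap. A small sketch of that arithmetic with 8-bit unsigned values:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Assume: sym + 15 is in [10, 20], in 8-bit unsigned arithmetic.
      uint8_t From = 10, To = 20, C = 15;
      uint8_t Lo = From - C;               // 251: wrapped lower bound
      uint8_t Hi = To - C;                 // 5
      std::cout << "sym in [" << +Lo << ", " << +Hi << "] (wrapped)\n";
      // Membership must respect the wrap: sym >= 251 or sym <= 5.
      uint8_t Sym = 253;                   // 253 + 15 == 12, inside [10, 20]
      bool In = (Lo <= Hi) ? (Sym >= Lo && Sym <= Hi)
                           : (Sym >= Lo || Sym <= Hi);
      std::cout << "sym=253 satisfies: " << In << '\n';  // 1
    }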
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index 135cd4ef8649..b26bc9486110 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -38,11 +38,24 @@ public:
ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
+ ProgramStateRef assumeWithinInclusiveRange(ProgramStateRef State,
+ NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) override;
+
ProgramStateRef assumeSymRel(ProgramStateRef state,
const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt& Int);
+ ProgramStateRef assumeSymWithinInclusiveRange(ProgramStateRef State,
+ SymbolRef Sym,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange);
+
+
protected:
//===------------------------------------------------------------------===//
@@ -75,6 +88,14 @@ protected:
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymbolWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymbolOutOfInclusiveRange(
+ ProgramStateRef state, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
//===------------------------------------------------------------------===//
// Internal implementation.
//===------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index b3cab87c8080..a704ce224554 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -638,7 +638,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// on the ABI).
// FIXME: we can probably do a comparison against other MemRegions, though.
// FIXME: is there a way to tell if two labels refer to the same location?
- return UnknownVal();
+ return UnknownVal();
case loc::ConcreteIntKind: {
// If one of the operands is a symbol and the other is a constant,
@@ -863,7 +863,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
// Special case: rhs is a zero constant.
if (rhs.isZeroConstant())
return lhs;
-
+
// We are dealing with pointer arithmetic.
// Handle pointer arithmetic on constant values.
@@ -880,7 +880,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
// Offset the increment by the pointer size.
llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
rightI *= Multiplicand;
-
+
// Compute the adjusted pointer.
switch (op) {
case BO_Add:
@@ -911,8 +911,9 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
elementType = elemReg->getElementType();
}
else if (isa<SubRegion>(region)) {
+ assert(op == BO_Add || op == BO_Sub);
+ index = (op == BO_Add) ? rhs : evalMinus(rhs);
superR = region;
- index = rhs;
if (resultTy->isAnyPointerType())
elementType = resultTy->getPointeeType();
}
@@ -922,7 +923,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
superR, getContext()));
}
}
- return UnknownVal();
+ return UnknownVal();
}
const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index 99ec1e704340..7cdb55a59782 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -52,7 +52,7 @@ StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
return StoreRef(store, *this);
}
-const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
QualType T) {
NonLoc idx = svalBuilder.makeZeroArrayIndex();
assert(!T.isNull());
@@ -366,22 +366,22 @@ SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
QualType castTy, bool performTestOnly) {
-
+
if (castTy.isNull() || V.isUnknownOrUndef())
return V;
-
+
ASTContext &Ctx = svalBuilder.getContext();
- if (performTestOnly) {
+ if (performTestOnly) {
// Automatically translate references to pointers.
QualType T = R->getValueType();
if (const ReferenceType *RT = T->getAs<ReferenceType>())
T = Ctx.getPointerType(RT->getPointeeType());
-
+
assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
return V;
}
-
+
return svalBuilder.dispatchCast(V, castTy);
}
@@ -424,7 +424,7 @@ SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
return getLValueFieldOrIvar(decl, base);
}
-SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal Base) {
// If the base is an unknown or undefined value, just return it back.
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index cca0461a4748..99b2e147cb49 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -391,6 +391,18 @@ void SymbolReaper::markLive(SymbolRef sym) {
void SymbolReaper::markLive(const MemRegion *region) {
RegionRoots.insert(region);
+ markElementIndicesLive(region);
+}
+
+void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
+ for (auto SR = dyn_cast<SubRegion>(region); SR;
+ SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
+ if (auto ER = dyn_cast<ElementRegion>(SR)) {
+ SVal Idx = ER->getIndex();
+ for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
+ markLive(*SI);
+ }
+ }
}
void SymbolReaper::markInUse(SymbolRef sym) {
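markElementIndicesLive exists so that a binding like buf[$i] keeps $i alive: it climbs the super-region chain and marks every symbol occurring in an element index. A toy model of the walk (C++17, with hypothetical stand-ins for the region and symbol types):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    struct Region {
      const Region *Super = nullptr;
      std::vector<std::string> IndexSymbols;  // non-empty => element region
    };

    void markElementIndicesLive(const Region *R, std::set<std::string> &Live) {
      for (; R; R = R->Super)                 // walk toward the base region
        for (const std::string &Sym : R->IndexSymbols)
          Live.insert(Sym);                   // index symbols stay live
    }

    int main() {
      Region Base;                            // e.g. the array itself
      Region Elem{&Base, {"$i"}};             // e.g. buf[$i]
      Region Field{&Elem, {}};                // e.g. buf[$i].member
      std::set<std::string> Live;
      markElementIndicesLive(&Field, Live);
      std::cout << Live.count("$i") << '\n';  // 1: index symbol survives
    }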
@@ -409,7 +421,7 @@ bool SymbolReaper::maybeDead(SymbolRef sym) {
bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
if (RegionRoots.count(MR))
return true;
-
+
MR = MR->getBaseRegion();
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
@@ -442,9 +454,9 @@ bool SymbolReaper::isLive(SymbolRef sym) {
markDependentsLive(sym);
return true;
}
-
+
bool KnownLive;
-
+
switch (sym->getKind()) {
case SymExpr::RegionValueKind:
KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
@@ -525,7 +537,7 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
if (!includeStoreBindings)
return false;
-
+
unsigned &cachedQuery =
const_cast<SymbolReaper*>(this)->includedRegionCache[VR];
@@ -535,16 +547,14 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
// Query the store to see if the region occurs in any live bindings.
if (Store store = reapedStore.getStore()) {
- bool hasRegion =
+ bool hasRegion =
reapedStore.getStoreManager().includedInBindings(store, VR);
cachedQuery = hasRegion ? 1 : 2;
return hasRegion;
}
-
+
return false;
}
return VarContext->isParentOf(CurrentContext);
}
-
-SymbolVisitor::~SymbolVisitor() {}
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index c957a654a84c..bf85c4ca0c60 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -14,7 +14,7 @@
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "ModelInjector.h"
#include "clang/AST/ASTConsumer.h"
-#include "clang/AST/DataRecursiveASTVisitor.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -141,7 +141,7 @@ public:
namespace {
class AnalysisConsumer : public AnalysisASTConsumer,
- public DataRecursiveASTVisitor<AnalysisConsumer> {
+ public RecursiveASTVisitor<AnalysisConsumer> {
enum {
AM_None = 0,
AM_Syntax = 0x1,
@@ -168,7 +168,7 @@ public:
/// The local declaration to all declarations ratio might be very small when
/// working with a PCH file.
SetOfDecls LocalTUDecls;
-
+
// Set of PathDiagnosticConsumers. Owned by AnalysisManager.
PathDiagnosticConsumers PathConsumers;
@@ -364,11 +364,15 @@ public:
}
return true;
}
-
+
bool VisitBlockDecl(BlockDecl *BD) {
if (BD->hasBody()) {
assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
- HandleCode(BD, RecVisitorMode);
+ // Since we skip function template definitions, we should skip blocks
+ // declared in those functions as well.
+ if (!BD->isDependentContext()) {
+ HandleCode(BD, RecVisitorMode);
+ }
}
return true;
}
@@ -475,7 +479,7 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
CallGraphNode *N = *I;
Decl *D = N->getDecl();
-
+
// Skip the abstract root node.
if (!D)
continue;
@@ -588,8 +592,8 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
// - Header files: run non-path-sensitive checks only.
// - System headers: don't run any checks.
SourceManager &SM = Ctx->getSourceManager();
- SourceLocation SL = D->hasBody() ? D->getBody()->getLocStart()
- : D->getLocation();
+ const Stmt *Body = D->getBody();
+ SourceLocation SL = Body ? Body->getLocStart() : D->getLocation();
SL = SM.getExpansionLoc(SL);
if (!Opts->AnalyzeAll && !SM.isWrittenInMainFile(SL)) {
@@ -679,11 +683,11 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
case LangOptions::NonGC:
ActionExprEngine(D, false, IMode, Visited);
break;
-
+
case LangOptions::GCOnly:
ActionExprEngine(D, true, IMode, Visited);
break;
-
+
case LangOptions::HybridGC:
ActionExprEngine(D, false, IMode, Visited);
ActionExprEngine(D, true, IMode, Visited);
@@ -778,8 +782,9 @@ void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
<< ", ('arrow','true'), ('oriented', 'true'))\n";
}
-UbigraphViz::UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename)
- : Out(std::move(Out)), Filename(Filename), Cntr(0) {
+UbigraphViz::UbigraphViz(std::unique_ptr<raw_ostream> OutStream,
+ StringRef Filename)
+ : Out(std::move(OutStream)), Filename(Filename), Cntr(0) {
*Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
*Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 7fced1e5c71a..75fa4c651ace 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -42,7 +42,7 @@ public:
ClangCheckerRegistry(ArrayRef<std::string> plugins,
DiagnosticsEngine *diags = nullptr);
};
-
+
} // end anonymous namespace
ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
@@ -52,7 +52,12 @@ ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
i != e; ++i) {
// Get access to the plugin.
- DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str());
+ std::string err;
+ DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str(), &err);
+ if (!lib.isValid()) {
+ diags->Report(diag::err_fe_unable_to_load_plugin) << *i << err;
+ continue;
+ }
// See if it's compatible with this build of clang.
const char *pluginAPIVersion =
@@ -78,10 +83,7 @@ bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
// For now, none of the static analyzer API is considered stable.
// Versions must match exactly.
- if (strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0)
- return true;
-
- return false;
+ return strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
}
void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
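The plugin-loading change above replaces a silent failure with a diagnostic. A hedged sketch of the same error-checked pattern in isolation (path and message wording are illustrative):

    #include "llvm/Support/DynamicLibrary.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    bool loadPluginOrWarn(const std::string &Path) {
      std::string Err;
      llvm::sys::DynamicLibrary Lib =
          llvm::sys::DynamicLibrary::getPermanentLibrary(Path.c_str(), &Err);
      if (!Lib.isValid()) {
        // Corresponds to the diags->Report(...) << *i << err; continue; above.
        llvm::errs() << "unable to load plugin '" << Path << "': " << Err << "\n";
        return false;
      }
      return true;
    }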
diff --git a/lib/StaticAnalyzer/Frontend/Makefile b/lib/StaticAnalyzer/Frontend/Makefile
index 2698120d9098..3f15988bfddb 100644
--- a/lib/StaticAnalyzer/Frontend/Makefile
+++ b/lib/StaticAnalyzer/Frontend/Makefile
@@ -1,10 +1,10 @@
##===- clang/lib/StaticAnalyzer/Frontend/Makefile ----------*- Makefile -*-===##
-#
+#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
-#
+#
##===----------------------------------------------------------------------===##
#
# Starting point into the static analyzer land for the driver.
diff --git a/lib/Tooling/ArgumentsAdjusters.cpp b/lib/Tooling/ArgumentsAdjusters.cpp
index 1722ede08a86..2f3d829d7d19 100644
--- a/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/lib/Tooling/ArgumentsAdjusters.cpp
@@ -13,15 +13,13 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/ArgumentsAdjusters.h"
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/StringRef.h"
namespace clang {
namespace tooling {
/// Add -fsyntax-only option to the command line arguments.
ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
- return [](const CommandLineArguments &Args) {
+ return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
for (size_t i = 0, e = Args.size(); i != e; ++i) {
StringRef Arg = Args[i];
@@ -36,7 +34,7 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
}
ArgumentsAdjuster getClangStripOutputAdjuster() {
- return [](const CommandLineArguments &Args) {
+ return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
for (size_t i = 0, e = Args.size(); i < e; ++i) {
StringRef Arg = Args[i];
@@ -55,7 +53,7 @@ ArgumentsAdjuster getClangStripOutputAdjuster() {
ArgumentsAdjuster getInsertArgumentAdjuster(const CommandLineArguments &Extra,
ArgumentInsertPosition Pos) {
- return [Extra, Pos](const CommandLineArguments &Args) {
+ return [Extra, Pos](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments Return(Args);
CommandLineArguments::iterator I;
@@ -78,8 +76,8 @@ ArgumentsAdjuster getInsertArgumentAdjuster(const char *Extra,
ArgumentsAdjuster combineAdjusters(ArgumentsAdjuster First,
ArgumentsAdjuster Second) {
- return [First, Second](const CommandLineArguments &Args) {
- return Second(First(Args));
+ return [First, Second](const CommandLineArguments &Args, StringRef File) {
+ return Second(First(Args, File), File);
};
}
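With the filename threaded through, an adjuster can now vary arguments per file. A minimal sketch assuming the post-patch ArgumentsAdjuster signature; the ".c"/"-std=c99" rule is purely illustrative:

    #include "clang/Tooling/ArgumentsAdjusters.h"
    #include "llvm/ADT/StringRef.h"

    clang::tooling::ArgumentsAdjuster getPerFileAdjuster() {
      return [](const clang::tooling::CommandLineArguments &Args,
                llvm::StringRef Filename) {
        clang::tooling::CommandLineArguments Adjusted(Args);
        if (Filename.endswith(".c"))  // hypothetical per-file rule
          Adjusted.push_back("-std=c99");
        return Adjusted;
      };
    }
    // Composition forwards the filename to both halves, as in
    // combineAdjusters above:
    //   auto A = combineAdjusters(getClangStripOutputAdjuster(),
    //                             getPerFileAdjuster());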
diff --git a/lib/Tooling/CommonOptionsParser.cpp b/lib/Tooling/CommonOptionsParser.cpp
index adae1781f450..82f560140085 100644
--- a/lib/Tooling/CommonOptionsParser.cpp
+++ b/lib/Tooling/CommonOptionsParser.cpp
@@ -86,22 +86,22 @@ private:
adjustCommands(std::vector<CompileCommand> Commands) const {
for (CompileCommand &Command : Commands)
for (const auto &Adjuster : Adjusters)
- Command.CommandLine = Adjuster(Command.CommandLine);
+ Command.CommandLine = Adjuster(Command.CommandLine, Command.Filename);
return Commands;
}
};
} // namespace
-CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
- cl::OptionCategory &Category,
- const char *Overview) {
+CommonOptionsParser::CommonOptionsParser(
+ int &argc, const char **argv, cl::OptionCategory &Category,
+ llvm::cl::NumOccurrencesFlag OccurrencesFlag, const char *Overview) {
static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
static cl::opt<std::string> BuildPath("p", cl::desc("Build path"),
cl::Optional, cl::cat(Category));
static cl::list<std::string> SourcePaths(
- cl::Positional, cl::desc("<source0> [... <sourceN>]"), cl::OneOrMore,
+ cl::Positional, cl::desc("<source0> [... <sourceN>]"), OccurrencesFlag,
cl::cat(Category));
static cl::list<std::string> ArgsAfter(
@@ -116,10 +116,12 @@ CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
cl::HideUnrelatedOptions(Category);
- Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc,
- argv));
+ Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv));
cl::ParseCommandLineOptions(argc, argv, Overview);
SourcePathList = SourcePaths;
+ if ((OccurrencesFlag == cl::ZeroOrMore || OccurrencesFlag == cl::Optional) &&
+ SourcePathList.empty())
+ return;
if (!Compilations) {
std::string ErrorMessage;
if (!BuildPath.empty()) {
@@ -129,8 +131,12 @@ CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv,
Compilations = CompilationDatabase::autoDetectFromSource(SourcePaths[0],
ErrorMessage);
}
- if (!Compilations)
- llvm::report_fatal_error(ErrorMessage);
+ if (!Compilations) {
+ llvm::errs() << "Error while trying to load a compilation database:\n"
+ << ErrorMessage << "Running without flags.\n";
+ Compilations.reset(
+ new FixedCompilationDatabase(".", std::vector<std::string>()));
+ }
}
auto AdjustingCompilations =
llvm::make_unique<ArgumentsAdjustingCompilations>(
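A sketch of how a tool opts into the relaxed behavior added above: passing cl::ZeroOrMore lets the parser return early with no source files, and a missing compilation database now degrades to a fixed, flag-less one instead of aborting. The category and overview strings are illustrative:

    #include "clang/Tooling/CommonOptionsParser.h"
    #include "llvm/Support/CommandLine.h"

    static llvm::cl::OptionCategory MyToolCategory("my-tool options");

    int main(int argc, const char **argv) {
      clang::tooling::CommonOptionsParser Options(
          argc, argv, MyToolCategory, llvm::cl::ZeroOrMore, "my-tool\n");
      // Options.getSourcePathList() may now legitimately be empty.
      return 0;
    }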
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index 2272be632b9e..957e40137eac 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -299,13 +299,15 @@ FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
std::vector<std::string> ToolCommandLine(1, "clang-tool");
ToolCommandLine.insert(ToolCommandLine.end(),
CommandLine.begin(), CommandLine.end());
- CompileCommands.emplace_back(Directory, std::move(ToolCommandLine));
+ CompileCommands.emplace_back(Directory, StringRef(),
+ std::move(ToolCommandLine));
}
std::vector<CompileCommand>
FixedCompilationDatabase::getCompileCommands(StringRef FilePath) const {
std::vector<CompileCommand> Result(CompileCommands);
Result[0].CommandLine.push_back(FilePath);
+ Result[0].Filename = FilePath;
return Result;
}
@@ -325,7 +327,7 @@ namespace tooling {
// This anchor is used to force the linker to link in the generated object file
// and thus register the JSONCompilationDatabasePlugin.
extern volatile int JSONAnchorSource;
-static int JSONAnchorDest = JSONAnchorSource;
+static int LLVM_ATTRIBUTE_UNUSED JSONAnchorDest = JSONAnchorSource;
} // end namespace tooling
} // end namespace clang
diff --git a/lib/Tooling/Core/CMakeLists.txt b/lib/Tooling/Core/CMakeLists.txt
index c8c75f95f3cb..b88e1f8333a2 100644
--- a/lib/Tooling/Core/CMakeLists.txt
+++ b/lib/Tooling/Core/CMakeLists.txt
@@ -1,9 +1,11 @@
set(LLVM_LINK_COMPONENTS support)
add_clang_library(clangToolingCore
+ Lookup.cpp
Replacement.cpp
LINK_LIBS
+ clangAST
clangBasic
clangLex
clangRewrite
diff --git a/lib/Tooling/Core/Lookup.cpp b/lib/Tooling/Core/Lookup.cpp
new file mode 100644
index 000000000000..697eeb46ce41
--- /dev/null
+++ b/lib/Tooling/Core/Lookup.cpp
@@ -0,0 +1,113 @@
+//===--- Lookup.cpp - Framework for clang refactoring tools ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper methods for clang tools performing name lookup.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Core/Lookup.h"
+#include "clang/AST/Decl.h"
+using namespace clang;
+using namespace clang::tooling;
+
+static bool isInsideDifferentNamespaceWithSameName(const DeclContext *DeclA,
+ const DeclContext *DeclB) {
+ while (true) {
+ // Look past non-namespaces on DeclA.
+ while (DeclA && !isa<NamespaceDecl>(DeclA))
+ DeclA = DeclA->getParent();
+
+ // Look past non-namespaces on DeclB.
+ while (DeclB && !isa<NamespaceDecl>(DeclB))
+ DeclB = DeclB->getParent();
+
+ // We hit the root, no namespace collision.
+ if (!DeclA || !DeclB)
+ return false;
+
+ // Literally the same namespace, not a collision.
+ if (DeclA == DeclB)
+ return false;
+
+ // Now check the names. If they match, we have a different namespace with the
+ // same name.
+ if (cast<NamespaceDecl>(DeclA)->getDeclName() ==
+ cast<NamespaceDecl>(DeclB)->getDeclName())
+ return true;
+
+ DeclA = DeclA->getParent();
+ DeclB = DeclB->getParent();
+ }
+}
+
+static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
+ StringRef NewName,
+ bool HadLeadingColonColon) {
+ while (true) {
+ while (DeclA && !isa<NamespaceDecl>(DeclA))
+ DeclA = DeclA->getParent();
+
+ // Fully qualified it is! Leave :: in place if it's there already.
+ if (!DeclA)
+ return HadLeadingColonColon ? NewName : NewName.substr(2);
+
+ // Otherwise strip off redundant namespace qualifications from the new name.
+ // We use the fully qualified name of the namespace and remove that part
+ // from NewName if it has an identical prefix.
+ std::string NS =
+ "::" + cast<NamespaceDecl>(DeclA)->getQualifiedNameAsString() + "::";
+ if (NewName.startswith(NS))
+ return NewName.substr(NS.size());
+
+ // No match yet. Strip off a namespace from the end of the chain and try
+ // again. This yields optimal qualifications even if the old and new
+ // decl only share common namespaces at a higher level.
+ DeclA = DeclA->getParent();
+ }
+}
+
+/// Check if the name specifier begins with a written "::".
+static bool isFullyQualified(const NestedNameSpecifier *NNS) {
+ while (NNS) {
+ if (NNS->getKind() == NestedNameSpecifier::Global)
+ return true;
+ NNS = NNS->getPrefix();
+ }
+ return false;
+}
+
+std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
+ const DeclContext *UseContext,
+ const NamedDecl *FromDecl,
+ StringRef ReplacementString) {
+ assert(ReplacementString.startswith("::") &&
+ "Expected fully-qualified name!");
+
+ // We can do a raw name replacement when we are not inside the namespace of
+ // the original function and it is not in the global namespace. The
+ // assumption is that outside the original namespace we must have a using
+ // statement that makes this work out, and that other parts of this refactor
+ // will automatically fix using statements to point to the new function.
+ const bool class_name_only = !Use;
+ const bool in_global_namespace =
+ isa<TranslationUnitDecl>(FromDecl->getDeclContext());
+ if (class_name_only && !in_global_namespace &&
+ !isInsideDifferentNamespaceWithSameName(FromDecl->getDeclContext(),
+ UseContext)) {
+ auto Pos = ReplacementString.rfind("::");
+ return Pos != StringRef::npos ? ReplacementString.substr(Pos + 2)
+ : ReplacementString;
+ }
+ // We did not match this because of a using statement, so we will need to
+ // figure out how good a namespace match we have with our destination type.
+ // We work backwards (from most specific possible namespace to least
+ // specific).
+ return getBestNamespaceSubstr(UseContext, ReplacementString,
+ isFullyQualified(Use));
+}
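A worked example of replaceNestedName, traced against the logic above; all names are hypothetical:

    // Renaming ::a::b::foo to ::a::x::foo at a call written as "b::foo"
    // inside namespace a:
    //   Use               = the written "b::" specifier (non-null)
    //   UseContext        = namespace a
    //   FromDecl          = ::a::b::foo
    //   ReplacementString = "::a::x::foo"
    // getBestNamespaceSubstr strips the shared "::a::" prefix, so the call
    // becomes "x::foo". At global scope nothing is shared and the result is
    // "a::x::foo" (the leading "::" survives only if the original use
    // spelled it).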
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index 6d37a49db381..47bbdeb470ee 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -113,15 +113,7 @@ void Replacement::setFromSourceLocation(const SourceManager &Sources,
const std::pair<FileID, unsigned> DecomposedLocation =
Sources.getDecomposedLoc(Start);
const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
- if (Entry) {
- // Make FilePath absolute so replacements can be applied correctly when
- // relative paths for files are used.
- llvm::SmallString<256> FilePath(Entry->getName());
- std::error_code EC = llvm::sys::fs::make_absolute(FilePath);
- this->FilePath = EC ? FilePath.c_str() : Entry->getName();
- } else {
- this->FilePath = InvalidLocation;
- }
+ this->FilePath = Entry ? Entry->getName() : InvalidLocation;
this->ReplacementRange = Range(DecomposedLocation.second, Length);
this->ReplacementText = ReplacementText;
}
@@ -151,34 +143,32 @@ void Replacement::setFromSourceRange(const SourceManager &Sources,
ReplacementText);
}
-unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
- unsigned NewPosition = Position;
- for (Replacements::iterator I = Replaces.begin(), E = Replaces.end(); I != E;
- ++I) {
- if (I->getOffset() >= Position)
- break;
- if (I->getOffset() + I->getLength() > Position)
- NewPosition += I->getOffset() + I->getLength() - Position;
- NewPosition += I->getReplacementText().size() - I->getLength();
+template <typename T>
+unsigned shiftedCodePositionInternal(const T &Replaces, unsigned Position) {
+ unsigned Offset = 0;
+ for (const auto& R : Replaces) {
+ if (R.getOffset() + R.getLength() <= Position) {
+ Offset += R.getReplacementText().size() - R.getLength();
+ continue;
+ }
+ if (R.getOffset() < Position &&
+ R.getOffset() + R.getReplacementText().size() <= Position) {
+ Position = R.getOffset() + R.getReplacementText().size() - 1;
+ }
+ break;
}
- return NewPosition;
+ return Position + Offset;
+}
+
+unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
+ return shiftedCodePositionInternal(Replaces, Position);
}
// FIXME: Remove this function when Replacements is implemented as std::vector
// instead of std::set.
unsigned shiftedCodePosition(const std::vector<Replacement> &Replaces,
unsigned Position) {
- unsigned NewPosition = Position;
- for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->getOffset() >= Position)
- break;
- if (I->getOffset() + I->getLength() > Position)
- NewPosition += I->getOffset() + I->getLength() - Position;
- NewPosition += I->getReplacementText().size() - I->getLength();
- }
- return NewPosition;
+ return shiftedCodePositionInternal(Replaces, Position);
}
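A worked example of the rewritten position shifting, under the convention that a position past a replacement moves by the net size change while a position inside the replaced range stays put:

    // Replaces = { Replacement("f", 0, 2, "xxxx") }  -- 2 chars become 4.
    //   shiftedCodePosition(Replaces, 5) == 7  // past the edit: shifted by +2
    //   shiftedCodePosition(Replaces, 1) == 1  // inside the edit: unchanged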
void deduplicate(std::vector<Replacement> &Replaces,
@@ -265,19 +255,18 @@ bool applyAllReplacements(const std::vector<Replacement> &Replaces,
}
std::string applyAllReplacements(StringRef Code, const Replacements &Replaces) {
- FileManager Files((FileSystemOptions()));
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
new DiagnosticOptions);
SourceManager SourceMgr(Diagnostics, Files);
Rewriter Rewrite(SourceMgr, LangOptions());
- std::unique_ptr<llvm::MemoryBuffer> Buf =
- llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>");
- const clang::FileEntry *Entry =
- Files.getVirtualFile("<stdin>", Buf->getBufferSize(), 0);
- SourceMgr.overrideFileContents(Entry, std::move(Buf));
- FileID ID =
- SourceMgr.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);
+ InMemoryFileSystem->addFile(
+ "<stdin>", 0, llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>"));
+ FileID ID = SourceMgr.createFileID(Files.getFile("<stdin>"), SourceLocation(),
+ clang::SrcMgr::C_User);
for (Replacements::const_iterator I = Replaces.begin(), E = Replaces.end();
I != E; ++I) {
Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
@@ -292,6 +281,139 @@ std::string applyAllReplacements(StringRef Code, const Replacements &Replaces) {
return Result;
}
+namespace {
+// Represents a merged replacement, i.e. a replacement consisting of multiple
+// overlapping replacements from 'First' and 'Second' in mergeReplacements.
+//
+// Position projection:
+// Offsets and lengths of the replacements can generally refer to two different
+// coordinate spaces. Replacements from 'First' refer to the original text
+// whereas replacements from 'Second' refer to the text after applying 'First'.
+//
+// MergedReplacement always operates in the coordinate space of the original
+// text, i.e. transforms elements from 'Second' to take into account what was
+// changed based on the elements from 'First'.
+//
+// We can correctly calculate this projection as we look at the replacements in
+// order of strictly increasing offsets.
+//
+// Invariants:
+// * We always merge elements from 'First' into elements from 'Second' and vice
+// versa. Within each set, the replacements are non-overlapping.
+// * We only extend to the right, i.e. merge elements with strictly increasing
+// offsets.
+class MergedReplacement {
+public:
+ MergedReplacement(const Replacement &R, bool MergeSecond, int D)
+ : MergeSecond(MergeSecond), Delta(D), FilePath(R.getFilePath()),
+ Offset(R.getOffset() + (MergeSecond ? 0 : Delta)), Length(R.getLength()),
+ Text(R.getReplacementText()) {
+ Delta += MergeSecond ? 0 : Text.size() - Length;
+ DeltaFirst = MergeSecond ? Text.size() - Length : 0;
+ }
+
+ // Merges the next element 'R' into this merged element. As we always merge
+ // from 'First' into 'Second' or vice versa, the MergedReplacement knows what
+ // set the next element is coming from.
+ void merge(const Replacement &R) {
+ if (MergeSecond) {
+ unsigned REnd = R.getOffset() + Delta + R.getLength();
+ unsigned End = Offset + Text.size();
+ if (REnd > End) {
+ Length += REnd - End;
+ MergeSecond = false;
+ }
+ StringRef TextRef = Text;
+ StringRef Head = TextRef.substr(0, R.getOffset() + Delta - Offset);
+ StringRef Tail = TextRef.substr(REnd - Offset);
+ Text = (Head + R.getReplacementText() + Tail).str();
+ Delta += R.getReplacementText().size() - R.getLength();
+ } else {
+ unsigned End = Offset + Length;
+ StringRef RText = R.getReplacementText();
+ StringRef Tail = RText.substr(End - R.getOffset());
+ Text = (Text + Tail).str();
+ if (R.getOffset() + RText.size() > End) {
+ Length = R.getOffset() + R.getLength() - Offset;
+ MergeSecond = true;
+ } else {
+ Length += R.getLength() - RText.size();
+ }
+ DeltaFirst += RText.size() - R.getLength();
+ }
+ }
+
+ // Returns 'true' if 'R' starts strictly after the MergedReplacement and thus
+ // doesn't need to be merged.
+ bool endsBefore(const Replacement &R) const {
+ if (MergeSecond)
+ return Offset + Text.size() < R.getOffset() + Delta;
+ return Offset + Length < R.getOffset();
+ }
+
+ // Returns 'true' if an element from the second set should be merged next.
+ bool mergeSecond() const { return MergeSecond; }
+ int deltaFirst() const { return DeltaFirst; }
+ Replacement asReplacement() const { return {FilePath, Offset, Length, Text}; }
+
+private:
+ bool MergeSecond;
+
+ // Amount of characters that elements from 'Second' need to be shifted by in
+ // order to refer to the original text.
+ int Delta;
+
+ // Sum of all deltas (text-length - length) of elements from 'First' merged
+ // into this element. This is used to update 'Delta' once the
+ // MergedReplacement is completed.
+ int DeltaFirst;
+
+ // Data of the actually merged replacement. FilePath and Offset aren't changed
+ // as the element is only extended to the right.
+ const StringRef FilePath;
+ const unsigned Offset;
+ unsigned Length;
+ std::string Text;
+};
+} // namespace
+
+Replacements mergeReplacements(const Replacements &First,
+ const Replacements &Second) {
+ if (First.empty() || Second.empty())
+ return First.empty() ? Second : First;
+
+ // Delta is the amount of characters that replacements from 'Second' need to
+ // be shifted so that their offsets refer to the original text.
+ int Delta = 0;
+ Replacements Result;
+
+ // Iterate over both sets and always add the next element (smallest total
+ // Offset) from either 'First' or 'Second'. Merge that element with
+ // subsequent replacements as long as they overlap. See more details in the
+ // comment on MergedReplacement.
+ for (auto FirstI = First.begin(), SecondI = Second.begin();
+ FirstI != First.end() || SecondI != Second.end();) {
+ bool NextIsFirst = SecondI == Second.end() ||
+ (FirstI != First.end() &&
+ FirstI->getOffset() < SecondI->getOffset() + Delta);
+ MergedReplacement Merged(NextIsFirst ? *FirstI : *SecondI, NextIsFirst,
+ Delta);
+ ++(NextIsFirst ? FirstI : SecondI);
+
+ while ((Merged.mergeSecond() && SecondI != Second.end()) ||
+ (!Merged.mergeSecond() && FirstI != First.end())) {
+ auto &I = Merged.mergeSecond() ? SecondI : FirstI;
+ if (Merged.endsBefore(*I))
+ break;
+ Merged.merge(*I);
+ ++I;
+ }
+ Delta -= Merged.deltaFirst();
+ Result.insert(Merged.asReplacement());
+ }
+ return Result;
+}
+
} // end namespace tooling
} // end namespace clang
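A minimal sketch of mergeReplacements semantics on a hypothetical file "f" containing "aaa"; recall from the comment block above that Second's offsets refer to the text after applying First:

    #include "clang/Tooling/Core/Replacement.h"
    using namespace clang::tooling;

    Replacements mergeExample() {
      Replacements First, Second;
      First.insert(Replacement("f", 0, 1, "bb"));  // "aaa"  -> "bbaa"
      Second.insert(Replacement("f", 1, 2, "c"));  // "bbaa" -> "bca"
      // The merged set refers to the original text and applies in one pass:
      //   { Replacement("f", 0, 2, "bc") }, i.e. "aaa" -> "bca".
      return mergeReplacements(First, Second);
    }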
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index 454a2ffd9587..299fbdc149bf 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -206,24 +206,33 @@ JSONCompilationDatabase::getAllFiles() const {
std::vector<CompileCommand>
JSONCompilationDatabase::getAllCompileCommands() const {
std::vector<CompileCommand> Commands;
- for (llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.begin(), CommandsRefEnd = IndexByFile.end();
- CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
- getCommands(CommandsRefI->getValue(), Commands);
- }
+ getCommands(AllCommands, Commands);
return Commands;
}
+static std::vector<std::string>
+nodeToCommandLine(const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
+ SmallString<1024> Storage;
+ if (Nodes.size() == 1) {
+ return unescapeCommandLine(Nodes[0]->getValue(Storage));
+ }
+ std::vector<std::string> Arguments;
+ for (auto *Node : Nodes) {
+ Arguments.push_back(Node->getValue(Storage));
+ }
+ return Arguments;
+}
+
void JSONCompilationDatabase::getCommands(
- ArrayRef<CompileCommandRef> CommandsRef,
- std::vector<CompileCommand> &Commands) const {
+ ArrayRef<CompileCommandRef> CommandsRef,
+ std::vector<CompileCommand> &Commands) const {
for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
SmallString<8> DirectoryStorage;
- SmallString<1024> CommandStorage;
+ SmallString<32> FilenameStorage;
Commands.emplace_back(
- // FIXME: Escape correctly:
- CommandsRef[I].first->getValue(DirectoryStorage),
- unescapeCommandLine(CommandsRef[I].second->getValue(CommandStorage)));
+ std::get<0>(CommandsRef[I])->getValue(DirectoryStorage),
+ std::get<1>(CommandsRef[I])->getValue(FilenameStorage),
+ nodeToCommandLine(std::get<2>(CommandsRef[I])));
}
}
@@ -243,43 +252,56 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
ErrorMessage = "Expected array.";
return false;
}
- for (llvm::yaml::SequenceNode::iterator AI = Array->begin(),
- AE = Array->end();
- AI != AE; ++AI) {
- llvm::yaml::MappingNode *Object = dyn_cast<llvm::yaml::MappingNode>(&*AI);
+ for (auto& NextObject : *Array) {
+ llvm::yaml::MappingNode *Object = dyn_cast<llvm::yaml::MappingNode>(&NextObject);
if (!Object) {
ErrorMessage = "Expected object.";
return false;
}
llvm::yaml::ScalarNode *Directory = nullptr;
- llvm::yaml::ScalarNode *Command = nullptr;
+ llvm::Optional<std::vector<llvm::yaml::ScalarNode *>> Command;
llvm::yaml::ScalarNode *File = nullptr;
- for (llvm::yaml::MappingNode::iterator KVI = Object->begin(),
- KVE = Object->end();
- KVI != KVE; ++KVI) {
- llvm::yaml::Node *Value = (*KVI).getValue();
+ for (auto& NextKeyValue : *Object) {
+ llvm::yaml::ScalarNode *KeyString =
+ dyn_cast<llvm::yaml::ScalarNode>(NextKeyValue.getKey());
+ if (!KeyString) {
+ ErrorMessage = "Expected strings as key.";
+ return false;
+ }
+ SmallString<10> KeyStorage;
+ StringRef KeyValue = KeyString->getValue(KeyStorage);
+ llvm::yaml::Node *Value = NextKeyValue.getValue();
if (!Value) {
ErrorMessage = "Expected value.";
return false;
}
llvm::yaml::ScalarNode *ValueString =
dyn_cast<llvm::yaml::ScalarNode>(Value);
- if (!ValueString) {
- ErrorMessage = "Expected string as value.";
+ llvm::yaml::SequenceNode *SequenceString =
+ dyn_cast<llvm::yaml::SequenceNode>(Value);
+ if (KeyValue == "arguments" && !SequenceString) {
+ ErrorMessage = "Expected sequence as value.";
return false;
- }
- llvm::yaml::ScalarNode *KeyString =
- dyn_cast<llvm::yaml::ScalarNode>((*KVI).getKey());
- if (!KeyString) {
- ErrorMessage = "Expected strings as key.";
+ } else if (KeyValue != "arguments" && !ValueString) {
+ ErrorMessage = "Expected string as value.";
return false;
}
- SmallString<8> KeyStorage;
- if (KeyString->getValue(KeyStorage) == "directory") {
+ if (KeyValue == "directory") {
Directory = ValueString;
- } else if (KeyString->getValue(KeyStorage) == "command") {
- Command = ValueString;
- } else if (KeyString->getValue(KeyStorage) == "file") {
+ } else if (KeyValue == "arguments") {
+ Command = std::vector<llvm::yaml::ScalarNode *>();
+ for (auto &Argument : *SequenceString) {
+ auto Scalar = dyn_cast<llvm::yaml::ScalarNode>(&Argument);
+ if (!Scalar) {
+ ErrorMessage = "Only strings are allowed in 'arguments'.";
+ return false;
+ }
+ Command->push_back(Scalar);
+ }
+ } else if (KeyValue == "command") {
+ if (!Command)
+ Command = std::vector<llvm::yaml::ScalarNode *>(1, ValueString);
+ } else if (KeyValue == "file") {
File = ValueString;
} else {
ErrorMessage = ("Unknown key: \"" +
@@ -292,7 +314,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
return false;
}
if (!Command) {
- ErrorMessage = "Missing key: \"command\".";
+ ErrorMessage = "Missing key: \"command\" or \"arguments\".";
return false;
}
if (!Directory) {
@@ -311,8 +333,9 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
} else {
llvm::sys::path::native(FileName, NativeFilePath);
}
- IndexByFile[NativeFilePath].push_back(
- CompileCommandRef(Directory, Command));
+ auto Cmd = CompileCommandRef(Directory, File, *Command);
+ IndexByFile[NativeFilePath].push_back(Cmd);
+ AllCommands.push_back(Cmd);
MatchTrie.insert(NativeFilePath);
}
return true;
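For reference, a sketch of a compile_commands.json entry exercising the newly supported "arguments" key (paths are illustrative). "command" remains accepted, and "arguments" takes precedence when both appear:

    [
      {
        "directory": "/build",
        "arguments": ["clang++", "-Iinclude", "-c", "../src/tool.cpp"],
        "file": "../src/tool.cpp"
      }
    ]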
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index f9cb7c641344..fd5596ec2ded 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -17,6 +17,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -31,13 +32,6 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/raw_ostream.h"
-// For chdir, see the comment in ClangTool::run for more information.
-#ifdef LLVM_ON_WIN32
-# include <direct.h>
-#else
-# include <unistd.h>
-#endif
-
#define DEBUG_TYPE "clang-tooling"
namespace clang {
@@ -52,10 +46,11 @@ FrontendActionFactory::~FrontendActionFactory() {}
// it to be based on the same framework.
/// \brief Builds a clang driver initialized for running clang tools.
-static clang::driver::Driver *newDriver(clang::DiagnosticsEngine *Diagnostics,
- const char *BinaryName) {
+static clang::driver::Driver *newDriver(
+ clang::DiagnosticsEngine *Diagnostics, const char *BinaryName,
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
- BinaryName, llvm::sys::getDefaultTargetTriple(), *Diagnostics);
+ BinaryName, llvm::sys::getDefaultTargetTriple(), *Diagnostics, VFS);
CompilerDriver->setTitle("clang_based_tool");
return CompilerDriver;
}
@@ -130,18 +125,25 @@ bool runToolOnCodeWithArgs(
SmallString<16> FileNameStorage;
StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
+ llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
+ new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
- new FileManager(FileSystemOptions()));
+ new FileManager(FileSystemOptions(), OverlayFileSystem));
ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef),
ToolAction, Files.get(), PCHContainerOps);
SmallString<1024> CodeStorage;
- Invocation.mapVirtualFile(FileNameRef,
- Code.toNullTerminatedStringRef(CodeStorage));
+ InMemoryFileSystem->addFile(FileNameRef, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ Code.toNullTerminatedStringRef(CodeStorage)));
for (auto &FilenameWithContent : VirtualMappedFiles) {
- Invocation.mapVirtualFile(FilenameWithContent.first,
- FilenameWithContent.second);
+ InMemoryFileSystem->addFile(
+ FilenameWithContent.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(FilenameWithContent.second));
}
return Invocation.run();
@@ -162,6 +164,31 @@ std::string getAbsolutePath(StringRef File) {
return AbsolutePath.str();
}
+void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
+ StringRef InvokedAs) {
+ if (!CommandLine.empty() && !InvokedAs.empty()) {
+ bool AlreadyHasTarget = false;
+ bool AlreadyHasMode = false;
+ // Skip CommandLine[0].
+ for (auto Token = ++CommandLine.begin(); Token != CommandLine.end();
+ ++Token) {
+ StringRef TokenRef(*Token);
+ AlreadyHasTarget |=
+ (TokenRef == "-target" || TokenRef.startswith("-target="));
+ AlreadyHasMode |= (TokenRef == "--driver-mode" ||
+ TokenRef.startswith("--driver-mode="));
+ }
+ auto TargetMode =
+ clang::driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
+ if (!AlreadyHasMode && !TargetMode.second.empty()) {
+ CommandLine.insert(++CommandLine.begin(), TargetMode.second);
+ }
+ if (!AlreadyHasTarget && !TargetMode.first.empty()) {
+ CommandLine.insert(++CommandLine.begin(), {"-target", TargetMode.first});
+ }
+ }
+}
+
namespace {
class SingleFrontendActionFactory : public FrontendActionFactory {
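A hedged sketch of addTargetAndModeForProgramName in use; the invoked-as name and the resulting tokens are illustrative of what ToolChain::getTargetAndModeFromProgramName typically derives:

    #include "clang/Tooling/Tooling.h"
    #include <string>
    #include <vector>

    void demo() {
      std::vector<std::string> Args = {"clang-tool", "foo.cpp"};
      clang::tooling::addTargetAndModeForProgramName(
          Args, "x86_64-linux-gnu-clang++");
      // Args is now roughly: {"clang-tool", "-target", "x86_64-linux-gnu",
      //                       "--driver-mode=g++", "foo.cpp"};
      // pre-existing -target / --driver-mode tokens suppress the insertion.
    }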
@@ -212,7 +239,7 @@ bool ToolInvocation::run() {
DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
const std::unique_ptr<clang::driver::Driver> Driver(
- newDriver(&Diagnostics, BinaryName));
+ newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
// Since the input might only be virtual, don't check whether it exists.
Driver->setCheckInputsExist(false);
const std::unique_ptr<clang::driver::Compilation> Compilation(
@@ -224,6 +251,7 @@ bool ToolInvocation::run() {
}
std::unique_ptr<clang::CompilerInvocation> Invocation(
newInvocation(&Diagnostics, *CC1Args));
+ // FIXME: remove this when all users have migrated!
for (const auto &It : MappedFileContents) {
// Inject the code as the given file name into the preprocessor options.
std::unique_ptr<llvm::MemoryBuffer> Input =
@@ -282,7 +310,11 @@ ClangTool::ClangTool(const CompilationDatabase &Compilations,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(PCHContainerOps),
- Files(new FileManager(FileSystemOptions())), DiagConsumer(nullptr) {
+ OverlayFileSystem(new vfs::OverlayFileSystem(vfs::getRealFileSystem())),
+ InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Files(new FileManager(FileSystemOptions(), OverlayFileSystem)),
+ DiagConsumer(nullptr) {
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
appendArgumentsAdjuster(getClangStripOutputAdjuster());
appendArgumentsAdjuster(getClangSyntaxOnlyAdjuster());
}
@@ -320,6 +352,16 @@ int ClangTool::run(ToolAction *Action) {
if (std::error_code EC = llvm::sys::fs::current_path(InitialDirectory))
llvm::report_fatal_error("Cannot detect current path: " +
Twine(EC.message()));
+
+ // First insert all absolute paths into the in-memory VFS. These are global
+ // for all compile commands.
+ if (SeenWorkingDirectories.insert("/").second)
+ for (const auto &MappedFile : MappedFileContents)
+ if (llvm::sys::path::is_absolute(MappedFile.first))
+ InMemoryFileSystem->addFile(
+ MappedFile.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(MappedFile.second));
+
bool ProcessingFailed = false;
for (const auto &SourcePath : SourcePaths) {
std::string File(getAbsolutePath(SourcePath));
@@ -350,12 +392,24 @@ int ClangTool::run(ToolAction *Action) {
// difference for example on network filesystems, where symlinks might be
// switched during runtime of the tool. Fixing this depends on having a
// file system abstraction that allows openat() style interactions.
- if (chdir(CompileCommand.Directory.c_str()))
+ if (OverlayFileSystem->setCurrentWorkingDirectory(
+ CompileCommand.Directory))
llvm::report_fatal_error("Cannot chdir into \"" +
Twine(CompileCommand.Directory) + "\n!");
+
+ // Now fill the in-memory VFS with the relative file mappings so it will
+ // have the correct relative paths. We never remove mappings but that
+ // should be fine.
+ if (SeenWorkingDirectories.insert(CompileCommand.Directory).second)
+ for (const auto &MappedFile : MappedFileContents)
+ if (!llvm::sys::path::is_absolute(MappedFile.first))
+ InMemoryFileSystem->addFile(
+ MappedFile.first, 0,
+ llvm::MemoryBuffer::getMemBuffer(MappedFile.second));
+
std::vector<std::string> CommandLine = CompileCommand.CommandLine;
if (ArgsAdjuster)
- CommandLine = ArgsAdjuster(CommandLine);
+ CommandLine = ArgsAdjuster(CommandLine, CompileCommand.Filename);
assert(!CommandLine.empty());
CommandLine[0] = MainExecutable;
// FIXME: We need a callback mechanism for the tool writer to output a
@@ -364,8 +418,7 @@ int ClangTool::run(ToolAction *Action) {
ToolInvocation Invocation(std::move(CommandLine), Action, Files.get(),
PCHContainerOps);
Invocation.setDiagnosticConsumer(DiagConsumer);
- for (const auto &MappedFile : MappedFileContents)
- Invocation.mapVirtualFile(MappedFile.first, MappedFile.second);
+
if (!Invocation.run()) {
// FIXME: Diagnostics should be used instead.
llvm::errs() << "Error while processing " << File << ".\n";
@@ -373,7 +426,7 @@ int ClangTool::run(ToolAction *Action) {
}
// Return to the initial directory to correctly resolve next file by
// relative path.
- if (chdir(InitialDirectory.c_str()))
+ if (OverlayFileSystem->setCurrentWorkingDirectory(InitialDirectory.c_str()))
llvm::report_fatal_error("Cannot chdir into \"" +
Twine(InitialDirectory) + "\n!");
}
@@ -392,12 +445,12 @@ public:
bool runInvocation(CompilerInvocation *Invocation, FileManager *Files,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagConsumer) override {
- // FIXME: This should use the provided FileManager.
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromCompilerInvocation(
Invocation, PCHContainerOps,
CompilerInstance::createDiagnostics(&Invocation->getDiagnosticOpts(),
DiagConsumer,
- /*ShouldOwnClient=*/false));
+ /*ShouldOwnClient=*/false),
+ Files);
if (!AST)
return false;
@@ -429,12 +482,20 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
+ llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
+ new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<FileManager> Files(
+ new FileManager(FileSystemOptions(), OverlayFileSystem));
ToolInvocation Invocation(getSyntaxOnlyToolArgs(Args, FileNameRef), &Action,
- nullptr, PCHContainerOps);
+ Files.get(), PCHContainerOps);
SmallString<1024> CodeStorage;
- Invocation.mapVirtualFile(FileNameRef,
- Code.toNullTerminatedStringRef(CodeStorage));
+ InMemoryFileSystem->addFile(FileNameRef, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ Code.toNullTerminatedStringRef(CodeStorage)));
if (!Invocation.run())
return nullptr;