author     Dimitry Andric <dim@FreeBSD.org>   2017-01-04 22:11:23 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2017-01-04 22:11:23 +0000
commit     0c75eea8f661a82866688fd1fc4465883c4dd7d5 (patch)
tree       96848f3b7cc25f95142a52b6dd2ba93f1de6e413 /lib
parent     bab175ec4b075c8076ba14c762900392533f6ee4 (diff)
download   src-0c75eea8f661a82866688fd1fc4465883c4dd7d5.tar.gz
           src-0c75eea8f661a82866688fd1fc4465883c4dd7d5.zip

Vendor import of clang trunk r291012 (tag: vendor/clang/clang-trunk-r291012)

Notes:
    svn path=/vendor/clang/dist/; revision=311317
    svn path=/vendor/clang/clang-trunk-r291012/; revision=311318; tag=vendor/clang/clang-trunk-r291012
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ExprConstant.cpp | 10
-rw-r--r--  lib/AST/StmtOpenMP.cpp | 61
-rw-r--r--  lib/AST/StmtPrinter.cpp | 6
-rw-r--r--  lib/AST/StmtProfile.cpp | 5
-rw-r--r--  lib/Analysis/CFG.cpp | 10
-rw-r--r--  lib/Basic/OpenMPKinds.cpp | 37
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 64
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 32
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 4
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 28
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 27
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 4
-rw-r--r--  lib/Driver/Tools.cpp | 16
-rw-r--r--  lib/Format/Format.cpp | 3
-rw-r--r--  lib/Format/UnwrappedLineParser.cpp | 5
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 54
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 1
-rw-r--r--  lib/Headers/__clang_cuda_runtime_wrapper.h | 5
-rw-r--r--  lib/Parse/ParseOpenMP.cpp | 10
-rw-r--r--  lib/Sema/SemaCUDA.cpp | 6
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 17
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 64
-rw-r--r--  lib/Sema/SemaOverload.cpp | 4
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 14
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 270
-rw-r--r--  lib/Sema/TreeTransform.h | 14
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 13
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp | 1
-rw-r--r--  lib/Tooling/Core/CMakeLists.txt | 1
-rw-r--r--  lib/Tooling/Core/Diagnostic.cpp | 46
31 files changed, 565 insertions, 274 deletions
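
The bulk of this import is plumbing for the combined OpenMP directive #pragma omp target teams distribute parallel for simd across the parser, AST, Sema, serialization, and (currently inlined) CodeGen. For orientation only, a minimal sketch of source using the new directive; the example and its names are illustrative, not part of the commit:

    // Hypothetical use of the newly supported combined directive.
    void saxpy(int n, float a, const float *x, float *y) {
    #pragma omp target teams distribute parallel for simd \
        map(to : x[0:n]) map(tofrom : y[0:n])
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }

The new entry points below (ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective in Sema, EmitOMPTargetTeamsDistributeParallelForSimdDirective in CodeGen) handle exactly this construct; code generation is emitted as an inlined directive for now.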
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index b3f8925b6464..6dcb705c44d3 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -7192,6 +7192,12 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
CharUnits &EndOffset) {
bool DetermineForCompleteObject = refersToCompleteObject(LVal);
+ auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) {
+ if (Ty.isNull() || Ty->isIncompleteType() || Ty->isFunctionType())
+ return false;
+ return HandleSizeof(Info, ExprLoc, Ty, Result);
+ };
+
// We want to evaluate the size of the entire object. This is a valid fallback
// for when Type=1 and the designator is invalid, because we're asked for an
// upper-bound.
@@ -7209,7 +7215,7 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
return false;
QualType BaseTy = getObjectType(LVal.getLValueBase());
- return !BaseTy.isNull() && HandleSizeof(Info, ExprLoc, BaseTy, EndOffset);
+ return CheckedHandleSizeof(BaseTy, EndOffset);
}
// We want to evaluate the size of a subobject.
@@ -7238,7 +7244,7 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
}
CharUnits BytesPerElem;
- if (!HandleSizeof(Info, ExprLoc, Designator.MostDerivedType, BytesPerElem))
+ if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem))
return false;
// According to the GCC documentation, we want the size of the subobject
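
The new CheckedHandleSizeof lambda above bails out for null, incomplete, and function types before delegating to HandleSizeof. A hedged sketch (not from the patch) of the kind of __builtin_object_size query that guard has to survive:

    // Sketch: the evaluator must give up gracefully ("unknown" size) rather
    // than compute sizeof of an incomplete type.
    struct Opaque;  // intentionally never defined
    unsigned long remaining(Opaque *p) {
      return __builtin_object_size(p, 0);  // folds to (size_t)-1, no assertion
    }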
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index 0a90740162b9..a7c71bb5f45c 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -1659,3 +1659,64 @@ OMPTargetTeamsDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
return new (Mem)
OMPTargetTeamsDistributeParallelForDirective(CollapsedNum, NumClauses);
}
+
+OMPTargetTeamsDistributeParallelForSimdDirective *
+OMPTargetTeamsDistributeParallelForSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForSimdDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for_simd));
+ OMPTargetTeamsDistributeParallelForSimdDirective *Dir =
+ new (Mem) OMPTargetTeamsDistributeParallelForSimdDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetTeamsDistributeParallelForSimdDirective *
+OMPTargetTeamsDistributeParallelForSimdDirective::CreateEmpty(
+ const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForSimdDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for_simd));
+ return new (Mem) OMPTargetTeamsDistributeParallelForSimdDirective(
+ CollapsedNum, NumClauses);
+}
+
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index a8f493dca07d..a9c64c3ba6ae 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1244,6 +1244,12 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeParallelForDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
+ OMPTargetTeamsDistributeParallelForSimdDirective *Node) {
+ Indent() << "#pragma omp target teams distribute parallel for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index dd59a9b96c98..df36bf06b843 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -763,6 +763,11 @@ void StmtProfiler::VisitOMPTargetTeamsDistributeParallelForDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
+ const OMPTargetTeamsDistributeParallelForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index bf3cc05cdb6e..a1a463f1d037 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -1690,15 +1690,19 @@ CFGBuilder::VisitLogicalOperator(BinaryOperator *B,
// we have been provided.
ExitBlock = RHSBlock = createBlock(false);
+ // Even though KnownVal is only used in the else branch of the next
+ // conditional, tryEvaluateBool performs additional checking on the
+ // Expr, so it should be called unconditionally.
+ TryResult KnownVal = tryEvaluateBool(RHS);
+ if (!KnownVal.isKnown())
+ KnownVal = tryEvaluateBool(B);
+
if (!Term) {
assert(TrueBlock == FalseBlock);
addSuccessor(RHSBlock, TrueBlock);
}
else {
RHSBlock->setTerminator(Term);
- TryResult KnownVal = tryEvaluateBool(RHS);
- if (!KnownVal.isKnown())
- KnownVal = tryEvaluateBool(B);
addSuccessor(RHSBlock, TrueBlock, !KnownVal.isFalse());
addSuccessor(RHSBlock, FalseBlock, !KnownVal.isTrue());
}
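
tryEvaluateBool is now called on the RHS unconditionally because, per the comment above, it performs extra checking on the expression beyond producing KnownVal. An assumed example of a logical operator whose right-hand side is constant-foldable, the situation those calls examine:

    // Assumed example: the RHS of && folds to a constant, so the builder has
    // a known value available when wiring successor edges for the operator.
    bool positiveOn64Bit(long x) {
      return x > 0 && sizeof(void *) == 8;
    }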
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 4675995ea722..7bd1f8762bff 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -690,6 +690,16 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ switch (CKind) {
+#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_unknown:
@@ -721,7 +731,8 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for;
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -735,8 +746,8 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for;
- // TODO add next directives.
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
@@ -752,8 +763,8 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for ||
DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_parallel_for;
- // TODO add next directives.
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -761,7 +772,8 @@ bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel_for ||
DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for;
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -779,7 +791,8 @@ bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) {
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
return isOpenMPNestingTeamsDirective(DKind) ||
DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for;
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
@@ -788,8 +801,8 @@ bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd || DKind == OMPD_target_simd ||
DKind == OMPD_teams_distribute_simd ||
- DKind == OMPD_teams_distribute_parallel_for_simd;
- // TODO add next directives.
+ DKind == OMPD_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPNestingDistributeDirective(OpenMPDirectiveKind Kind) {
@@ -805,7 +818,8 @@ bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_teams_distribute_parallel_for_simd ||
Kind == OMPD_teams_distribute_parallel_for ||
Kind == OMPD_target_teams_distribute ||
- Kind == OMPD_target_teams_distribute_parallel_for;
+ Kind == OMPD_target_teams_distribute_parallel_for ||
+ Kind == OMPD_target_teams_distribute_parallel_for_simd;
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -830,5 +844,6 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_teams_distribute_parallel_for_simd ||
Kind == OMPD_teams_distribute_parallel_for ||
Kind == OMPD_target_teams_distribute ||
- Kind == OMPD_target_teams_distribute_parallel_for;
+ Kind == OMPD_target_teams_distribute_parallel_for ||
+ Kind == OMPD_target_teams_distribute_parallel_for_simd;
}
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 451f9e9221ad..fe0e2acdfdbf 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -20,53 +20,64 @@
using namespace clang;
using namespace CodeGen;
-/// \brief Get the GPU warp size.
-llvm::Value *CGOpenMPRuntimeNVPTX::getNVPTXWarpSize(CodeGenFunction &CGF) {
+namespace {
+enum OpenMPRTLFunctionNVPTX {
+ /// \brief Call to void __kmpc_kernel_init(kmp_int32 omp_handle,
+ /// kmp_int32 thread_limit);
+ OMPRTL_NVPTX__kmpc_kernel_init,
+};
+
+// NVPTX Address space
+enum AddressSpace {
+ AddressSpaceShared = 3,
+};
+} // namespace
+
+/// Get the GPU warp size.
+static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
return Bld.CreateCall(
llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
llvm::None, "nvptx_warp_size");
}
-/// \brief Get the id of the current thread on the GPU.
-llvm::Value *CGOpenMPRuntimeNVPTX::getNVPTXThreadID(CodeGenFunction &CGF) {
+/// Get the id of the current thread on the GPU.
+static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
return Bld.CreateCall(
llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
llvm::None, "nvptx_tid");
}
-// \brief Get the maximum number of threads in a block of the GPU.
-llvm::Value *CGOpenMPRuntimeNVPTX::getNVPTXNumThreads(CodeGenFunction &CGF) {
+/// Get the maximum number of threads in a block of the GPU.
+static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
return Bld.CreateCall(
llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
llvm::None, "nvptx_num_threads");
}
-/// \brief Get barrier to synchronize all threads in a block.
-void CGOpenMPRuntimeNVPTX::getNVPTXCTABarrier(CodeGenFunction &CGF) {
+/// Get barrier to synchronize all threads in a block.
+static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
Bld.CreateCall(llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
+ &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
}
-// \brief Synchronize all GPU threads in a block.
-void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
- getNVPTXCTABarrier(CGF);
-}
+/// Synchronize all GPU threads in a block.
+static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
-/// \brief Get the thread id of the OMP master thread.
+/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
/// If NumThreads is 64, master id is 32.
/// If NumThreads is 1024, master id is 992.
-llvm::Value *CGOpenMPRuntimeNVPTX::getMasterThreadID(CodeGenFunction &CGF) {
+static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
@@ -77,19 +88,6 @@ llvm::Value *CGOpenMPRuntimeNVPTX::getMasterThreadID(CodeGenFunction &CGF) {
Bld.CreateNot(Mask), "master_tid");
}
-namespace {
-enum OpenMPRTLFunctionNVPTX {
- /// \brief Call to void __kmpc_kernel_init(kmp_int32 omp_handle,
- /// kmp_int32 thread_limit);
- OMPRTL_NVPTX__kmpc_kernel_init,
-};
-
-// NVPTX Address space
-enum ADDRESS_SPACE {
- ADDRESS_SPACE_SHARED = 3,
-};
-} // namespace
-
CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
CodeGenModule &CGM)
: WorkerFn(nullptr), CGFI(nullptr) {
@@ -119,14 +117,14 @@ void CGOpenMPRuntimeNVPTX::initializeEnvironment() {
CGM.getModule(), CGM.Int32Ty, /*isConstant=*/false,
llvm::GlobalValue::CommonLinkage,
llvm::Constant::getNullValue(CGM.Int32Ty), "__omp_num_threads", 0,
- llvm::GlobalVariable::NotThreadLocal, ADDRESS_SPACE_SHARED);
+ llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
ActiveWorkers->setAlignment(DL.getPrefTypeAlignment(CGM.Int32Ty));
WorkID = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int64Ty, /*isConstant=*/false,
llvm::GlobalValue::CommonLinkage,
llvm::Constant::getNullValue(CGM.Int64Ty), "__tgt_work_id", 0,
- llvm::GlobalVariable::NotThreadLocal, ADDRESS_SPACE_SHARED);
+ llvm::GlobalVariable::NotThreadLocal, AddressSpaceShared);
WorkID->setAlignment(DL.getPrefTypeAlignment(CGM.Int64Ty));
}
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index e18d28cdda9f..a33fb27579f6 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -50,38 +50,6 @@ public:
private:
//
- // NVPTX calls.
- //
-
- /// \brief Get the GPU warp size.
- llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF);
-
- /// \brief Get the id of the current thread on the GPU.
- llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF);
-
- // \brief Get the maximum number of threads in a block of the GPU.
- llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF);
-
- /// \brief Get barrier to synchronize all threads in a block.
- void getNVPTXCTABarrier(CodeGenFunction &CGF);
-
- // \brief Synchronize all GPU threads in a block.
- void syncCTAThreads(CodeGenFunction &CGF);
-
- //
- // OMP calls.
- //
-
- /// \brief Get the thread id of the OMP master thread.
- /// The master thread id is the first thread (lane) of the last warp in the
- /// GPU block. Warp size is assumed to be some power of 2.
- /// Thread id is 0 indexed.
- /// E.g: If NumThreads is 33, master id is 32.
- /// If NumThreads is 64, master id is 32.
- /// If NumThreads is 1024, master id is 992.
- llvm::Value *getMasterThreadID(CodeGenFunction &CGF);
-
- //
// Private state and methods.
//
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index f2acb798b881..8d391f95d9f7 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -326,6 +326,10 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitOMPTargetTeamsDistributeParallelForDirective(
cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
break;
+ case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
+ EmitOMPTargetTeamsDistributeParallelForSimdDirective(
+ cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
+ break;
}
}
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index ba39e1fbd41f..386c4f0fe69c 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -2032,6 +2032,16 @@ void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
});
}
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_teams_distribute_parallel_for_simd,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
const DeclRefExpr *Helper) {
@@ -2760,6 +2770,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
auto &RT = CGM.getOpenMPRuntime();
+ bool HasLastprivateClause = false;
// Check pre-condition.
{
OMPLoopScope PreInitScope(*this, S);
@@ -2793,6 +2804,16 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
OMPPrivateScope LoopScope(*this);
+ if (EmitOMPFirstprivateClause(S, LoopScope)) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // initialization of firstprivate variables and post-update of
+ // lastprivate variables.
+ CGM.getOpenMPRuntime().emitBarrierCall(
+ *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+ EmitOMPPrivateClause(S, LoopScope);
+ HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
EmitOMPPrivateLoopCounters(S, LoopScope);
(void)LoopScope.Privatize();
@@ -2849,6 +2870,13 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
LB.getAddress(), UB.getAddress(), ST.getAddress(),
IL.getAddress(), Chunk);
}
+
+ // Emit final copy of the lastprivate variables if IsLastIter != 0.
+ if (HasLastprivateClause)
+ EmitOMPLastprivateClauseFinal(
+ S, /*NoFinals=*/false,
+ Builder.CreateIsNotNull(
+ EmitLoadOfScalar(IL, S.getLocStart())));
}
// We're now done with the loop, so jump to the continuation block.
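
EmitOMPDistributeLoop now emits firstprivate (with an implicit barrier), private, and lastprivate clauses, plus the final lastprivate copy-out when IsLastIter != 0. A minimal sketch, with assumed names, of a distribute loop exercising those clauses:

    // Hypothetical source covered by the new clause handling.
    void scale(int n, float *v) {
      float factor = 2.0f;
      float last = 0.0f;
    #pragma omp teams
    #pragma omp distribute firstprivate(factor) lastprivate(last)
      for (int i = 0; i < n; ++i) {
        v[i] *= factor;  // each team starts from the original value of 'factor'
        last = v[i];     // the sequentially last iteration's value is copied out
      }
    }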
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index a954f487d1e4..7cab13de923b 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -1049,6 +1049,19 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
return ResTy;
}
+static bool
+shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
+ const ASTContext &Context) {
+ QualType T = FD->getReturnType();
+ // Avoid the optimization for functions that return a record type with a
+ // trivial destructor or another trivially copyable type.
+ if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return !ClassDecl->hasTrivialDestructor();
+ }
+ return !T.isTriviallyCopyableType(Context);
+}
+
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
@@ -1127,17 +1140,23 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// function call is used by the caller, the behavior is undefined.
if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
!FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
+ bool ShouldEmitUnreachable =
+ CGM.getCodeGenOpts().StrictReturn ||
+ shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
if (SanOpts.has(SanitizerKind::Return)) {
SanitizerScope SanScope(this);
llvm::Value *IsFalse = Builder.getFalse();
EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
SanitizerHandler::MissingReturn,
EmitCheckSourceLocation(FD->getLocation()), None);
- } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- EmitTrapCall(llvm::Intrinsic::trap);
+ } else if (ShouldEmitUnreachable) {
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ EmitTrapCall(llvm::Intrinsic::trap);
+ }
+ if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
}
- Builder.CreateUnreachable();
- Builder.ClearInsertionPoint();
}
// Emit the standard function epilogue.
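
The trap/unreachable for flowing off the end of a non-void function is now gated on ShouldEmitUnreachable: always under -fstrict-return (the default, wired up via CodeGenOptions::StrictReturn below), otherwise only when the return type is not trivially copyable (or is a class with a non-trivial destructor). A hedged example of the code path this concerns:

    // Sketch: a non-void function where control can fall off the end.
    // With -fstrict-return clang keeps the trap/unreachable after the body;
    // with -fno-strict-return it omits it for trivially copyable return types
    // such as int, trading a UB-based optimization for robustness.
    int sign(int x) {
      if (x > 0) return 1;
      if (x < 0) return -1;
    }  // x == 0 reaches the closing brace without returning: undefined behavior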
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 222d0e97968a..1347f54df9ac 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -2699,6 +2699,8 @@ public:
const OMPTargetTeamsDistributeDirective &S);
void EmitOMPTargetTeamsDistributeParallelForDirective(
const OMPTargetTeamsDistributeParallelForDirective &S);
+ void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S);
/// Emit outlined function for the target directive.
static std::pair<llvm::Function * /*OutlinedFn*/,
@@ -3569,7 +3571,7 @@ public:
// If we still have any arguments, emit them using the type of the argument.
for (auto *A : llvm::make_range(Arg, ArgRange.end()))
- ArgTypes.push_back(getVarArgType(A));
+ ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip, Order);
}
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 2a367bb29aa5..ea5ad7d051b6 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -2235,6 +2235,15 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
UseSeparateSections)) {
CmdArgs.push_back("-plugin-opt=-data-sections");
}
+
+ if (Arg *A = Args.getLastArg(options::OPT_fprofile_sample_use_EQ)) {
+ StringRef FName = A->getValue();
+ if (!llvm::sys::fs::exists(FName))
+ D.Diag(diag::err_drv_no_such_file) << FName;
+ else
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
+ }
}
/// This is a helper function for validating the optional refinement step
@@ -3058,6 +3067,10 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
continue;
}
+ if (C.getDefaultToolChain().getTriple().isOSBinFormatCOFF() &&
+ Value == "-mbig-obj")
+ continue; // LLVM handles bigobj automatically
+
switch (C.getDefaultToolChain().getArch()) {
default:
break;
@@ -4453,6 +4466,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
false))
CmdArgs.push_back("-fstrict-enums");
+ if (!Args.hasFlag(options::OPT_fstrict_return, options::OPT_fno_strict_return,
+ true))
+ CmdArgs.push_back("-fno-strict-return");
if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
options::OPT_fno_strict_vtable_pointers,
false))
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 70b90d6fa14e..389761d48249 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -638,6 +638,9 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
ChromiumStyle.ContinuationIndentWidth = 8;
ChromiumStyle.IndentWidth = 4;
+ } else if (Language == FormatStyle::LK_JavaScript) {
+ ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
+ ChromiumStyle.AllowShortLoopsOnASingleLine = false;
} else {
ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 84e06d05c739..370cf7afa330 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -1255,10 +1255,13 @@ void UnwrappedLineParser::tryToParseJSFunction() {
if (FormatTok->is(tok::l_brace))
tryToParseBracedList();
else
- while (FormatTok->isNot(tok::l_brace) && !eof())
+ while (!FormatTok->isOneOf(tok::l_brace, tok::semi) && !eof())
nextToken();
}
+ if (FormatTok->is(tok::semi))
+ return;
+
parseChildBlock();
}
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index bd2ee06d1653..d8118cb30f63 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -370,6 +370,26 @@ void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
break;
}
+ case Decl::ClassTemplateSpecialization: {
+ const auto *CTSD = cast<ClassTemplateSpecializationDecl>(DC);
+ if (CTSD->isCompleteDefinition())
+ Out << "[class template specialization] ";
+ else
+ Out << "<class template specialization> ";
+ Out << *CTSD;
+ break;
+ }
+
+ case Decl::ClassTemplatePartialSpecialization: {
+ const auto *CTPSD = cast<ClassTemplatePartialSpecializationDecl>(DC);
+ if (CTPSD->isCompleteDefinition())
+ Out << "[class template partial specialization] ";
+ else
+ Out << "<class template partial specialization> ";
+ Out << *CTPSD;
+ break;
+ }
+
default:
llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
@@ -400,7 +420,8 @@ void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXConversion:
- {
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization: {
DeclContext* DC = cast<DeclContext>(I);
PrintDeclContext(DC, Indentation+2);
break;
@@ -478,6 +499,37 @@ void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
Out << "<omp threadprivate> " << '"' << I << "\"\n";
break;
}
+ case Decl::Friend: {
+ Out << "<friend>";
+ if (const NamedDecl *ND = cast<FriendDecl>(I)->getFriendDecl())
+ Out << ' ' << *ND;
+ Out << "\n";
+ break;
+ }
+ case Decl::Using: {
+ Out << "<using> " << *cast<UsingDecl>(I) << "\n";
+ break;
+ }
+ case Decl::UsingShadow: {
+ Out << "<using shadow> " << *cast<UsingShadowDecl>(I) << "\n";
+ break;
+ }
+ case Decl::Empty: {
+ Out << "<empty>\n";
+ break;
+ }
+ case Decl::AccessSpec: {
+ Out << "<access specifier>\n";
+ break;
+ }
+ case Decl::VarTemplate: {
+ Out << "<var template> " << *cast<VarTemplateDecl>(I) << "\n";
+ break;
+ }
+ case Decl::StaticAssert: {
+ Out << "<static assert>\n";
+ break;
+ }
default:
Out << "DeclKind: " << DK << '"' << I << "\"\n";
llvm_unreachable("decl unhandled");
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index a0682e26e702..ca4a7655a37d 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -602,6 +602,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
+ Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
index 205e15b40b5d..931d44b6965b 100644
--- a/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -232,6 +232,11 @@ static inline __device__ void __brkpt(int __c) { __brkpt(); }
// hardware, seems to generate faster machine code because ptxas can more easily
// reason about our code.
+#if CUDA_VERSION >= 8000
+#include "sm_60_atomic_functions.hpp"
+#include "sm_61_intrinsics.hpp"
+#endif
+
#undef __MATH_FUNCTIONS_HPP__
// math_functions.hpp defines ::signbit as a __host__ __device__ function. This
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index 061721dfb8da..f9ea8af00f50 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -119,7 +119,8 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
{ OMPD_target, OMPD_teams, OMPD_target_teams },
{ OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute },
{ OMPD_target_teams_distribute, OMPD_parallel, OMPD_target_teams_distribute_parallel },
- { OMPD_target_teams_distribute_parallel, OMPD_for, OMPD_target_teams_distribute_parallel_for }
+ { OMPD_target_teams_distribute_parallel, OMPD_for, OMPD_target_teams_distribute_parallel_for },
+ { OMPD_target_teams_distribute_parallel_for, OMPD_simd, OMPD_target_teams_distribute_parallel_for_simd }
};
enum { CancellationPoint = 0, DeclareReduction = 1, TargetData = 2 };
auto Tok = P.getCurToken();
@@ -758,6 +759,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
Diag(Tok, diag::err_omp_unexpected_directive)
<< getOpenMPDirectiveName(DKind);
break;
@@ -796,7 +798,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// 'teams distribute parallel for simd' |
/// 'teams distribute parallel for' | 'target teams' |
/// 'target teams distribute' |
-/// 'target teams distribute parallel for' {clause}
+/// 'target teams distribute parallel for' |
+/// 'target teams distribute parallel for simd' {clause}
/// annot_pragma_openmp_end
///
StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
@@ -912,7 +915,8 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_teams_distribute_parallel_for:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_parallel_for: {
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index 6f272ec839f5..282633bbc9e1 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -228,10 +228,8 @@ void Sema::EraseUnwantedCUDAMatches(
[&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));
// Erase all functions with lower priority.
- Matches.erase(
- llvm::remove_if(
- Matches, [&](const Pair &Match) { return GetCFP(Match) < BestCFP; }),
- Matches.end());
+ llvm::erase_if(Matches,
+ [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}
/// When an implicitly-declared special member has to invoke more than one
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index d76bde574677..3eef366b75b3 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -7471,6 +7471,23 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
}
Results.ExitScope();
+
+ if (!AtParameterName && !SelIdents.empty() &&
+ SelIdents.front()->getName().startswith("init")) {
+ for (const auto &M : PP.macros()) {
+ if (M.first->getName() != "NS_DESIGNATED_INITIALIZER")
+ continue;
+ Results.EnterNewScope();
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(M.first->getName()));
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(), CCP_Macro,
+ CXCursor_MacroDefinition));
+ Results.ExitScope();
+ }
+ }
+
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 804aadc0ff77..edceb537df75 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -1700,7 +1700,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_teams_distribute_parallel_for:
case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_parallel_for: {
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -2439,6 +2440,12 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
AllowedNameModifiers.push_back(OMPD_target);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -6375,6 +6382,52 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_target_teams_distribute_parallel_for_simd,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target teams distribute parallel for simd loop exprs were not "
+ "built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetTeamsDistributeParallelForSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -7397,7 +7450,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
CurrDir == OMPD_target_teams ||
CurrDir == OMPD_target_teams_distribute ||
- CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ CurrDir == OMPD_target_teams_distribute_parallel_for ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for_simd) {
OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
@@ -7657,7 +7711,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
CurrDir == OMPD_target_teams ||
CurrDir == OMPD_target_teams_distribute ||
- CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ CurrDir == OMPD_target_teams_distribute_parallel_for ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for_simd) {
OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
@@ -10175,7 +10230,8 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// attribute clause on the same construct
if ((DKind == OMPD_target || DKind == OMPD_target_teams ||
DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for) && VD) {
+ DKind == OMPD_target_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for_simd) && VD) {
auto DVar = DSAS->getTopDSA(VD, false);
if (isOpenMPPrivate(DVar.CKind)) {
SemaRef.Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 47e3df20d911..b5c0e634fa50 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -8958,9 +8958,7 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
S.IdentifyCUDAPreference(Caller, Cand->Function) ==
Sema::CFP_WrongSide;
};
- Candidates.erase(std::remove_if(Candidates.begin(), Candidates.end(),
- IsWrongSideCandidate),
- Candidates.end());
+ llvm::erase_if(Candidates, IsWrongSideCandidate);
}
}
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index facc5d1b375b..66a10ef7993e 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -4244,7 +4244,7 @@ namespace {
UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }
bool Visit(QualType T) {
- return inherited::Visit(T.getTypePtr());
+ return T.isNull() ? false : inherited::Visit(T.getTypePtr());
}
#define TYPE(Class, Parent) \
@@ -4497,17 +4497,7 @@ bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
//
// C++11 allows these, and even in C++03 we allow them as an extension with
// a warning.
- bool NeedsCheck;
- if (LangOpts.CPlusPlus11)
- NeedsCheck =
- !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_unnamed_type,
- SR.getBegin()) ||
- !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_local_type,
- SR.getBegin());
- else
- NeedsCheck = Arg->hasUnnamedOrLocalType();
-
- if (NeedsCheck) {
+ if (LangOpts.CPlusPlus11 || Arg->hasUnnamedOrLocalType()) {
UnnamedLocalNoLinkageFinder Finder(*this, SR);
(void)Finder.Visit(Context.getCanonicalType(Arg));
}
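
CheckTemplateArgument now runs UnnamedLocalNoLinkageFinder whenever C++11 is enabled or the argument has an unnamed or local type, and Visit tolerates a null QualType. For orientation, an assumed example of the construct these diagnostics are about:

    template <typename T> void use(T) {}

    void caller() {
      struct Local { int v; };  // class type with no linkage
      use(Local{});             // local type as a template argument: ill-formed
                                // in C++98, accepted in C++11 (and reported
                                // under -Wc++98-compat)
    }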
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 0bc85a2f2635..7f1fd91c46f0 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -288,19 +288,22 @@ checkDeducedTemplateArguments(ASTContext &Context,
X.pack_size() != Y.pack_size())
return DeducedTemplateArgument();
+ llvm::SmallVector<TemplateArgument, 8> NewPack;
for (TemplateArgument::pack_iterator XA = X.pack_begin(),
XAEnd = X.pack_end(),
YA = Y.pack_begin();
XA != XAEnd; ++XA, ++YA) {
- // FIXME: Do we need to merge the results together here?
- if (checkDeducedTemplateArguments(Context,
- DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
- DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()))
- .isNull())
+ TemplateArgument Merged = checkDeducedTemplateArguments(
+ Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
+ if (Merged.isNull())
return DeducedTemplateArgument();
+ NewPack.push_back(Merged);
}
- return X;
+ return DeducedTemplateArgument(
+ TemplateArgument::CreatePackCopy(Context, NewPack),
+ X.wasDeducedFromArrayBound() && Y.wasDeducedFromArrayBound());
}
llvm_unreachable("Invalid TemplateArgument Kind!");
@@ -672,17 +675,20 @@ public:
// for that pack, then clear out the deduced argument.
for (auto &Pack : Packs) {
DeducedTemplateArgument &DeducedArg = Deduced[Pack.Index];
- if (!DeducedArg.isNull()) {
+ if (!Pack.New.empty() || !DeducedArg.isNull()) {
+ while (Pack.New.size() < PackElements)
+ Pack.New.push_back(DeducedTemplateArgument());
Pack.New.push_back(DeducedArg);
DeducedArg = DeducedTemplateArgument();
}
}
+ ++PackElements;
}
/// \brief Finish template argument deduction for a set of argument packs,
/// producing the argument packs and checking for consistency with prior
/// deductions.
- Sema::TemplateDeductionResult finish(bool HasAnyArguments) {
+ Sema::TemplateDeductionResult finish() {
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
for (auto &Pack : Packs) {
@@ -691,7 +697,7 @@ public:
// Build or find a new value for this pack.
DeducedTemplateArgument NewPack;
- if (HasAnyArguments && Pack.New.empty()) {
+ if (PackElements && Pack.New.empty()) {
if (Pack.DeferredDeduction.isNull()) {
// We were not able to deduce anything for this parameter pack
// (because it only appeared in non-deduced contexts), so just
@@ -758,6 +764,7 @@ private:
TemplateParameterList *TemplateParams;
SmallVectorImpl<DeducedTemplateArgument> &Deduced;
TemplateDeductionInfo &Info;
+ unsigned PackElements = 0;
SmallVector<DeducedPack, 2> Packs;
};
@@ -861,10 +868,7 @@ DeduceTemplateArguments(Sema &S,
QualType Pattern = Expansion->getPattern();
PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
- bool HasAnyArguments = false;
for (; ArgIdx < NumArgs; ++ArgIdx) {
- HasAnyArguments = true;
-
// Deduce template arguments from the pattern.
if (Sema::TemplateDeductionResult Result
= DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
@@ -877,7 +881,7 @@ DeduceTemplateArguments(Sema &S,
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish(HasAnyArguments))
+ if (auto Result = PackScope.finish())
return Result;
}
@@ -1935,10 +1939,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// Keep track of the deduced template arguments for each parameter pack
// expanded by this pack expansion (the outer index) and for each
// template argument (the inner SmallVectors).
- bool HasAnyArguments = false;
for (; hasTemplateArgumentForDeduction(Args, ArgIdx); ++ArgIdx) {
- HasAnyArguments = true;
-
// Deduce template arguments from the pattern.
if (Sema::TemplateDeductionResult Result
= DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
@@ -1950,7 +1951,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish(HasAnyArguments))
+ if (auto Result = PackScope.finish())
return Result;
}
@@ -2145,6 +2146,16 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound());
assert(InnerArg.getKind() != TemplateArgument::Pack &&
"deduced nested pack");
+ if (P.isNull()) {
+ // We deduced arguments for some elements of this pack, but not for
+ // all of them. This happens if we get a conditionally-non-deduced
+ // context in a pack expansion (such as an overload set in one of the
+ // arguments).
+ S.Diag(Param->getLocation(),
+ diag::err_template_arg_deduced_incomplete_pack)
+ << Arg << Param;
+ return true;
+ }
if (ConvertArg(InnerArg, PackedArgsBuilder.size()))
return true;
@@ -3192,67 +3203,59 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentByListElement(
/// \brief Attempt template argument deduction from an initializer list
/// deemed to be an argument in a function call.
-static bool
+static Sema::TemplateDeductionResult
DeduceFromInitializerList(Sema &S, TemplateParameterList *TemplateParams,
QualType AdjustedParamType, InitListExpr *ILE,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF, Sema::TemplateDeductionResult &Result) {
-
- // [temp.deduct.call] p1 (post CWG-1591)
- // If removing references and cv-qualifiers from P gives
- // std::initializer_list<P0> or P0[N] for some P0 and N and the argument is a
- // non-empty initializer list (8.5.4), then deduction is performed instead for
- // each element of the initializer list, taking P0 as a function template
- // parameter type and the initializer element as its argument, and in the
- // P0[N] case, if N is a non-type template parameter, N is deduced from the
- // length of the initializer list. Otherwise, an initializer list argument
- // causes the parameter to be considered a non-deduced context
-
- const bool IsConstSizedArray = AdjustedParamType->isConstantArrayType();
-
- const bool IsDependentSizedArray =
- !IsConstSizedArray && AdjustedParamType->isDependentSizedArrayType();
-
- QualType ElTy; // The element type of the std::initializer_list or the array.
-
- const bool IsSTDList = !IsConstSizedArray && !IsDependentSizedArray &&
- S.isStdInitializerList(AdjustedParamType, &ElTy);
-
- if (!IsConstSizedArray && !IsDependentSizedArray && !IsSTDList)
- return false;
-
- Result = Sema::TDK_Success;
- // If we are not deducing against the 'T' in a std::initializer_list<T> then
- // deduce against the 'T' in T[N].
- if (ElTy.isNull()) {
- assert(!IsSTDList);
- ElTy = S.Context.getAsArrayType(AdjustedParamType)->getElementType();
+ unsigned TDF) {
+ // C++ [temp.deduct.call]p1: (CWG 1591)
+ // If removing references and cv-qualifiers from P gives
+ // std::initializer_list<P0> or P0[N] for some P0 and N and the argument is
+ // a non-empty initializer list, then deduction is performed instead for
+ // each element of the initializer list, taking P0 as a function template
+ // parameter type and the initializer element as its argument
+ //
+ // FIXME: Remove references and cv-qualifiers here? Consider
+ // std::initializer_list<std::initializer_list<T>&&>
+ QualType ElTy;
+ auto *ArrTy = S.Context.getAsArrayType(AdjustedParamType);
+ if (ArrTy)
+ ElTy = ArrTy->getElementType();
+ else if (!S.isStdInitializerList(AdjustedParamType, &ElTy)) {
+ // Otherwise, an initializer list argument causes the parameter to be
+ // considered a non-deduced context
+ return Sema::TDK_Success;
}
+
// Deduction only needs to be done for dependent types.
if (ElTy->isDependentType()) {
for (Expr *E : ILE->inits()) {
- if ((Result = DeduceTemplateArgumentByListElement(S, TemplateParams, ElTy,
- E, Info, Deduced, TDF)))
- return true;
+ if (auto Result = DeduceTemplateArgumentByListElement(
+ S, TemplateParams, ElTy, E, Info, Deduced, TDF))
+ return Result;
}
}
- if (IsDependentSizedArray) {
- const DependentSizedArrayType *ArrTy =
- S.Context.getAsDependentSizedArrayType(AdjustedParamType);
+
+ // in the P0[N] case, if N is a non-type template parameter, N is deduced
+ // from the length of the initializer list.
+ // FIXME: We're not supposed to get here if N would be deduced as 0.
+ if (auto *DependentArrTy = dyn_cast_or_null<DependentSizedArrayType>(ArrTy)) {
// Determine the array bound is something we can deduce.
if (NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, ArrTy->getSizeExpr())) {
+ getDeducedParameterFromExpr(Info, DependentArrTy->getSizeExpr())) {
// We can perform template argument deduction for the given non-type
// template parameter.
llvm::APInt Size(S.Context.getIntWidth(NTTP->getType()),
ILE->getNumInits());
- Result = DeduceNonTypeTemplateArgument(
- S, TemplateParams, NTTP, llvm::APSInt(Size), NTTP->getType(),
- /*ArrayBound=*/true, Info, Deduced);
+ if (auto Result = DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, llvm::APSInt(Size), NTTP->getType(),
+ /*ArrayBound=*/true, Info, Deduced))
+ return Result;
}
}
- return true;
+
+ return Sema::TDK_Success;
}
/// \brief Perform template argument deduction by matching a parameter type
@@ -3268,15 +3271,10 @@ DeduceTemplateArgumentByListElement(Sema &S,
unsigned TDF) {
// Handle the case where an init list contains another init list as the
// element.
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
- Sema::TemplateDeductionResult Result;
- if (!DeduceFromInitializerList(S, TemplateParams,
- ParamType.getNonReferenceType(), ILE, Info,
- Deduced, TDF, Result))
- return Sema::TDK_Success; // Just ignore this expression.
-
- return Result;
- }
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg))
+ return DeduceFromInitializerList(S, TemplateParams,
+ ParamType.getNonReferenceType(), ILE, Info,
+ Deduced, TDF);
// For all other cases, just match by type.
QualType ArgType = Arg->getType();
@@ -3363,58 +3361,51 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
ParamTypes.push_back(Function->getParamDecl(I)->getType());
}
- // Deduce template arguments from the function parameters.
- Deduced.resize(TemplateParams->size());
- unsigned ArgIdx = 0;
SmallVector<OriginalCallArg, 4> OriginalCallArgs;
- for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size();
- ParamIdx != NumParamTypes; ++ParamIdx) {
- QualType OrigParamType = ParamTypes[ParamIdx];
- QualType ParamType = OrigParamType;
- const PackExpansionType *ParamExpansion
- = dyn_cast<PackExpansionType>(ParamType);
- if (!ParamExpansion) {
- // Simple case: matching a function parameter to a function argument.
- if (ArgIdx >= CheckArgs)
- break;
+ // Deduce an argument of type ParamType from an expression with index ArgIdx.
+ auto DeduceCallArgument = [&](QualType ParamType, unsigned ArgIdx) {
+ Expr *Arg = Args[ArgIdx];
+ QualType ArgType = Arg->getType();
+ QualType OrigParamType = ParamType;
- Expr *Arg = Args[ArgIdx++];
- QualType ArgType = Arg->getType();
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF))
+ return Sema::TDK_Success;
- unsigned TDF = 0;
- if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
- ParamType, ArgType, Arg,
- TDF))
- continue;
+ // If we have nothing to deduce, we're done.
+ if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ return Sema::TDK_Success;
- // If we have nothing to deduce, we're done.
- if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
- continue;
+ // If the argument is an initializer list ...
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg))
+ return DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
+ Info, Deduced, TDF);
- // If the argument is an initializer list ...
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
- TemplateDeductionResult Result;
- // Removing references was already done.
- if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
- Info, Deduced, TDF, Result))
- continue;
+ // Keep track of the argument type and corresponding parameter index,
+ // so we can check for compatibility between the deduced A and A.
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx, ArgType));
- if (Result)
- return Result;
- // Don't track the argument type, since an initializer list has none.
- continue;
- }
+ return DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+ };
- // Keep track of the argument type and corresponding parameter index,
- // so we can check for compatibility between the deduced A and A.
- OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
- ArgType));
+ // Deduce template arguments from the function parameters.
+ Deduced.resize(TemplateParams->size());
+ for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size(), ArgIdx = 0;
+ ParamIdx != NumParamTypes; ++ParamIdx) {
+ QualType ParamType = ParamTypes[ParamIdx];
- if (TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
- ParamType, ArgType,
- Info, Deduced, TDF))
+ const PackExpansionType *ParamExpansion =
+ dyn_cast<PackExpansionType>(ParamType);
+ if (!ParamExpansion) {
+ // Simple case: matching a function parameter to a function argument.
+ if (ArgIdx >= CheckArgs)
+ break;
+
+ if (auto Result = DeduceCallArgument(ParamType, ArgIdx++))
return Result;
continue;
@@ -3429,6 +3420,9 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// the function parameter pack. For a function parameter pack that does
// not occur at the end of the parameter-declaration-list, the type of
// the parameter pack is a non-deduced context.
+ // FIXME: This does not say that subsequent parameters are also non-deduced.
+ // See also DR1388 / DR1399, which effectively says we should keep deducing
+ // after the pack.
if (ParamIdx + 1 < NumParamTypes)
break;
@@ -3436,57 +3430,13 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
ParamPattern);
- bool HasAnyArguments = false;
- for (; ArgIdx < Args.size(); ++ArgIdx) {
- HasAnyArguments = true;
-
- QualType OrigParamType = ParamPattern;
- ParamType = OrigParamType;
- Expr *Arg = Args[ArgIdx];
- QualType ArgType = Arg->getType();
-
- unsigned TDF = 0;
- if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
- ParamType, ArgType, Arg,
- TDF)) {
- // We can't actually perform any deduction for this argument, so stop
- // deduction at this point.
- ++ArgIdx;
- break;
- }
-
- // As above, initializer lists need special handling.
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
- TemplateDeductionResult Result;
- if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
- Info, Deduced, TDF, Result)) {
- ++ArgIdx;
- break;
- }
-
- if (Result)
- return Result;
- } else {
-
- // Keep track of the argument type and corresponding argument index,
- // so we can check for compatibility between the deduced A and A.
- if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
- OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
- ArgType));
-
- if (TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
- ParamType, ArgType, Info,
- Deduced, TDF))
- return Result;
- }
-
- PackScope.nextPackElement();
- }
+ for (; ArgIdx < Args.size(); PackScope.nextPackElement(), ++ArgIdx)
+ if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx))
+ return Result;
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish(HasAnyArguments))
+ if (auto Result = PackScope.finish())
return Result;
// After we've matching against a parameter pack, we're done.
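
DeduceFromInitializerList now returns a TemplateDeductionResult directly and implements the CWG 1591 rule quoted above: if P, stripped of references and cv-qualifiers, is std::initializer_list<P0> or P0[N], deduction recurses into each element of the braced list, and a non-type parameter N is deduced from the list's length. A hedged illustration of both cases:

    #include <initializer_list>

    template <typename T> void fromList(std::initializer_list<T>) {}
    template <typename T, int N> void fromArray(const T (&)[N]) {}

    void demo() {
      fromList({1, 2, 3});   // T deduced as int from the list elements
      fromArray({1, 2, 3});  // T deduced as int, N deduced as 3 (CWG 1591)
    }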
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 3ab6019f0ec3..66892936e573 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -7766,6 +7766,20 @@ TreeTransform<Derived>::TransformOMPTargetTeamsDistributeParallelForDirective(
return Res;
}
+template <typename Derived>
+StmtResult TreeTransform<Derived>::
+ TransformOMPTargetTeamsDistributeParallelForSimdDirective(
+ OMPTargetTeamsDistributeParallelForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_target_teams_distribute_parallel_for_simd, DirName, nullptr,
+ D->getLocStart());
+ auto Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 5607f764a9c3..19fac55664ae 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -2860,6 +2860,11 @@ void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForDirective(
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
+ OMPTargetTeamsDistributeParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -3638,6 +3643,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetTeamsDistributeParallelForSimdDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
case EXPR_CXX_OPERATOR_CALL:
S = new (Context) CXXOperatorCallExpr(Context, Empty);
break;
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 3993be146edf..162b2bd25260 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -2556,6 +2556,13 @@ void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForDirective(
Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
+ OMPTargetTeamsDistributeParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::
+ STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 5b2119aeda27..707168b4de0a 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -869,6 +869,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTargetTeamsDirectiveClass:
case Stmt::OMPTargetTeamsDistributeDirectiveClass:
case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
+ case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
case Stmt::ObjCSubscriptRefExprClass:
diff --git a/lib/Tooling/Core/CMakeLists.txt b/lib/Tooling/Core/CMakeLists.txt
index f6348cbf80ed..e2b0dd424d10 100644
--- a/lib/Tooling/Core/CMakeLists.txt
+++ b/lib/Tooling/Core/CMakeLists.txt
@@ -4,6 +4,7 @@ add_clang_library(clangToolingCore
Lookup.cpp
Replacement.cpp
QualTypeNames.cpp
+ Diagnostic.cpp
LINK_LIBS
clangAST
diff --git a/lib/Tooling/Core/Diagnostic.cpp b/lib/Tooling/Core/Diagnostic.cpp
new file mode 100644
index 000000000000..3bbc2b901e38
--- /dev/null
+++ b/lib/Tooling/Core/Diagnostic.cpp
@@ -0,0 +1,46 @@
+//===--- Diagnostic.cpp - Framework for clang diagnostics tools ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements classes to support/store diagnostics refactoring.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Core/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+
+namespace clang {
+namespace tooling {
+
+DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message)
+ : Message(Message), FileOffset(0) {}
+
+DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
+ const SourceManager &Sources,
+ SourceLocation Loc)
+ : Message(Message) {
+ assert(Loc.isValid() && Loc.isFileID());
+ FilePath = Sources.getFilename(Loc);
+ FileOffset = Sources.getFileOffset(Loc);
+}
+
+Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
+ Diagnostic::Level DiagLevel, StringRef BuildDirectory)
+ : DiagnosticName(DiagnosticName), DiagLevel(DiagLevel),
+ BuildDirectory(BuildDirectory) {}
+
+Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
+ DiagnosticMessage &Message,
+ llvm::StringMap<Replacements> &Fix,
+ SmallVector<DiagnosticMessage, 1> &Notes,
+ Level DiagLevel, llvm::StringRef BuildDirectory)
+ : DiagnosticName(DiagnosticName), Message(Message), Fix(Fix), Notes(Notes),
+ DiagLevel(DiagLevel), BuildDirectory(BuildDirectory) {}
+
+} // end namespace tooling
+} // end namespace clang