aboutsummaryrefslogtreecommitdiff
path: root/lib/Sema/SemaOpenMP.cpp
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2017-01-02 19:18:08 +0000
committerDimitry Andric <dim@FreeBSD.org>2017-01-02 19:18:08 +0000
commitbab175ec4b075c8076ba14c762900392533f6ee4 (patch)
tree01f4f29419a2cb10abe13c1e63cd2a66068b0137 /lib/Sema/SemaOpenMP.cpp
parent8b7a8012d223fac5d17d16a66bb39168a9a1dfc0 (diff)
downloadsrc-bab175ec4b075c8076ba14c762900392533f6ee4.tar.gz
src-bab175ec4b075c8076ba14c762900392533f6ee4.zip
Vendor import of clang trunk r290819:vendor/clang/clang-trunk-r290819
Notes
Notes: svn path=/vendor/clang/dist/; revision=311118 svn path=/vendor/clang/clang-trunk-r290819/; revision=311119; tag=vendor/clang/clang-trunk-r290819
Diffstat (limited to 'lib/Sema/SemaOpenMP.cpp')
-rw-r--r--lib/Sema/SemaOpenMP.cpp2179
1 file changed, 731 insertions(+), 1448 deletions(-)
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index b7ac48583e1a..804aadc0ff77 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -72,8 +72,13 @@ private:
typedef llvm::DenseMap<ValueDecl *, Expr *> AlignedMapTy;
typedef std::pair<unsigned, VarDecl *> LCDeclInfo;
typedef llvm::DenseMap<ValueDecl *, LCDeclInfo> LoopControlVariablesMapTy;
- typedef llvm::DenseMap<
- ValueDecl *, OMPClauseMappableExprCommon::MappableExprComponentLists>
+ /// Struct that associates a component with the clause kind where they are
+ /// found.
+ struct MappedExprComponentTy {
+ OMPClauseMappableExprCommon::MappableExprComponentLists Components;
+ OpenMPClauseKind Kind = OMPC_unknown;
+ };
+ typedef llvm::DenseMap<ValueDecl *, MappedExprComponentTy>
MappedExprComponentsTy;
typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
CriticalsWithHintsTy;
@@ -123,7 +128,7 @@ private:
typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
- DSAVarData getDSA(StackTy::reverse_iterator& Iter, ValueDecl *D);
+ DSAVarData getDSA(StackTy::reverse_iterator &Iter, ValueDecl *D);
/// \brief Checks if the variable is a local for OpenMP region.
bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
@@ -293,9 +298,7 @@ public:
Stack[Stack.size() - 2].CancelRegion || Cancel;
}
/// \brief Return true if current region has inner cancel construct.
- bool isCancelRegion() const {
- return Stack.back().CancelRegion;
- }
+ bool isCancelRegion() const { return Stack.back().CancelRegion; }
/// \brief Set collapse value for the region.
void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; }
@@ -323,12 +326,13 @@ public:
Scope *getCurScope() { return Stack.back().CurScope; }
SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
- // Do the check specified in \a Check to all component lists and return true
- // if any issue is found.
+ /// Do the check specified in \a Check to all component lists and return true
+ /// if any issue is found.
bool checkMappableExprComponentListsForDecl(
ValueDecl *VD, bool CurrentRegionOnly,
- const llvm::function_ref<bool(
- OMPClauseMappableExprCommon::MappableExprComponentListRef)> &Check) {
+ const llvm::function_ref<
+ bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind)> &Check) {
auto SI = Stack.rbegin();
auto SE = Stack.rend();
@@ -344,24 +348,26 @@ public:
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
- for (auto &L : MI->second)
- if (Check(L))
+ for (auto &L : MI->second.Components)
+ if (Check(L, MI->second.Kind))
return true;
}
return false;
}
- // Create a new mappable expression component list associated with a given
- // declaration and initialize it with the provided list of components.
+ /// Create a new mappable expression component list associated with a given
+ /// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
ValueDecl *VD,
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
+ OpenMPClauseKind WhereFoundClauseKind) {
assert(Stack.size() > 1 &&
"Not expecting to retrieve components from a empty stack!");
auto &MEC = Stack.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
- MEC.resize(MEC.size() + 1);
- MEC.back().append(Components.begin(), Components.end());
+ MEC.Components.resize(MEC.Components.size() + 1);
+ MEC.Components.back().append(Components.begin(), Components.end());
+ MEC.Kind = WhereFoundClauseKind;
}
unsigned getNestingLevel() const {
@@ -393,7 +399,7 @@ bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
static ValueDecl *getCanonicalDecl(ValueDecl *D) {
auto *VD = dyn_cast<VarDecl>(D);
auto *FD = dyn_cast<FieldDecl>(D);
- if (VD != nullptr) {
+ if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
} else {
@@ -404,7 +410,7 @@ static ValueDecl *getCanonicalDecl(ValueDecl *D) {
return D;
}
-DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator& Iter,
+DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
ValueDecl *D) {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
@@ -771,18 +777,12 @@ DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
D = getCanonicalDecl(D);
auto StartI = std::next(Stack.rbegin());
auto EndI = Stack.rend();
- if (FromParent && StartI != EndI) {
+ if (FromParent && StartI != EndI)
StartI = std::next(StartI);
- }
- for (auto I = StartI, EE = EndI; I != EE; ++I) {
- if (!DPred(I->Directive))
- break;
- DSAVarData DVar = getDSA(I, D);
- if (CPred(DVar.CKind))
- return DVar;
+ if (StartI == EndI || !DPred(StartI->Directive))
return DSAVarData();
- }
- return DSAVarData();
+ DSAVarData DVar = getDSA(StartI, D);
+ return CPred(DVar.CKind) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
@@ -903,7 +903,6 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
// array section, the runtime library may pass the NULL value to the
// device instead of the value passed to it by the compiler.
-
if (Ty->isReferenceType())
Ty = Ty->castAs<ReferenceType>()->getPointeeType();
@@ -916,7 +915,13 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
DSAStack->checkMappableExprComponentListsForDecl(
D, /*CurrentRegionOnly=*/true,
[&](OMPClauseMappableExprCommon::MappableExprComponentListRef
- MapExprComponents) {
+ MapExprComponents,
+ OpenMPClauseKind WhereFoundClauseKind) {
+ // Only the map clause information influences how a variable is
+ // captured. E.g. is_device_ptr does not require changing the default
+ // behavior.
+ if (WhereFoundClauseKind != OMPC_map)
+ return false;
auto EI = MapExprComponents.rbegin();
auto EE = MapExprComponents.rend();
@@ -1062,7 +1067,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
- if (auto D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
+ if (auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
for (auto *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
@@ -1121,7 +1126,7 @@ public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(ND)) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
@@ -1290,7 +1295,7 @@ class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> {
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
- if (auto VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getLocStart(),
diag::err_omp_local_var_in_threadprivate_init)
@@ -1471,7 +1476,8 @@ public:
auto DVar = Stack->getTopDSA(VD, false);
// Check if the variable has explicit DSA set and stop analysis if it so.
- if (DVar.RefExpr) return;
+ if (DVar.RefExpr)
+ return;
auto ELoc = E->getExprLoc();
auto DKind = Stack->getCurrentDirective();
@@ -1550,7 +1556,8 @@ public:
!Stack->isLoopControlVariable(FD).first)
ImplicitFirstprivate.push_back(E);
}
- }
+ } else
+ Visit(E->getBase());
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (auto *C : S->clauses()) {
@@ -1587,7 +1594,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
- case OMPD_teams: {
+ case OMPD_teams:
+ case OMPD_target_teams: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -1616,7 +1624,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd: {
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -1685,7 +1694,13 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for: {
+ case OMPD_distribute_parallel_for:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -1740,7 +1755,8 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
}
WithInit = true;
}
- auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty);
+ auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
+ CaptureExpr->getLocStart());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C, SourceRange()));
S.CurContext->addHiddenDecl(CED);
@@ -1868,1241 +1884,12 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
const DeclarationNameInfo &CurrentName,
OpenMPDirectiveKind CancelRegion,
SourceLocation StartLoc) {
- // Allowed nesting of constructs
- // +------------------+-----------------+------------------------------------+
- // | Parent directive | Child directive | Closely (!), No-Closely(+), Both(*)|
- // +------------------+-----------------+------------------------------------+
- // | parallel | parallel | * |
- // | parallel | for | * |
- // | parallel | for simd | * |
- // | parallel | master | * |
- // | parallel | critical | * |
- // | parallel | simd | * |
- // | parallel | sections | * |
- // | parallel | section | + |
- // | parallel | single | * |
- // | parallel | parallel for | * |
- // | parallel |parallel for simd| * |
- // | parallel |parallel sections| * |
- // | parallel | task | * |
- // | parallel | taskyield | * |
- // | parallel | barrier | * |
- // | parallel | taskwait | * |
- // | parallel | taskgroup | * |
- // | parallel | flush | * |
- // | parallel | ordered | + |
- // | parallel | atomic | * |
- // | parallel | target | * |
- // | parallel | target parallel | * |
- // | parallel | target parallel | * |
- // | | for | |
- // | parallel | target enter | * |
- // | | data | |
- // | parallel | target exit | * |
- // | | data | |
- // | parallel | teams | + |
- // | parallel | cancellation | |
- // | | point | ! |
- // | parallel | cancel | ! |
- // | parallel | taskloop | * |
- // | parallel | taskloop simd | * |
- // | parallel | distribute | + |
- // | parallel | distribute | + |
- // | | parallel for | |
- // | parallel | distribute | + |
- // | |parallel for simd| |
- // | parallel | distribute simd | + |
- // +------------------+-----------------+------------------------------------+
- // | for | parallel | * |
- // | for | for | + |
- // | for | for simd | + |
- // | for | master | + |
- // | for | critical | * |
- // | for | simd | * |
- // | for | sections | + |
- // | for | section | + |
- // | for | single | + |
- // | for | parallel for | * |
- // | for |parallel for simd| * |
- // | for |parallel sections| * |
- // | for | task | * |
- // | for | taskyield | * |
- // | for | barrier | + |
- // | for | taskwait | * |
- // | for | taskgroup | * |
- // | for | flush | * |
- // | for | ordered | * (if construct is ordered) |
- // | for | atomic | * |
- // | for | target | * |
- // | for | target parallel | * |
- // | for | target parallel | * |
- // | | for | |
- // | for | target enter | * |
- // | | data | |
- // | for | target exit | * |
- // | | data | |
- // | for | teams | + |
- // | for | cancellation | |
- // | | point | ! |
- // | for | cancel | ! |
- // | for | taskloop | * |
- // | for | taskloop simd | * |
- // | for | distribute | + |
- // | for | distribute | + |
- // | | parallel for | |
- // | for | distribute | + |
- // | |parallel for simd| |
- // | for | distribute simd | + |
- // | for | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | master | parallel | * |
- // | master | for | + |
- // | master | for simd | + |
- // | master | master | * |
- // | master | critical | * |
- // | master | simd | * |
- // | master | sections | + |
- // | master | section | + |
- // | master | single | + |
- // | master | parallel for | * |
- // | master |parallel for simd| * |
- // | master |parallel sections| * |
- // | master | task | * |
- // | master | taskyield | * |
- // | master | barrier | + |
- // | master | taskwait | * |
- // | master | taskgroup | * |
- // | master | flush | * |
- // | master | ordered | + |
- // | master | atomic | * |
- // | master | target | * |
- // | master | target parallel | * |
- // | master | target parallel | * |
- // | | for | |
- // | master | target enter | * |
- // | | data | |
- // | master | target exit | * |
- // | | data | |
- // | master | teams | + |
- // | master | cancellation | |
- // | | point | |
- // | master | cancel | |
- // | master | taskloop | * |
- // | master | taskloop simd | * |
- // | master | distribute | + |
- // | master | distribute | + |
- // | | parallel for | |
- // | master | distribute | + |
- // | |parallel for simd| |
- // | master | distribute simd | + |
- // | master | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | critical | parallel | * |
- // | critical | for | + |
- // | critical | for simd | + |
- // | critical | master | * |
- // | critical | critical | * (should have different names) |
- // | critical | simd | * |
- // | critical | sections | + |
- // | critical | section | + |
- // | critical | single | + |
- // | critical | parallel for | * |
- // | critical |parallel for simd| * |
- // | critical |parallel sections| * |
- // | critical | task | * |
- // | critical | taskyield | * |
- // | critical | barrier | + |
- // | critical | taskwait | * |
- // | critical | taskgroup | * |
- // | critical | ordered | + |
- // | critical | atomic | * |
- // | critical | target | * |
- // | critical | target parallel | * |
- // | critical | target parallel | * |
- // | | for | |
- // | critical | target enter | * |
- // | | data | |
- // | critical | target exit | * |
- // | | data | |
- // | critical | teams | + |
- // | critical | cancellation | |
- // | | point | |
- // | critical | cancel | |
- // | critical | taskloop | * |
- // | critical | taskloop simd | * |
- // | critical | distribute | + |
- // | critical | distribute | + |
- // | | parallel for | |
- // | critical | distribute | + |
- // | |parallel for simd| |
- // | critical | distribute simd | + |
- // | critical | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | simd | parallel | |
- // | simd | for | |
- // | simd | for simd | |
- // | simd | master | |
- // | simd | critical | |
- // | simd | simd | * |
- // | simd | sections | |
- // | simd | section | |
- // | simd | single | |
- // | simd | parallel for | |
- // | simd |parallel for simd| |
- // | simd |parallel sections| |
- // | simd | task | |
- // | simd | taskyield | |
- // | simd | barrier | |
- // | simd | taskwait | |
- // | simd | taskgroup | |
- // | simd | flush | |
- // | simd | ordered | + (with simd clause) |
- // | simd | atomic | |
- // | simd | target | |
- // | simd | target parallel | |
- // | simd | target parallel | |
- // | | for | |
- // | simd | target enter | |
- // | | data | |
- // | simd | target exit | |
- // | | data | |
- // | simd | teams | |
- // | simd | cancellation | |
- // | | point | |
- // | simd | cancel | |
- // | simd | taskloop | |
- // | simd | taskloop simd | |
- // | simd | distribute | |
- // | simd | distribute | |
- // | | parallel for | |
- // | simd | distribute | |
- // | |parallel for simd| |
- // | simd | distribute simd | |
- // | simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | for simd | parallel | |
- // | for simd | for | |
- // | for simd | for simd | |
- // | for simd | master | |
- // | for simd | critical | |
- // | for simd | simd | * |
- // | for simd | sections | |
- // | for simd | section | |
- // | for simd | single | |
- // | for simd | parallel for | |
- // | for simd |parallel for simd| |
- // | for simd |parallel sections| |
- // | for simd | task | |
- // | for simd | taskyield | |
- // | for simd | barrier | |
- // | for simd | taskwait | |
- // | for simd | taskgroup | |
- // | for simd | flush | |
- // | for simd | ordered | + (with simd clause) |
- // | for simd | atomic | |
- // | for simd | target | |
- // | for simd | target parallel | |
- // | for simd | target parallel | |
- // | | for | |
- // | for simd | target enter | |
- // | | data | |
- // | for simd | target exit | |
- // | | data | |
- // | for simd | teams | |
- // | for simd | cancellation | |
- // | | point | |
- // | for simd | cancel | |
- // | for simd | taskloop | |
- // | for simd | taskloop simd | |
- // | for simd | distribute | |
- // | for simd | distribute | |
- // | | parallel for | |
- // | for simd | distribute | |
- // | |parallel for simd| |
- // | for simd | distribute simd | |
- // | for simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel for simd| parallel | |
- // | parallel for simd| for | |
- // | parallel for simd| for simd | |
- // | parallel for simd| master | |
- // | parallel for simd| critical | |
- // | parallel for simd| simd | * |
- // | parallel for simd| sections | |
- // | parallel for simd| section | |
- // | parallel for simd| single | |
- // | parallel for simd| parallel for | |
- // | parallel for simd|parallel for simd| |
- // | parallel for simd|parallel sections| |
- // | parallel for simd| task | |
- // | parallel for simd| taskyield | |
- // | parallel for simd| barrier | |
- // | parallel for simd| taskwait | |
- // | parallel for simd| taskgroup | |
- // | parallel for simd| flush | |
- // | parallel for simd| ordered | + (with simd clause) |
- // | parallel for simd| atomic | |
- // | parallel for simd| target | |
- // | parallel for simd| target parallel | |
- // | parallel for simd| target parallel | |
- // | | for | |
- // | parallel for simd| target enter | |
- // | | data | |
- // | parallel for simd| target exit | |
- // | | data | |
- // | parallel for simd| teams | |
- // | parallel for simd| cancellation | |
- // | | point | |
- // | parallel for simd| cancel | |
- // | parallel for simd| taskloop | |
- // | parallel for simd| taskloop simd | |
- // | parallel for simd| distribute | |
- // | parallel for simd| distribute | |
- // | | parallel for | |
- // | parallel for simd| distribute | |
- // | |parallel for simd| |
- // | parallel for simd| distribute simd | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | sections | parallel | * |
- // | sections | for | + |
- // | sections | for simd | + |
- // | sections | master | + |
- // | sections | critical | * |
- // | sections | simd | * |
- // | sections | sections | + |
- // | sections | section | * |
- // | sections | single | + |
- // | sections | parallel for | * |
- // | sections |parallel for simd| * |
- // | sections |parallel sections| * |
- // | sections | task | * |
- // | sections | taskyield | * |
- // | sections | barrier | + |
- // | sections | taskwait | * |
- // | sections | taskgroup | * |
- // | sections | flush | * |
- // | sections | ordered | + |
- // | sections | atomic | * |
- // | sections | target | * |
- // | sections | target parallel | * |
- // | sections | target parallel | * |
- // | | for | |
- // | sections | target enter | * |
- // | | data | |
- // | sections | target exit | * |
- // | | data | |
- // | sections | teams | + |
- // | sections | cancellation | |
- // | | point | ! |
- // | sections | cancel | ! |
- // | sections | taskloop | * |
- // | sections | taskloop simd | * |
- // | sections | distribute | + |
- // | sections | distribute | + |
- // | | parallel for | |
- // | sections | distribute | + |
- // | |parallel for simd| |
- // | sections | distribute simd | + |
- // | sections | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | section | parallel | * |
- // | section | for | + |
- // | section | for simd | + |
- // | section | master | + |
- // | section | critical | * |
- // | section | simd | * |
- // | section | sections | + |
- // | section | section | + |
- // | section | single | + |
- // | section | parallel for | * |
- // | section |parallel for simd| * |
- // | section |parallel sections| * |
- // | section | task | * |
- // | section | taskyield | * |
- // | section | barrier | + |
- // | section | taskwait | * |
- // | section | taskgroup | * |
- // | section | flush | * |
- // | section | ordered | + |
- // | section | atomic | * |
- // | section | target | * |
- // | section | target parallel | * |
- // | section | target parallel | * |
- // | | for | |
- // | section | target enter | * |
- // | | data | |
- // | section | target exit | * |
- // | | data | |
- // | section | teams | + |
- // | section | cancellation | |
- // | | point | ! |
- // | section | cancel | ! |
- // | section | taskloop | * |
- // | section | taskloop simd | * |
- // | section | distribute | + |
- // | section | distribute | + |
- // | | parallel for | |
- // | section | distribute | + |
- // | |parallel for simd| |
- // | section | distribute simd | + |
- // | section | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | single | parallel | * |
- // | single | for | + |
- // | single | for simd | + |
- // | single | master | + |
- // | single | critical | * |
- // | single | simd | * |
- // | single | sections | + |
- // | single | section | + |
- // | single | single | + |
- // | single | parallel for | * |
- // | single |parallel for simd| * |
- // | single |parallel sections| * |
- // | single | task | * |
- // | single | taskyield | * |
- // | single | barrier | + |
- // | single | taskwait | * |
- // | single | taskgroup | * |
- // | single | flush | * |
- // | single | ordered | + |
- // | single | atomic | * |
- // | single | target | * |
- // | single | target parallel | * |
- // | single | target parallel | * |
- // | | for | |
- // | single | target enter | * |
- // | | data | |
- // | single | target exit | * |
- // | | data | |
- // | single | teams | + |
- // | single | cancellation | |
- // | | point | |
- // | single | cancel | |
- // | single | taskloop | * |
- // | single | taskloop simd | * |
- // | single | distribute | + |
- // | single | distribute | + |
- // | | parallel for | |
- // | single | distribute | + |
- // | |parallel for simd| |
- // | single | distribute simd | + |
- // | single | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel for | parallel | * |
- // | parallel for | for | + |
- // | parallel for | for simd | + |
- // | parallel for | master | + |
- // | parallel for | critical | * |
- // | parallel for | simd | * |
- // | parallel for | sections | + |
- // | parallel for | section | + |
- // | parallel for | single | + |
- // | parallel for | parallel for | * |
- // | parallel for |parallel for simd| * |
- // | parallel for |parallel sections| * |
- // | parallel for | task | * |
- // | parallel for | taskyield | * |
- // | parallel for | barrier | + |
- // | parallel for | taskwait | * |
- // | parallel for | taskgroup | * |
- // | parallel for | flush | * |
- // | parallel for | ordered | * (if construct is ordered) |
- // | parallel for | atomic | * |
- // | parallel for | target | * |
- // | parallel for | target parallel | * |
- // | parallel for | target parallel | * |
- // | | for | |
- // | parallel for | target enter | * |
- // | | data | |
- // | parallel for | target exit | * |
- // | | data | |
- // | parallel for | teams | + |
- // | parallel for | cancellation | |
- // | | point | ! |
- // | parallel for | cancel | ! |
- // | parallel for | taskloop | * |
- // | parallel for | taskloop simd | * |
- // | parallel for | distribute | + |
- // | parallel for | distribute | + |
- // | | parallel for | |
- // | parallel for | distribute | + |
- // | |parallel for simd| |
- // | parallel for | distribute simd | + |
- // | parallel for | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel sections| parallel | * |
- // | parallel sections| for | + |
- // | parallel sections| for simd | + |
- // | parallel sections| master | + |
- // | parallel sections| critical | + |
- // | parallel sections| simd | * |
- // | parallel sections| sections | + |
- // | parallel sections| section | * |
- // | parallel sections| single | + |
- // | parallel sections| parallel for | * |
- // | parallel sections|parallel for simd| * |
- // | parallel sections|parallel sections| * |
- // | parallel sections| task | * |
- // | parallel sections| taskyield | * |
- // | parallel sections| barrier | + |
- // | parallel sections| taskwait | * |
- // | parallel sections| taskgroup | * |
- // | parallel sections| flush | * |
- // | parallel sections| ordered | + |
- // | parallel sections| atomic | * |
- // | parallel sections| target | * |
- // | parallel sections| target parallel | * |
- // | parallel sections| target parallel | * |
- // | | for | |
- // | parallel sections| target enter | * |
- // | | data | |
- // | parallel sections| target exit | * |
- // | | data | |
- // | parallel sections| teams | + |
- // | parallel sections| cancellation | |
- // | | point | ! |
- // | parallel sections| cancel | ! |
- // | parallel sections| taskloop | * |
- // | parallel sections| taskloop simd | * |
- // | parallel sections| distribute | + |
- // | parallel sections| distribute | + |
- // | | parallel for | |
- // | parallel sections| distribute | + |
- // | |parallel for simd| |
- // | parallel sections| distribute simd | + |
- // | parallel sections| target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | task | parallel | * |
- // | task | for | + |
- // | task | for simd | + |
- // | task | master | + |
- // | task | critical | * |
- // | task | simd | * |
- // | task | sections | + |
- // | task | section | + |
- // | task | single | + |
- // | task | parallel for | * |
- // | task |parallel for simd| * |
- // | task |parallel sections| * |
- // | task | task | * |
- // | task | taskyield | * |
- // | task | barrier | + |
- // | task | taskwait | * |
- // | task | taskgroup | * |
- // | task | flush | * |
- // | task | ordered | + |
- // | task | atomic | * |
- // | task | target | * |
- // | task | target parallel | * |
- // | task | target parallel | * |
- // | | for | |
- // | task | target enter | * |
- // | | data | |
- // | task | target exit | * |
- // | | data | |
- // | task | teams | + |
- // | task | cancellation | |
- // | | point | ! |
- // | task | cancel | ! |
- // | task | taskloop | * |
- // | task | taskloop simd | * |
- // | task | distribute | + |
- // | task | distribute | + |
- // | | parallel for | |
- // | task | distribute | + |
- // | |parallel for simd| |
- // | task | distribute simd | + |
- // | task | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | ordered | parallel | * |
- // | ordered | for | + |
- // | ordered | for simd | + |
- // | ordered | master | * |
- // | ordered | critical | * |
- // | ordered | simd | * |
- // | ordered | sections | + |
- // | ordered | section | + |
- // | ordered | single | + |
- // | ordered | parallel for | * |
- // | ordered |parallel for simd| * |
- // | ordered |parallel sections| * |
- // | ordered | task | * |
- // | ordered | taskyield | * |
- // | ordered | barrier | + |
- // | ordered | taskwait | * |
- // | ordered | taskgroup | * |
- // | ordered | flush | * |
- // | ordered | ordered | + |
- // | ordered | atomic | * |
- // | ordered | target | * |
- // | ordered | target parallel | * |
- // | ordered | target parallel | * |
- // | | for | |
- // | ordered | target enter | * |
- // | | data | |
- // | ordered | target exit | * |
- // | | data | |
- // | ordered | teams | + |
- // | ordered | cancellation | |
- // | | point | |
- // | ordered | cancel | |
- // | ordered | taskloop | * |
- // | ordered | taskloop simd | * |
- // | ordered | distribute | + |
- // | ordered | distribute | + |
- // | | parallel for | |
- // | ordered | distribute | + |
- // | |parallel for simd| |
- // | ordered | distribute simd | + |
- // | ordered | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | atomic | parallel | |
- // | atomic | for | |
- // | atomic | for simd | |
- // | atomic | master | |
- // | atomic | critical | |
- // | atomic | simd | |
- // | atomic | sections | |
- // | atomic | section | |
- // | atomic | single | |
- // | atomic | parallel for | |
- // | atomic |parallel for simd| |
- // | atomic |parallel sections| |
- // | atomic | task | |
- // | atomic | taskyield | |
- // | atomic | barrier | |
- // | atomic | taskwait | |
- // | atomic | taskgroup | |
- // | atomic | flush | |
- // | atomic | ordered | |
- // | atomic | atomic | |
- // | atomic | target | |
- // | atomic | target parallel | |
- // | atomic | target parallel | |
- // | | for | |
- // | atomic | target enter | |
- // | | data | |
- // | atomic | target exit | |
- // | | data | |
- // | atomic | teams | |
- // | atomic | cancellation | |
- // | | point | |
- // | atomic | cancel | |
- // | atomic | taskloop | |
- // | atomic | taskloop simd | |
- // | atomic | distribute | |
- // | atomic | distribute | |
- // | | parallel for | |
- // | atomic | distribute | |
- // | |parallel for simd| |
- // | atomic | distribute simd | |
- // | atomic | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target | parallel | * |
- // | target | for | * |
- // | target | for simd | * |
- // | target | master | * |
- // | target | critical | * |
- // | target | simd | * |
- // | target | sections | * |
- // | target | section | * |
- // | target | single | * |
- // | target | parallel for | * |
- // | target |parallel for simd| * |
- // | target |parallel sections| * |
- // | target | task | * |
- // | target | taskyield | * |
- // | target | barrier | * |
- // | target | taskwait | * |
- // | target | taskgroup | * |
- // | target | flush | * |
- // | target | ordered | * |
- // | target | atomic | * |
- // | target | target | |
- // | target | target parallel | |
- // | target | target parallel | |
- // | | for | |
- // | target | target enter | |
- // | | data | |
- // | target | target exit | |
- // | | data | |
- // | target | teams | * |
- // | target | cancellation | |
- // | | point | |
- // | target | cancel | |
- // | target | taskloop | * |
- // | target | taskloop simd | * |
- // | target | distribute | + |
- // | target | distribute | + |
- // | | parallel for | |
- // | target | distribute | + |
- // | |parallel for simd| |
- // | target | distribute simd | + |
- // | target | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | target parallel | for | * |
- // | target parallel | for simd | * |
- // | target parallel | master | * |
- // | target parallel | critical | * |
- // | target parallel | simd | * |
- // | target parallel | sections | * |
- // | target parallel | section | * |
- // | target parallel | single | * |
- // | target parallel | parallel for | * |
- // | target parallel |parallel for simd| * |
- // | target parallel |parallel sections| * |
- // | target parallel | task | * |
- // | target parallel | taskyield | * |
- // | target parallel | barrier | * |
- // | target parallel | taskwait | * |
- // | target parallel | taskgroup | * |
- // | target parallel | flush | * |
- // | target parallel | ordered | * |
- // | target parallel | atomic | * |
- // | target parallel | target | |
- // | target parallel | target parallel | |
- // | target parallel | target parallel | |
- // | | for | |
- // | target parallel | target enter | |
- // | | data | |
- // | target parallel | target exit | |
- // | | data | |
- // | target parallel | teams | |
- // | target parallel | cancellation | |
- // | | point | ! |
- // | target parallel | cancel | ! |
- // | target parallel | taskloop | * |
- // | target parallel | taskloop simd | * |
- // | target parallel | distribute | |
- // | target parallel | distribute | |
- // | | parallel for | |
- // | target parallel | distribute | |
- // | |parallel for simd| |
- // | target parallel | distribute simd | |
- // | target parallel | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | for | | |
- // | target parallel | for | * |
- // | for | | |
- // | target parallel | for simd | * |
- // | for | | |
- // | target parallel | master | * |
- // | for | | |
- // | target parallel | critical | * |
- // | for | | |
- // | target parallel | simd | * |
- // | for | | |
- // | target parallel | sections | * |
- // | for | | |
- // | target parallel | section | * |
- // | for | | |
- // | target parallel | single | * |
- // | for | | |
- // | target parallel | parallel for | * |
- // | for | | |
- // | target parallel |parallel for simd| * |
- // | for | | |
- // | target parallel |parallel sections| * |
- // | for | | |
- // | target parallel | task | * |
- // | for | | |
- // | target parallel | taskyield | * |
- // | for | | |
- // | target parallel | barrier | * |
- // | for | | |
- // | target parallel | taskwait | * |
- // | for | | |
- // | target parallel | taskgroup | * |
- // | for | | |
- // | target parallel | flush | * |
- // | for | | |
- // | target parallel | ordered | * |
- // | for | | |
- // | target parallel | atomic | * |
- // | for | | |
- // | target parallel | target | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | for | |
- // | target parallel | target enter | |
- // | for | data | |
- // | target parallel | target exit | |
- // | for | data | |
- // | target parallel | teams | |
- // | for | | |
- // | target parallel | cancellation | |
- // | for | point | ! |
- // | target parallel | cancel | ! |
- // | for | | |
- // | target parallel | taskloop | * |
- // | for | | |
- // | target parallel | taskloop simd | * |
- // | for | | |
- // | target parallel | distribute | |
- // | for | | |
- // | target parallel | distribute | |
- // | for | parallel for | |
- // | target parallel | distribute | |
- // | for |parallel for simd| |
- // | target parallel | distribute simd | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | teams | parallel | * |
- // | teams | for | + |
- // | teams | for simd | + |
- // | teams | master | + |
- // | teams | critical | + |
- // | teams | simd | + |
- // | teams | sections | + |
- // | teams | section | + |
- // | teams | single | + |
- // | teams | parallel for | * |
- // | teams |parallel for simd| * |
- // | teams |parallel sections| * |
- // | teams | task | + |
- // | teams | taskyield | + |
- // | teams | barrier | + |
- // | teams | taskwait | + |
- // | teams | taskgroup | + |
- // | teams | flush | + |
- // | teams | ordered | + |
- // | teams | atomic | + |
- // | teams | target | + |
- // | teams | target parallel | + |
- // | teams | target parallel | + |
- // | | for | |
- // | teams | target enter | + |
- // | | data | |
- // | teams | target exit | + |
- // | | data | |
- // | teams | teams | + |
- // | teams | cancellation | |
- // | | point | |
- // | teams | cancel | |
- // | teams | taskloop | + |
- // | teams | taskloop simd | + |
- // | teams | distribute | ! |
- // | teams | distribute | ! |
- // | | parallel for | |
- // | teams | distribute | ! |
- // | |parallel for simd| |
- // | teams | distribute simd | ! |
- // | teams | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | taskloop | parallel | * |
- // | taskloop | for | + |
- // | taskloop | for simd | + |
- // | taskloop | master | + |
- // | taskloop | critical | * |
- // | taskloop | simd | * |
- // | taskloop | sections | + |
- // | taskloop | section | + |
- // | taskloop | single | + |
- // | taskloop | parallel for | * |
- // | taskloop |parallel for simd| * |
- // | taskloop |parallel sections| * |
- // | taskloop | task | * |
- // | taskloop | taskyield | * |
- // | taskloop | barrier | + |
- // | taskloop | taskwait | * |
- // | taskloop | taskgroup | * |
- // | taskloop | flush | * |
- // | taskloop | ordered | + |
- // | taskloop | atomic | * |
- // | taskloop | target | * |
- // | taskloop | target parallel | * |
- // | taskloop | target parallel | * |
- // | | for | |
- // | taskloop | target enter | * |
- // | | data | |
- // | taskloop | target exit | * |
- // | | data | |
- // | taskloop | teams | + |
- // | taskloop | cancellation | |
- // | | point | |
- // | taskloop | cancel | |
- // | taskloop | taskloop | * |
- // | taskloop | distribute | + |
- // | taskloop | distribute | + |
- // | | parallel for | |
- // | taskloop | distribute | + |
- // | |parallel for simd| |
- // | taskloop | distribute simd | + |
- // | taskloop | target parallel | * |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | taskloop simd | parallel | |
- // | taskloop simd | for | |
- // | taskloop simd | for simd | |
- // | taskloop simd | master | |
- // | taskloop simd | critical | |
- // | taskloop simd | simd | * |
- // | taskloop simd | sections | |
- // | taskloop simd | section | |
- // | taskloop simd | single | |
- // | taskloop simd | parallel for | |
- // | taskloop simd |parallel for simd| |
- // | taskloop simd |parallel sections| |
- // | taskloop simd | task | |
- // | taskloop simd | taskyield | |
- // | taskloop simd | barrier | |
- // | taskloop simd | taskwait | |
- // | taskloop simd | taskgroup | |
- // | taskloop simd | flush | |
- // | taskloop simd | ordered | + (with simd clause) |
- // | taskloop simd | atomic | |
- // | taskloop simd | target | |
- // | taskloop simd | target parallel | |
- // | taskloop simd | target parallel | |
- // | | for | |
- // | taskloop simd | target enter | |
- // | | data | |
- // | taskloop simd | target exit | |
- // | | data | |
- // | taskloop simd | teams | |
- // | taskloop simd | cancellation | |
- // | | point | |
- // | taskloop simd | cancel | |
- // | taskloop simd | taskloop | |
- // | taskloop simd | taskloop simd | |
- // | taskloop simd | distribute | |
- // | taskloop simd | distribute | |
- // | | parallel for | |
- // | taskloop simd | distribute | |
- // | |parallel for simd| |
- // | taskloop simd | distribute simd | |
- // | taskloop simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | distribute | for | * |
- // | distribute | for simd | * |
- // | distribute | master | * |
- // | distribute | critical | * |
- // | distribute | simd | * |
- // | distribute | sections | * |
- // | distribute | section | * |
- // | distribute | single | * |
- // | distribute | parallel for | * |
- // | distribute |parallel for simd| * |
- // | distribute |parallel sections| * |
- // | distribute | task | * |
- // | distribute | taskyield | * |
- // | distribute | barrier | * |
- // | distribute | taskwait | * |
- // | distribute | taskgroup | * |
- // | distribute | flush | * |
- // | distribute | ordered | + |
- // | distribute | atomic | * |
- // | distribute | target | |
- // | distribute | target parallel | |
- // | distribute | target parallel | |
- // | | for | |
- // | distribute | target enter | |
- // | | data | |
- // | distribute | target exit | |
- // | | data | |
- // | distribute | teams | |
- // | distribute | cancellation | + |
- // | | point | |
- // | distribute | cancel | + |
- // | distribute | taskloop | * |
- // | distribute | taskloop simd | * |
- // | distribute | distribute | |
- // | distribute | distribute | |
- // | | parallel for | |
- // | distribute | distribute | |
- // | |parallel for simd| |
- // | distribute | distribute simd | |
- // | distribute | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | parallel for | | |
- // | distribute | for | * |
- // | parallel for | | |
- // | distribute | for simd | * |
- // | parallel for | | |
- // | distribute | master | * |
- // | parallel for | | |
- // | distribute | critical | * |
- // | parallel for | | |
- // | distribute | simd | * |
- // | parallel for | | |
- // | distribute | sections | * |
- // | parallel for | | |
- // | distribute | section | * |
- // | parallel for | | |
- // | distribute | single | * |
- // | parallel for | | |
- // | distribute | parallel for | * |
- // | parallel for | | |
- // | distribute |parallel for simd| * |
- // | parallel for | | |
- // | distribute |parallel sections| * |
- // | parallel for | | |
- // | distribute | task | * |
- // | parallel for | | |
- // | parallel for | | |
- // | distribute | taskyield | * |
- // | parallel for | | |
- // | distribute | barrier | * |
- // | parallel for | | |
- // | distribute | taskwait | * |
- // | parallel for | | |
- // | distribute | taskgroup | * |
- // | parallel for | | |
- // | distribute | flush | * |
- // | parallel for | | |
- // | distribute | ordered | + |
- // | parallel for | | |
- // | distribute | atomic | * |
- // | parallel for | | |
- // | distribute | target | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | for | |
- // | distribute | target enter | |
- // | parallel for | data | |
- // | distribute | target exit | |
- // | parallel for | data | |
- // | distribute | teams | |
- // | parallel for | | |
- // | distribute | cancellation | + |
- // | parallel for | point | |
- // | distribute | cancel | + |
- // | parallel for | | |
- // | distribute | taskloop | * |
- // | parallel for | | |
- // | distribute | taskloop simd | * |
- // | parallel for | | |
- // | distribute | distribute | |
- // | parallel for | | |
- // | distribute | distribute | |
- // | parallel for | parallel for | |
- // | distribute | distribute | |
- // | parallel for |parallel for simd| |
- // | distribute | distribute simd | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | parallel for simd| | |
- // | distribute | for | * |
- // | parallel for simd| | |
- // | distribute | for simd | * |
- // | parallel for simd| | |
- // | distribute | master | * |
- // | parallel for simd| | |
- // | distribute | critical | * |
- // | parallel for simd| | |
- // | distribute | simd | * |
- // | parallel for simd| | |
- // | distribute | sections | * |
- // | parallel for simd| | |
- // | distribute | section | * |
- // | parallel for simd| | |
- // | distribute | single | * |
- // | parallel for simd| | |
- // | distribute | parallel for | * |
- // | parallel for simd| | |
- // | distribute |parallel for simd| * |
- // | parallel for simd| | |
- // | distribute |parallel sections| * |
- // | parallel for simd| | |
- // | distribute | task | * |
- // | parallel for simd| | |
- // | distribute | taskyield | * |
- // | parallel for simd| | |
- // | distribute | barrier | * |
- // | parallel for simd| | |
- // | distribute | taskwait | * |
- // | parallel for simd| | |
- // | distribute | taskgroup | * |
- // | parallel for simd| | |
- // | distribute | flush | * |
- // | parallel for simd| | |
- // | distribute | ordered | + |
- // | parallel for simd| | |
- // | distribute | atomic | * |
- // | parallel for simd| | |
- // | distribute | target | |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| for | |
- // | distribute | target enter | |
- // | parallel for simd| data | |
- // | distribute | target exit | |
- // | parallel for simd| data | |
- // | distribute | teams | |
- // | parallel for simd| | |
- // | distribute | cancellation | + |
- // | parallel for simd| point | |
- // | distribute | cancel | + |
- // | parallel for simd| | |
- // | distribute | taskloop | * |
- // | parallel for simd| | |
- // | distribute | taskloop simd | * |
- // | parallel for simd| | |
- // | distribute | distribute | |
- // | parallel for simd| | |
- // | distribute | distribute | * |
- // | parallel for simd| parallel for | |
- // | distribute | distribute | * |
- // | parallel for simd|parallel for simd| |
- // | distribute | distribute simd | * |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute simd | parallel | * |
- // | distribute simd | for | * |
- // | distribute simd | for simd | * |
- // | distribute simd | master | * |
- // | distribute simd | critical | * |
- // | distribute simd | simd | * |
- // | distribute simd | sections | * |
- // | distribute simd | section | * |
- // | distribute simd | single | * |
- // | distribute simd | parallel for | * |
- // | distribute simd |parallel for simd| * |
- // | distribute simd |parallel sections| * |
- // | distribute simd | task | * |
- // | distribute simd | taskyield | * |
- // | distribute simd | barrier | * |
- // | distribute simd | taskwait | * |
- // | distribute simd | taskgroup | * |
- // | distribute simd | flush | * |
- // | distribute simd | ordered | + |
- // | distribute simd | atomic | * |
- // | distribute simd | target | * |
- // | distribute simd | target parallel | * |
- // | distribute simd | target parallel | * |
- // | | for | |
- // | distribute simd | target enter | * |
- // | | data | |
- // | distribute simd | target exit | * |
- // | | data | |
- // | distribute simd | teams | * |
- // | distribute simd | cancellation | + |
- // | | point | |
- // | distribute simd | cancel | + |
- // | distribute simd | taskloop | * |
- // | distribute simd | taskloop simd | * |
- // | distribute simd | distribute | |
- // | distribute simd | distribute | * |
- // | | parallel for | |
- // | distribute simd | distribute | * |
- // | |parallel for simd| |
- // | distribute simd | distribute simd | * |
- // | distribute simd | target parallel | * |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | for simd | | |
- // | target parallel | for | * |
- // | for simd | | |
- // | target parallel | for simd | * |
- // | for simd | | |
- // | target parallel | master | * |
- // | for simd | | |
- // | target parallel | critical | * |
- // | for simd | | |
- // | target parallel | simd | ! |
- // | for simd | | |
- // | target parallel | sections | * |
- // | for simd | | |
- // | target parallel | section | * |
- // | for simd | | |
- // | target parallel | single | * |
- // | for simd | | |
- // | target parallel | parallel for | * |
- // | for simd | | |
- // | target parallel |parallel for simd| * |
- // | for simd | | |
- // | target parallel |parallel sections| * |
- // | for simd | | |
- // | target parallel | task | * |
- // | for simd | | |
- // | target parallel | taskyield | * |
- // | for simd | | |
- // | target parallel | barrier | * |
- // | for simd | | |
- // | target parallel | taskwait | * |
- // | for simd | | |
- // | target parallel | taskgroup | * |
- // | for simd | | |
- // | target parallel | flush | * |
- // | for simd | | |
- // | target parallel | ordered | + (with simd clause) |
- // | for simd | | |
- // | target parallel | atomic | * |
- // | for simd | | |
- // | target parallel | target | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | for | |
- // | target parallel | target enter | * |
- // | for simd | data | |
- // | target parallel | target exit | * |
- // | for simd | data | |
- // | target parallel | teams | * |
- // | for simd | | |
- // | target parallel | cancellation | * |
- // | for simd | point | |
- // | target parallel | cancel | * |
- // | for simd | | |
- // | target parallel | taskloop | * |
- // | for simd | | |
- // | target parallel | taskloop simd | * |
- // | for simd | | |
- // | target parallel | distribute | * |
- // | for simd | | |
- // | target parallel | distribute | * |
- // | for simd | parallel for | |
- // | target parallel | distribute | * |
- // | for simd |parallel for simd| |
- // | target parallel | distribute simd | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | for simd | |
- // +------------------+-----------------+------------------------------------+
if (Stack->getCurScope()) {
auto ParentRegion = Stack->getParentDirective();
auto OffendingRegion = ParentRegion;
bool NestingProhibited = false;
bool CloseNesting = true;
+ bool OrphanSeen = false;
enum {
NoRecommend,
ShouldBeInParallelRegion,
@@ -3116,7 +1903,7 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// OpenMP [2.8.1,simd Construct, Restrictions]
// An ordered construct with the simd clause is the only OpenMP
// construct that can appear in the simd region.
- // Allowing a SIMD consruct nested in another SIMD construct is an
+ // Allowing a SIMD construct nested in another SIMD construct is an
// extension. The OpenMP 4.5 spec does not allow it. Issue a warning
// message.
SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
@@ -3144,9 +1931,11 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
}
return false;
}
- // Allow some constructs to be orphaned (they could be used in functions,
- // called from OpenMP regions with the required preconditions).
- if (ParentRegion == OMPD_unknown)
+ // Allow some constructs (except teams) to be orphaned (they could be
+ // used in functions, called from OpenMP regions with the required
+ // preconditions).
+ if (ParentRegion == OMPD_unknown &&
+ !isOpenMPNestingTeamsDirective(CurrentRegion))
return false;
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
@@ -3184,20 +1973,17 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// critical region with the same name. Note that this restriction is not
// sufficient to prevent deadlock.
SourceLocation PreviousCriticalLoc;
- bool DeadLock =
- Stack->hasDirective([CurrentName, &PreviousCriticalLoc](
- OpenMPDirectiveKind K,
- const DeclarationNameInfo &DNI,
- SourceLocation Loc)
- ->bool {
- if (K == OMPD_critical &&
- DNI.getName() == CurrentName.getName()) {
- PreviousCriticalLoc = Loc;
- return true;
- } else
- return false;
- },
- false /* skip top directive */);
+ bool DeadLock = Stack->hasDirective(
+ [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
+ const DeclarationNameInfo &DNI,
+ SourceLocation Loc) -> bool {
+ if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
+ PreviousCriticalLoc = Loc;
+ return true;
+ } else
+ return false;
+ },
+ false /* skip top directive */);
if (DeadLock) {
SemaRef.Diag(StartLoc,
diag::err_omp_prohibited_region_critical_same_name)
@@ -3217,7 +2003,8 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
ParentRegion == OMPD_critical ||
ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
- !isOpenMPParallelDirective(CurrentRegion)) {
+ !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPTeamsDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// A worksharing region may not be closely nested inside a worksharing,
// explicit task, critical, ordered, atomic, or master region.
@@ -3241,15 +2028,19 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
!(isOpenMPSimdDirective(ParentRegion) ||
Stack->isParentOrderedRegion());
Recommend = ShouldBeInOrderedRegion;
- } else if (isOpenMPTeamsDirective(CurrentRegion)) {
+ } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// If specified, a teams construct must be contained within a target
// construct.
NestingProhibited = ParentRegion != OMPD_target;
+ OrphanSeen = ParentRegion == OMPD_unknown;
Recommend = ShouldBeInTargetRegion;
Stack->setParentTeamsRegionLoc(Stack->getConstructLoc());
}
- if (!NestingProhibited && isOpenMPTeamsDirective(ParentRegion)) {
+ if (!NestingProhibited &&
+ !isOpenMPTargetExecutionDirective(CurrentRegion) &&
+ !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
+ (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
// OpenMP [2.16, Nesting of Regions]
// distribute, parallel, parallel sections, parallel workshare, and the
// parallel loop and parallel loop SIMD constructs are the only OpenMP
@@ -3258,11 +2049,13 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
!isOpenMPDistributeDirective(CurrentRegion);
Recommend = ShouldBeInParallelRegion;
}
- if (!NestingProhibited && isOpenMPDistributeDirective(CurrentRegion)) {
+ if (!NestingProhibited &&
+ isOpenMPNestingDistributeDirective(CurrentRegion)) {
// OpenMP 4.5 [2.17 Nesting of Regions]
// The region associated with the distribute construct must be strictly
// nested inside a teams region
- NestingProhibited = !isOpenMPTeamsDirective(ParentRegion);
+ NestingProhibited =
+ (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
Recommend = ShouldBeInTeamsRegion;
}
if (!NestingProhibited &&
@@ -3285,9 +2078,14 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
CloseNesting = false;
}
if (NestingProhibited) {
- SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
- << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
- << Recommend << getOpenMPDirectiveName(CurrentRegion);
+ if (OrphanSeen) {
+ SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
+ << getOpenMPDirectiveName(CurrentRegion) << Recommend;
+ } else {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
+ << Recommend << getOpenMPDirectiveName(CurrentRegion);
+ }
return true;
}
}
@@ -3602,6 +2400,45 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
AllowedNameModifiers.push_back(OMPD_target);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_target_simd:
+ Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_teams_distribute:
+ Res = ActOnOpenMPTeamsDistributeDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_teams_distribute_simd:
+ Res = ActOnOpenMPTeamsDistributeSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_teams_distribute_parallel_for_simd:
+ Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ Res = ActOnOpenMPTeamsDistributeParallelForDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_target_teams:
+ Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_target_teams_distribute:
+ Res = ActOnOpenMPTargetTeamsDistributeDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -3969,7 +2806,7 @@ public:
/// \brief Build reference expression to the private counter be used for
/// codegen.
Expr *BuildPrivateCounterVar() const;
- /// \brief Build initization of the counter be used for codegen.
+ /// \brief Build initialization of the counter be used for codegen.
Expr *BuildCounterInit() const;
/// \brief Build step of the counter be used for codegen.
Expr *BuildCounterStep() const;
@@ -4094,8 +2931,9 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
return true;
}
if (TestIsLessOp == Subtract) {
- NewStep = SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus,
- NewStep).get();
+ NewStep =
+ SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
+ .get();
Subtract = !Subtract;
}
}
@@ -4127,7 +2965,7 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
InitSrcRange = S->getSourceRange();
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParens();
- if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->getOpcode() == BO_Assign) {
auto *LHS = BO->getLHS()->IgnoreParens();
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
@@ -4142,9 +2980,9 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
}
}
- } else if (auto DS = dyn_cast<DeclStmt>(S)) {
+ } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
if (DS->isSingleDecl()) {
- if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
+ if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
if (Var->hasInit() && !Var->getType()->isReferenceType()) {
// Accept non-canonical init form here but emit ext. warning.
if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
@@ -4155,10 +2993,10 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
}
}
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getOperator() == OO_Equal) {
auto *LHS = CE->getArg(0);
- if (auto DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
@@ -4220,7 +3058,7 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
}
S = getExprAsWritten(S);
SourceLocation CondLoc = S->getLocStart();
- if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isRelationalOp()) {
if (GetInitLCDecl(BO->getLHS()) == LCDecl)
return SetUB(BO->getRHS(),
@@ -4233,7 +3071,7 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getNumArgs() == 2) {
auto Op = CE->getOperator();
switch (Op) {
@@ -4269,7 +3107,7 @@ bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
// var - incr
//
RHS = RHS->IgnoreParenImpCasts();
- if (auto BO = dyn_cast<BinaryOperator>(RHS)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
if (BO->isAdditiveOp()) {
bool IsAdd = BO->getOpcode() == BO_Add;
if (GetInitLCDecl(BO->getLHS()) == LCDecl)
@@ -4277,7 +3115,7 @@ bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
if (IsAdd && GetInitLCDecl(BO->getRHS()) == LCDecl)
return SetStep(BO->getLHS(), false);
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
bool IsAdd = CE->getOperator() == OO_Plus;
if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
@@ -4317,14 +3155,15 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
IncrementSrcRange = S->getSourceRange();
S = S->IgnoreParens();
- if (auto UO = dyn_cast<UnaryOperator>(S)) {
+ if (auto *UO = dyn_cast<UnaryOperator>(S)) {
if (UO->isIncrementDecrementOp() &&
GetInitLCDecl(UO->getSubExpr()) == LCDecl)
- return SetStep(
- SemaRef.ActOnIntegerConstant(UO->getLocStart(),
- (UO->isDecrementOp() ? -1 : 1)).get(),
- false);
- } else if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ return SetStep(SemaRef
+ .ActOnIntegerConstant(UO->getLocStart(),
+ (UO->isDecrementOp() ? -1 : 1))
+ .get(),
+ false);
+ } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
switch (BO->getOpcode()) {
case BO_AddAssign:
case BO_SubAssign:
@@ -4338,16 +3177,17 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
default:
break;
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
switch (CE->getOperator()) {
case OO_PlusPlus:
case OO_MinusMinus:
if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetStep(
- SemaRef.ActOnIntegerConstant(
- CE->getLocStart(),
- ((CE->getOperator() == OO_MinusMinus) ? -1 : 1)).get(),
- false);
+ return SetStep(SemaRef
+ .ActOnIntegerConstant(
+ CE->getLocStart(),
+ ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
+ .get(),
+ false);
break;
case OO_PlusEqual:
case OO_MinusEqual:
@@ -4544,7 +3384,7 @@ Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
return nullptr;
}
-/// \brief Build initization of the counter be used for codegen.
+/// \brief Build initialization of the counter to be used for codegen.
Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; }
/// \brief Build step of the counter be used for codegen.
@@ -4615,7 +3455,7 @@ static bool CheckOpenMPIterationSpace(
llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
// OpenMP [2.6, Canonical Loop Form]
// for (init-expr; test-expr; incr-expr) structured-block
- auto For = dyn_cast_or_null<ForStmt>(S);
+ auto *For = dyn_cast_or_null<ForStmt>(S);
if (!For) {
SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
<< (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
@@ -4855,8 +3695,7 @@ BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc,
/// \brief Convert integer expression \a E to make it have at least \a Bits
/// bits.
-static ExprResult WidenIterationCount(unsigned Bits, Expr *E,
- Sema &SemaRef) {
+static ExprResult WidenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
if (E == nullptr)
return ExprError();
auto &C = SemaRef.Context;
@@ -5014,15 +3853,17 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
auto PreCond = ExprResult(IterSpaces[0].PreCond);
auto N0 = IterSpaces[0].NumIterations;
ExprResult LastIteration32 = WidenIterationCount(
- 32 /* Bits */, SemaRef.PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
+ 32 /* Bits */, SemaRef
+ .PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
.get(),
SemaRef);
ExprResult LastIteration64 = WidenIterationCount(
- 64 /* Bits */, SemaRef.PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
+ 64 /* Bits */, SemaRef
+ .PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
.get(),
SemaRef);
@@ -5035,24 +3876,28 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Scope *CurScope = DSA.getCurScope();
for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
if (PreCond.isUsable()) {
- PreCond = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LAnd,
- PreCond.get(), IterSpaces[Cnt].PreCond);
+ PreCond =
+ SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
+ PreCond.get(), IterSpaces[Cnt].PreCond);
}
auto N = IterSpaces[Cnt].NumIterations;
+ SourceLocation Loc = N->getExprLoc();
AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
if (LastIteration32.isUsable())
LastIteration32 = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Mul, LastIteration32.get(),
- SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
- Sema::AA_Converting,
- /*AllowExplicit=*/true)
+ CurScope, Loc, BO_Mul, LastIteration32.get(),
+ SemaRef
+ .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
.get());
if (LastIteration64.isUsable())
LastIteration64 = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Mul, LastIteration64.get(),
- SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
- Sema::AA_Converting,
- /*AllowExplicit=*/true)
+ CurScope, Loc, BO_Mul, LastIteration64.get(),
+ SemaRef
+ .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
.get());
}
@@ -5083,7 +3928,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult NumIterations = LastIteration;
{
LastIteration = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Sub, LastIteration.get(),
+ CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
+ LastIteration.get(),
SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
if (!LastIteration.isUsable())
return 0;
@@ -5102,7 +3948,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Prepare SaveRef + 1.
NumIterations = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Add, SaveRef.get(),
+ CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
if (!NumIterations.isUsable())
return 0;
@@ -5110,7 +3956,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
- // Build variables passed into runtime, nesessary for worksharing directives.
+ // Build variables passed into runtime, necessary for worksharing directives.
ExprResult LB, UB, IL, ST, EUB, PrevLB, PrevUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind)) {
@@ -5146,7 +3992,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
/*DirectInit*/ false, /*TypeMayContainAuto*/ false);
// Build expression: UB = min(UB, LastIteration)
- // It is nesessary for CodeGen of directives with static scheduling.
+ // It is necessary for CodeGen of directives with static scheduling.
ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
UB.get(), LastIteration.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
@@ -5187,11 +4033,11 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
{
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
- Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
- isOpenMPTaskLoopDirective(DKind) ||
- isOpenMPDistributeDirective(DKind))
- ? LB.get()
- : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ Expr *RHS =
+ (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
+ ? LB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
Init = SemaRef.ActOnFinishFullExpr(Init.get());
}
@@ -5394,9 +4240,10 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
assert(I->second == OO_Plus || I->second == OO_Minus);
BinaryOperatorKind BOK = (I->second == OO_Plus) ? BO_Add : BO_Sub;
- UpCounterVal =
- SemaRef.BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
- UpCounterVal, NormalizedOffset).get();
+ UpCounterVal = SemaRef
+ .BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
+ UpCounterVal, NormalizedOffset)
+ .get();
}
Multiplier = *ILM;
++I;
@@ -5491,7 +4338,7 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5530,7 +4377,7 @@ StmtResult Sema::ActOnOpenMPForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5567,7 +4414,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5592,9 +4439,9 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
- while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
- if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
if (S.begin() == S.end())
return StmtError();
@@ -5769,7 +4616,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5811,7 +4658,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5836,9 +4683,9 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
- while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
- if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
if (S.begin() == S.end())
return StmtError();
@@ -5872,7 +4719,7 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6169,21 +5016,21 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
AtomicCompAssignOp->getOpcode());
OpLoc = AtomicCompAssignOp->getOperatorLoc();
E = AtomicCompAssignOp->getRHS();
- X = AtomicCompAssignOp->getLHS();
+ X = AtomicCompAssignOp->getLHS()->IgnoreParens();
IsXLHSInRHSPart = true;
} else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Binary Operation
- if(checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
+ if (checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
return true;
- } else if (auto *AtomicUnaryOp =
- dyn_cast<UnaryOperator>(AtomicBody->IgnoreParenImpCasts())) {
+ } else if (auto *AtomicUnaryOp = dyn_cast<UnaryOperator>(
+ AtomicBody->IgnoreParenImpCasts())) {
// Check for Unary Operation
if (AtomicUnaryOp->isIncrementDecrementOp()) {
IsPostfixUpdate = AtomicUnaryOp->isPostfix();
Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
OpLoc = AtomicUnaryOp->getOperatorLoc();
- X = AtomicUnaryOp->getSubExpr();
+ X = AtomicUnaryOp->getSubExpr()->IgnoreParens();
E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get();
IsXLHSInRHSPart = true;
} else {
@@ -6243,7 +5090,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- auto CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6311,8 +5158,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is read:
// v = x;
- if (auto AtomicBody = dyn_cast<Expr>(Body)) {
- auto AtomicBinOp =
+ if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
@@ -6373,8 +5220,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is write:
// x = expr;
- if (auto AtomicBody = dyn_cast<Expr>(Body)) {
- auto AtomicBinOp =
+ if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getLHS();
@@ -6692,7 +5539,7 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
if (auto *CS = dyn_cast<CompoundStmt>(S)) {
auto I = CS->body_begin();
while (I != CS->body_end()) {
- auto OED = dyn_cast<OMPExecutableDirective>(*I);
+ auto *OED = dyn_cast<OMPExecutableDirective>(*I);
if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
OMPTeamsFound = false;
break;
@@ -6772,7 +5619,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -6810,8 +5657,8 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
// OpenMP [2.10.1, Restrictions, p. 97]
// At least one map clause must appear on the directive.
if (!HasMapClause(Clauses)) {
- Diag(StartLoc, diag::err_omp_no_map_for_directive) <<
- getOpenMPDirectiveName(OMPD_target_data);
+ Diag(StartLoc, diag::err_omp_no_map_for_directive)
+ << getOpenMPDirectiveName(OMPD_target_data);
return StmtError();
}
@@ -7011,7 +5858,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -7192,7 +6039,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -7207,6 +6054,327 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPTargetSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will define the
+ // nested loops number.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetSimdDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt,
+ *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp teams distribute loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_simd, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp teams distribute simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeParallelForSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ AStmt);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_target_teams_distribute,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target teams distribute loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetTeamsDistributeDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse' with number of loops, it will
+ // define the nested loops number.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_target_teams_distribute_parallel_for,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target teams distribute parallel for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetTeamsDistributeParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -7683,8 +6851,8 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
Res = ActOnOpenMPDefaultmapClause(
static_cast<OpenMPDefaultmapClauseModifier>(Argument[Modifier]),
static_cast<OpenMPDefaultmapClauseKind>(Argument[DefaultmapKind]),
- StartLoc, LParenLoc, ArgumentLoc[Modifier],
- ArgumentLoc[DefaultmapKind], EndLoc);
+ StartLoc, LParenLoc, ArgumentLoc[Modifier], ArgumentLoc[DefaultmapKind],
+ EndLoc);
break;
case OMPC_final:
case OMPC_num_threads:
@@ -8025,7 +7193,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_depend:
- Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
+ Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
@@ -8207,12 +7375,13 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
continue;
}
+ auto CurrDir = DSAStack->getCurrentDirective();
// Variably modified types are not supported for tasks.
if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
- isOpenMPTaskingDirective(DSAStack->getCurrentDirective())) {
+ isOpenMPTaskingDirective(CurrDir)) {
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_private) << Type
- << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ << getOpenMPDirectiveName(CurrDir);
bool IsDecl =
!VD ||
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
@@ -8225,14 +7394,22 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (DSAStack->getCurrentDirective() == OMPD_target) {
+ if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
+ CurrDir == OMPD_target_teams ||
+ CurrDir == OMPD_target_teams_distribute ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
- VD, /* CurrentRegionOnly = */ true,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef)
- -> bool { return true; })) {
- Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ VD, /*CurrentRegionOnly=*/true,
+ [&](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind WhereFoundClauseKind) -> bool {
+ ConflictKind = WhereFoundClauseKind;
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(OMPC_private)
- << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ << getOpenMPClauseName(ConflictKind)
+ << getOpenMPDirectiveName(CurrDir);
ReportOriginalDSA(*this, DSAStack, D, DVar);
continue;
}
@@ -8388,7 +7565,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// worksharing regions arising from the worksharing construct ever bind
// to any of the parallel regions arising from the parallel construct.
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared &&
(isOpenMPParallelDirective(DVar.DKind) ||
@@ -8476,13 +7654,21 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (CurrDir == OMPD_target) {
+ if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
+ CurrDir == OMPD_target_teams ||
+ CurrDir == OMPD_target_teams_distribute ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
- VD, /* CurrentRegionOnly = */ true,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef)
- -> bool { return true; })) {
- Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ VD, /*CurrentRegionOnly=*/true,
+ [&](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind WhereFoundClauseKind) -> bool {
+ ConflictKind = WhereFoundClauseKind;
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
ReportOriginalDSA(*this, DSAStack, D, DVar);
continue;
@@ -8645,7 +7831,8 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
// regions.
DSAStackTy::DSAVarData TopDVar = DVar;
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared) {
Diag(ELoc, diag::err_omp_required_access)
@@ -8884,7 +8071,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
cast_or_null<UnresolvedLookupExpr>(UnresolvedReduction)) {
Lookups.push_back(UnresolvedSet<8>());
Decl *PrevD = nullptr;
- for(auto *D : ULE->decls()) {
+ for (auto *D : ULE->decls()) {
if (D == PrevD)
Lookups.push_back(UnresolvedSet<8>());
else if (auto *DRD = cast<OMPDeclareReductionDecl>(D))
@@ -9175,7 +8362,8 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
// worksharing regions arising from the worksharing construct bind.
OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared) {
Diag(ELoc, diag::err_omp_required_access)
@@ -9260,7 +8448,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
if (OASE ||
(!ASE &&
D->getType().getNonReferenceType()->isVariablyModifiedType())) {
- // For arays/array sections only:
+ // For arrays/array sections only:
// Create pseudo array type for private copy. The size for this array will
// be generated during codegen.
// For array subscripts or single variables Private Ty is the same as Type
@@ -9738,7 +8926,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *InitExpr = *CurInit;
// Build privatized reference to the current linear var.
- auto DE = cast<DeclRefExpr>(SimpleRefExpr);
+ auto *DE = cast<DeclRefExpr>(SimpleRefExpr);
Expr *CapturedRef;
if (LinKind == OMPC_LINEAR_uval)
CapturedRef = cast<VarDecl>(DE->getDecl())->getInit();
@@ -10040,8 +9228,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
auto *DstVD =
buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoDstExpr =
- buildDeclRefExpr(*this, DstVD, Type, ELoc);
+ auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
auto AssignmentOp = BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
@@ -10256,9 +9443,6 @@ static bool IsCXXRecordForMappable(Sema &SemaRef, SourceLocation Loc,
if (!RD || RD->isInvalidDecl())
return true;
- if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
- if (auto *CTD = CTSD->getSpecializedTemplate())
- RD = CTD->getTemplatedDecl();
auto QTy = SemaRef.Context.getRecordType(RD);
if (RD->isDynamicClass()) {
SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
@@ -10302,8 +9486,7 @@ static bool CheckTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
return false;
} else if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(ND)) {
- if (!RD->isInvalidDecl() &&
- !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
+ if (!RD->isInvalidDecl() && !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
return false;
}
return true;
@@ -10332,7 +9515,7 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
auto *Length = OASE->getLength();
// If there is a lower bound that does not evaluates to zero, we are not
- // convering the whole dimension.
+ // covering the whole dimension.
if (LowerBound) {
llvm::APSInt ConstLowerBound;
if (!LowerBound->EvaluateAsInt(ConstLowerBound, SemaRef.getASTContext()))
@@ -10367,8 +9550,8 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// section or array subscript) does NOT specify a single element of the array
// whose base type is \a BaseQTy.
static bool CheckArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
- const Expr *E,
- QualType BaseQTy) {
+ const Expr *E,
+ QualType BaseQTy) {
auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
// An array subscript always refer to a single element. Also, an array section
@@ -10574,16 +9757,16 @@ static Expr *CheckMapClauseExpressionBase(
bool NotUnity =
CheckArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
- if (AllowWholeSizeArraySection && AllowUnitySizeArraySection) {
- // Any array section is currently allowed.
+ if (AllowWholeSizeArraySection) {
+ // Any array section is currently allowed. Allowing a whole size array
+ // section implies allowing a unity array section as well.
//
// If this array section refers to the whole dimension we can still
// accept other array sections before this one, except if the base is a
// pointer. Otherwise, only unitary sections are accepted.
if (NotWhole || IsPointer)
AllowWholeSizeArraySection = false;
- } else if ((AllowUnitySizeArraySection && NotUnity) ||
- (AllowWholeSizeArraySection && NotWhole)) {
+ } else if (AllowUnitySizeArraySection && NotUnity) {
// A unity or whole array section is not allowed and that is not
// compatible with the properties of the current array section.
SemaRef.Diag(
@@ -10634,7 +9817,8 @@ static bool CheckMapConflicts(
bool FoundError = DSAS->checkMappableExprComponentListsForDecl(
VD, CurrentRegionOnly,
[&](OMPClauseMappableExprCommon::MappableExprComponentListRef
- StackComponents) -> bool {
+ StackComponents,
+ OpenMPClauseKind) -> bool {
assert(!StackComponents.empty() &&
"Map clause expression with no components!");
@@ -10686,10 +9870,10 @@ static bool CheckMapConflicts(
for (; SI != SE; ++SI) {
QualType Type;
if (auto *ASE =
- dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
+ dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
Type = ASE->getBase()->IgnoreParenImpCasts()->getType();
- } else if (auto *OASE =
- dyn_cast<OMPArraySectionExpr>(SI->getAssociatedExpression())) {
+ } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(
+ SI->getAssociatedExpression())) {
auto *E = OASE->getBase()->IgnoreParenImpCasts();
Type =
OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
@@ -10989,11 +10173,14 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (DKind == OMPD_target && VD) {
+ if ((DKind == OMPD_target || DKind == OMPD_target_teams ||
+ DKind == OMPD_target_teams_distribute ||
+ DKind == OMPD_target_teams_distribute_parallel_for) && VD) {
auto DVar = DSAS->getTopDSA(VD, false);
if (isOpenMPPrivate(DVar.CKind)) {
- SemaRef.Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ SemaRef.Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_map)
<< getOpenMPDirectiveName(DSAS->getCurrentDirective());
ReportOriginalDSA(SemaRef, DSAS, CurDeclaration, DVar);
continue;
@@ -11006,7 +10193,8 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// Store the components in the stack so that they can be used to check
// against other clauses later on.
- DSAS->addMappableExpressionComponents(CurDeclaration, CurComponents);
+ DSAS->addMappableExpressionComponents(CurDeclaration, CurComponents,
+ /*WhereFoundClauseKind=*/OMPC_map);
// Save the components and declaration to create the clause. For purposes of
// the clause creation, any component list that has has base 'this' uses
@@ -11274,7 +10462,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
return DeclReductions;
}
-OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
@@ -11301,8 +10489,8 @@ OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
/*StrictlyPositive=*/true))
return nullptr;
- return new (Context) OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc,
- EndLoc);
+ return new (Context)
+ OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
@@ -11421,18 +10609,17 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc) {
// OpenMP 4.5 only supports 'defaultmap(tofrom: scalar)'
- if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom ||
- Kind != OMPC_DEFAULTMAP_scalar) {
+ if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom || Kind != OMPC_DEFAULTMAP_scalar) {
std::string Value;
SourceLocation Loc;
Value += "'";
if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom) {
Value += getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- OMPC_DEFAULTMAP_MODIFIER_tofrom);
+ OMPC_DEFAULTMAP_MODIFIER_tofrom);
Loc = MLoc;
} else {
Value += getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- OMPC_DEFAULTMAP_scalar);
+ OMPC_DEFAULTMAP_scalar);
Loc = KindLoc;
}
Value += "'";
@@ -11469,11 +10656,11 @@ void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
IsInOpenMPDeclareTargetContext = false;
}
-void
-Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id,
- OMPDeclareTargetDeclAttr::MapTypeTy MT,
- NamedDeclSetType &SameDirectiveDecls) {
+void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id,
+ OMPDeclareTargetDeclAttr::MapTypeTy MT,
+ NamedDeclSetType &SameDirectiveDecls) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
@@ -11671,7 +10858,10 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- SmallVector<Expr *, 8> Vars;
+ MappableVarListInfo MVLI(VarList);
+ SmallVector<Expr *, 8> PrivateCopies;
+ SmallVector<Expr *, 8> Inits;
+
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP use_device_ptr clause.");
SourceLocation ELoc;
@@ -11680,43 +10870,89 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
- Vars.push_back(RefExpr);
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
}
ValueDecl *D = Res.first;
if (!D)
continue;
QualType Type = D->getType();
- // item should be a pointer or reference to pointer
- if (!Type.getNonReferenceType()->isPointerType()) {
+ Type = Type.getNonReferenceType().getUnqualifiedType();
+
+ auto *VD = dyn_cast<VarDecl>(D);
+
+ // Item should be a pointer or reference to pointer.
+ if (!Type->isPointerType()) {
Diag(ELoc, diag::err_omp_usedeviceptr_not_a_pointer)
<< 0 << RefExpr->getSourceRange();
continue;
}
- Vars.push_back(RefExpr->IgnoreParens());
+
+ // Build the private variable and the expression that refers to it.
+ auto VDPrivate = buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ if (VDPrivate->isInvalidDecl())
+ continue;
+
+ CurContext->addDecl(VDPrivate);
+ auto VDPrivateRefExpr = buildDeclRefExpr(
+ *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
+
+ // Add temporary variable to initialize the private copy of the pointer.
+ auto *VDInit =
+ buildVarDecl(*this, RefExpr->getExprLoc(), Type, ".devptr.temp");
+ auto *VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(),
+ RefExpr->getExprLoc());
+ AddInitializerToDecl(VDPrivate,
+ DefaultLvalueConversion(VDInitRefExpr).get(),
+ /*DirectInit=*/false, /*TypeMayContainAuto=*/false);
+
+ // If required, build a capture to implement the privatization initialized
+ // with the current list item value.
+ DeclRefExpr *Ref = nullptr;
+ if (!VD)
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
+ PrivateCopies.push_back(VDPrivateRefExpr);
+ Inits.push_back(VDInitRefExpr);
+
+ // We need to add a data sharing attribute for this variable to make sure it
+ // is correctly captured. A variable that shows up in a use_device_ptr has
+ // similar properties of a first private variable.
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component.
+ MVLI.VarBaseDeclarations.push_back(D);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(
+ OMPClauseMappableExprCommon::MappableComponent(SimpleRefExpr, D));
}
- if (Vars.empty())
+ if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPUseDevicePtrClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ return OMPUseDevicePtrClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
+ PrivateCopies, Inits, MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- SmallVector<Expr *, 8> Vars;
+ MappableVarListInfo MVLI(VarList);
for (auto &RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP use_device_ptr clause.");
+ assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
- Vars.push_back(RefExpr);
+ MVLI.ProcessedVarList.push_back(RefExpr);
}
ValueDecl *D = Res.first;
if (!D)
@@ -11730,12 +10966,59 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
<< 0 << RefExpr->getSourceRange();
continue;
}
- Vars.push_back(RefExpr->IgnoreParens());
+
+ // Check if the declaration in the clause does not show up in any data
+ // sharing attribute.
+ auto DVar = DSAStack->getTopDSA(D, false);
+ if (isOpenMPPrivate(DVar.CKind)) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_is_device_ptr)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ ReportOriginalDSA(*this, DSAStack, D, DVar);
+ continue;
+ }
+
+ Expr *ConflictExpr;
+ if (DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [&ConflictExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef R,
+ OpenMPClauseKind) -> bool {
+ ConflictExpr = R.front().getAssociatedExpression();
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << RefExpr->getSourceRange();
+ Diag(ConflictExpr->getExprLoc(), diag::note_used_here)
+ << ConflictExpr->getSourceRange();
+ continue;
+ }
+
+ // Store the components in the stack so that they can be used to check
+ // against other clauses later on.
+ OMPClauseMappableExprCommon::MappableComponent MC(SimpleRefExpr, D);
+ DSAStack->addMappableExpressionComponents(
+ D, MC, /*WhereFoundClauseKind=*/OMPC_is_device_ptr);
+
+ // Record the expression we've just processed.
+ MVLI.ProcessedVarList.push_back(SimpleRefExpr);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component. We use a null declaration to signal fields in
+ // 'this'.
+ assert((isa<DeclRefExpr>(SimpleRefExpr) ||
+ isa<CXXThisExpr>(cast<MemberExpr>(SimpleRefExpr)->getBase())) &&
+ "Unexpected device pointer expression!");
+ MVLI.VarBaseDeclarations.push_back(
+ isa<DeclRefExpr>(SimpleRefExpr) ? D : nullptr);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(MC);
}
- if (Vars.empty())
+ if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPIsDevicePtrClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ return OMPIsDevicePtrClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations, MVLI.VarComponents);
}