//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for OpenMP directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Stack of data-sharing attributes for variables
//===----------------------------------------------------------------------===//
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
OpenMPClauseKind CKind, bool NoDiagnose);
namespace {
/// Default data sharing attributes, which can be applied to a directive.
enum DefaultDataSharingAttributes {
DSA_unspecified = 0, /// Data sharing attribute not specified.
DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
};
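// Illustrative sketch of the 'default' clause these attributes model (the
// variable names are made up): under 'default(none)' every variable
// referenced in the construct needs an explicit data-sharing attribute,
// while 'default(shared)' makes such variables implicitly shared.
//
//   int a = 0;
//   #pragma omp parallel default(none) shared(a)
//   { a += 1; }   // OK: 'a' is listed explicitly.
//   #pragma omp parallel default(none)
//   { a += 1; }   // error: 'a' has no explicit data-sharing attribute.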
/// Attributes of the defaultmap clause.
enum DefaultMapAttributes {
DMA_unspecified, /// Default mapping is not specified.
DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
};
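// Illustrative sketch of 'defaultmap(tofrom:scalar)' (variable name is made
// up): without it, a scalar referenced in a 'target' region is treated as
// firstprivate; with it, the scalar is mapped 'tofrom' and updates are
// visible after the region.
//
//   int s = 1;
//   #pragma omp target defaultmap(tofrom:scalar)
//   { s = 2; }   // 's' is mapped tofrom: the host sees s == 2 afterwards.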
/// Stack for tracking declarations used in OpenMP directives and
/// clauses and their data-sharing attributes.
class DSAStackTy {
public:
struct DSAVarData {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
const Expr *RefExpr, DeclRefExpr *PrivateCopy,
SourceLocation ImplicitDSALoc)
: DKind(DKind), CKind(CKind), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
private:
struct DSAInfo {
OpenMPClauseKind Attributes = OMPC_unknown;
/// Pointer to a reference expression and a flag which shows whether the
/// variable is marked as lastprivate (true) or not (false).
llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
DeclRefExpr *PrivateCopy = nullptr;
};
using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
using LCDeclInfo = std::pair<unsigned, VarDecl *>;
using LoopControlVariablesMapTy =
llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
/// Struct that associates a component with the clause kind where it is
/// found.
struct MappedExprComponentTy {
OMPClauseMappableExprCommon::MappableExprComponentLists Components;
OpenMPClauseKind Kind = OMPC_unknown;
};
using MappedExprComponentsTy =
llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
using CriticalsWithHintsTy =
llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
struct ReductionData {
using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
SourceRange ReductionRange;
llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
ReductionData() = default;
void set(BinaryOperatorKind BO, SourceRange RR) {
ReductionRange = RR;
ReductionOp = BO;
}
void set(const Expr *RefExpr, SourceRange RR) {
ReductionRange = RR;
ReductionOp = RefExpr;
}
};
using DeclReductionMapTy =
llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
struct SharingMapTy {
DeclSAMapTy SharingMap;
DeclReductionMapTy ReductionMap;
AlignedMapTy AlignedMap;
MappedExprComponentsTy MappedExprComponents;
LoopControlVariablesMapTy LCVMap;
DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
SourceLocation DefaultAttrLoc;
DefaultMapAttributes DefaultMapAttr = DMA_unspecified;
SourceLocation DefaultMapAttrLoc;
OpenMPDirectiveKind Directive = OMPD_unknown;
DeclarationNameInfo DirectiveName;
Scope *CurScope = nullptr;
SourceLocation ConstructLoc;
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about the enclosing loop-based construct
/// during codegen.
DoacrossDependMapTy DoacrossDepends;
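// Illustrative sketch of the doacross loops tracked here (loop bounds and
// names are made up):
//
//   #pragma omp for ordered(2)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j) {
//       #pragma omp ordered depend(sink: i - 1, j)
//       // ... use data produced by iteration (i - 1, j) ...
//       #pragma omp ordered depend(source)
//     }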
/// The first element (Expr *) contains the optional argument of the
/// 'ordered' clause; the second points to the 'ordered' clause itself.
/// Present only if the region has an 'ordered' clause.
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
bool NowaitRegion = false;
bool CancelRegion = false;
unsigned AssociatedLoops = 1;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
ConstructLoc(Loc) {}
SharingMapTy() = default;
};
using StackTy = SmallVector<SharingMapTy, 4>;
/// Map of threadprivate declarations and their data-sharing attributes.
DeclSAMapTy Threadprivates;
const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
/// Kind of the clause currently being parsed, or OMPC_unknown if no clause
/// is being parsed.
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
CriticalsWithHintsTy Criticals;
using iterator = StackTy::const_reverse_iterator;
DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
/// Checks if the variable is local to the OpenMP region.
bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
bool isStackEmpty() const {
return Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope ||
Stack.back().first.empty();
}
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
assert(isClauseParsingMode() && "Must be in clause parsing mode.");
return ClauseKindMode;
}
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
if (Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope)
Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
Stack.back().first.back().DefaultAttrLoc = Loc;
}
void pop() {
assert(!Stack.back().first.empty() &&
"Data-sharing attributes stack is empty!");
Stack.back().first.pop_back();
}
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
assert(!isa<CapturingScopeInfo>(CurFnScope));
CurrentNonCapturingFunctionScope = CurFnScope;
}
/// Pop region stack for non-capturing function.
void popFunction(const FunctionScopeInfo *OldFSI) {
if (!Stack.empty() && Stack.back().second == OldFSI) {
assert(Stack.back().first.empty());
Stack.pop_back();
}
CurrentNonCapturingFunctionScope = nullptr;
for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
if (!isa<CapturingScopeInfo>(FSI)) {
CurrentNonCapturingFunctionScope = FSI;
break;
}
}
}
void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
}
const std::pair<const OMPCriticalDirective *, llvm::APSInt>
getCriticalWithHint(const DeclarationNameInfo &Name) const {
auto I = Criticals.find(Name.getAsString());
if (I != Criticals.end())
return I->second;
return std::make_pair(nullptr, llvm::APSInt());
}
/// If an 'aligned' declaration for the given variable \a D was not seen yet,
/// add it and return nullptr; otherwise return the previous occurrence's
/// expression for diagnostics.
const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
/// Register specified variable as loop control variable.
void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
/// Check if the specified variable is a loop control variable for
/// current region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
/// Check if the specified variable is a loop control variable for
/// parent region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
/// Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
const ValueDecl *getParentLoopControlVariable(unsigned I) const;
/// Adds explicit data sharing attribute to the specified declaration.
void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy = nullptr);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK);
/// Adds additional information for the reduction items with the reduction id
/// represented as reduction identifier.
void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef);
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
const DSAVarData
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor) const;
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
const DSAVarData
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const;
/// Return reduction reference expression for the current taskgroup.
Expr *getTaskgroupReductionRef() const {
assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
"taskgroup reference expression requested for non taskgroup "
"directive.");
return Stack.back().first.back().TaskgroupReductionRef;
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
/// descriptor variable at the \p Level of OpenMP regions.
bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
return Stack.back().first[Level].TaskgroupReductionRef &&
cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
->getDecl() == VD;
}
/// Returns data sharing attributes from top of the stack for the
/// specified declaration.
const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
/// Returns data-sharing attributes for the specified declaration.
const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
/// Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in any directive which matches the
/// \a DPred predicate.
const DSAVarData
hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in any innermost directive which
/// matches the \a DPred predicate.
const DSAVarData
hasInnermostDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variable has explicit data-sharing
/// attributes which match the specified \a CPred predicate at the specified
/// OpenMP region.
bool hasExplicitDSA(const ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
unsigned Level, bool NotLastprivate = false) const;
/// Returns true if the directive at level \a Level matches the
/// specified \a DPred predicate.
bool hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
unsigned Level) const;
/// Finds a directive which matches the specified \a DPred predicate.
bool hasDirective(
const llvm::function_ref<bool(
OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
DPred,
bool FromParent) const;
/// Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
}
/// Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
return Stack.back().first[Level].Directive;
}
/// Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return OMPD_unknown;
return std::next(Stack.back().first.rbegin())->Directive;
}
/// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_none;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
/// Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_shared;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
/// Set default data mapping attribute to 'tofrom:scalar'.
void setDefaultDMAToFromScalar(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultMapAttr = DMA_tofrom_scalar;
Stack.back().first.back().DefaultMapAttrLoc = Loc;
}
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
: Stack.back().first.back().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().DefaultAttrLoc;
}
DefaultMapAttributes getDefaultDMA() const {
return isStackEmpty() ? DMA_unspecified
: Stack.back().first.back().DefaultMapAttr;
}
DefaultMapAttributes getDefaultDMAAtLevel(unsigned Level) const {
return Stack.back().first[Level].DefaultMapAttr;
}
SourceLocation getDefaultDMALocation() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().DefaultMapAttrLoc;
}
/// Checks if the specified variable is threadprivate.
bool isThreadPrivate(VarDecl *D) {
const DSAVarData DVar = getTopDSA(D, false);
return isOpenMPThreadPrivate(DVar.CKind);
}
/// Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, const Expr *Param,
OMPOrderedClause *Clause) {
assert(!isStackEmpty());
if (IsOrdered)
Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
else
Stack.back().first.back().OrderedRegion.reset();
}
/// Returns true if the region is ordered (has an associated 'ordered'
/// clause), false otherwise.
bool isOrderedRegion() const {
if (isStackEmpty())
return false;
return Stack.back().first.rbegin()->OrderedRegion.hasValue();
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
if (isStackEmpty() ||
!Stack.back().first.rbegin()->OrderedRegion.hasValue())
return std::make_pair(nullptr, nullptr);
return Stack.back().first.rbegin()->OrderedRegion.getValue();
}
/// Returns true if the parent region is ordered (has an associated
/// 'ordered' clause), false otherwise.
bool isParentOrderedRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
if (isStackEmpty() || Stack.back().first.size() == 1 ||
!std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
return std::make_pair(nullptr, nullptr);
return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
}
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
assert(!isStackEmpty());
Stack.back().first.back().NowaitRegion = IsNowait;
}
/// Returns true if the parent region is nowait (has an associated
/// 'nowait' clause), false otherwise.
bool isParentNowaitRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->NowaitRegion;
}
/// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
auto &StackElemRef = *std::next(Stack.back().first.rbegin());
StackElemRef.CancelRegion |= Cancel;
}
}
/// Return true if current region has inner cancel construct.
bool isCancelRegion() const {
return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
}
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
assert(!isStackEmpty());
Stack.back().first.back().AssociatedLoops = Val;
}
/// Return collapse value for region.
unsigned getAssociatedLoops() const {
return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
}
/// Marks current target region as one with closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
TeamsRegionLoc;
}
}
/// Returns true, if current region has closely nested teams region.
bool hasInnerTeamsRegion() const {
return getInnerTeamsRegionLoc().isValid();
}
/// Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().InnerTeamsRegionLoc;
}
Scope *getCurScope() const {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
SourceLocation getConstructLoc() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().ConstructLoc;
}
/// Apply the check specified in \a Check to all component lists and return
/// true if any issue is found.
bool checkMappableExprComponentListsForDecl(
const ValueDecl *VD, bool CurrentRegionOnly,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)>
Check) const {
if (isStackEmpty())
return false;
auto SI = Stack.back().first.rbegin();
auto SE = Stack.back().first.rend();
if (SI == SE)
return false;
if (CurrentRegionOnly)
SE = std::next(SI);
else
std::advance(SI, 1);
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
}
return false;
}
/// Apply the check specified in \a Check to all component lists at a given
/// level and return true if any issue is found.
bool checkMappableExprComponentListsForDeclAtLevel(
const ValueDecl *VD, unsigned Level,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)>
Check) const {
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
auto MI = StartI->MappedExprComponents.find(VD);
if (MI != StartI->MappedExprComponents.end())
for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
return false;
}
/// Create a new mappable expression component list associated with a given
/// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
const ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
assert(!isStackEmpty() &&
"Not expecting to retrieve components from a empty stack!");
MappedExprComponentTy &MEC =
Stack.back().first.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
MEC.Kind = WhereFoundClauseKind;
}
unsigned getNestingLevel() const {
assert(!isStackEmpty());
return Stack.back().first.size() - 1;
}
void addDoacrossDependClause(OMPDependClause *C,
const OperatorOffsetTy &OpsOffs) {
assert(!isStackEmpty() && Stack.back().first.size() > 1);
SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
assert(isOpenMPWorksharingDirective(StackElem.Directive));
StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
}
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
assert(!isStackEmpty());
const SharingMapTy &StackElem = Stack.back().first.back();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
StackElem.DoacrossDepends.end());
}
};
bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
}
} // namespace
static const Expr *getExprAsWritten(const Expr *E) {
if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
E = ExprTemp->getSubExpr();
if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
E = Binder->getSubExpr();
if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
return E->IgnoreParens();
}
static Expr *getExprAsWritten(Expr *E) {
return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
}
static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
D = ME->getMemberDecl();
const auto *VD = dyn_cast<VarDecl>(D);
const auto *FD = dyn_cast<FieldDecl>(D);
if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
} else {
assert(FD);
FD = FD->getCanonicalDecl();
D = FD;
}
return D;
}
static ValueDecl *getCanonicalDecl(ValueDecl *D) {
return const_cast<ValueDecl *>(
getCanonicalDecl(const_cast<const ValueDecl *>(D)));
}
DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
ValueDecl *D) const {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
const auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
if (isStackEmpty() || Iter == Stack.back().first.rend()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// File-scope or namespace-scope variables referenced in called routines
// in the region are shared unless they appear in a threadprivate
// directive.
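// For example (illustrative names):
//
//   int g;
//   void callee() { ++g; }   // 'g' is referenced in the region but not in
//                            // the construct itself, so it is shared.
//   #pragma omp parallel
//   { callee(); }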
if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
DVar.CKind = OMPC_shared;
// OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// Variables with static storage duration that are declared in called
// routines in the region are shared.
if (VD && VD->hasGlobalStorage())
DVar.CKind = OMPC_shared;
// Non-static data members are shared by default.
if (FD)
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables with automatic storage duration that are declared in a scope
// inside the construct are private.
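// For example (illustrative):
//
//   #pragma omp parallel
//   {
//     int t = 0;   // 't' is declared inside the construct, so it is
//                  // predetermined private: each thread gets its own copy.
//   }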
if (VD && isOpenMPLocal(VD, Iter) && VD->isLocalVarDecl() &&
(VD->getStorageClass() == SC_Auto || VD->getStorageClass() == SC_None)) {
DVar.CKind = OMPC_private;
return DVar;
}
DVar.DKind = Iter->Directive;
// Explicitly specified attributes and local variables with predetermined
// attributes.
if (Iter->SharingMap.count(D)) {
const DSAInfo &Data = Iter->SharingMap.lookup(D);
DVar.RefExpr = Data.RefExpr.getPointer();
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, implicitly determined, p.1]
// In a parallel or task construct, the data-sharing attributes of these
// variables are determined by the default clause, if present.
switch (Iter->DefaultAttr) {
case DSA_shared:
DVar.CKind = OMPC_shared;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
case DSA_none:
return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
// In a parallel construct, if no default clause is present, these
// variables are shared.
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
if (isOpenMPParallelDirective(DVar.DKind) ||
isOpenMPTeamsDirective(DVar.DKind)) {
DVar.CKind = OMPC_shared;
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.4]
// In a task construct, if no default clause is present, a variable that in
// the enclosing context is determined to be shared by all implicit tasks
// bound to the current team is shared.
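// For example (illustrative names): in the task below, 'x' is shared by all
// implicit tasks of the enclosing team, so it stays shared; 'y' is private
// to the encountering thread, so it becomes firstprivate in the task.
//
//   int x = 0;
//   #pragma omp parallel shared(x)
//   {
//     int y = 0;
//     #pragma omp task   // no default clause
//     { ++x; ++y; }
//   }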
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
iterator I = Iter, E = Stack.back().first.rend();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
// Referenced in a Construct, implicitly determined, p.6]
// In a task construct, if no default clause is present, a variable
// whose data-sharing attribute is not determined by the rules above is
// firstprivate.
DVarTemp = getDSA(I, D);
if (DVarTemp.CKind != OMPC_shared) {
DVar.RefExpr = nullptr;
DVar.CKind = OMPC_firstprivate;
return DVar;
}
} while (I != E && !isParallelOrTaskRegion(I->Directive));
DVar.CKind =
(DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
return DVar;
}
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.3]
// For constructs other than task, if no default clause is present, these
// variables inherit their data-sharing attributes from the enclosing
// context.
return getDSA(++Iter, D);
}
const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
const Expr *NewDE) {
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
SharingMapTy &StackElem = Stack.back().first.back();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
StackElem.AlignedMap[D] = NewDE;
return nullptr;
}
assert(It->second && "Unexpected nullptr expr in the aligned map");
return It->second;
}
void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
SharingMapTy &StackElem = Stack.back().first.back();
StackElem.LCVMap.try_emplace(
D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
}
const DSAStackTy::LCDeclInfo
DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
const SharingMapTy &StackElem = Stack.back().first.back();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
const DSAStackTy::LCDeclInfo
DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
if (StackElem.LCVMap.size() < I)
return nullptr;
for (const auto &Pair : StackElem.LCVMap)
if (Pair.second.first == I)
return Pair.first;
return nullptr;
}
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
DSAInfo &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
DSAInfo &Data = Stack.back().first.back().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
(isLoopControlVariable(D).first && A == OMPC_private));
if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
Data.RefExpr.setInt(/*IntVal=*/true);
return;
}
const bool IsLastprivate =
A == OMPC_lastprivate || Data.Attributes == OMPC_lastprivate;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
DSAInfo &Data =
Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
}
}
}
/// Build a variable declaration for OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
StringRef Name, const AttrVec *Attrs = nullptr,
DeclRefExpr *OrigRef = nullptr) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
auto *Decl =
VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
if (Attrs) {
for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
I != E; ++I)
Decl->addAttr(*I);
}
Decl->setImplicit();
if (OrigRef) {
Decl->addAttr(
OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
}
return Decl;
}
static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
SourceLocation Loc,
bool RefersToCapture = false) {
D->setReferenced();
D->markUsed(S.Context);
return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(),
SourceLocation(), D, RefersToCapture, Loc, Ty,
VK_LValue);
}
void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
for (iterator I = std::next(Stack.back().first.rbegin(), 1),
E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
BOK = ReductionData.ReductionOp.get<ReductionData::BOKPtrType>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
for (iterator I = std::next(Stack.back().first.rbegin(), 1),
E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
!ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
SR = ReductionData.ReductionRange;
ReductionRef = ReductionData.ReductionOp.get<const Expr *>();
assert(I->TaskgroupReductionRef && "taskgroup reduction reference "
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
Data.PrivateCopy, I->DefaultAttrLoc);
}
return DSAVarData();
}
bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
D = D->getCanonicalDecl();
if (!isStackEmpty()) {
iterator I = Iter, E = Stack.back().first.rend();
Scope *TopScope = nullptr;
while (I != E && !isParallelOrTaskRegion(I->Directive) &&
!isOpenMPTargetExecutionDirective(I->Directive))
++I;
if (I == E)
return false;
TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
Scope *CurScope = getCurScope();
while (CurScope != TopScope && !CurScope->isDeclScope(D))
CurScope = CurScope->getParent();
return CurScope != TopScope;
}
return false;
}
const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
bool FromParent) {
D = getCanonicalDecl(D);
DSAVarData DVar;
auto *VD = dyn_cast<VarDecl>(D);
auto TI = Threadprivates.find(D);
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
}
if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
DVar.RefExpr = buildDeclRefExpr(
SemaRef, VD, D->getType().getNonReferenceType(),
VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables appearing in threadprivate directives are threadprivate.
if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
SemaRef.getLangOpts().OpenMPUseTLS &&
SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
(VD && VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
DVar.RefExpr = buildDeclRefExpr(
SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
return DVar;
}
if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
!isLoopControlVariable(D).first) {
iterator IterTarget =
std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
[](const SharingMapTy &Data) {
return isOpenMPTargetExecutionDirective(Data.Directive);
});
if (IterTarget != Stack.back().first.rend()) {
iterator ParentIterTarget = std::next(IterTarget, 1);
for (iterator Iter = Stack.back().first.rbegin();
Iter != ParentIterTarget; std::advance(Iter, 1)) {
if (isOpenMPLocal(VD, Iter)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
D->getLocation());
DVar.CKind = OMPC_threadprivate;
return DVar;
}
}
if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
auto DSAIter = IterTarget->SharingMap.find(D);
if (DSAIter != IterTarget->SharingMap.end() &&
isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
}
iterator End = Stack.back().first.rend();
if (!SemaRef.isOpenMPCapturedByRef(
D, std::distance(ParentIterTarget, End))) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
IterTarget->ConstructLoc);
DVar.CKind = OMPC_threadprivate;
return DVar;
}
}
}
}
if (isStackEmpty())
// Not in OpenMP execution region and top scope was already checked.
return DVar;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.4]
// Static data members are shared.
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.7]
// Variables with static storage duration that are declared in a scope
// inside the construct are shared.
auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
if (VD && VD->isStaticDataMember()) {
DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
return DVar;
DVar.CKind = OMPC_shared;
return DVar;
}
QualType Type = D->getType().getNonReferenceType().getCanonicalType();
bool IsConstant = Type.isConstant(SemaRef.getASTContext());
Type = SemaRef.getASTContext().getBaseElementType(Type);
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.6]
// Variables with const qualified type having no mutable member are
// shared.
const CXXRecordDecl *RD =
SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
RD = CTD->getTemplatedDecl();
if (IsConstant &&
!(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
RD->hasMutableFields())) {
// Variables with const-qualified type having no mutable member may be
// listed in a firstprivate clause, even if they are static data members.
DSAVarData DVarTemp =
hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
MatchesAlways, FromParent);
if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
return DVarTemp;
DVar.CKind = OMPC_shared;
return DVar;
}
// Explicitly specified attributes and local variables with predetermined
// attributes.
iterator I = Stack.back().first.rbegin();
iterator EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
DVar.RefExpr = Data.RefExpr.getPointer();
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
}
return DVar;
}
const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
bool FromParent) const {
if (isStackEmpty()) {
iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
iterator StartI = Stack.back().first.rbegin();
iterator EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
return getDSA(StartI, D);
}
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
iterator I = Stack.back().first.rbegin();
iterator EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
for (; I != EndI; std::advance(I, 1)) {
if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
continue;
iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
if (I == NewI && CPred(DVar.CKind))
return DVar;
}
return {};
}
const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
iterator StartI = Stack.back().first.rbegin();
iterator EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
if (StartI == EndI || !DPred(StartI->Directive))
return {};
iterator NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
unsigned Level, bool NotLastprivate) const {
if (isStackEmpty())
return false;
D = getCanonicalDecl(D);
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
auto I = StartI->SharingMap.find(D);
return (I != StartI->SharingMap.end()) &&
I->getSecond().RefExpr.getPointer() &&
CPred(I->getSecond().Attributes) &&
(!NotLastprivate || !I->getSecond().RefExpr.getInt());
}
bool DSAStackTy::hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
unsigned Level) const {
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
return DPred(StartI->Directive);
}
bool DSAStackTy::hasDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind,
const DeclarationNameInfo &, SourceLocation)>
DPred,
bool FromParent) const {
// We look only in the enclosing region.
if (isStackEmpty())
return false;
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
for (auto I = StartI, EE = EndI; I != EE; ++I) {
if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
return true;
}
return false;
}
void Sema::InitDataSharingAttributesStack() {
VarDataSharingAttributesStack = new DSAStackTy(*this);
}
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
void Sema::pushOpenMPFunctionRegion() {
DSAStack->pushFunction();
}
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
ASTContext &Ctx = getASTContext();
bool IsByRef = true;
// Find the directive that is associated with the provided scope.
D = cast<ValueDecl>(D->getCanonicalDecl());
QualType Ty = D->getType();
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
// This table summarizes how a given variable should be passed to the device
// given its type and the clauses where it appears. This table is based on
// the description in OpenMP 4.5 [2.10.4, target Construct] and
// OpenMP 4.5 [2.15.5, Data-mapping Attribute Rules and Clauses].
//
// =========================================================================
// | type | defaultmap | pvt | first | is_device_ptr | map | res. |
// | |(tofrom:scalar)| | pvt | | | |
// =========================================================================
// | scl | | | | - | | bycopy|
// | scl | | - | x | - | - | bycopy|
// | scl | | x | - | - | - | null |
// | scl | x | | | - | | byref |
// | scl | x | - | x | - | - | bycopy|
// | scl | x | x | - | - | - | null |
// | scl | | - | - | - | x | byref |
// | scl | x | - | - | - | x | byref |
//
// | agg | n.a. | | | - | | byref |
// | agg | n.a. | - | x | - | - | byref |
// | agg | n.a. | x | - | - | - | null |
// | agg | n.a. | - | - | - | x | byref |
// | agg | n.a. | - | - | - | x[] | byref |
//
// | ptr | n.a. | | | - | | bycopy|
// | ptr | n.a. | - | x | - | - | bycopy|
// | ptr | n.a. | x | - | - | - | null |
// | ptr | n.a. | - | - | - | x | byref |
// | ptr | n.a. | - | - | - | x[] | bycopy|
// | ptr | n.a. | - | - | x | | bycopy|
// | ptr | n.a. | - | - | x | x | bycopy|
// | ptr | n.a. | - | - | x | x[] | bycopy|
// =========================================================================
// Legend:
// scl - scalar
// ptr - pointer
// agg - aggregate
// x - applies
// - - invalid in this combination
// [] - mapped with an array section
// byref - should be mapped by reference
// bycopy - should be mapped by copy (by value)
// null - initialize a local variable to null on the device
//
// Observations:
// - All scalar declarations that show up in a map clause have to be passed
// by reference, because they may have been mapped in the enclosing data
// environment.
// - If the scalar value does not fit the size of uintptr, it has to be
// passed by reference, regardless of the result in the table above.
// - For pointers mapped by value that have either an implicit map or an
// array section, the runtime library may pass the NULL value to the
// device instead of the value passed to it by the compiler.
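// A brief sketch of a few rows of the table above (names are made up):
//
//   int s = 0;
//   #pragma omp target                            // scalar, no clauses:
//   { s = 1; }                                    //   passed by copy.
//   #pragma omp target defaultmap(tofrom:scalar)  // defaultmap(tofrom:scalar):
//   { s = 1; }                                    //   passed by reference.
//   #pragma omp target map(tofrom: s)             // appears in a map clause:
//   { s = 1; }                                    //   passed by reference.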
if (Ty->isReferenceType())
Ty = Ty->castAs<ReferenceType>()->getPointeeType();
// Locate map clauses and see if the variable being captured is referred to
// in any of those clauses. Here we only care about variables, not fields,
// because fields are part of aggregates.
bool IsVariableUsedInMapClause = false;
bool IsVariableAssociatedWithSection = false;
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, Level,
[&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
// Only the map clause information influences how a variable is
// captured. E.g. is_device_ptr does not require changing the default
// behavior.
if (WhereFoundClauseKind != OMPC_map)
return false;
auto EI = MapExprComponents.rbegin();
auto EE = MapExprComponents.rend();
assert(EI != EE && "Invalid map expression!");
if (isa<DeclRefExpr>(EI->getAssociatedExpression()))
IsVariableUsedInMapClause |= EI->getAssociatedDeclaration() == D;
++EI;
if (EI == EE)
return false;
if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
}
// Keep looking for more map info.
return false;
});
if (IsVariableUsedInMapClause) {
// If variable is identified in a map clause it is always captured by
// reference except if it is a pointer that is dereferenced somehow.
IsByRef = !(Ty->isPointerType() && IsVariableAssociatedWithSection);
} else {
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
IsByRef =
!Ty->isScalarType() ||
DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_reduction; }, Level);
}
}
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
!DSAStack->hasExplicitDSA(
D,
[](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
Level, /*NotLastprivate=*/true) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
!cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
}
// When passing data by copy, we need to make sure it fits the uintptr size
// and alignment, because the runtime library only deals with uintptr types.
// If it does not fit the uintptr size, we need to pass the data by reference
// instead.
if (!IsByRef &&
(Ctx.getTypeSizeInChars(Ty) >
Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
return IsByRef;
}
unsigned Sema::getOpenMPNestingLevel() const {
assert(getLangOpts().OpenMP);
return DSAStack->getNestingLevel();
}
bool Sema::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
DSAStack->hasDirective(
[](OpenMPDirectiveKind K, const DeclarationNameInfo &,
SourceLocation) -> bool {
return isOpenMPTargetExecutionDirective(K);
},
false);
}
VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
// If we are attempting to capture a global variable in a directive with
// 'target', return the declaration so that this global is also mapped to the
// device.
//
auto *VD = dyn_cast<VarDecl>(D);
if (VD && !VD->hasLocalStorage() && isInOpenMPTargetExecutionDirective()) {
// If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured.
//
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return nullptr;
return VD;
}
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
DSAStack->getParentDirective() != OMPD_unknown)) {
auto &&Info = DSAStack->isLoopControlVariable(D);
if (Info.first ||
(VD && VD->hasLocalStorage() &&
isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
DSAStackTy::DSAVarData DVarPrivate =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
[](OpenMPDirectiveKind) { return true; },
DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown)
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
}
void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const {
SmallVector<OpenMPDirectiveKind, 4> Regions;
getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
FunctionScopesIndex -= Regions.size();
}
bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
DSAStack->getClauseParsingMode() == OMPC_private) ||
// Consider the taskgroup reduction descriptor variable private to avoid
// possible capture in the region.
(DSAStack->hasExplicitDirective(
[](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
Level) &&
DSAStack->isTaskgroupReductionRef(D, Level));
}
void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
OpenMPClauseKind OMPC = OMPC_unknown;
for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
const unsigned NewLevel = I - 1;
if (DSAStack->hasExplicitDSA(D,
[&OMPC](const OpenMPClauseKind K) {
if (isOpenMPPrivate(K)) {
OMPC = K;
return true;
}
return false;
},
NewLevel))
break;
if (DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, NewLevel,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind) { return true; })) {
OMPC = OMPC_map;
break;
}
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
NewLevel)) {
OMPC = OMPC_map;
if (D->getType()->isScalarType() &&
DSAStack->getDefaultDMAAtLevel(NewLevel) !=
DefaultMapAttributes::DMA_tofrom_scalar)
OMPC = OMPC_firstprivate;
break;
}
}
if (OMPC != OMPC_unknown)
FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
}
bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true only for non-local variables at a level that has an explicit
// 'target' execution directive.
const auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
Level);
}
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
DSAStack->push(DKind, DirName, CurScope, Loc);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
}
void Sema::StartOpenMPClause(OpenMPClauseKind K) {
DSAStack->setClauseParsingMode(K);
}
void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
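// For example (illustrative type and loop):
//
//   struct S { S(int); };                    // no default constructor
//   S s(0);
//   #pragma omp parallel for lastprivate(s)  // error: the private copy of 's'
//   for (int i = 0; i < 10; ++i) { /*...*/ } // cannot be default-constructed.
//   #pragma omp parallel for firstprivate(s) lastprivate(s)  // OK: the copy
//   for (int i = 0; i < 10; ++i) { /*...*/ }                 // is copy-constructed.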
if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
for (OMPClause *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
for (Expr *DE : Clause->varlists()) {
if (DE->isValueDependent() || DE->isTypeDependent()) {
PrivateCopies.push_back(nullptr);
continue;
}
auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
auto *VD = cast<VarDecl>(DRE->getDecl());
QualType Type = VD->getType().getNonReferenceType();
const DSAStackTy::DSAVarData DVar =
DSAStack->getTopDSA(VD, /*FromParent=*/false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate helper private variable and initialize it with the
// default value. The address of the original variable is replaced
// by the address of the new private variable in CodeGen. This new
// variable is not added to IdResolver, so the code in the OpenMP
// region uses the original variable for proper diagnostics.
VarDecl *VDPrivate = buildVarDecl(
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl())
continue;
PrivateCopies.push_back(buildDeclRefExpr(
*this, VDPrivate, DE->getType(), DE->getExprLoc()));
} else {
// The variable is also firstprivate, so the initialization sequence
// for the private copy has already been generated.
PrivateCopies.push_back(nullptr);
}
}
// Set initializers to private copies if no errors were found.
if (PrivateCopies.size() == Clause->varlist_size())
Clause->setPrivateCopies(PrivateCopies);
}
}
}
DSAStack->pop();
DiscardCleanupsInEvaluationContext();
PopExpressionEvaluationContext();
}
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *NumIterations, Sema &SemaRef,
Scope *S, DSAStackTy *Stack);
namespace {
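// Typo-correction callback that accepts only variables with global storage
// that are visible from the current scope; used below when recovering from a
// misspelled variable name in a threadprivate-style argument list.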
class VarDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
};
class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
public:
explicit VarOrFuncDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
if (ND && (isa<VarDecl>(ND) || isa<FunctionDecl>(ND))) {
return SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
}
return false;
}
};
} // namespace
ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
if (Lookup.isAmbiguous())
return ExprError();
VarDecl *VD;
if (!Lookup.isSingleResult()) {
if (TypoCorrection Corrected = CorrectTypo(
Id, LookupOrdinaryName, CurScope, nullptr,
llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) {
diagnoseTypo(Corrected,
PDiag(Lookup.empty()
? diag::err_undeclared_var_use_suggest
: diag::err_omp_expected_var_arg_suggest)
<< Id.getName());
VD = Corrected.getCorrectionDeclAs<VarDecl>();
} else {
Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
: diag::err_omp_expected_var_arg)
<< Id.getName();
return ExprError();
}
} else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
return ExprError();
}
Lookup.suppressDiagnostics();
// OpenMP [2.9.2, Syntax, C/C++]
// Variables must be file-scope, namespace-scope, or static block-scope.
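// Illustrative rejected case handled just below:
//   void f() {
//     int X = 0;
//     #pragma omp threadprivate(X)   // error: X does not have static storage
//   }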
if (!VD->hasGlobalStorage()) {
Diag(Id.getLoc(), diag::err_omp_global_var_arg)
<< getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
VarDecl *CanonicalVD = VD->getCanonicalDecl();
NamedDecl *ND = CanonicalVD;
// OpenMP [2.9.2, Restrictions, C/C++, p.2]
// A threadprivate directive for file-scope variables must appear outside
// any definition or declaration.
if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
!getCurLexicalContext()->isTranslationUnit()) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.3]
// A threadprivate directive for static class member variables must appear
// in the class definition, in the same scope in which the member
// variables are declared.
if (CanonicalVD->isStaticDataMember() &&
!CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.4]
// A threadprivate directive for namespace-scope variables must appear
// outside any definition or declaration other than the namespace
// definition itself.
if (CanonicalVD->getDeclContext()->isNamespace() &&
(!getCurLexicalContext()->isFileContext() ||
!getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.6]
// A threadprivate directive for static block-scope variables must appear
// in the scope of the variable and not in a nested scope.
if (CanonicalVD->isStaticLocal() && CurScope &&
!isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
return ExprError();
}
// OpenMP [2.9.2, Restrictions, C/C++, p.2-6]
// A threadprivate directive must lexically precede all references to any
// of the variables in its list.
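// Illustrative rejected case:
//   int X;
//   void f() { X = 1; }
//   #pragma omp threadprivate(X)   // error: X has already been referenced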
if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) {
Diag(Id.getLoc(), diag::err_omp_var_used)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD;
return ExprError();
}
QualType ExprType = VD->getType().getNonReferenceType();
return DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
SourceLocation(), VD,
/*RefersToEnclosingVariableOrCapture=*/false,
Id.getLoc(), ExprType, VK_LValue);
}
Sema::DeclGroupPtrTy
Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList) {
if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
CurContext->addDecl(D);
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
return nullptr;
}
namespace {
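// Visitor that walks a threadprivate variable's initializer and diagnoses any
// reference to a variable with automatic (local) storage, since such
// initializers are not supported by the OpenMP runtime.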
class LocalVarRefChecker final
: public ConstStmtVisitor<LocalVarRefChecker, bool> {
Sema &SemaRef;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getLocStart(),
diag::err_omp_local_var_in_threadprivate_init)
<< E->getSourceRange();
SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
<< VD << VD->getSourceRange();
return true;
}
}
return false;
}
bool VisitStmt(const Stmt *S) {
for (const Stmt *Child : S->children()) {
if (Child && Visit(Child))
return true;
}
return false;
}
explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {}
};
} // namespace
OMPThreadPrivateDecl *
Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
auto *DE = cast<DeclRefExpr>(RefExpr);
auto *VD = cast<VarDecl>(DE->getDecl());
SourceLocation ILoc = DE->getExprLoc();
// Mark variable as used.
VD->setReferenced();
VD->markUsed(Context);
QualType QType = VD->getType();
if (QType->isDependentType() || QType->isInstantiationDependentType()) {
// It will be analyzed later.
Vars.push_back(DE);
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have an incomplete type.
if (RequireCompleteType(ILoc, VD->getType(),
diag::err_omp_threadprivate_incomplete_type)) {
continue;
}
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have a reference type.
if (VD->getType()->isReferenceType()) {
Diag(ILoc, diag::err_omp_ref_type_arg)
<< getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if this is a TLS variable. If TLS is not supported, produce the
// corresponding diagnostic.
if ((VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
getLangOpts().OpenMPUseTLS &&
getASTContext().getTargetInfo().isTLSSupported())) ||
(VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
!VD->isLocalVarDecl())) {
Diag(ILoc, diag::err_omp_var_thread_local)
<< VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
bool IsDecl =
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< VD;
continue;
}
// Check if the initializer of the threadprivate variable references a
// variable with local storage (this is not supported by the runtime).
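// Illustrative rejected case:
//   void f() {
//     int Local = 0;
//     static int TP = Local;
//     #pragma omp threadprivate(TP)  // error: initializer refers to 'Local'
//   }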
if (const Expr *Init = VD->getAnyInitializer()) {
LocalVarRefChecker Checker(*this);
if (Checker.Visit(Init))
continue;
}
Vars.push_back(RefExpr);
DSAStack->addDSA(VD, DE, OMPC_threadprivate);
VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
Context, SourceRange(Loc, Loc)));
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPThreadPrivate(VD);
}
OMPThreadPrivateDecl *D = nullptr;
if (!Vars.empty()) {
D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
Vars);
D->setAccess(AS_public);
}
return D;
}
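// Emits a note pointing at where the data-sharing attribute of the given
// declaration was established: at the explicit clause reference if there is
// one, otherwise describing the predetermined or implicit attribute.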
static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
const ValueDecl *D,
const DSAStackTy::DSAVarData &DVar,
bool IsLoopIterVar = false) {
if (DVar.RefExpr) {
SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
return;
}
enum {
PDSA_StaticMemberShared,
PDSA_StaticLocalVarShared,
PDSA_LoopIterVarPrivate,
PDSA_LoopIterVarLinear,
PDSA_LoopIterVarLastprivate,
PDSA_ConstVarShared,
PDSA_GlobalVarShared,
PDSA_TaskVarFirstprivate,
PDSA_LocalVarPrivate,
PDSA_Implicit
} Reason = PDSA_Implicit;
bool ReportHint = false;
auto ReportLoc = D->getLocation();
auto *VD = dyn_cast<VarDecl>(D);
if (IsLoopIterVar) {
if (DVar.CKind == OMPC_private)
Reason = PDSA_LoopIterVarPrivate;
else if (DVar.CKind == OMPC_lastprivate)
Reason = PDSA_LoopIterVarLastprivate;
else
Reason = PDSA_LoopIterVarLinear;
} else if (isOpenMPTaskingDirective(DVar.DKind) &&
DVar.CKind == OMPC_firstprivate) {
Reason = PDSA_TaskVarFirstprivate;
ReportLoc = DVar.ImplicitDSALoc;
} else if (VD && VD->isStaticLocal())
Reason = PDSA_StaticLocalVarShared;
else if (VD && VD->isStaticDataMember())
Reason = PDSA_StaticMemberShared;
else if (VD && VD->isFileVarDecl())
Reason = PDSA_GlobalVarShared;
else if (D->getType().isConstant(SemaRef.getASTContext()))
Reason = PDSA_ConstVarShared;
else if (VD && VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
ReportHint = true;
Reason = PDSA_LocalVarPrivate;
}
if (Reason != PDSA_Implicit) {
SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
<< Reason << ReportHint
<< getOpenMPDirectiveName(Stack->getCurrentDirective());
} else if (DVar.ImplicitDSALoc.isValid()) {
SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
}
}
namespace {
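// Statement visitor that computes implicit data-sharing attributes for
// variables referenced in an OpenMP region: it collects implicit firstprivate
// and implicit map candidates and reports DSA-related errors (for example,
// reduction list items accessed in an explicit task).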
class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
DSAStackTy *Stack;
Sema &SemaRef;
bool ErrorFound = false;
CapturedStmt *CS = nullptr;
llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
llvm::SmallVector<Expr *, 4> ImplicitMap;
Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
public:
void VisitDeclRefExpr(DeclRefExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
// Check if the variable has an explicit DSA set and stop the analysis if so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
return;
// Skip internally declared static variables.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
(!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
return;
SourceLocation ELoc = E->getExprLoc();
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
// The default(none) clause requires that each variable that is referenced
// in the construct, and does not have a predetermined data-sharing
// attribute, have its data-sharing attribute explicitly determined by
// being listed in a data-sharing attribute clause.
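// Illustrative case recorded here and diagnosed by the caller:
//   void f() {
//     int X = 0;
//     #pragma omp parallel default(none)
//     X++;   // error: X must be listed in a data-sharing attribute clause
//   }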
if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
isParallelOrTaskRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
VarsWithInheritedDSA[VD] = E;
return;
}
if (isOpenMPTargetExecutionDirective(DKind) &&
!Stack->isLoopControlVariable(VD).first) {
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
// The variable is used if it has been marked as an array, an array
// section, or the variable itself.
return StackComponents.size() == 1 ||
std::all_of(
std::next(StackComponents.rbegin()),
StackComponents.rend(),
[](const OMPClauseMappableExprCommon::
MappableComponent &MC) {
return MC.getAssociatedDeclaration() ==
nullptr &&
(isa<OMPArraySectionExpr>(
MC.getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(
MC.getAssociatedExpression()));
});
})) {
bool IsFirstprivate = false;
// By default lambdas are captured as firstprivates.
if (const auto *RD =
VD->getType().getNonReferenceType()->getAsCXXRecordDecl())
IsFirstprivate = RD->isLambda();
IsFirstprivate =
IsFirstprivate ||
(VD->getType().getNonReferenceType()->isScalarType() &&
Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
if (IsFirstprivate)
ImplicitFirstprivate.emplace_back(E);
else
ImplicitMap.emplace_back(E);
return;
}
}
// OpenMP [2.9.3.6, Restrictions, p.2]
// A list item that appears in a reduction clause of the innermost
// enclosing worksharing or parallel construct may not be accessed in an
// explicit task.
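// Illustrative rejected case:
//   #pragma omp parallel reduction(+ : Sum)
//   {
//   #pragma omp task
//     Sum += 1;   // error: reduction item accessed in an explicit task
//   }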
DVar = Stack->hasInnermostDSA(
VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
[](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
/*FromParent=*/true);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
reportOriginalDsa(SemaRef, Stack, VD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(VD).first)
ImplicitFirstprivate.push_back(E);
}
}
void VisitMemberExpr(MemberExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
E->containsUnexpandedParameterPack() || E->isInstantiationDependent())
return;
auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
if (!FD)
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
// Check if the variable has an explicit DSA set and stop the analysis if so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
return;
if (isOpenMPTargetExecutionDirective(DKind) &&
!Stack->isLoopControlVariable(FD).first &&
!Stack->checkMappableExprComponentListsForDecl(
FD, /*CurrentRegionOnly=*/true,
[](OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
return isa<CXXThisExpr>(
cast<MemberExpr>(
StackComponents.back().getAssociatedExpression())
->getBase()
->IgnoreParens());
})) {
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
// A bit-field cannot appear in a map clause.
//
if (FD->isBitField())
return;
ImplicitMap.emplace_back(E);
return;
}
SourceLocation ELoc = E->getExprLoc();
// OpenMP [2.9.3.6, Restrictions, p.2]
// A list item that appears in a reduction clause of the innermost
// enclosing worksharing or parallel construct may not be accessed in
// an explicit task.
DVar = Stack->hasInnermostDSA(
FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
[](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
/*FromParent=*/true);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
reportOriginalDsa(SemaRef, Stack, FD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(FD).first)
ImplicitFirstprivate.push_back(E);
return;
}
if (isOpenMPTargetExecutionDirective(DKind)) {
OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
/*NoDiagnose=*/true))
return;
const auto *VD = cast<ValueDecl>(
CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
[&CurComponents](
OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
OpenMPClauseKind) {
auto CCI = CurComponents.rbegin();
auto CCE = CurComponents.rend();
for (const auto &SC : llvm::reverse(StackComponents)) {
// Do both expressions have the same kind?
if (CCI->getAssociatedExpression()->getStmtClass() !=
SC.getAssociatedExpression()->getStmtClass())
if (!(isa<OMPArraySectionExpr>(
SC.getAssociatedExpression()) &&
isa<ArraySubscriptExpr>(
CCI->getAssociatedExpression())))
return false;
const Decl *CCD = CCI->getAssociatedDeclaration();
const Decl *SCD = SC.getAssociatedDeclaration();
CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
if (SCD != CCD)
return false;
std::advance(CCI, 1);
if (CCI == CCE)
break;
}
return true;
})) {
Visit(E->getBase());
}
} else {
Visit(E->getBase());
}
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (OMPClause *C : S->clauses()) {
// Skip analysis of the arguments of an implicitly defined firstprivate
// clause for task|target directives.
// Skip analysis of the arguments of an implicitly defined map clause for
// target directives.
if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
C->isImplicit())) {
for (Stmt *CC : C->children()) {
if (CC)
Visit(CC);
}
}
}
}
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
if (C && !isa<OMPExecutableDirective>(C))
Visit(C);
}
}
bool isErrorFound() const { return ErrorFound; }
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
return VarsWithInheritedDSA;
}
DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS)
: Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {}
};
} // namespace
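// Pushes the captured region(s) that correspond to the given directive kind.
// Combined target directives push several nested captured regions (for
// example target + teams/parallel), each declared with the implicit
// parameters that the matching runtime entry point expects.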
void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
switch (DKind) {
case OMPD_parallel:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_target_teams:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTarget);
Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams' or 'parallel'. Both regions have
// the same implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTeamsOrParallel);
break;
}
case OMPD_target:
case OMPD_target_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
std::make_pair(StringRef(), QualType()));
break;
}
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_ordered:
case OMPD_atomic:
case OMPD_target_data: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_task: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_taskloop:
case OMPD_taskloop_simd: {
QualType KmpInt32Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
.withConst();
QualType KmpUInt64Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
.withConst();
QualType KmpInt64Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
.withConst();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(".lb.", KmpUInt64Ty),
std::make_pair(".ub.", KmpUInt64Ty),
std::make_pair(".st.", KmpInt64Ty),
std::make_pair(".liter.", KmpInt32Ty),
std::make_pair(".reductions.", VoidPtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_parallel_for: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
break;
}
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTarget);
Sema::CapturedParamNameType ParamsTeams[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams'.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTeams);
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'parallel', which also receives the previous
// lower and upper bounds of the enclosing 'distribute' loop.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsParallel);
break;
}
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType ParamsTeams[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams'.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTeams);
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'parallel', which also receives the previous
// lower and upper bounds of the enclosing 'distribute' loop.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsParallel);
break;
}
case OMPD_target_update:
case OMPD_target_enter_data:
case OMPD_target_exit_data: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
std::make_pair(".part_id.", KmpInt32PtrTy),
std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
Params);
// Mark this captured region as inlined, because we don't use the outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_threadprivate:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
case OMPD_declare_reduction:
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
}
}
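// Returns the number of captured regions that are created for the given kind
// of OpenMP directive.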
int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, DKind);
return CaptureRegions.size();