diff options
Diffstat (limited to 'contrib/llvm/tools/clang/lib/AST')
57 files changed, 74636 insertions, 0 deletions
diff --git a/contrib/llvm/tools/clang/lib/AST/APValue.cpp b/contrib/llvm/tools/clang/lib/AST/APValue.cpp new file mode 100644 index 000000000000..541836b21b70 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/APValue.cpp @@ -0,0 +1,642 @@ +//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the APValue class. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Expr.h" +#include "clang/AST/Type.h" +#include "clang/Basic/Diagnostic.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; + +namespace { + struct LVBase { + llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd; + CharUnits Offset; + unsigned PathLength; + unsigned CallIndex; + }; +} + +struct APValue::LV : LVBase { + static const unsigned InlinePathSpace = + (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry); + + /// Path - The sequence of base classes, fields and array indices to follow to + /// walk from Base to the subobject. When performing GCC-style folding, there + /// may not be such a path. 
+ union { + LValuePathEntry Path[InlinePathSpace]; + LValuePathEntry *PathPtr; + }; + + LV() { PathLength = (unsigned)-1; } + ~LV() { resizePath(0); } + + void resizePath(unsigned Length) { + if (Length == PathLength) + return; + if (hasPathPtr()) + delete [] PathPtr; + PathLength = Length; + if (hasPathPtr()) + PathPtr = new LValuePathEntry[Length]; + } + + bool hasPath() const { return PathLength != (unsigned)-1; } + bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; } + + LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; } + const LValuePathEntry *getPath() const { + return hasPathPtr() ? PathPtr : Path; + } +}; + +namespace { + struct MemberPointerBase { + llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember; + unsigned PathLength; + }; +} + +struct APValue::MemberPointerData : MemberPointerBase { + static const unsigned InlinePathSpace = + (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*); + typedef const CXXRecordDecl *PathElem; + union { + PathElem Path[InlinePathSpace]; + PathElem *PathPtr; + }; + + MemberPointerData() { PathLength = 0; } + ~MemberPointerData() { resizePath(0); } + + void resizePath(unsigned Length) { + if (Length == PathLength) + return; + if (hasPathPtr()) + delete [] PathPtr; + PathLength = Length; + if (hasPathPtr()) + PathPtr = new PathElem[Length]; + } + + bool hasPathPtr() const { return PathLength > InlinePathSpace; } + + PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; } + const PathElem *getPath() const { + return hasPathPtr() ? PathPtr : Path; + } +}; + +// FIXME: Reduce the malloc traffic here. + +APValue::Arr::Arr(unsigned NumElts, unsigned Size) : + Elts(new APValue[NumElts + (NumElts != Size ? 
1 : 0)]), + NumElts(NumElts), ArrSize(Size) {} +APValue::Arr::~Arr() { delete [] Elts; } + +APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) : + Elts(new APValue[NumBases+NumFields]), + NumBases(NumBases), NumFields(NumFields) {} +APValue::StructData::~StructData() { + delete [] Elts; +} + +APValue::UnionData::UnionData() : Field(0), Value(new APValue) {} +APValue::UnionData::~UnionData () { + delete Value; +} + +APValue::APValue(const APValue &RHS) : Kind(Uninitialized) { + switch (RHS.getKind()) { + case Uninitialized: + break; + case Int: + MakeInt(); + setInt(RHS.getInt()); + break; + case Float: + MakeFloat(); + setFloat(RHS.getFloat()); + break; + case Vector: + MakeVector(); + setVector(((const Vec *)(const char *)RHS.Data)->Elts, + RHS.getVectorLength()); + break; + case ComplexInt: + MakeComplexInt(); + setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag()); + break; + case ComplexFloat: + MakeComplexFloat(); + setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag()); + break; + case LValue: + MakeLValue(); + if (RHS.hasLValuePath()) + setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(), + RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex()); + else + setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(), + RHS.getLValueCallIndex()); + break; + case Array: + MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize()); + for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I) + getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I); + if (RHS.hasArrayFiller()) + getArrayFiller() = RHS.getArrayFiller(); + break; + case Struct: + MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields()); + for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I) + getStructBase(I) = RHS.getStructBase(I); + for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I) + getStructField(I) = RHS.getStructField(I); + break; + case Union: + MakeUnion(); + 
setUnion(RHS.getUnionField(), RHS.getUnionValue()); + break; + case MemberPointer: + MakeMemberPointer(RHS.getMemberPointerDecl(), + RHS.isMemberPointerToDerivedMember(), + RHS.getMemberPointerPath()); + break; + case AddrLabelDiff: + MakeAddrLabelDiff(); + setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS()); + break; + } +} + +void APValue::DestroyDataAndMakeUninit() { + if (Kind == Int) + ((APSInt*)(char*)Data)->~APSInt(); + else if (Kind == Float) + ((APFloat*)(char*)Data)->~APFloat(); + else if (Kind == Vector) + ((Vec*)(char*)Data)->~Vec(); + else if (Kind == ComplexInt) + ((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt(); + else if (Kind == ComplexFloat) + ((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat(); + else if (Kind == LValue) + ((LV*)(char*)Data)->~LV(); + else if (Kind == Array) + ((Arr*)(char*)Data)->~Arr(); + else if (Kind == Struct) + ((StructData*)(char*)Data)->~StructData(); + else if (Kind == Union) + ((UnionData*)(char*)Data)->~UnionData(); + else if (Kind == MemberPointer) + ((MemberPointerData*)(char*)Data)->~MemberPointerData(); + else if (Kind == AddrLabelDiff) + ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData(); + Kind = Uninitialized; +} + +bool APValue::needsCleanup() const { + switch (getKind()) { + case Uninitialized: + case AddrLabelDiff: + return false; + case Struct: + case Union: + case Array: + case Vector: + return true; + case Int: + return getInt().needsCleanup(); + case Float: + return getFloat().needsCleanup(); + case ComplexFloat: + assert(getComplexFloatImag().needsCleanup() == + getComplexFloatReal().needsCleanup() && + "In _Complex float types, real and imaginary values always have the " + "same size."); + return getComplexFloatReal().needsCleanup(); + case ComplexInt: + assert(getComplexIntImag().needsCleanup() == + getComplexIntReal().needsCleanup() && + "In _Complex int types, real and imaginary values must have the " + "same size."); + return getComplexIntReal().needsCleanup(); + case LValue: + 
return reinterpret_cast<const LV *>(Data)->hasPathPtr(); + case MemberPointer: + return reinterpret_cast<const MemberPointerData *>(Data)->hasPathPtr(); + } + llvm_unreachable("Unknown APValue kind!"); +} + +void APValue::swap(APValue &RHS) { + std::swap(Kind, RHS.Kind); + char TmpData[MaxSize]; + memcpy(TmpData, Data, MaxSize); + memcpy(Data, RHS.Data, MaxSize); + memcpy(RHS.Data, TmpData, MaxSize); +} + +void APValue::dump() const { + dump(llvm::errs()); + llvm::errs() << '\n'; +} + +static double GetApproxValue(const llvm::APFloat &F) { + llvm::APFloat V = F; + bool ignored; + V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven, + &ignored); + return V.convertToDouble(); +} + +void APValue::dump(raw_ostream &OS) const { + switch (getKind()) { + case Uninitialized: + OS << "Uninitialized"; + return; + case Int: + OS << "Int: " << getInt(); + return; + case Float: + OS << "Float: " << GetApproxValue(getFloat()); + return; + case Vector: + OS << "Vector: "; + getVectorElt(0).dump(OS); + for (unsigned i = 1; i != getVectorLength(); ++i) { + OS << ", "; + getVectorElt(i).dump(OS); + } + return; + case ComplexInt: + OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag(); + return; + case ComplexFloat: + OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal()) + << ", " << GetApproxValue(getComplexFloatImag()); + return; + case LValue: + OS << "LValue: <todo>"; + return; + case Array: + OS << "Array: "; + for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) { + getArrayInitializedElt(I).dump(OS); + if (I != getArraySize() - 1) OS << ", "; + } + if (hasArrayFiller()) { + OS << getArraySize() - getArrayInitializedElts() << " x "; + getArrayFiller().dump(OS); + } + return; + case Struct: + OS << "Struct "; + if (unsigned N = getStructNumBases()) { + OS << " bases: "; + getStructBase(0).dump(OS); + for (unsigned I = 1; I != N; ++I) { + OS << ", "; + getStructBase(I).dump(OS); + } + } + if (unsigned N = 
getStructNumFields()) { + OS << " fields: "; + getStructField(0).dump(OS); + for (unsigned I = 1; I != N; ++I) { + OS << ", "; + getStructField(I).dump(OS); + } + } + return; + case Union: + OS << "Union: "; + getUnionValue().dump(OS); + return; + case MemberPointer: + OS << "MemberPointer: <todo>"; + return; + case AddrLabelDiff: + OS << "AddrLabelDiff: <todo>"; + return; + } + llvm_unreachable("Unknown APValue kind!"); +} + +void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{ + switch (getKind()) { + case APValue::Uninitialized: + Out << "<uninitialized>"; + return; + case APValue::Int: + if (Ty->isBooleanType()) + Out << (getInt().getBoolValue() ? "true" : "false"); + else + Out << getInt(); + return; + case APValue::Float: + Out << GetApproxValue(getFloat()); + return; + case APValue::Vector: { + Out << '{'; + QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); + getVectorElt(0).printPretty(Out, Ctx, ElemTy); + for (unsigned i = 1; i != getVectorLength(); ++i) { + Out << ", "; + getVectorElt(i).printPretty(Out, Ctx, ElemTy); + } + Out << '}'; + return; + } + case APValue::ComplexInt: + Out << getComplexIntReal() << "+" << getComplexIntImag() << "i"; + return; + case APValue::ComplexFloat: + Out << GetApproxValue(getComplexFloatReal()) << "+" + << GetApproxValue(getComplexFloatImag()) << "i"; + return; + case APValue::LValue: { + LValueBase Base = getLValueBase(); + if (!Base) { + Out << "0"; + return; + } + + bool IsReference = Ty->isReferenceType(); + QualType InnerTy + = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType(); + if (InnerTy.isNull()) + InnerTy = Ty; + + if (!hasLValuePath()) { + // No lvalue path: just print the offset. 
+ CharUnits O = getLValueOffset(); + CharUnits S = Ctx.getTypeSizeInChars(InnerTy); + if (!O.isZero()) { + if (IsReference) + Out << "*("; + if (O % S) { + Out << "(char*)"; + S = CharUnits::One(); + } + Out << '&'; + } else if (!IsReference) + Out << '&'; + + if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) + Out << *VD; + else + Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy()); + if (!O.isZero()) { + Out << " + " << (O / S); + if (IsReference) + Out << ')'; + } + return; + } + + // We have an lvalue path. Print it out nicely. + if (!IsReference) + Out << '&'; + else if (isLValueOnePastTheEnd()) + Out << "*(&"; + + QualType ElemTy; + if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) { + Out << *VD; + ElemTy = VD->getType(); + } else { + const Expr *E = Base.get<const Expr*>(); + E->printPretty(Out, 0, Ctx.getPrintingPolicy()); + ElemTy = E->getType(); + } + + ArrayRef<LValuePathEntry> Path = getLValuePath(); + const CXXRecordDecl *CastToBase = 0; + for (unsigned I = 0, N = Path.size(); I != N; ++I) { + if (ElemTy->getAs<RecordType>()) { + // The lvalue refers to a class type, so the next path entry is a base + // or member. + const Decl *BaseOrMember = + BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer(); + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) { + CastToBase = RD; + ElemTy = Ctx.getRecordType(RD); + } else { + const ValueDecl *VD = cast<ValueDecl>(BaseOrMember); + Out << "."; + if (CastToBase) + Out << *CastToBase << "::"; + Out << *VD; + ElemTy = VD->getType(); + } + } else { + // The lvalue must refer to an array. + Out << '[' << Path[I].ArrayIndex << ']'; + ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType(); + } + } + + // Handle formatting of one-past-the-end lvalues. + if (isLValueOnePastTheEnd()) { + // FIXME: If CastToBase is non-0, we should prefix the output with + // "(CastToBase*)". 
+ Out << " + 1"; + if (IsReference) + Out << ')'; + } + return; + } + case APValue::Array: { + const ArrayType *AT = Ctx.getAsArrayType(Ty); + QualType ElemTy = AT->getElementType(); + Out << '{'; + if (unsigned N = getArrayInitializedElts()) { + getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy); + for (unsigned I = 1; I != N; ++I) { + Out << ", "; + if (I == 10) { + // Avoid printing out the entire contents of large arrays. + Out << "..."; + break; + } + getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy); + } + } + Out << '}'; + return; + } + case APValue::Struct: { + Out << '{'; + const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl(); + bool First = true; + if (unsigned N = getStructNumBases()) { + const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD); + CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin(); + for (unsigned I = 0; I != N; ++I, ++BI) { + assert(BI != CD->bases_end()); + if (!First) + Out << ", "; + getStructBase(I).printPretty(Out, Ctx, BI->getType()); + First = false; + } + } + for (RecordDecl::field_iterator FI = RD->field_begin(); + FI != RD->field_end(); ++FI) { + if (!First) + Out << ", "; + if (FI->isUnnamedBitfield()) continue; + getStructField(FI->getFieldIndex()). + printPretty(Out, Ctx, FI->getType()); + First = false; + } + Out << '}'; + return; + } + case APValue::Union: + Out << '{'; + if (const FieldDecl *FD = getUnionField()) { + Out << "." << *FD << " = "; + getUnionValue().printPretty(Out, Ctx, FD->getType()); + } + Out << '}'; + return; + case APValue::MemberPointer: + // FIXME: This is not enough to unambiguously identify the member in a + // multiple-inheritance scenario. 
+ if (const ValueDecl *VD = getMemberPointerDecl()) { + Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD; + return; + } + Out << "0"; + return; + case APValue::AddrLabelDiff: + Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName(); + Out << " - "; + Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName(); + return; + } + llvm_unreachable("Unknown APValue kind!"); +} + +std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const { + std::string Result; + llvm::raw_string_ostream Out(Result); + printPretty(Out, Ctx, Ty); + Out.flush(); + return Result; +} + +const APValue::LValueBase APValue::getLValueBase() const { + assert(isLValue() && "Invalid accessor"); + return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer(); +} + +bool APValue::isLValueOnePastTheEnd() const { + assert(isLValue() && "Invalid accessor"); + return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt(); +} + +CharUnits &APValue::getLValueOffset() { + assert(isLValue() && "Invalid accessor"); + return ((LV*)(void*)Data)->Offset; +} + +bool APValue::hasLValuePath() const { + assert(isLValue() && "Invalid accessor"); + return ((const LV*)(const char*)Data)->hasPath(); +} + +ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const { + assert(isLValue() && hasLValuePath() && "Invalid accessor"); + const LV &LVal = *((const LV*)(const char*)Data); + return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength); +} + +unsigned APValue::getLValueCallIndex() const { + assert(isLValue() && "Invalid accessor"); + return ((const LV*)(const char*)Data)->CallIndex; +} + +void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath, + unsigned CallIndex) { + assert(isLValue() && "Invalid accessor"); + LV &LVal = *((LV*)(char*)Data); + LVal.BaseAndIsOnePastTheEnd.setPointer(B); + LVal.BaseAndIsOnePastTheEnd.setInt(false); + LVal.Offset = O; + LVal.CallIndex = CallIndex; + LVal.resizePath((unsigned)-1); +} + +void 
APValue::setLValue(LValueBase B, const CharUnits &O, + ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd, + unsigned CallIndex) { + assert(isLValue() && "Invalid accessor"); + LV &LVal = *((LV*)(char*)Data); + LVal.BaseAndIsOnePastTheEnd.setPointer(B); + LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd); + LVal.Offset = O; + LVal.CallIndex = CallIndex; + LVal.resizePath(Path.size()); + memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry)); +} + +const ValueDecl *APValue::getMemberPointerDecl() const { + assert(isMemberPointer() && "Invalid accessor"); + const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data); + return MPD.MemberAndIsDerivedMember.getPointer(); +} + +bool APValue::isMemberPointerToDerivedMember() const { + assert(isMemberPointer() && "Invalid accessor"); + const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data); + return MPD.MemberAndIsDerivedMember.getInt(); +} + +ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const { + assert(isMemberPointer() && "Invalid accessor"); + const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data); + return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength); +} + +void APValue::MakeLValue() { + assert(isUninit() && "Bad state change"); + assert(sizeof(LV) <= MaxSize && "LV too big"); + new ((void*)(char*)Data) LV(); + Kind = LValue; +} + +void APValue::MakeArray(unsigned InitElts, unsigned Size) { + assert(isUninit() && "Bad state change"); + new ((void*)(char*)Data) Arr(InitElts, Size); + Kind = Array; +} + +void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember, + ArrayRef<const CXXRecordDecl*> Path) { + assert(isUninit() && "Bad state change"); + MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData; + Kind = MemberPointer; + MPD->MemberAndIsDerivedMember.setPointer(Member); + MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember); + MPD->resizePath(Path.size()); + 
memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*)); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp new file mode 100644 index 000000000000..55033b238c66 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp @@ -0,0 +1,31 @@ +//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ASTConsumer class. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclGroup.h" +using namespace clang; + +bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) { + return true; +} + +void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) { + HandleTopLevelDecl(D); +} + +void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {} + +void ASTConsumer::HandleImplicitImportDecl(ImportDecl *D) { + HandleTopLevelDecl(DeclGroupRef(D)); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp new file mode 100644 index 000000000000..a03cf9e7d47b --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp @@ -0,0 +1,8253 @@ +//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ASTContext interface. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "CXXABI.h" +#include "clang/AST/ASTMutationListener.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/Comment.h" +#include "clang/AST/CommentCommandTraits.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExternalASTSource.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/MangleNumberingContext.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/Triple.h" +#include "llvm/Support/Capacity.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <map> + +using namespace clang; + +unsigned ASTContext::NumImplicitDefaultConstructors; +unsigned ASTContext::NumImplicitDefaultConstructorsDeclared; +unsigned ASTContext::NumImplicitCopyConstructors; +unsigned ASTContext::NumImplicitCopyConstructorsDeclared; +unsigned ASTContext::NumImplicitMoveConstructors; +unsigned ASTContext::NumImplicitMoveConstructorsDeclared; +unsigned ASTContext::NumImplicitCopyAssignmentOperators; +unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared; +unsigned ASTContext::NumImplicitMoveAssignmentOperators; +unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared; +unsigned ASTContext::NumImplicitDestructors; +unsigned ASTContext::NumImplicitDestructorsDeclared; + +enum FloatingRank { + HalfRank, FloatRank, DoubleRank, LongDoubleRank +}; + +RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { + if (!CommentsLoaded && ExternalSource) { + 
ExternalSource->ReadComments(); + CommentsLoaded = true; + } + + assert(D); + + // User can not attach documentation to implicit declarations. + if (D->isImplicit()) + return NULL; + + // User can not attach documentation to implicit instantiations. + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return NULL; + } + + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { + if (VD->isStaticDataMember() && + VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return NULL; + } + + if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) { + if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return NULL; + } + + if (const ClassTemplateSpecializationDecl *CTSD = + dyn_cast<ClassTemplateSpecializationDecl>(D)) { + TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); + if (TSK == TSK_ImplicitInstantiation || + TSK == TSK_Undeclared) + return NULL; + } + + if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) { + if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return NULL; + } + if (const TagDecl *TD = dyn_cast<TagDecl>(D)) { + // When tag declaration (but not definition!) is part of the + // decl-specifier-seq of some other declaration, it doesn't get comment + if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) + return NULL; + } + // TODO: handle comments for function parameters properly. + if (isa<ParmVarDecl>(D)) + return NULL; + + // TODO: we could look up template parameter documentation in the template + // documentation. + if (isa<TemplateTypeParmDecl>(D) || + isa<NonTypeTemplateParmDecl>(D) || + isa<TemplateTemplateParmDecl>(D)) + return NULL; + + ArrayRef<RawComment *> RawComments = Comments.getComments(); + + // If there are no comments anywhere, we won't find anything. + if (RawComments.empty()) + return NULL; + + // Find declaration location. 
+ // For Objective-C declarations we generally don't expect to have multiple + // declarators, thus use declaration starting location as the "declaration + // location". + // For all other declarations multiple declarators are used quite frequently, + // so we use the location of the identifier as the "declaration location". + SourceLocation DeclLoc; + if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) || + isa<ObjCPropertyDecl>(D) || + isa<RedeclarableTemplateDecl>(D) || + isa<ClassTemplateSpecializationDecl>(D)) + DeclLoc = D->getLocStart(); + else { + DeclLoc = D->getLocation(); + // If location of the typedef name is in a macro, it is because being + // declared via a macro. Try using declaration's starting location + // as the "declaration location". + if (DeclLoc.isMacroID() && isa<TypedefDecl>(D)) + DeclLoc = D->getLocStart(); + } + + // If the declaration doesn't map directly to a location in a file, we + // can't find the comment. + if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) + return NULL; + + // Find the comment that occurs just after this declaration. + ArrayRef<RawComment *>::iterator Comment; + { + // When searching for comments during parsing, the comment we are looking + // for is usually among the last two comments we parsed -- check them + // first. + RawComment CommentAtDeclLoc( + SourceMgr, SourceRange(DeclLoc), false, + LangOpts.CommentOpts.ParseAllComments); + BeforeThanCompare<RawComment> Compare(SourceMgr); + ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1; + bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc); + if (!Found && RawComments.size() >= 2) { + MaybeBeforeDecl--; + Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc); + } + + if (Found) { + Comment = MaybeBeforeDecl + 1; + assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(), + &CommentAtDeclLoc, Compare)); + } else { + // Slow path. 
+ Comment = std::lower_bound(RawComments.begin(), RawComments.end(), + &CommentAtDeclLoc, Compare); + } + } + + // Decompose the location for the declaration and find the beginning of the + // file buffer. + std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc); + + // First check whether we have a trailing comment. + if (Comment != RawComments.end() && + (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() && + (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) || + isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) { + std::pair<FileID, unsigned> CommentBeginDecomp + = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin()); + // Check that Doxygen trailing comment comes after the declaration, starts + // on the same line and in the same file as the declaration. + if (DeclLocDecomp.first == CommentBeginDecomp.first && + SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) + == SourceMgr.getLineNumber(CommentBeginDecomp.first, + CommentBeginDecomp.second)) { + return *Comment; + } + } + + // The comment just after the declaration was not a trailing comment. + // Let's look at the previous comment. + if (Comment == RawComments.begin()) + return NULL; + --Comment; + + // Check that we actually have a non-member Doxygen comment. + if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment()) + return NULL; + + // Decompose the end of the comment. + std::pair<FileID, unsigned> CommentEndDecomp + = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd()); + + // If the comment and the declaration aren't in the same file, then they + // aren't related. + if (DeclLocDecomp.first != CommentEndDecomp.first) + return NULL; + + // Get the corresponding buffer. + bool Invalid = false; + const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, + &Invalid).data(); + if (Invalid) + return NULL; + + // Extract text between the comment and declaration. 
+ StringRef Text(Buffer + CommentEndDecomp.second, + DeclLocDecomp.second - CommentEndDecomp.second); + + // There should be no other declarations or preprocessor directives between + // comment and declaration. + if (Text.find_first_of(";{}#@") != StringRef::npos) + return NULL; + + return *Comment; +} + +namespace { +/// If we have a 'templated' declaration for a template, adjust 'D' to +/// refer to the actual template. +/// If we have an implicit instantiation, adjust 'D' to refer to template. +const Decl *adjustDeclToTemplate(const Decl *D) { + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + // Is this function declaration part of a function template? + if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) + return FTD; + + // Nothing to do if function is not an implicit instantiation. + if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) + return D; + + // Function is an implicit instantiation of a function template? + if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) + return FTD; + + // Function is instantiated from a member definition of a class template? + if (const FunctionDecl *MemberDecl = + FD->getInstantiatedFromMemberFunction()) + return MemberDecl; + + return D; + } + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { + // Static data member is instantiated from a member definition of a class + // template? + if (VD->isStaticDataMember()) + if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) + return MemberDecl; + + return D; + } + if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) { + // Is this class declaration part of a class template? + if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) + return CTD; + + // Class is an implicit instantiation of a class template or partial + // specialization? 
+ if (const ClassTemplateSpecializationDecl *CTSD = + dyn_cast<ClassTemplateSpecializationDecl>(CRD)) { + if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) + return D; + llvm::PointerUnion<ClassTemplateDecl *, + ClassTemplatePartialSpecializationDecl *> + PU = CTSD->getSpecializedTemplateOrPartial(); + return PU.is<ClassTemplateDecl*>() ? + static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) : + static_cast<const Decl*>( + PU.get<ClassTemplatePartialSpecializationDecl *>()); + } + + // Class is instantiated from a member definition of a class template? + if (const MemberSpecializationInfo *Info = + CRD->getMemberSpecializationInfo()) + return Info->getInstantiatedFrom(); + + return D; + } + if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) { + // Enum is instantiated from a member definition of a class template? + if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) + return MemberDecl; + + return D; + } + // FIXME: Adjust alias templates? + return D; +} +} // unnamed namespace + +const RawComment *ASTContext::getRawCommentForAnyRedecl( + const Decl *D, + const Decl **OriginalDecl) const { + D = adjustDeclToTemplate(D); + + // Check whether we have cached a comment for this declaration already. + { + llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos = + RedeclComments.find(D); + if (Pos != RedeclComments.end()) { + const RawCommentAndCacheFlags &Raw = Pos->second; + if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) { + if (OriginalDecl) + *OriginalDecl = Raw.getOriginalDecl(); + return Raw.getRaw(); + } + } + } + + // Search for comments attached to declarations in the redeclaration chain. 
+ const RawComment *RC = NULL; + const Decl *OriginalDeclForRC = NULL; + for (Decl::redecl_iterator I = D->redecls_begin(), + E = D->redecls_end(); + I != E; ++I) { + llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos = + RedeclComments.find(*I); + if (Pos != RedeclComments.end()) { + const RawCommentAndCacheFlags &Raw = Pos->second; + if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) { + RC = Raw.getRaw(); + OriginalDeclForRC = Raw.getOriginalDecl(); + break; + } + } else { + RC = getRawCommentForDeclNoCache(*I); + OriginalDeclForRC = *I; + RawCommentAndCacheFlags Raw; + if (RC) { + Raw.setRaw(RC); + Raw.setKind(RawCommentAndCacheFlags::FromDecl); + } else + Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl); + Raw.setOriginalDecl(*I); + RedeclComments[*I] = Raw; + if (RC) + break; + } + } + + // If we found a comment, it should be a documentation comment. + assert(!RC || RC->isDocumentation()); + + if (OriginalDecl) + *OriginalDecl = OriginalDeclForRC; + + // Update cache for every declaration in the redeclaration chain. + RawCommentAndCacheFlags Raw; + Raw.setRaw(RC); + Raw.setKind(RawCommentAndCacheFlags::FromRedecl); + Raw.setOriginalDecl(OriginalDeclForRC); + + for (Decl::redecl_iterator I = D->redecls_begin(), + E = D->redecls_end(); + I != E; ++I) { + RawCommentAndCacheFlags &R = RedeclComments[*I]; + if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl) + R = Raw; + } + + return RC; +} + +static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, + SmallVectorImpl<const NamedDecl *> &Redeclared) { + const DeclContext *DC = ObjCMethod->getDeclContext(); + if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) { + const ObjCInterfaceDecl *ID = IMD->getClassInterface(); + if (!ID) + return; + // Add redeclared method here. 
    for (ObjCInterfaceDecl::known_extensions_iterator
           Ext = ID->known_extensions_begin(),
           ExtEnd = ID->known_extensions_end();
         Ext != ExtEnd; ++Ext) {
      if (ObjCMethodDecl *RedeclaredMethod =
            Ext->getMethod(ObjCMethod->getSelector(),
                           ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

/// Clone \p FC so it can be attached to \p D: the parsed comment blocks are
/// shared, but a fresh DeclInfo is filled in for \p D.  The clone still
/// reports FC's original declaration as the comment's declaration.
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  // fill() ran against D; now point back at the decl the comment came from.
  ThisDeclInfo->CommentDecl = FC->getDecl();
  comments::FullComment *CFC =
    new (*this) comments::FullComment(FC->getBlocks(),
                                      ThisDeclInfo);
  return CFC;

}

/// Parse the comment attached directly to \p D, bypassing all caches.
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, 0, D) : 0;
}

/// Return the parsed documentation comment for \p D, caching per canonical
/// declaration.  When \p D itself has no comment, falls back (in order) to:
/// the accessed property (ObjC accessors), overridden/redeclared methods,
/// the underlying tag of a typedef, ObjC superclasses, a category's class
/// interface, and public non-virtual then virtual C++ base classes.
comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (D->isInvalidDecl())
    return NULL;
  D = adjustDeclToTemplate(D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    // Cached under the canonical decl; clone when asked about a different
    // redeclaration so the DeclInfo matches the queried decl.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const TagType *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      // Walk up the superclass chain until a documented class is found.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return NULL;
      // Check non-virtual bases.
      for (CXXRecordDecl::base_class_const_iterator I =
           RD->bases_begin(), E = RD->bases_end(); I != E; ++I) {
        if (I->isVirtual() || (I->getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I->getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (CXXRecordDecl::base_class_const_iterator I =
           RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) {
        if (I->getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I->getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return NULL;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

/// Profile \p Parm into \p ID for canonical-template-template-parameter
/// uniquing: depth, position, pack-ness, and a structural encoding of its
/// template parameter list (0 = type, 1 = non-type, 2 = template parameter).
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      continue;
    }

    if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    // Remaining case: a template template parameter; profile it recursively.
    TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, TTP);
  }
}

/// Return the canonical (uniqued, name-and-location-stripped) form of the
/// template template parameter \p TTP, building and caching it on first use.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, TTP);
  void *InsertPos = 0;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
      CanonParams.push_back(
                  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
                                               SourceLocation(),
                                               SourceLocation(),
                                               TTP->getDepth(),
                                               TTP->getIndex(), 0, false,
                                               TTP->isParameterPack()));
    else if (NonTypeTemplateParmDecl *NTTP
             = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      // Canonicalize the parameter's type (and each expansion type for an
      // expanded pack) before rebuilding the declaration.
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
                                getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                TInfo,
                                                ExpandedTypes.data(),
                                                ExpandedTypes.size(),
                                                ExpandedTInfos.data());
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), 0,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       0,
                         TemplateParameterList::Create(*this, SourceLocation(),
                                                       SourceLocation(),
                                                       CanonParams.data(),
                                                       CanonParams.size(),
                                                       SourceLocation()));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(Canonical == 0 && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

/// Create the C++ ABI object matching the target's configured ABI kind.
/// Returns null for non-C++ compilations.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return 0;

  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
    return CreateARMCXXABI(*this);
  case TargetCXXABI::GenericAArch64: // Same as Itanium at this level
  case TargetCXXABI::GenericItanium:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

/// Select the language-to-target address space map: either the target's real
/// map or, for testing (-ffake-address-space-map), a synthetic one.
static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
                                             const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
      1, // opencl_global
      2, // opencl_local
      3, // opencl_constant
      4, // cuda_device
      5, // cuda_constant
      6  // cuda_shared
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

/// Decide whether address-space numbers participate in mangling, honoring an
/// explicit -f[no-]address-space-map-mangling override before deferring to
/// the target.
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

/// Construct an ASTContext.  When \p DelayInitialization is false the target
/// must be non-null and the builtin types are initialized immediately;
/// otherwise the caller is expected to invoke InitBuiltinTypes() later.
ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
                       const TargetInfo *t,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins,
                       unsigned size_reserve,
                       bool DelayInitialization)
  : FunctionProtoTypes(this_()),
    TemplateSpecializationTypes(this_()),
    DependentTemplateSpecializationTypes(this_()),
    SubstTemplateTemplateParmPacks(this_()),
    GlobalNestedNameSpecifier(0),
    Int128Decl(0), UInt128Decl(0), Float128StubDecl(0),
    BuiltinVaListDecl(0),
    ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
    BOOLDecl(0),
    CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
    FILEDecl(0),
    jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
    BlockDescriptorType(0), BlockDescriptorExtendedType(0),
    cudaConfigureCallDecl(0),
    NullTypeSourceInfo(QualType()),
    FirstLocalImport(), LastLocalImport(),
    SourceMgr(SM), LangOpts(LOpts),
    AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
    Idents(idents), Selectors(sels),
    BuiltinInfo(builtins),
    DeclarationNames(*this),
    ExternalSource(0), Listener(0),
    Comments(SM), CommentsLoaded(false),
    CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
    LastSDM(0, 0)
{
  if (size_reserve > 0) Types.reserve(size_reserve);
  TUDecl = TranslationUnitDecl::Create(*this);

  if (!DelayInitialization) {
    assert(t && "No target supplied for ASTContext initialization");
    InitBuiltinTypes(*t);
  }
}

ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (DeallocationMap::const_iterator I = Deallocations.begin(),
         E = Deallocations.end(); I != E; ++I)
    for (unsigned J = 0, N = I->second.size(); J != N; ++J)
      (I->first)((I->second)[J]);

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
      R->Destroy(*this);
  }

  // AttrVecs are bump-allocated; run their destructors without freeing.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();

  for (llvm::DenseMap<const DeclContext *, MangleNumberingContext *>::iterator
           I = MangleNumberingContexts.begin(),
           E = MangleNumberingContexts.end();
       I != E; ++I)
    delete I->second;
}

/// Register \p Callback to be invoked with \p Data when this ASTContext is
/// destroyed.
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
  Deallocations[Callback].push_back(Data);
}

/// Take ownership of \p Source as this context's external AST source.
void
ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
  ExternalSource.reset(Source.take());
}

/// Dump allocation statistics (type counts/sizes, implicit special member
/// counts, external source and allocator stats) to llvm::errs().
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource.get()) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

/// Lazily create and return the implicit typedef declaration for __int128_t.
TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
    Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                     getTranslationUnitDecl(),
                                     SourceLocation(),
                                     SourceLocation(),
                                     &Idents.get("__int128_t"),
                                     TInfo);
  }

  return Int128Decl;
}

/// Lazily create and return the implicit typedef declaration for __uint128_t.
TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl) {
    TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
    UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
                                      getTranslationUnitDecl(),
                                      SourceLocation(),
                                      SourceLocation(),
                                      &Idents.get("__uint128_t"),
                                      TInfo);
  }

  return UInt128Decl;
}

/// Lazily create and return a stub struct declaration named __float128
/// (C++ only).
TypeDecl *ASTContext::getFloat128StubType() const {
  assert(LangOpts.CPlusPlus && "should only be called for c++");
  if (!Float128StubDecl) {
    Float128StubDecl = CXXRecordDecl::Create(const_cast<ASTContext &>(*this),
                                             TTK_Struct,
                                             getTranslationUnitDecl(),
                                             SourceLocation(),
                                             SourceLocation(),
                                             &Idents.get("__float128"));
  }

  return Float128StubDecl;
}

/// Allocate a BuiltinType of kind \p K, record it in Types, and store its
/// canonical QualType into \p R.
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

/// Initialize all builtin types for \p Target.  Must run exactly once, after
/// the target is known; also sets up the C++ ABI object and address-space
/// mapping.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // C99 6.2.5p11.
  FloatComplexTy      = getComplexType(FloatTy);
  DoubleComplexTy     = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
    InitBuiltinType(OCLImage1dTy, BuiltinType::OCLImage1d);
    InitBuiltinType(OCLImage1dArrayTy, BuiltinType::OCLImage1dArray);
    InitBuiltinType(OCLImage1dBufferTy, BuiltinType::OCLImage1dBuffer);
    InitBuiltinType(OCLImage2dTy, BuiltinType::OCLImage2d);
    InitBuiltinType(OCLImage2dArrayTy, BuiltinType::OCLImage2dArray);
    InitBuiltinType(OCLImage3dTy, BuiltinType::OCLImage3d);

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  VoidPtrTy = getPointerType(VoidTy);

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  // Builtin type used to help define __builtin_va_list.
  VaListTagTy = QualType();
}

/// Return the diagnostics engine shared with the source manager.
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

/// Return (creating on demand) the attribute vector associated with \p D.
/// The vector is allocated in this context's bump allocator.
AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// \brief Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    // AttrVec lives in the bump allocator: destroy, then drop the map entry.
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
/// Return the member-specialization info recording which template member
/// the static data member \p Var was instantiated from, if any.
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

/// Look up the template/specialization info recorded for \p Var, returning
/// an empty value when none has been noted.
ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return TemplateOrSpecializationInfo();

  return Pos->second;
}

/// Note that the static data member \p Inst was instantiated from \p Tmpl
/// with kind \p TSK at \p PointOfInstantiation.
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

/// Record \p TSI as the template/specialization info for \p Inst.  May only
/// be called once per variable.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

/// Return the pattern a class-scope function specialization \p FD was
/// declared from, or null when none was recorded.
FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
                                                     const FunctionDecl *FD){
  assert(FD && "Specialization is 0");
  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
    = ClassScopeSpecializationPattern.find(FD);
  if (Pos == ClassScopeSpecializationPattern.end())
    return 0;

  return Pos->second;
}

/// Record \p Pattern as the pattern of the class-scope specialization \p FD.
void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
                                                    FunctionDecl *Pattern) {
  assert(FD && "Specialization is 0");
  assert(Pattern && "Class scope specialization pattern is 0");
  ClassScopeSpecializationPattern[FD] = Pattern;
}

/// Return the declaration the using-declaration \p UUD was instantiated
/// from, or null when none was recorded.
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
  llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
    = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return 0;

  return Pos->second;
}

/// Record \p Pattern (a using or unresolved-using declaration) as the
/// template pattern of the instantiated using-declaration \p Inst.
void
ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

/// Return the using-shadow declaration \p Inst was instantiated from, or
/// null when none was recorded.
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return 0;

  return Pos->second;
}

/// Record \p Pattern as the template pattern of the instantiated
/// using-shadow declaration \p Inst.  May only be called once per decl.
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

/// Return the unnamed field declaration \p Field was instantiated from, or
/// null when none was recorded.
FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
    return 0;

  return Pos->second;
}

/// Record that the unnamed field \p Inst was instantiated from the unnamed
/// template field \p Tmpl.  Both decls must be unnamed; only one mapping may
/// be recorded per instantiated field.
void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

/// Begin-iterator over the methods the canonical form of \p Method overrides;
/// null when nothing was recorded.
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.begin();
}

/// End-iterator matching overridden_methods_begin().
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.end();
}

/// Number of methods recorded as overridden by \p Method (canonicalized).
unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
    = OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return 0;

  return Pos->second.size();
}

/// Record that \p Method overrides \p Overridden.  Both must be canonical.
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

/// Append to \p Overridden the methods \p D overrides, handling both C++
/// methods (from the OverriddenMethods table) and Objective-C methods.
void ASTContext::getOverriddenMethods(
                      const NamedDecl *D,
                      SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

/// Append \p Import to the singly-linked list of import declarations local
/// to this translation unit.
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->NextLocalImport && "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->NextLocalImport = Import;
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  assert(BT && "Not a floating point type!");
  switch (BT->getKind()) {
  default: llvm_unreachable("Not a floating point type!");
  case BuiltinType::Half:       return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
  }
}

/// Compute the alignment, in CharUnits, of the declaration \p D, taking
/// alignment attributes, packing, field offsets, and (for \c alignof queries
/// vs. actual layout, via \p ForAlignof) reference/array adjustments into
/// account.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility;  Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
      UseAlignAttrOnly = 
        D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing

  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const ReferenceType* RT = T->getAs<ReferenceType>()) {
      // alignof(reference) is the alignment of the referenced type;
      // layout-wise a reference occupies a pointer.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    if (!T->isIncompleteType() && !T->isFunctionType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }

        // Walk through any array types while we're at it.
        T = getBaseElementType(arrayType);
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage())
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack).  So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  return toCharUnitsFromBits(Align);
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned.  This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject.  We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = T->getAs<RecordType>()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      sizeAndAlign.first = layout.getDataSize();
    }
  }

  return sizeAndAlign;
}

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
+std::pair<CharUnits, CharUnits> +static getConstantArrayInfoInChars(const ASTContext &Context, + const ConstantArrayType *CAT) { + std::pair<CharUnits, CharUnits> EltInfo = + Context.getTypeInfoInChars(CAT->getElementType()); + uint64_t Size = CAT->getSize().getZExtValue(); + assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <= + (uint64_t)(-1)/Size) && + "Overflow in array type char size evaluation"); + uint64_t Width = EltInfo.first.getQuantity() * Size; + unsigned Align = EltInfo.second.getQuantity(); + if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || + Context.getTargetInfo().getPointerWidth(0) == 64) + Width = llvm::RoundUpToAlignment(Width, Align); + return std::make_pair(CharUnits::fromQuantity(Width), + CharUnits::fromQuantity(Align)); +} + +std::pair<CharUnits, CharUnits> +ASTContext::getTypeInfoInChars(const Type *T) const { + if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T)) + return getConstantArrayInfoInChars(*this, CAT); + std::pair<uint64_t, unsigned> Info = getTypeInfo(T); + return std::make_pair(toCharUnitsFromBits(Info.first), + toCharUnitsFromBits(Info.second)); +} + +std::pair<CharUnits, CharUnits> +ASTContext::getTypeInfoInChars(QualType T) const { + return getTypeInfoInChars(T.getTypePtr()); +} + +std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const { + TypeInfoMap::iterator it = MemoizedTypeInfo.find(T); + if (it != MemoizedTypeInfo.end()) + return it->second; + + std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T); + MemoizedTypeInfo.insert(std::make_pair(T, Info)); + return Info; +} + +/// getTypeInfoImpl - Return the size of the specified type, in bits. This +/// method does not work on incomplete types. +/// +/// FIXME: Pointers into different addr spaces could have different sizes and +/// alignment requirements: getPointerInfo should take an AddrSpace, this +/// should take a QualType, &c. 
+std::pair<uint64_t, unsigned>
+ASTContext::getTypeInfoImpl(const Type *T) const {
+  uint64_t Width=0;
+  unsigned Align=8;
+  switch (T->getTypeClass()) {
+// The macro block below is expanded by TypeNodes.def: dependent type
+// classes fall into the unreachable below; non-canonical-unless-dependent
+// classes recurse on their desugared form.
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
+  case Type::Class: \
+  assert(!T->isDependentType() && "should not see dependent types here"); \
+  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
+#include "clang/AST/TypeNodes.def"
+    llvm_unreachable("Should not see dependent types");
+
+  case Type::FunctionNoProto:
+  case Type::FunctionProto:
+    // GCC extension: alignof(function) = 32 bits
+    Width = 0;
+    Align = 32;
+    break;
+
+  case Type::IncompleteArray:
+  case Type::VariableArray:
+    Width = 0;
+    Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
+    break;
+
+  case Type::ConstantArray: {
+    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
+    uint64_t Size = CAT->getSize().getZExtValue();
+    assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
+           "Overflow in array type bit size evaluation");
+    Width = EltInfo.first*Size;
+    Align = EltInfo.second;
+    // Mirror getConstantArrayInfoInChars: pad the array size to a multiple
+    // of the element alignment except on 32-bit Microsoft ABI targets.
+    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
+        getTargetInfo().getPointerWidth(0) == 64)
+      Width = llvm::RoundUpToAlignment(Width, Align);
+    break;
+  }
+  case Type::ExtVector:
+  case Type::Vector: {
+    const VectorType *VT = cast<VectorType>(T);
+    std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
+    Width = EltInfo.first*VT->getNumElements();
+    Align = Width;
+    // If the alignment is not a power of 2, round up to the next power of 2.
+    // This happens for non-power-of-2 length vectors.
+    if (Align & (Align-1)) {
+      Align = llvm::NextPowerOf2(Align);
+      Width = llvm::RoundUpToAlignment(Width, Align);
+    }
+    // Adjust the alignment based on the target max.
+    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
+    if (TargetVectorAlign && TargetVectorAlign < Align)
+      Align = TargetVectorAlign;
+    break;
+  }
+
+  case Type::Builtin:
+    switch (cast<BuiltinType>(T)->getKind()) {
+    default: llvm_unreachable("Unknown builtin type!");
+    case BuiltinType::Void:
+      // GCC extension: alignof(void) = 8 bits.
+      Width = 0;
+      Align = 8;
+      break;
+
+    case BuiltinType::Bool:
+      Width = Target->getBoolWidth();
+      Align = Target->getBoolAlign();
+      break;
+    case BuiltinType::Char_S:
+    case BuiltinType::Char_U:
+    case BuiltinType::UChar:
+    case BuiltinType::SChar:
+      Width = Target->getCharWidth();
+      Align = Target->getCharAlign();
+      break;
+    case BuiltinType::WChar_S:
+    case BuiltinType::WChar_U:
+      Width = Target->getWCharWidth();
+      Align = Target->getWCharAlign();
+      break;
+    case BuiltinType::Char16:
+      Width = Target->getChar16Width();
+      Align = Target->getChar16Align();
+      break;
+    case BuiltinType::Char32:
+      Width = Target->getChar32Width();
+      Align = Target->getChar32Align();
+      break;
+    case BuiltinType::UShort:
+    case BuiltinType::Short:
+      Width = Target->getShortWidth();
+      Align = Target->getShortAlign();
+      break;
+    case BuiltinType::UInt:
+    case BuiltinType::Int:
+      Width = Target->getIntWidth();
+      Align = Target->getIntAlign();
+      break;
+    case BuiltinType::ULong:
+    case BuiltinType::Long:
+      Width = Target->getLongWidth();
+      Align = Target->getLongAlign();
+      break;
+    case BuiltinType::ULongLong:
+    case BuiltinType::LongLong:
+      Width = Target->getLongLongWidth();
+      Align = Target->getLongLongAlign();
+      break;
+    case BuiltinType::Int128:
+    case BuiltinType::UInt128:
+      Width = 128;
+      Align = 128; // int128_t is 128-bit aligned on all targets.
+      break;
+    case BuiltinType::Half:
+      Width = Target->getHalfWidth();
+      Align = Target->getHalfAlign();
+      break;
+    case BuiltinType::Float:
+      Width = Target->getFloatWidth();
+      Align = Target->getFloatAlign();
+      break;
+    case BuiltinType::Double:
+      Width = Target->getDoubleWidth();
+      Align = Target->getDoubleAlign();
+      break;
+    case BuiltinType::LongDouble:
+      Width = Target->getLongDoubleWidth();
+      Align = Target->getLongDoubleAlign();
+      break;
+    case BuiltinType::NullPtr:
+      Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
+      Align = Target->getPointerAlign(0); //   == sizeof(void*)
+      break;
+    case BuiltinType::ObjCId:
+    case BuiltinType::ObjCClass:
+    case BuiltinType::ObjCSel:
+      Width = Target->getPointerWidth(0);
+      Align = Target->getPointerAlign(0);
+      break;
+    case BuiltinType::OCLSampler:
+      // Samplers are modeled as integers.
+      Width = Target->getIntWidth();
+      Align = Target->getIntAlign();
+      break;
+    case BuiltinType::OCLEvent:
+    case BuiltinType::OCLImage1d:
+    case BuiltinType::OCLImage1dArray:
+    case BuiltinType::OCLImage1dBuffer:
+    case BuiltinType::OCLImage2d:
+    case BuiltinType::OCLImage2dArray:
+    case BuiltinType::OCLImage3d:
+      // Currently these types are pointers to opaque types.
+      Width = Target->getPointerWidth(0);
+      Align = Target->getPointerAlign(0);
+      break;
+    }
+    break;
+  case Type::ObjCObjectPointer:
+    Width = Target->getPointerWidth(0);
+    Align = Target->getPointerAlign(0);
+    break;
+  case Type::BlockPointer: {
+    unsigned AS = getTargetAddressSpace(
+        cast<BlockPointerType>(T)->getPointeeType());
+    Width = Target->getPointerWidth(AS);
+    Align = Target->getPointerAlign(AS);
+    break;
+  }
+  case Type::LValueReference:
+  case Type::RValueReference: {
+    // alignof and sizeof should never enter this code path here, so we go
+    // the pointer route.
+    unsigned AS = getTargetAddressSpace(
+        cast<ReferenceType>(T)->getPointeeType());
+    Width = Target->getPointerWidth(AS);
+    Align = Target->getPointerAlign(AS);
+    break;
+  }
+  case Type::Pointer: {
+    unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
+    Width = Target->getPointerWidth(AS);
+    Align = Target->getPointerAlign(AS);
+    break;
+  }
+  case Type::MemberPointer: {
+    // Member pointer layout is ABI-specific (e.g. Itanium vs Microsoft),
+    // so delegate to the CXXABI object.
+    const MemberPointerType *MPT = cast<MemberPointerType>(T);
+    llvm::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
+    break;
+  }
+  case Type::Complex: {
+    // Complex types have the same alignment as their elements, but twice the
+    // size.
+    std::pair<uint64_t, unsigned> EltInfo =
+      getTypeInfo(cast<ComplexType>(T)->getElementType());
+    Width = EltInfo.first*2;
+    Align = EltInfo.second;
+    break;
+  }
+  case Type::ObjCObject:
+    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+  case Type::Decayed:
+    return getTypeInfo(cast<DecayedType>(T)->getDecayedType().getTypePtr());
+  case Type::ObjCInterface: {
+    const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+    Width = toBits(Layout.getSize());
+    Align = toBits(Layout.getAlignment());
+    break;
+  }
+  case Type::Record:
+  case Type::Enum: {
+    const TagType *TT = cast<TagType>(T);
+
+    // For invalid declarations, fall back to a harmless 1-byte layout
+    // rather than querying a record layout that was never computed.
+    if (TT->getDecl()->isInvalidDecl()) {
+      Width = 8;
+      Align = 8;
+      break;
+    }
+
+    if (const EnumType *ET = dyn_cast<EnumType>(TT))
+      return getTypeInfo(ET->getDecl()->getIntegerType());
+
+    const RecordType *RT = cast<RecordType>(TT);
+    const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
+    Width = toBits(Layout.getSize());
+    Align = toBits(Layout.getAlignment());
+    break;
+  }
+
+  case Type::SubstTemplateTypeParm:
+    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
+                       getReplacementType().getTypePtr());
+
+  case Type::Auto: {
+    const AutoType *A = cast<AutoType>(T);
+    assert(!A->getDeducedType().isNull() &&
+           "cannot request the size of an undeduced or dependent auto type");
+    return getTypeInfo(A->getDeducedType().getTypePtr());
+  }
+
+  case Type::Paren:
+    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+
+  case Type::Typedef: {
+    const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
+    std::pair<uint64_t, unsigned> Info
+      = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+    // If the typedef has an aligned attribute on it, it overrides any computed
+    // alignment we have.  This violates the GCC documentation (which says that
+    // attribute(aligned) can only round up) but matches its implementation.
+    if (unsigned AttrAlign = Typedef->getMaxAlignment())
+      Align = AttrAlign;
+    else
+      Align = Info.second;
+    Width = Info.first;
+    break;
+  }
+
+  case Type::Elaborated:
+    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+
+  case Type::Attributed:
+    return getTypeInfo(
+                  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
+  case Type::Atomic: {
+    // Start with the base type information.
+    std::pair<uint64_t, unsigned> Info
+      = getTypeInfo(cast<AtomicType>(T)->getValueType());
+    Width = Info.first;
+    Align = Info.second;
+
+    // If the size of the type doesn't exceed the platform's max
+    // atomic promotion width, make the size and alignment more
+    // favorable to atomic operations:
+    if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
+      // Round the size up to a power of 2.
+      if (!llvm::isPowerOf2_64(Width))
+        Width = llvm::NextPowerOf2(Width);
+
+      // Set the alignment equal to the size.
+      Align = static_cast<unsigned>(Width);
+    }
+  }
+
+  }
+
+  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
+  return std::make_pair(Width, Align);
+}
+
+/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+  // Integer division: a BitSize that is not a multiple of the char width
+  // is truncated toward zero.
+  return CharUnits::fromQuantity(BitSize / getCharWidth());
+}
+
+/// toBits - Convert a size in characters to a size in bits.
+int64_t ASTContext::toBits(CharUnits CharSize) const {
+  return CharSize.getQuantity() * getCharWidth();
+}
+
+/// getTypeSizeInChars - Return the size of the specified type, in characters.
+/// This method does not work on incomplete types.
+CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+  return getTypeInfoInChars(T).first;
+}
+CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+  return getTypeInfoInChars(T).first;
+}
+
+/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+/// characters. This method does not work on incomplete types.
+CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+  return toCharUnitsFromBits(getTypeAlign(T));
+}
+CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+  return toCharUnitsFromBits(getTypeAlign(T));
+}
+
+/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+/// type for the current target in bits. This can be different than the ABI
+/// alignment in cases where it is beneficial for performance to overalign
+/// a data type.
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
+  unsigned ABIAlign = getTypeAlign(T);
+
+  if (Target->getTriple().getArch() == llvm::Triple::xcore)
+    return ABIAlign;  // Never overalign on XCore.
+
+  // Double and long long should be naturally aligned if possible.
+  // (The element type of a _Complex is checked, since complex types have
+  // the same alignment as their elements.)
+  if (const ComplexType* CT = T->getAs<ComplexType>())
+    T = CT->getElementType().getTypePtr();
+  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+      T->isSpecificBuiltinType(BuiltinType::ULongLong))
+    return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+  return ABIAlign;
+}
+
+/// getAlignOfGlobalVar - Return the alignment in bits that should be given
+/// to a global variable of the specified type.
+unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
+  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
+}
+
+/// getAlignOfGlobalVarInChars - Return the alignment in characters that
+/// should be given to a global variable of the specified type.
+CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
+  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
+}
+
+/// DeepCollectObjCIvars -
+/// This routine first collects all declared, but not synthesized, ivars in
+/// super class and then collects all ivars, including those synthesized for
+/// current class. This routine is used for implementation of current class
+/// when all ivars, declared and synthesized are known.
+///
+void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
+                                      bool leafClass,
+                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
+  // Recurse into superclasses first so ivars come out in layout order.
+  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
+    DeepCollectObjCIvars(SuperClass, false, Ivars);
+  if (!leafClass) {
+    for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
+         E = OI->ivar_end(); I != E; ++I)
+      Ivars.push_back(*I);
+  } else {
+    // For the leaf class, walk the full declared+synthesized ivar chain.
+    ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+         Iv= Iv->getNextIvar())
+      Ivars.push_back(Iv);
+  }
+}
+
+/// CollectInheritedProtocols - Collect all protocols in current class and
+/// those inherited by it.
+void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
+                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
+  // NOTE(review): the inner loops below reuse the name 'P'/'PE', shadowing
+  // the outer iterators; behavior is correct but easy to misread.
+  if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+    // We can use protocol_iterator here instead of
+    // all_referenced_protocol_iterator since we are walking all categories.
+    for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
+         PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
+      ObjCProtocolDecl *Proto = (*P);
+      Protocols.insert(Proto->getCanonicalDecl());
+      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+           PE = Proto->protocol_end(); P != PE; ++P) {
+        Protocols.insert((*P)->getCanonicalDecl());
+        CollectInheritedProtocols(*P, Protocols);
+      }
+    }
+
+    // Categories of this Interface.
+    for (ObjCInterfaceDecl::visible_categories_iterator
+           Cat = OI->visible_categories_begin(),
+           CatEnd = OI->visible_categories_end();
+         Cat != CatEnd; ++Cat) {
+      CollectInheritedProtocols(*Cat, Protocols);
+    }
+
+    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
+      while (SD) {
+        CollectInheritedProtocols(SD, Protocols);
+        SD = SD->getSuperClass();
+      }
+  } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+    for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
+         PE = OC->protocol_end(); P != PE; ++P) {
+      ObjCProtocolDecl *Proto = (*P);
+      Protocols.insert(Proto->getCanonicalDecl());
+      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+           PE = Proto->protocol_end(); P != PE; ++P)
+        CollectInheritedProtocols(*P, Protocols);
+    }
+  } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+    for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
+         PE = OP->protocol_end(); P != PE; ++P) {
+      ObjCProtocolDecl *Proto = (*P);
+      Protocols.insert(Proto->getCanonicalDecl());
+      for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+           PE = Proto->protocol_end(); P != PE; ++P)
+        CollectInheritedProtocols(*P, Protocols);
+    }
+  }
+}
+
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
+  unsigned count = 0;
+  // Count ivars declared in class extension.
+  for (ObjCInterfaceDecl::known_extensions_iterator
+         Ext = OI->known_extensions_begin(),
+         ExtEnd = OI->known_extensions_end();
+       Ext != ExtEnd; ++Ext) {
+    count += Ext->ivar_size();
+  }
+
+  // Count ivar defined in this class's implementation.  This
+  // includes synthesized ivars.
+  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
+    count += ImplDecl->ivar_size();
+
+  return count;
+}
+
+bool ASTContext::isSentinelNullExpr(const Expr *E) {
+  if (!E)
+    return false;
+
+  // nullptr_t is always treated as null.
+  if (E->getType()->isNullPtrType()) return true;
+
+  if (E->getType()->isAnyPointerType() &&
+      E->IgnoreParenCasts()->isNullPointerConstant(*this,
+                                            Expr::NPC_ValueDependentIsNull))
+    return true;
+
+  // Unfortunately, __null has type 'int'.
+  if (isa<GNUNullExpr>(E)) return true;
+
+  return false;
+}
+
+/// \brief Get the implementation of ObjCInterfaceDecl,or NULL if none exists.
+ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
+  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+    I = ObjCImpls.find(D);
+  if (I != ObjCImpls.end())
+    return cast<ObjCImplementationDecl>(I->second);
+  return 0;
+}
+/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
+  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+    I = ObjCImpls.find(D);
+  if (I != ObjCImpls.end())
+    return cast<ObjCCategoryImplDecl>(I->second);
+  return 0;
+}
+
+/// \brief Set the implementation of ObjCInterfaceDecl.
+void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, + ObjCImplementationDecl *ImplD) { + assert(IFaceD && ImplD && "Passed null params"); + ObjCImpls[IFaceD] = ImplD; +} +/// \brief Set the implementation of ObjCCategoryDecl. +void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, + ObjCCategoryImplDecl *ImplD) { + assert(CatD && ImplD && "Passed null params"); + ObjCImpls[CatD] = ImplD; +} + +const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( + const NamedDecl *ND) const { + if (const ObjCInterfaceDecl *ID = + dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) + return ID; + if (const ObjCCategoryDecl *CD = + dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) + return CD->getClassInterface(); + if (const ObjCImplDecl *IMD = + dyn_cast<ObjCImplDecl>(ND->getDeclContext())) + return IMD->getClassInterface(); + + return 0; +} + +/// \brief Get the copy initialization expression of VarDecl,or NULL if +/// none exists. +Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) { + assert(VD && "Passed null params"); + assert(VD->hasAttr<BlocksAttr>() && + "getBlockVarCopyInits - not __block var"); + llvm::DenseMap<const VarDecl*, Expr*>::iterator + I = BlockVarCopyInits.find(VD); + return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0; +} + +/// \brief Set the copy inialization expression of a block var decl. 
+void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
+  assert(VD && Init && "Passed null params");
+  assert(VD->hasAttr<BlocksAttr>() &&
+         "setBlockVarCopyInits - not __block var");
+  BlockVarCopyInits[VD] = Init;
+}
+
+TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
+                                                 unsigned DataSize) const {
+  if (!DataSize)
+    DataSize = TypeLoc::getFullDataSizeForType(T);
+  else
+    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
+           "incorrect data size provided to CreateTypeSourceInfo!");
+
+  // TypeSourceInfo is allocated with its TypeLoc data inline, directly
+  // after the object, from the ASTContext's bump allocator.
+  TypeSourceInfo *TInfo =
+    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
+  new (TInfo) TypeSourceInfo(T);
+  return TInfo;
+}
+
+TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
+                                                     SourceLocation L) const {
+  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
+  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
+  return DI;
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
+  return getObjCLayout(D, 0);
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCImplementationLayout(
+                                        const ObjCImplementationDecl *D) const {
+  return getObjCLayout(D->getClassInterface(), D);
+}
+
+//===----------------------------------------------------------------------===//
+//                   Type creation/memoization methods
+//===----------------------------------------------------------------------===//
+
+// All getters below follow the same FoldingSet uniquing protocol:
+// profile the would-be node, return an existing match, otherwise build the
+// canonical form first (which may move the insert position, hence the
+// re-find) and insert a new node.
+QualType
+ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
+  unsigned fastQuals = quals.getFastQualifiers();
+  quals.removeFastQualifiers();
+
+  // Check if we've already instantiated this type.
+  llvm::FoldingSetNodeID ID;
+  ExtQuals::Profile(ID, baseType, quals);
+  void *insertPos = 0;
+  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
+    assert(eq->getQualifiers() == quals);
+    return QualType(eq, fastQuals);
+  }
+
+  // If the base type is not canonical, make the appropriate canonical type.
+  QualType canon;
+  if (!baseType->isCanonicalUnqualified()) {
+    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
+    canonSplit.Quals.addConsistentQualifiers(quals);
+    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
+
+    // Re-find the insert position.
+    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
+  }
+
+  ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+  ExtQualNodes.InsertNode(eq, insertPos);
+  return QualType(eq, fastQuals);
+}
+
+QualType
+ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
+  QualType CanT = getCanonicalType(T);
+  if (CanT.getAddressSpace() == AddressSpace)
+    return T;
+
+  // If we are composing extended qualifiers together, merge together
+  // into one ExtQuals node.
+  QualifierCollector Quals;
+  const Type *TypeNode = Quals.strip(T);
+
+  // If this type already has an address space specified, it cannot get
+  // another one.
+  assert(!Quals.hasAddressSpace() &&
+         "Type cannot be in multiple addr spaces!");
+  Quals.addAddressSpace(AddressSpace);
+
+  return getExtQualType(TypeNode, Quals);
+}
+
+QualType ASTContext::getObjCGCQualType(QualType T,
+                                       Qualifiers::GC GCAttr) const {
+  QualType CanT = getCanonicalType(T);
+  if (CanT.getObjCGCAttr() == GCAttr)
+    return T;
+
+  // For a pointer-to-pointer type, push the GC attribute down to the
+  // pointee and rebuild the outer pointer type.
+  if (const PointerType *ptr = T->getAs<PointerType>()) {
+    QualType Pointee = ptr->getPointeeType();
+    if (Pointee->isAnyPointerType()) {
+      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
+      return getPointerType(ResultType);
+    }
+  }
+
+  // If we are composing extended qualifiers together, merge together
+  // into one ExtQuals node.
+  QualifierCollector Quals;
+  const Type *TypeNode = Quals.strip(T);
+
+  // If this type already has an ObjCGC specified, it cannot get
+  // another one.
+  assert(!Quals.hasObjCGCAttr() &&
+         "Type cannot have multiple ObjCGCs!");
+  Quals.addObjCGCAttr(GCAttr);
+
+  return getExtQualType(TypeNode, Quals);
+}
+
+const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
+                                                   FunctionType::ExtInfo Info) {
+  if (T->getExtInfo() == Info)
+    return T;
+
+  QualType Result;
+  if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+    Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+  } else {
+    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+    EPI.ExtInfo = Info;
+    Result = getFunctionType(FPT->getResultType(), FPT->getArgTypes(), EPI);
+  }
+
+  return cast<FunctionType>(Result.getTypePtr());
+}
+
+void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
+                                                 QualType ResultType) {
+  // Rewrite the type of every redeclaration, walking from the most recent
+  // declaration back through the previous-decl chain.
+  FD = FD->getMostRecentDecl();
+  while (true) {
+    const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
+    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+    FD->setType(getFunctionType(ResultType, FPT->getArgTypes(), EPI));
+    if (FunctionDecl *Next = FD->getPreviousDecl())
+      FD = Next;
+    else
+      break;
+  }
+  if (ASTMutationListener *L = getASTMutationListener())
+    L->DeducedReturnType(FD, ResultType);
+}
+
+/// getComplexType - Return the uniqued reference to the type for a complex
+/// number with the specified element type.
+QualType ASTContext::getComplexType(QualType T) const {
+  // Unique pointers, to guarantee there is only one pointer of a particular
+  // structure.
+  llvm::FoldingSetNodeID ID;
+  ComplexType::Profile(ID, T);
+
+  void *InsertPos = 0;
+  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(CT, 0);
+
+  // If the pointee type isn't canonical, this won't be a canonical type either,
+  // so fill in the canonical type field.
+  QualType Canonical;
+  if (!T.isCanonical()) {
+    Canonical = getComplexType(getCanonicalType(T));
+
+    // Get the new insert position for the node we care about.
+    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
+    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  }
+  ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+  Types.push_back(New);
+  ComplexTypes.InsertNode(New, InsertPos);
+  return QualType(New, 0);
+}
+
+/// getPointerType - Return the uniqued reference to the type for a pointer to
+/// the specified type.
+QualType ASTContext::getPointerType(QualType T) const {
+  // Unique pointers, to guarantee there is only one pointer of a particular
+  // structure.
+  llvm::FoldingSetNodeID ID;
+  PointerType::Profile(ID, T);
+
+  void *InsertPos = 0;
+  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(PT, 0);
+
+  // If the pointee type isn't canonical, this won't be a canonical type either,
+  // so fill in the canonical type field.
+  QualType Canonical;
+  if (!T.isCanonical()) {
+    Canonical = getPointerType(getCanonicalType(T));
+
+    // Get the new insert position for the node we care about.
+    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  }
+  PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+  Types.push_back(New);
+  PointerTypes.InsertNode(New, InsertPos);
+  return QualType(New, 0);
+}
+
+QualType ASTContext::getDecayedType(QualType T) const {
+  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
+
+  llvm::FoldingSetNodeID ID;
+  DecayedType::Profile(ID, T);
+  void *InsertPos = 0;
+  if (DecayedType *DT = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(DT, 0);
+
+  QualType Decayed;
+
+  // C99 6.7.5.3p7:
+  //   A declaration of a parameter as "array of type" shall be
+  //   adjusted to "qualified pointer to type", where the type
+  //   qualifiers (if any) are those specified within the [ and ] of
+  //   the array type derivation.
+  if (T->isArrayType())
+    Decayed = getArrayDecayedType(T);
+
+  // C99 6.7.5.3p8:
+  //   A declaration of a parameter as "function returning type"
+  //   shall be adjusted to "pointer to function returning type", as
+  //   in 6.3.2.1.
+  if (T->isFunctionType())
+    Decayed = getPointerType(T);
+
+  QualType Canonical = getCanonicalType(Decayed);
+
+  // Get the new insert position for the node we care about.
+  DecayedType *NewIP = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos);
+  assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+
+  DecayedType *New =
+      new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
+  Types.push_back(New);
+  DecayedTypes.InsertNode(New, InsertPos);
+  return QualType(New, 0);
+}
+
+/// getBlockPointerType - Return the uniqued reference to the type for
+/// a pointer to the specified block.
+QualType ASTContext::getBlockPointerType(QualType T) const {
+  assert(T->isFunctionType() && "block of function types only");
+  // Unique pointers, to guarantee there is only one block of a particular
+  // structure.
+  llvm::FoldingSetNodeID ID;
+  BlockPointerType::Profile(ID, T);
+
+  void *InsertPos = 0;
+  if (BlockPointerType *PT =
+        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(PT, 0);
+
+  // If the block pointee type isn't canonical, this won't be a canonical
+  // type either so fill in the canonical type field.
+  QualType Canonical;
+  if (!T.isCanonical()) {
+    Canonical = getBlockPointerType(getCanonicalType(T));
+
+    // Get the new insert position for the node we care about.
+    BlockPointerType *NewIP =
+      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  }
+  BlockPointerType *New
+    = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+  Types.push_back(New);
+  BlockPointerTypes.InsertNode(New, InsertPos);
+  return QualType(New, 0);
+}
+
+/// getLValueReferenceType - Return the uniqued reference to the type for an
+/// lvalue reference to the specified type.
+QualType
+ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
+  assert(getCanonicalType(T) != OverloadTy &&
+         "Unresolved overloaded function type");
+
+  // Unique pointers, to guarantee there is only one pointer of a particular
+  // structure.
+  llvm::FoldingSetNodeID ID;
+  ReferenceType::Profile(ID, T, SpelledAsLValue);
+
+  void *InsertPos = 0;
+  if (LValueReferenceType *RT =
+        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(RT, 0);
+
+  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+  // If the referencee type isn't canonical, this won't be a canonical type
+  // either, so fill in the canonical type field.
+  // (Reference collapsing: a reference to a reference canonicalizes to a
+  // reference to the innermost pointee.)
+  QualType Canonical;
+  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
+    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
+
+    // Get the new insert position for the node we care about.
+    LValueReferenceType *NewIP =
+      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  }
+
+  LValueReferenceType *New
+    = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
+                                                     SpelledAsLValue);
+  Types.push_back(New);
+  LValueReferenceTypes.InsertNode(New, InsertPos);
+
+  return QualType(New, 0);
+}
+
+/// getRValueReferenceType - Return the uniqued reference to the type for an
+/// rvalue reference to the specified type.
+QualType ASTContext::getRValueReferenceType(QualType T) const {
+  // Unique pointers, to guarantee there is only one pointer of a particular
+  // structure.
+  llvm::FoldingSetNodeID ID;
+  ReferenceType::Profile(ID, T, false);
+
+  void *InsertPos = 0;
+  if (RValueReferenceType *RT =
+        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+    return QualType(RT, 0);
+
+  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+  // If the referencee type isn't canonical, this won't be a canonical type
+  // either, so fill in the canonical type field.
+  QualType Canonical;
+  if (InnerRef || !T.isCanonical()) {
+    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
+
+    // Get the new insert position for the node we care about.
+    RValueReferenceType *NewIP =
+      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+  }
+
+  RValueReferenceType *New
+    = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+  Types.push_back(New);
+  RValueReferenceTypes.InsertNode(New, InsertPos);
+  return QualType(New, 0);
+}
+
+/// getMemberPointerType - Return the uniqued reference to the type for a
+/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = 0;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    // The recursive call may have inserted into the folding set, invalidating
    // InsertPos, so it must be recomputed before InsertNode below.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  MemberPointerType *New
    = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // Convert the array size into a canonical width matching the pointer size for
  // the target.  A local copy is made because the profile/uniquing below must
  // see the width-normalized value, not the caller's.
  llvm::APInt ArySize(ArySizeIn);
  ArySize =
    ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);

  void *InsertPos = 0;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, this won't
  // be a canonical type either, so fill in the canonical type field.
  // Qualifiers are split off the element and re-applied to the whole
  // array (C qualifiers on array elements qualify the array).
  QualType Canon;
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }

  ConstantArrayType *New = new(*this,TypeAlignment)
    ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
  // Expand one 'case' per non-canonical type class; desugaring above must
  // already have removed all of these.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::PackExpansion:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const AtomicType *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                                  /*size*/ 0,
                                  ArrayType::Normal,
                                  iat->getIndexTypeCVRQualifiers(),
                                  SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const VariableArrayType *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                                  /*size*/ 0,
                                  ArrayType::Star,
                                  vat->getIndexTypeCVRQualifiers(),
                                  vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}

/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy,
                                          Expr *NumElts,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  VariableArrayType *New = new(*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArrayType::ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.  We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    DependentSizedArrayType *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = 0;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type,
  // then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  DependentSizedArrayType *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// getIncompleteArrayType - Returns a unique reference to the type for an
/// incomplete array of the specified element type.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArrayType::ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = 0;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    // The recursive call may have invalidated insertPos; recompute it.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  IncompleteArrayType *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorType::VectorKind VecKind) const {
  assert(vecType->isBuiltinType());

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);

  void *InsertPos = 0;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    // The recursive call may have invalidated InsertPos; recompute it.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  VectorType *New = new (*this, TypeAlignment)
    VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType
ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType());

  // Check if we've already instantiated a vector of this type.
  // ExtVector shares the VectorTypes folding set with getVectorType; the
  // Type::ExtVector tag in the profile keeps the two kinds distinct.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
                      VectorType::GenericVector);
  void *InsertPos = 0;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  ExtVectorType *New = new (*this, TypeAlignment)
    ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedExtVectorType - Return a non-unique reference to the type
/// for an extended vector with a dependent size expression.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
                                       SizeExpr);

  void *InsertPos = 0;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, TypeAlignment)
      DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
                                  SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      // This type is itself canonical: register it in the folding set so
      // future lookups with the same profile find it.
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                    AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                      SourceLocation());
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  const CallingConv CallConv = Info.getCC();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = 0;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!ResultTy.isCanonical()) {
    Canonical = getFunctionNoProtoType(getCanonicalType(ResultTy), Info);

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }

  FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv);
  FunctionNoProtoType *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, newInfo);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// \brief Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionType - Return a normal function type with a typed argument
/// list.  isVariadic indicates whether the argument list includes '...'.
QualType
ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
                            const FunctionProtoType::ExtProtoInfo &EPI) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this);

  void *InsertPos = 0;
  if (FunctionProtoType *FTP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FTP, 0);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical =
    EPI.ExceptionSpecType == EST_None && isCanonicalResultType(ResultTy) &&
    !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  // If this type isn't canonical, get the canonical version of it.
  // The exception spec is not part of the canonical type.
  QualType Canonical;
  if (!isCanonical) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;
    CanonicalEPI.ExceptionSpecType = EST_None;
    CanonicalEPI.NumExceptions = 0;

    // Result types do not have ARC lifetime qualifiers.
    QualType CanResultTy = getCanonicalType(ResultTy);
    if (ResultTy.getQualifiers().hasObjCLifetime()) {
      Qualifiers Qs = CanResultTy.getQualifiers();
      Qs.removeObjCLifetime();
      CanResultTy = getQualifiedType(CanResultTy.getUnqualifiedType(), Qs);
    }

    Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI);

    // Get the new insert position for the node we care about.
    // The recursive call may have invalidated InsertPos; recompute it.
    FunctionProtoType *NewIP =
      FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }

  // FunctionProtoType objects are allocated with extra bytes after
  // them for three variable size arrays at the end:
  //  - parameter types
  //  - exception types
  //  - consumed-arguments flags
  // Instead of the exception types, there could be a noexcept
  // expression, or information used to resolve the exception
  // specification.
  size_t Size = sizeof(FunctionProtoType) +
                NumArgs * sizeof(QualType);
  if (EPI.ExceptionSpecType == EST_Dynamic) {
    Size += EPI.NumExceptions * sizeof(QualType);
  } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
    Size += sizeof(Expr*);
  } else if (EPI.ExceptionSpecType == EST_Uninstantiated) {
    Size += 2 * sizeof(FunctionDecl*);
  } else if (EPI.ExceptionSpecType == EST_Unevaluated) {
    Size += sizeof(FunctionDecl*);
  }
  if (EPI.ConsumedArguments)
    Size += NumArgs * sizeof(bool);

  FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}

#ifndef NDEBUG
// Returns true for class templates and their partial specializations, i.e.
// the declarations whose self-reference must be an InjectedClassNameType.
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
  if (!isa<CXXRecordDecl>(D)) return false;
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
#endif

/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Redeclarations share the type node created for the first declaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    Type *newType =
      new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}

/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration.  Slow path for the cases the inline fast
/// path (Decl->TypeForDecl already set) does not handle.
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param");
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");

  if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);

  assert(!isa<TemplateTypeParmDecl>(Decl) &&
         "Template type parameter types are always available.");

  if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration");
    assert(!NeedsInjectedClassNameType(Record));
    return getRecordType(Record);
  } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration");
    return getEnumType(Enum);
  } else if (const UnresolvedUsingTypenameDecl *Using =
               dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  } else
    llvm_unreachable("TypeDecl without a type?");

  return QualType(Decl->TypeForDecl, 0);
}

/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
QualType
ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                           QualType Canonical) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // If the caller did not supply a canonical type, derive it from the
  // typedef's underlying type.
  if (Canonical.isNull())
    Canonical = getCanonicalType(Decl->getUnderlyingType());
  TypedefType *newType = new(*this, TypeAlignment)
    TypedefType(Type::Typedef, Decl, Canonical);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Redeclarations share the type node of the first declaration that got one.
  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Redeclarations share the type node of the first declaration that got one.
  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

/// getAttributedType - Return the uniqued reference to a type annotated with
/// the given attribute kind; the equivalent type provides the canonical form.
QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = 0;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
           AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}


/// \brief Retrieve a
substitution-result type. +QualType +ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, + QualType Replacement) const { + assert(Replacement.isCanonical() + && "replacement types must always be canonical"); + + llvm::FoldingSetNodeID ID; + SubstTemplateTypeParmType::Profile(ID, Parm, Replacement); + void *InsertPos = 0; + SubstTemplateTypeParmType *SubstParm + = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); + + if (!SubstParm) { + SubstParm = new (*this, TypeAlignment) + SubstTemplateTypeParmType(Parm, Replacement); + Types.push_back(SubstParm); + SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); + } + + return QualType(SubstParm, 0); +} + +/// \brief Retrieve a +QualType ASTContext::getSubstTemplateTypeParmPackType( + const TemplateTypeParmType *Parm, + const TemplateArgument &ArgPack) { +#ifndef NDEBUG + for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(), + PEnd = ArgPack.pack_end(); + P != PEnd; ++P) { + assert(P->getKind() == TemplateArgument::Type &&"Pack contains a non-type"); + assert(P->getAsType().isCanonical() && "Pack contains non-canonical type"); + } +#endif + + llvm::FoldingSetNodeID ID; + SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack); + void *InsertPos = 0; + if (SubstTemplateTypeParmPackType *SubstParm + = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(SubstParm, 0); + + QualType Canon; + if (!Parm->isCanonicalUnqualified()) { + Canon = getCanonicalType(QualType(Parm, 0)); + Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon), + ArgPack); + SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); + } + + SubstTemplateTypeParmPackType *SubstParm + = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon, + ArgPack); + Types.push_back(SubstParm); + SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); + return QualType(SubstParm, 0); +} + +/// \brief Retrieve the template type parameter 
type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                           TemplateTypeParmDecl *TTPDecl) const {
  // Unique on (depth, index, pack-ness, decl); the decl-less form is the
  // canonical one.
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = 0;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // Build (or fetch) the canonical, decl-less node first, then hang the
    // named node off it.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    // The recursive call above must not have created a node that collides
    // with ours.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
      TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

/// Build a TypeSourceInfo for a template specialization written at NameLoc,
/// filling in default (empty/args-derived) source locations for the pieces.
TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                        const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
QualType Underlying) const { + assert(!Template.getAsDependentTemplateName() && + "No dependent template names here!"); + + unsigned NumArgs = Args.size(); + + SmallVector<TemplateArgument, 4> ArgVec; + ArgVec.reserve(NumArgs); + for (unsigned i = 0; i != NumArgs; ++i) + ArgVec.push_back(Args[i].getArgument()); + + return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs, + Underlying); +} + +#ifndef NDEBUG +static bool hasAnyPackExpansions(const TemplateArgument *Args, + unsigned NumArgs) { + for (unsigned I = 0; I != NumArgs; ++I) + if (Args[I].isPackExpansion()) + return true; + + return true; +} +#endif + +QualType +ASTContext::getTemplateSpecializationType(TemplateName Template, + const TemplateArgument *Args, + unsigned NumArgs, + QualType Underlying) const { + assert(!Template.getAsDependentTemplateName() && + "No dependent template names here!"); + // Look through qualified template names. + if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) + Template = TemplateName(QTN->getTemplateDecl()); + + bool IsTypeAlias = + Template.getAsTemplateDecl() && + isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()); + QualType CanonType; + if (!Underlying.isNull()) + CanonType = getCanonicalType(Underlying); + else { + // We can get here with an alias template when the specialization contains + // a pack expansion that does not match up with a parameter pack. + assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) && + "Caller must compute aliased type"); + IsTypeAlias = false; + CanonType = getCanonicalTemplateSpecializationType(Template, Args, + NumArgs); + } + + // Allocate the (non-canonical) template specialization type, but don't + // try to unique it: these types typically have location information that + // we don't unique and don't want to lose. + void *Mem = Allocate(sizeof(TemplateSpecializationType) + + sizeof(TemplateArgument) * NumArgs + + (IsTypeAlias? 
sizeof(QualType) : 0),
                       TypeAlignment);
  TemplateSpecializationType *Spec
    = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
                                         IsTypeAlias ? Underlying : QualType());

  // Deliberately not uniqued (see comment above); just record ownership.
  Types.push_back(Spec);
  return QualType(Spec, 0);
}

QualType
ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
                                                   const TemplateArgument *Args,
                                                   unsigned NumArgs) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  // Build the canonical template specialization type: canonical template
  // name plus canonicalized arguments.
  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
  SmallVector<TemplateArgument, 4> CanonArgs;
  CanonArgs.reserve(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I)
    CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));

  // Determine whether this canonical template specialization type already
  // exists.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, CanonTemplate,
                                      CanonArgs.data(), NumArgs, *this);

  void *InsertPos = 0;
  TemplateSpecializationType *Spec
    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Spec) {
    // Allocate a new canonical template specialization type.
    // Arguments are allocated inline, immediately after the node.
    void *Mem = Allocate((sizeof(TemplateSpecializationType) +
                          sizeof(TemplateArgument) * NumArgs),
                         TypeAlignment);
    Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
                                                CanonArgs.data(), NumArgs,
                                                QualType(), QualType());
    Types.push_back(Spec);
    TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  }

  assert(Spec->isDependentType() &&
         "Non-dependent template-id type must have a canonical type");
  return QualType(Spec, 0);
}

QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                              NestedNameSpecifier *NNS,
                              QualType NamedType) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType);

  void *InsertPos = 0;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    // Canonicalization must not have created a node matching our profile.
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon);
  Types.push_back(T);
  ElaboratedTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);

  void *InsertPos = 0;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    // Canonicalization must not have created a node matching our profile.
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this) ParenType(InnerType, Canon);
  Types.push_back(T);
  ParenTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name,
                                          QualType Canon) const {
  assert(NNS->isDependent() && "nested-name-specifier must be dependent");

  if (Canon.isNull()) {
    // Compute the canonical form ourselves: canonical NNS, and ETK_None
    // normalized to ETK_Typename.
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    ElaboratedTypeKeyword CanonKeyword = Keyword;
    if (Keyword == ETK_None)
      CanonKeyword = ETK_Typename;

    if (CanonNNS != NNS || CanonKeyword != Keyword)
      Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
  }

  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = 0;
  DependentNameType *T
    = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  T = new (*this) DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(T);
  DependentNameTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 const TemplateArgumentListInfo &Args) const {
  // TODO: avoid this copy
  SmallVector<TemplateArgument, 16> ArgCopy;
  for (unsigned I = 0, E = Args.size(); I != E; ++I)
    ArgCopy.push_back(Args[I].getArgument());
  return getDependentTemplateSpecializationType(Keyword, NNS, Name,
                                                ArgCopy.size(),
                                                ArgCopy.data());
}

QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 unsigned NumArgs,
                                 const TemplateArgument *Args) const {
  assert((!NNS || NNS->isDependent()) &&
         "nested-name-specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, NumArgs, Args);

  void *InsertPos = 0;
  DependentTemplateSpecializationType *T
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  ElaboratedTypeKeyword 
CanonKeyword = Keyword;
  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;

  // Canonicalize every argument, noting whether anything changed.
  bool AnyNonCanonArgs = false;
  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I) {
    CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
    if (!CanonArgs[I].structurallyEquals(Args[I]))
      AnyNonCanonArgs = true;
  }

  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name, NumArgs,
                                                   CanonArgs.data());

    // Find the insert position again.
    DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Arguments are allocated inline, immediately after the node.
  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * NumArgs),
                       TypeAlignment);
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, NumArgs, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          Optional<unsigned> NumExpansions) {
  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  assert(Pattern->containsUnexpandedParameterPack() &&
         "Pack expansions must expand one or more parameter packs");
  void *InsertPos = 0;
  PackExpansionType *T
    = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getCanonicalType(Pattern);
    // The canonical type might not contain an unexpanded parameter pack, if it
    // contains an alias template specialization which ignores one of its
    // parameters.
    if (Canon->containsUnexpandedParameterPack()) {
      Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);

      // Find the insert position again, in case we inserted an element into
      // PackExpansionTypes and invalidated our insert position.
      PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
    }
  }

  T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
                             const ObjCProtocolDecl *RHS) {
  return LHS->getDeclName() < RHS->getDeclName();
}

/// Returns true if the protocol list is already name-sorted, duplicate-free,
/// and made up entirely of canonical declarations (the canonical form that
/// getObjCObjectType relies on).
static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
                                unsigned NumProtocols) {
  if (NumProtocols == 0) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  // Strict ordering between neighbors implies both sortedness and
  // uniqueness in one pass.
  for (unsigned i = 1; i != NumProtocols; ++i)
    if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}

/// Sorts the protocol list in place, canonicalizes each entry, and removes
/// duplicates; NumProtocols is updated to the new length.
static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
                                   unsigned &NumProtocols) {
  ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;

  // Sort protocols, keyed by name.
  std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);

  // Canonicalize.
  for (unsigned I = 0, N = NumProtocols; I != N; ++I)
    Protocols[I] = Protocols[I]->getCanonicalDecl();

  // Remove duplicates.
  ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
  NumProtocols = ProtocolsEnd-Protocols;
}

QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  // If the base type is an interface and there aren't any protocols
  // to add, then the interface type will do just fine.
  if (!NumProtocols && isa<ObjCInterfaceType>(BaseType))
    return BaseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols);
  void *InsertPos = 0;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Build the canonical type, which has the canonical base type and
  // a sorted-and-uniqued list of protocols.
  QualType Canonical;
  bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols);
  if (!ProtocolsSorted || !BaseType.isCanonical()) {
    if (!ProtocolsSorted) {
      // Work on a copy so the caller's list is untouched.
      SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols,
                                               Protocols + NumProtocols);
      unsigned UniqueCount = NumProtocols;

      SortAndUniqueProtocols(&Sorted[0], UniqueCount);
      Canonical = getObjCObjectType(getCanonicalType(BaseType),
                                    &Sorted[0], UniqueCount);
    } else {
      Canonical = getObjCObjectType(getCanonicalType(BaseType),
                                    Protocols, NumProtocols);
    }

    // Regenerate InsertPos.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Protocol pointers are allocated inline, immediately after the node.
  unsigned Size = sizeof(ObjCObjectTypeImpl);
  Size += NumProtocols * sizeof(ObjCProtocolDecl *);
  void *Mem = Allocate(Size, TypeAlignment);
  ObjCObjectTypeImpl *T =
    new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols);

  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);

  void *InsertPos = 0;
  if (ObjCObjectPointerType *QT =
              ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
  ObjCObjectPointerType *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}

/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // Uniqued via the decl's TypeForDecl cache rather than a folding set.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    // Redeclarations share the previous declaration's type node.
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}

/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    // Dependent typeof(expr) types ARE uniqued, by profiling the expression.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, *this, tofExpr);

    void *InsertPos = 0;
    DependentTypeOfExprType *Canon
      = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
                                          QualType((TypeOfExprType*)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon
        = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
      DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
      toe = Canon;
    }
  } else {
    // Non-dependent: the canonical type is simply the canonical form of the
    // expression's type.
    QualType Canonical = getCanonicalType(tofExpr->getType());
    toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
  }
  Types.push_back(toe);
  return QualType(toe, 0);
}

/// getTypeOfType -  Unlike many "get<Type>" functions, we don't unique
/// TypeOfType AST's. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType) const {
  QualType Canonical = getCanonicalType(tofType);
  TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
  Types.push_back(tot);
  return QualType(tot, 0);
}


/// getDecltypeType - Unlike many "get<Type>" functions, we don't unique
/// DecltypeType AST's. The only motivation to unique these nodes would be
/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't effect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
  DecltypeType *dt;

  // C++0x [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (e->isInstantiationDependent()) {
    // Dependent decltype types are uniqued, by profiling the expression.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);

    void *InsertPos = 0;
    DependentDecltypeType *Canon
      = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an equivalent, dependent
      // decltype type. Use that as our canonical type.
      dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
                                       QualType((DecltypeType*)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
      dt = Canon;
    }
  } else {
    dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
                                      getCanonicalType(UnderlyingType));
  }
  Types.push_back(dt);
  return QualType(dt, 0);
}

/// getUnaryTransformationType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  // A dependent underlying type means the transform itself is dependent,
  // so no canonical type is recorded in that case.
  UnaryTransformType *Ty =
    new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType,
                                                   Kind,
                                 UnderlyingType->isDependentType() ?
                                 QualType() : getCanonicalType(UnderlyingType));
  Types.push_back(Ty);
  return QualType(Ty, 0);
}

/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType ASTContext::getAutoType(QualType DeducedType, bool IsDecltypeAuto,
                                 bool IsDependent) const {
  if (DeducedType.isNull() && !IsDecltypeAuto && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  void *InsertPos = 0;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, DeducedType, IsDecltypeAuto, IsDependent);
  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType,
                                                     IsDecltypeAuto,
                                                     IsDependent);
  Types.push_back(AT);
  if (InsertPos)
    AutoTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = 0;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
  }
  AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
  Types.push_back(New);
  AtomicTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
  // Lazily created and cached; this is the canonical undeduced 'auto'.
  if (AutoDeductTy.isNull())
    AutoDeductTy = QualType(
      new (*this, TypeAlignment) AutoType(QualType(), /*decltype(auto)*/false,
                                          /*dependent*/false),
      0);
  return AutoDeductTy;
}

/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 
QualType ASTContext::getAutoRRefDeductType() const {
  // Lazily created and cached.
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}

/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
  assert (Decl);
  // FIXME: What is the design on getTagDeclType when it requires casting
  // away const?  mutable?
  return getTypeDeclType(const_cast<TagDecl*>(Decl));
}

/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
CanQualType ASTContext::getSizeType() const {
  return getFromTargetType(Target->getSizeType());
}

/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getIntMaxType() const {
  return getFromTargetType(Target->getIntMaxType());
}

/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getUIntMaxType() const {
  return getFromTargetType(Target->getUIntMaxType());
}

/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  return WCharTy;
}

/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  return UnsignedIntTy;
}

QualType ASTContext::getIntPtrType() const {
  return getFromTargetType(Target->getIntPtrType());
}

QualType ASTContext::getUIntPtrType() const {
  // uintptr_t is derived as the unsigned counterpart of intptr_t.
  return getCorrespondingUnsignedType(getIntPtrType());
}

/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
  return getFromTargetType(Target->getPtrDiffType(0));
}

/// \brief Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
  return getFromTargetType(Target->getProcessIDType());
}

//===----------------------------------------------------------------------===//
//                        Type Operators
//===----------------------------------------------------------------------===//

CanQualType ASTContext::getCanonicalParamType(QualType T) const {
  // Push qualifiers into arrays, and then discard any remaining
  // qualifiers.
  T = getCanonicalType(T);
  T = getVariableArrayDecayedType(T);
  const Type *Ty = T.getTypePtr();
  QualType Result;
  // Apply array-to-pointer and function-to-pointer decay, as for a
  // parameter's adjusted type.
  if (isa<ArrayType>(Ty)) {
    Result = getArrayDecayedType(QualType(Ty,0));
  } else if (isa<FunctionType>(Ty)) {
    Result = getPointerType(QualType(Ty, 0));
  } else {
    Result = QualType(Ty, 0);
  }

  return CanQualType::CreateUnsafe(Result);
}

QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const ArrayType *AT =
    dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);

  // Rebuild the same flavor of array around the unqualified element type.
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeModifier(), 0);
  }

  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }

  if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }

  const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}

/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types  that
/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
/// they point to and return true. If T1 and T2 aren't pointer types
/// or pointer-to-member types, or if they are not similar at this
/// level, returns false and leaves T1 and T2 unchanged. Top-level
/// qualifiers on T1 and T2 are ignored. 
This function will typically
/// be called in a loop that successively "unwraps" pointer and
/// pointer-to-member types to compare them at each level.
bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
  const PointerType *T1PtrType = T1->getAs<PointerType>(),
                    *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  // Member pointers only unwrap when both point into the same class.
  const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
                          *T2MPType = T2->getAs<MemberPointerType>();
  if (T1MPType && T2MPType &&
      hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
                             QualType(T2MPType->getClass(), 0))) {
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  if (getLangOpts().ObjC1) {
    const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
                                *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}

DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    // All overloads share one name; any member's name will do.
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc;
      DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
      DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}

TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    // Template template parameters are canonicalized separately.
    if (TemplateTemplateParmDecl *TTP
          = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
    llvm_unreachable("cannot canonicalize overloaded template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // A substituted parameter canonicalizes to its replacement.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // Canonicalize both the parameter pack and its argument pack.
    SubstTemplateTemplateParmPackStorage *subst
                                  = Name.getAsSubstTemplateTemplateParmPack();
    TemplateTemplateParmDecl *canonParameter
      = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
    TemplateArgument canonArgPack
      = getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
  }
  }

  llvm_unreachable("bad template name!");
}

bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
  // Canonical template names are unique, so pointer identity suffices.
  X = getCanonicalTemplateName(X);
  Y = getCanonicalTemplateName(Y);
  return X.getAsVoidPointer() == Y.getAsVoidPointer();
}

TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
    case TemplateArgument::Null:
      return Arg;

    case TemplateArgument::Expression:
      return Arg;

    case TemplateArgument::Declaration: {
      ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
      return TemplateArgument(D, Arg.isDeclForReferenceParam());
    }

    case TemplateArgument::NullPtr:
      return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                              /*isNullPtr*/true);

    case TemplateArgument::Template:
      return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));

    case TemplateArgument::TemplateExpansion:
      return 
TemplateArgument(getCanonicalTemplateName( + Arg.getAsTemplateOrTemplatePattern()), + Arg.getNumTemplateExpansions()); + + case TemplateArgument::Integral: + return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); + + case TemplateArgument::Type: + return TemplateArgument(getCanonicalType(Arg.getAsType())); + + case TemplateArgument::Pack: { + if (Arg.pack_size() == 0) + return Arg; + + TemplateArgument *CanonArgs + = new (*this) TemplateArgument[Arg.pack_size()]; + unsigned Idx = 0; + for (TemplateArgument::pack_iterator A = Arg.pack_begin(), + AEnd = Arg.pack_end(); + A != AEnd; (void)++A, ++Idx) + CanonArgs[Idx] = getCanonicalTemplateArgument(*A); + + return TemplateArgument(CanonArgs, Arg.pack_size()); + } + } + + // Silence GCC warning + llvm_unreachable("Unhandled template argument kind"); +} + +NestedNameSpecifier * +ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { + if (!NNS) + return 0; + + switch (NNS->getKind()) { + case NestedNameSpecifier::Identifier: + // Canonicalize the prefix but keep the identifier the same. + return NestedNameSpecifier::Create(*this, + getCanonicalNestedNameSpecifier(NNS->getPrefix()), + NNS->getAsIdentifier()); + + case NestedNameSpecifier::Namespace: + // A namespace is canonical; build a nested-name-specifier with + // this namespace and no prefix. + return NestedNameSpecifier::Create(*this, 0, + NNS->getAsNamespace()->getOriginalNamespace()); + + case NestedNameSpecifier::NamespaceAlias: + // A namespace is canonical; build a nested-name-specifier with + // this namespace and no prefix. 
    // Aliases canonicalize to the namespace they name.
    return NestedNameSpecifier::Create(*this, 0,
                    NNS->getAsNamespaceAlias()->getNamespace()
                                                      ->getOriginalNamespace());

  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconsititute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const DependentNameType *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
                           const_cast<IdentifierInfo *>(DNT->getIdentifier()));

    // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
    // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
    // first place?
    return NestedNameSpecifier::Create(*this, 0, false,
                                       const_cast<Type*>(T.getTypePtr()));
  }

  case NestedNameSpecifier::Global:
    // The global specifier is canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}


/// getAsArrayType - Return the array type of T, looking through any sugar
/// and propagating qualifiers from the array type onto the element type
/// (C99 6.7.3p8).  Returns null if T's canonical type is not an array.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const ArrayType *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return 0;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
  if (ATy == 0 || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  // Rebuild whichever flavor of array this is with the qualified element type.
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeModifier(),
                                           CAT->getIndexTypeCVRQualifiers()));
  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                           IAT->getIndexTypeCVRQualifiers()));

  if (const DependentSizedArrayType *DSAT
        = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
                     getDependentSizedArrayType(NewEltTy,
                                                DSAT->getSizeExpr(),
                                                DSAT->getSizeModifier(),
                                              DSAT->getIndexTypeCVRQualifiers(),
                                                DSAT->getBracketsRange()));

  const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}

/// getAdjustedParameterType - Perform the array/function-to-pointer decay
/// adjustment on a parameter type (C99 6.7.5.3p7/p8).
QualType ASTContext::getAdjustedParameterType(QualType T) const {
  if (T->isArrayType() || T->isFunctionType())
    return getDecayedType(T);
  return T;
}

/// getSignatureParameterType - Produce the parameter type as it appears in
/// a function signature: VLA-decayed, adjusted, and unqualified.
QualType ASTContext::getSignatureParameterType(QualType T) const {
  T = getVariableArrayDecayedType(T);
  T = getAdjustedParameterType(T);
  return T.getUnqualifiedType();
}

/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // Get the element type with 'getAsArrayType' so that we don't lose any
  // typedefs in the element type of the array. This also handles propagation
  // of type qualifiers from the array type into the element type if present
  // (C99 6.7.3p8).
  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
  assert(PrettyArrayType && "Not an array type!");

  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());

  // int x[restrict 4] ->  int *restrict
  return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
}

QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  return getBaseElementType(array->getElementType());
}

/// getBaseElementType - Strip off all array types, accumulating the
/// qualifiers found along the way onto the innermost element type.
QualType ASTContext::getBaseElementType(QualType type) const {
  Qualifiers qs;
  while (true) {
    SplitQualType split = type.getSplitDesugaredType();
    const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
    if (!array) break;

    type = array->getElementType();
    qs.addConsistentQualifiers(split.Quals);
  }

  return getQualifiedType(type, qs);
}

/// getConstantArrayElementCount - Returns number of constant array elements.
// Multiplies out the extents of all nested constant array dimensions.
uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA)  const {
  uint64_t ElementCount = 1;
  do {
    ElementCount *= CA->getSize().getZExtValue();
    CA = dyn_cast_or_null<ConstantArrayType>(
      CA->getElementType()->getAsArrayTypeUnsafe());
  } while (CA);
  return ElementCount;
}

/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
+static FloatingRank getFloatingRank(QualType T) { + if (const ComplexType *CT = T->getAs<ComplexType>()) + return getFloatingRank(CT->getElementType()); + + assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type"); + switch (T->getAs<BuiltinType>()->getKind()) { + default: llvm_unreachable("getFloatingRank(): not a floating type"); + case BuiltinType::Half: return HalfRank; + case BuiltinType::Float: return FloatRank; + case BuiltinType::Double: return DoubleRank; + case BuiltinType::LongDouble: return LongDoubleRank; + } +} + +/// getFloatingTypeOfSizeWithinDomain - Returns a real floating +/// point or a complex type (based on typeDomain/typeSize). +/// 'typeDomain' is a real floating point or complex type. +/// 'typeSize' is a real floating point or complex type. +QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, + QualType Domain) const { + FloatingRank EltRank = getFloatingRank(Size); + if (Domain->isComplexType()) { + switch (EltRank) { + case HalfRank: llvm_unreachable("Complex half is not supported"); + case FloatRank: return FloatComplexTy; + case DoubleRank: return DoubleComplexTy; + case LongDoubleRank: return LongDoubleComplexTy; + } + } + + assert(Domain->isRealFloatingType() && "Unknown domain!"); + switch (EltRank) { + case HalfRank: return HalfTy; + case FloatRank: return FloatTy; + case DoubleRank: return DoubleTy; + case LongDoubleRank: return LongDoubleTy; + } + llvm_unreachable("getFloatingRank(): illegal value for rank"); +} + +/// getFloatingTypeOrder - Compare the rank of the two specified floating +/// point types, ignoring the domain of the type (i.e. 'double' == +/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If +/// LHS < RHS, return -1. 
int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
  FloatingRank LHSR = getFloatingRank(LHS);
  FloatingRank RHSR = getFloatingRank(RHS);

  if (LHSR == RHSR)
    return 0;
  if (LHSR > RHSR)
    return 1;
  return -1;
}

/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // The rank encodes the target's bit-width in the high bits (width << 3)
  // plus a small per-kind constant that breaks ties between same-width
  // types (e.g. 'int' outranks 'short' even if both are 16 bits wide).
  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);
  }
}

/// \brief Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  if (E->isTypeDependent() || E->isValueDependent())
    return QualType();

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return QualType();

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // GCC extension compatibility: if the bit-field size is less than or equal
  // to the size of int, it gets promoted no matter what its type is.
  // For instance, unsigned long bf : 4 gets promoted to signed int.
  if (BitWidth < IntSize)
    return IntTy;

  // Exactly int-sized bit-fields keep the signedness of their declared type.
  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Types bigger than int are not subject to promotions, and therefore act
  // like the base type.
  // FIXME: This doesn't quite match what gcc does, but what gcc does here
  // is ridiculous.
  return QualType();
}

/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(Promotable->isPromotableIntegerType());
  // Enumerations promote to the promotion type computed for the enum decl.
  if (const EnumType *ET = Promotable->getAs<EnumType>())
    return ET->getDecl()->getPromotionType();

  if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(BT);
      // Candidate promotion targets, in the order mandated by [conv.prom].
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
        uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
        // Pick the first candidate that is strictly wider, or equally wide
        // with matching signedness.
        if (FromSize < ToSize ||
            (FromSize == ToSize &&
             FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
          return PromoteTypes[Idx];
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // An unsigned type narrower than int promotes to int; an int-sized one
  // promotes to unsigned int.
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}

/// \brief Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
  while (!T.isNull()) {
    if (T.getObjCLifetime() != Qualifiers::OCL_None)
      return T.getObjCLifetime();
    if (T->isArrayType())
      T = getBaseElementType(T);
    else if (const PointerType *PT = T->getAs<PointerType>())
      T = PT->getPointeeType();
    else if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType();
    else
      break;
  }

  return Qualifiers::OCL_None;
}

// Returns the underlying integer type of a complete, unscoped enum, or NULL
// for incomplete or scoped enums.
static const Type *getIntegerTypeForEnum(const EnumType *ET) {
  // Incomplete enum types are not treated as integer types.
  // FIXME: In C++, enum types are never integer types.
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return NULL;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1.  If LHS > RHS, return 1.  If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const EnumType *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const EnumType *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}

// Build a tag-less RecordDecl at an invalid location, using CXXRecordDecl in
// C++ mode so the result participates correctly in C++ semantics.
static RecordDecl *
CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
                 DeclContext *DC, IdentifierInfo *Id) {
  SourceLocation Loc;
  if (Ctx.getLangOpts().CPlusPlus)
    return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
  else
    return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
}

// getCFConstantStringType - Return the type used for constant CFStrings.
// The struct definition is built lazily on first use and cached in
// CFConstantStringTypeDecl.
QualType ASTContext::getCFConstantStringType() const {
  if (!CFConstantStringTypeDecl) {
    CFConstantStringTypeDecl =
      CreateRecordDecl(*this, TTK_Struct, TUDecl,
                       &Idents.get("NSConstantString"));
    CFConstantStringTypeDecl->startDefinition();

    QualType FieldTypes[4];

    // const int *isa;
    FieldTypes[0] = getPointerType(IntTy.withConst());
    // int flags;
    FieldTypes[1] = IntTy;
    // const char *str;
    FieldTypes[2] = getPointerType(CharTy.withConst());
    // long length;
    FieldTypes[3] = LongTy;

    // Create fields
    for (unsigned i = 0; i < 4; ++i) {
      FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
                                           SourceLocation(),
                                           SourceLocation(), 0,
                                           FieldTypes[i], /*TInfo=*/0,
                                           /*BitWidth=*/0,
                                           /*Mutable=*/false,
                                           ICIS_NoInit);
      Field->setAccess(AS_public);
      CFConstantStringTypeDecl->addDecl(Field);
    }

    CFConstantStringTypeDecl->completeDefinition();
  }

  return getTagDeclType(CFConstantStringTypeDecl);
}

// Lazily build and cache the 'struct objc_super' type.
QualType ASTContext::getObjCSuperType() const {
  if (ObjCSuperType.isNull()) {
    RecordDecl *ObjCSuperTypeDecl  =
      CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get("objc_super"));
    TUDecl->addDecl(ObjCSuperTypeDecl);
    ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
  }
  return ObjCSuperType;
}

// Install an externally-provided CFConstantString record type (e.g. from a
// deserialized AST), replacing lazy construction.
void ASTContext::setCFConstantStringType(QualType T) {
  const RecordType *Rec = T->getAs<RecordType>();
  assert(Rec && "Invalid CFConstantStringType");
  CFConstantStringTypeDecl = Rec->getDecl();
}

// Lazily build and cache the '__block_descriptor' struct type used by the
// blocks runtime.
QualType ASTContext::getBlockDescriptorType() const {
  if (BlockDescriptorType)
    return getTagDeclType(BlockDescriptorType);

  RecordDecl *T;
  // FIXME: Needs the FlagAppleBlock bit.
  T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
                       &Idents.get("__block_descriptor"));
  T->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size"
  };

  for (size_t i = 0; i < 2; ++i) {
    FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
                                         SourceLocation(),
                                         &Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/0,
                                         /*BitWidth=*/0,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    T->addDecl(Field);
  }

  T->completeDefinition();

  BlockDescriptorType = T;

  return getTagDeclType(BlockDescriptorType);
}

// Lazily build and cache the extended block descriptor struct, which adds
// copy/dispose helper function pointers to the basic descriptor layout.
QualType ASTContext::getBlockDescriptorExtendedType() const {
  if (BlockDescriptorExtendedType)
    return getTagDeclType(BlockDescriptorExtendedType);

  RecordDecl *T;
  // FIXME: Needs the FlagAppleBlock bit.
  T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
                       &Idents.get("__block_descriptor_withcopydispose"));
  T->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
    getPointerType(VoidPtrTy),
    getPointerType(VoidPtrTy)
  };

  static const char *const FieldNames[] = {
    "reserved",
    "Size",
    "CopyFuncPtr",
    "DestroyFuncPtr"
  };

  for (size_t i = 0; i < 4; ++i) {
    FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
                                         SourceLocation(),
                                         &Idents.get(FieldNames[i]),
                                         FieldTypes[i], /*TInfo=*/0,
                                         /*BitWidth=*/0,
                                         /*Mutable=*/false,
                                         ICIS_NoInit);
    Field->setAccess(AS_public);
    T->addDecl(Field);
  }

  T->completeDefinition();

  BlockDescriptorExtendedType = T;

  return getTagDeclType(BlockDescriptorExtendedType);
}

/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they are trivially destructible and
  // have no block-var copy initializer.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInits(D);
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    assert(getLangOpts().ObjCAutoRefCount);

    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // Tell the runtime that this is ARC __weak, called by the
      // byref routines.
      case Qualifiers::OCL_Weak:
      // ARC __strong __block variables need to be retained.
      case Qualifiers::OCL_Strong:
        return true;
    }
    llvm_unreachable("fell out of lifetime switch!");
  }
  // No explicit lifetime (MRR): copy helpers are needed for block pointers,
  // NSObject-attributed types, and ObjC object pointers.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}

// Compute the ObjC lifetime and layout kind a __block (byref) variable of
// type Ty needs.  Returns false (and leaves the outputs untouched) when not
// compiling non-GC Objective-C.
bool ASTContext::getByrefLifetime(QualType Ty,
                                  Qualifiers::ObjCLifetime &LifeTime,
                                  bool &HasByrefExtendedLayout) const {

  if (!getLangOpts().ObjC1 ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  if (Ty->isRecordType()) {
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  }
  else if (getLangOpts().ObjCAutoRefCount)
    LifeTime = Ty.getObjCLifetime();
  // MRR.
  else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
    LifeTime = Qualifiers::OCL_ExplicitNone;
  else
    LifeTime = Qualifiers::OCL_None;
  return true;
}

// Lazily create the 'instancetype' typedef (a typedef of 'id') and cache it.
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
  if (!ObjCInstanceTypeDecl)
    ObjCInstanceTypeDecl = TypedefDecl::Create(*this,
                                               getTranslationUnitDecl(),
                                               SourceLocation(),
                                               SourceLocation(),
                                               &Idents.get("instancetype"),
                                     getTrivialTypeSourceInfo(getObjCIdType()));
  return ObjCInstanceTypeDecl;
}

// This returns true if a type has been typedefed to BOOL:
// typedef <type> BOOL;
static bool isTypeTypedefedAsBOOL(QualType T) {
  if (const TypedefType *TT = dyn_cast<TypedefType>(T))
    if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
      return II->isStr("BOOL");

  return false;
}

/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
/// Incomplete non-array types encode as zero size.
CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
  if (!type->isIncompleteArrayType() && type->isIncompleteType())
    return CharUnits::Zero();

  CharUnits sz = getTypeSizeInChars(type);

  // Make all integer and enum types at least as large as an int
  if (sz.isPositive() && type->isIntegralOrEnumerationType())
    sz = std::max(sz, getTypeSizeInChars(IntTy));
  // Treat arrays as pointers, since that's how they're passed in.
  else if (type->isArrayType())
    sz = getTypeSizeInChars(VoidPtrTy);
  return sz;
}

// Render a CharUnits quantity as a decimal string for @encode output.
static inline
std::string charUnitsToString(const CharUnits &CU) {
  return llvm::itostr(CU.getQuantity());
}

/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None,
                            BlockTy->getAs<FunctionType>()->getResultType(),
                            S, true /*Extended*/);
  else
    getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(),
                           S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  SourceLocation Loc;
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  CharUnits ParmOffset = PtrSize;
  for (BlockDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
    assert (sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  // Second pass: append each parameter's encoding followed by its frame
  // offset, starting just past the block-pointer slot.
  ParmOffset = PtrSize;
  for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E =
       Decl->param_end(); PI != E; ++PI) {
    ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
                                      S, true /*Extended*/);
    else
      getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}

// Append the @encode signature of a function declaration to S: result type,
// total frame size, then each parameter's encoding and offset.
// Always returns false.
bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
                                                std::string& S) {
  // Encode result type.
  getObjCEncodingForType(Decl->getResultType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
 
    assert (sz.isPositive() && 
        "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->param_end(); PI != E; ++PI) {
    ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }
  
  return false;
}

/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifer, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  getObjCEncodingForTypeImpl(T, S, true, true, 0,
                             true     /*OutermostType*/,
                             false    /*EncodingProperty*/,
                             false    /*StructField*/,
                             Extended /*EncodeBlockParameters*/,
                             Extended /*EncodeClassNames*/);
}

/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
// Always returns false.
bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                              std::string& S,
                                              bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
                                    Decl->getResultType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  SourceLocation Loc;
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
 
    assert (sz.isPositive() && 
        "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  // Frame size, then the implicit self ('@' at offset 0) and _cmd (':' at
  // pointer-size offset) arguments.
  S += charUnitsToString(ParmOffset);
  S += "@0:";
  S += charUnitsToString(PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
                                      PType, S, Extended);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }
  
  return false;
}

/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
/// kPropertyReadOnly = 'R',   // property is read-only.
/// kPropertyBycopy = 'C',     // property is a copy of the value last assigned
/// kPropertyByref = '&',  // property is a reference to the value last assigned
/// kPropertyDynamic = 'D',    // property is dynamic
/// kPropertyGetter = 'G',     // followed by getter selector name
/// kPropertySetter = 'S',     // followed by setter selector name
/// kPropertyInstanceVariable = 'V'  // followed by instance variable  name
/// kPropertyType = 'T'              // followed by old-style type encoding.
/// kPropertyWeak = 'W'              // 'weak' property
/// kPropertyStrong = 'P'            // property GC'able
/// kPropertyNonAtomic = 'N'         // property non-atomic
/// };
/// @endcode
void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                                const Decl *Container,
                                                std::string& S) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = 0;

  // FIXME: Duplicated code due to poor abstraction.
  if (Container) {
    // Scan the container's property implementations for the one matching PD,
    // recording whether it is @dynamic or @synthesize'd.
    if (const ObjCCategoryImplDecl *CID =
        dyn_cast<ObjCCategoryImplDecl>(Container)) {
      for (ObjCCategoryImplDecl::propimpl_iterator
             i = CID->propimpl_begin(), e = CID->propimpl_end();
           i != e; ++i) {
        ObjCPropertyImplDecl *PID = *i;
        if (PID->getPropertyDecl() == PD) {
          if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
            Dynamic = true;
          } else {
            SynthesizePID = PID;
          }
        }
      }
    } else {
      const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
      for (ObjCCategoryImplDecl::propimpl_iterator
             i = OID->propimpl_begin(), e = OID->propimpl_end();
           i != e; ++i) {
        ObjCPropertyImplDecl *PID = *i;
        if (PID->getPropertyDecl() == PD) {
          if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
            Dynamic = true;
          } else {
            SynthesizePID = PID;
          }
        }
      }
    }
  }

  // FIXME: This is not very efficient.
  S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0,
                             true /* outermost type */,
                             true /* encoding for property */);

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
      S += ",&";
  } else {
    // Read-write: encode the setter semantics.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy: S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak: S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
    S += ",N";

  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  if (SynthesizePID) {
    // Synthesized property: record the backing instance variable's name.
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
}

/// getLegacyIntegralTypeEncoding -
/// Another legacy compatibility encoding: 32-bit longs are encoded as
/// 'l' or 'L' , but not always.  For typedefs, we need to use
/// 'i' or 'I' instead if encoding a struct field, or a pointer!
///
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
  // Only typedef'd long/unsigned long of width 32 are rewritten to int.
  if (isa<TypedefType>(PointeeTy.getTypePtr())) {
    if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
        PointeeTy = UnsignedIntTy;
      else
        if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
          PointeeTy = IntTy;
    }
  }
}

// Append the @encode string for T to S; Field, when provided, supplies
// bit-field context for struct members.
void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
                                        const FieldDecl *Field) const {
  // We follow the behavior of gcc, expanding structures which are
  // directly pointed to, and expanding embedded structures. Note that
  // these rules are sufficient to prevent recursive encoding of the
  // same type.
  getObjCEncodingForTypeImpl(T, S, true, true, Field,
                             true /* outermost type */);
}

// Map a builtin type kind to its single-character @encode letter.  'long'
// encodes as 'l'/'L' only on targets where it is 32 bits; otherwise 'q'/'Q'.
static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
                                            BuiltinType::Kind kind) {
    switch (kind) {
    case BuiltinType::Void:       return 'v';
    case BuiltinType::Bool:       return 'B';
    case BuiltinType::Char_U:
    case BuiltinType::UChar:      return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort:     return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt:       return 'I';
    case BuiltinType::ULong:
        return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128:    return 'T';
    case BuiltinType::ULongLong:  return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar:      return 'c';
    case BuiltinType::Short:      return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int:        return 'i';
    case BuiltinType::Long:
        return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong:   return 'q';
    case BuiltinType::Int128:     return 't';
    case BuiltinType::Float:      return 'f';
    case BuiltinType::Double:     return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr:    return '*'; // like char*
    
    case BuiltinType::Half:
      // FIXME: potentially need @encodes for these!
      return ' ';
    
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
+ case BuiltinType::OCLImage1d: + case BuiltinType::OCLImage1dArray: + case BuiltinType::OCLImage1dBuffer: + case BuiltinType::OCLImage2d: + case BuiltinType::OCLImage2dArray: + case BuiltinType::OCLImage3d: + case BuiltinType::OCLEvent: + case BuiltinType::OCLSampler: + case BuiltinType::Dependent: +#define BUILTIN_TYPE(KIND, ID) +#define PLACEHOLDER_TYPE(KIND, ID) \ + case BuiltinType::KIND: +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("invalid builtin type for @encode"); + } + llvm_unreachable("invalid BuiltinType::Kind value"); +} + +static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { + EnumDecl *Enum = ET->getDecl(); + + // The encoding of an non-fixed enum type is always 'i', regardless of size. + if (!Enum->isFixed()) + return 'i'; + + // The encoding of a fixed enum type matches its fixed underlying type. + const BuiltinType *BT = Enum->getIntegerType()->castAs<BuiltinType>(); + return getObjCEncodingForPrimitiveKind(C, BT->getKind()); +} + +static void EncodeBitField(const ASTContext *Ctx, std::string& S, + QualType T, const FieldDecl *FD) { + assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); + S += 'b'; + // The NeXT runtime encodes bit fields as b followed by the number of bits. + // The GNU runtime requires more information; bitfields are encoded as b, + // then the offset (in bits) of the first element, then the type of the + // bitfield, then the size in bits. For example, in this structure: + // + // struct + // { + // int integer; + // int flags:2; + // }; + // On a 32-bit system, the encoding for flags would be b2 for the NeXT + // runtime, but b32i2 for the GNU runtime. The reason for this extra + // information is not especially sensible, but we're stuck with it for + // compatibility with GCC, although providing it breaks anything that + // actually uses runtime introspection and wants to work on both runtimes... 
+ if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { + const RecordDecl *RD = FD->getParent(); + const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); + S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex())); + if (const EnumType *ET = T->getAs<EnumType>()) + S += ObjCEncodingForEnumType(Ctx, ET); + else { + const BuiltinType *BT = T->castAs<BuiltinType>(); + S += getObjCEncodingForPrimitiveKind(Ctx, BT->getKind()); + } + } + S += llvm::utostr(FD->getBitWidthValue(*Ctx)); +} + +// FIXME: Use SmallString for accumulating string. +void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S, + bool ExpandPointedToStructures, + bool ExpandStructures, + const FieldDecl *FD, + bool OutermostType, + bool EncodingProperty, + bool StructField, + bool EncodeBlockParameters, + bool EncodeClassNames, + bool EncodePointerToObjCTypedef) const { + CanQualType CT = getCanonicalType(T); + switch (CT->getTypeClass()) { + case Type::Builtin: + case Type::Enum: + if (FD && FD->isBitField()) + return EncodeBitField(this, S, T, FD); + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CT)) + S += getObjCEncodingForPrimitiveKind(this, BT->getKind()); + else + S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); + return; + + case Type::Complex: { + const ComplexType *CT = T->castAs<ComplexType>(); + S += 'j'; + getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false, + false); + return; + } + + case Type::Atomic: { + const AtomicType *AT = T->castAs<AtomicType>(); + S += 'A'; + getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, 0, + false, false); + return; + } + + // encoding for pointer or reference types. 
+ case Type::Pointer: + case Type::LValueReference: + case Type::RValueReference: { + QualType PointeeTy; + if (isa<PointerType>(CT)) { + const PointerType *PT = T->castAs<PointerType>(); + if (PT->isObjCSelType()) { + S += ':'; + return; + } + PointeeTy = PT->getPointeeType(); + } else { + PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); + } + + bool isReadOnly = false; + // For historical/compatibility reasons, the read-only qualifier of the + // pointee gets emitted _before_ the '^'. The read-only qualifier of + // the pointer itself gets ignored, _unless_ we are looking at a typedef! + // Also, do not emit the 'r' for anything but the outermost type! + if (isa<TypedefType>(T.getTypePtr())) { + if (OutermostType && T.isConstQualified()) { + isReadOnly = true; + S += 'r'; + } + } else if (OutermostType) { + QualType P = PointeeTy; + while (P->getAs<PointerType>()) + P = P->getAs<PointerType>()->getPointeeType(); + if (P.isConstQualified()) { + isReadOnly = true; + S += 'r'; + } + } + if (isReadOnly) { + // Another legacy compatibility encoding. Some ObjC qualifier and type + // combinations need to be rearranged. + // Rewrite "in const" from "nr" to "rn" + if (StringRef(S).endswith("nr")) + S.replace(S.end()-2, S.end(), "rn"); + } + + if (PointeeTy->isCharType()) { + // char pointer types should be encoded as '*' unless it is a + // type that has been typedef'd to 'BOOL'. + if (!isTypeTypedefedAsBOOL(PointeeTy)) { + S += '*'; + return; + } + } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) { + // GCC binary compat: Need to convert "struct objc_class *" to "#". + if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { + S += '#'; + return; + } + // GCC binary compat: Need to convert "struct objc_object *" to "@". + if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { + S += '@'; + return; + } + // fall through... 
+ } + S += '^'; + getLegacyIntegralTypeEncoding(PointeeTy); + + getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures, + NULL); + return; + } + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: { + const ArrayType *AT = cast<ArrayType>(CT); + + if (isa<IncompleteArrayType>(AT) && !StructField) { + // Incomplete arrays are encoded as a pointer to the array element. + S += '^'; + + getObjCEncodingForTypeImpl(AT->getElementType(), S, + false, ExpandStructures, FD); + } else { + S += '['; + + if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) + S += llvm::utostr(CAT->getSize().getZExtValue()); + else { + //Variable length arrays are encoded as a regular array with 0 elements. + assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && + "Unknown array type!"); + S += '0'; + } + + getObjCEncodingForTypeImpl(AT->getElementType(), S, + false, ExpandStructures, FD); + S += ']'; + } + return; + } + + case Type::FunctionNoProto: + case Type::FunctionProto: + S += '?'; + return; + + case Type::Record: { + RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); + S += RDecl->isUnion() ? '(' : '{'; + // Anonymous structures print as '?' 
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) { + S += II->getName(); + if (ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { + const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); + llvm::raw_string_ostream OS(S); + TemplateSpecializationType::PrintTemplateArgumentList(OS, + TemplateArgs.data(), + TemplateArgs.size(), + (*this).getPrintingPolicy()); + } + } else { + S += '?'; + } + if (ExpandStructures) { + S += '='; + if (!RDecl->isUnion()) { + getObjCEncodingForStructureImpl(RDecl, S, FD); + } else { + for (RecordDecl::field_iterator Field = RDecl->field_begin(), + FieldEnd = RDecl->field_end(); + Field != FieldEnd; ++Field) { + if (FD) { + S += '"'; + S += Field->getNameAsString(); + S += '"'; + } + + // Special case bit-fields. + if (Field->isBitField()) { + getObjCEncodingForTypeImpl(Field->getType(), S, false, true, + *Field); + } else { + QualType qt = Field->getType(); + getLegacyIntegralTypeEncoding(qt); + getObjCEncodingForTypeImpl(qt, S, false, true, + FD, /*OutermostType*/false, + /*EncodingProperty*/false, + /*StructField*/true); + } + } + } + } + S += RDecl->isUnion() ? ')' : '}'; + return; + } + + case Type::BlockPointer: { + const BlockPointerType *BT = T->castAs<BlockPointerType>(); + S += "@?"; // Unlike a pointer-to-function, which is "^?". 
+ if (EncodeBlockParameters) { + const FunctionType *FT = BT->getPointeeType()->castAs<FunctionType>(); + + S += '<'; + // Block return type + getObjCEncodingForTypeImpl(FT->getResultType(), S, + ExpandPointedToStructures, ExpandStructures, + FD, + false /* OutermostType */, + EncodingProperty, + false /* StructField */, + EncodeBlockParameters, + EncodeClassNames); + // Block self + S += "@?"; + // Block parameters + if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) { + for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(), + E = FPT->arg_type_end(); I && (I != E); ++I) { + getObjCEncodingForTypeImpl(*I, S, + ExpandPointedToStructures, + ExpandStructures, + FD, + false /* OutermostType */, + EncodingProperty, + false /* StructField */, + EncodeBlockParameters, + EncodeClassNames); + } + } + S += '>'; + } + return; + } + + case Type::ObjCObject: + case Type::ObjCInterface: { + // Ignore protocol qualifiers when mangling at this level. + T = T->castAs<ObjCObjectType>()->getBaseType(); + + // The assumption seems to be that this assert will succeed + // because nested levels will have filtered out 'id' and 'Class'. 
+ const ObjCInterfaceType *OIT = T->castAs<ObjCInterfaceType>(); + // @encode(class_name) + ObjCInterfaceDecl *OI = OIT->getDecl(); + S += '{'; + const IdentifierInfo *II = OI->getIdentifier(); + S += II->getName(); + S += '='; + SmallVector<const ObjCIvarDecl*, 32> Ivars; + DeepCollectObjCIvars(OI, true, Ivars); + for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { + const FieldDecl *Field = cast<FieldDecl>(Ivars[i]); + if (Field->isBitField()) + getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field); + else + getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD, + false, false, false, false, false, + EncodePointerToObjCTypedef); + } + S += '}'; + return; + } + + case Type::ObjCObjectPointer: { + const ObjCObjectPointerType *OPT = T->castAs<ObjCObjectPointerType>(); + if (OPT->isObjCIdType()) { + S += '@'; + return; + } + + if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { + // FIXME: Consider if we need to output qualifiers for 'Class<p>'. + // Since this is a binary compatibility issue, need to consult with runtime + // folks. Fortunately, this is a *very* obsure construct. + S += '#'; + return; + } + + if (OPT->isObjCQualifiedIdType()) { + getObjCEncodingForTypeImpl(getObjCIdType(), S, + ExpandPointedToStructures, + ExpandStructures, FD); + if (FD || EncodingProperty || EncodeClassNames) { + // Note that we do extended encoding of protocol qualifer list + // Only when doing ivar or property encoding. + S += '"'; + for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), + E = OPT->qual_end(); I != E; ++I) { + S += '<'; + S += (*I)->getNameAsString(); + S += '>'; + } + S += '"'; + } + return; + } + + QualType PointeeTy = OPT->getPointeeType(); + if (!EncodingProperty && + isa<TypedefType>(PointeeTy.getTypePtr()) && + !EncodePointerToObjCTypedef) { + // Another historical/compatibility reason. 
+ // We encode the underlying type which comes out as + // {...}; + S += '^'; + if (FD && OPT->getInterfaceDecl()) { + // Prevent recursive encoding of fields in some rare cases. + ObjCInterfaceDecl *OI = OPT->getInterfaceDecl(); + SmallVector<const ObjCIvarDecl*, 32> Ivars; + DeepCollectObjCIvars(OI, true, Ivars); + for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { + if (cast<FieldDecl>(Ivars[i]) == FD) { + S += '{'; + S += OI->getIdentifier()->getName(); + S += '}'; + return; + } + } + } + getObjCEncodingForTypeImpl(PointeeTy, S, + false, ExpandPointedToStructures, + NULL, + false, false, false, false, false, + /*EncodePointerToObjCTypedef*/true); + return; + } + + S += '@'; + if (OPT->getInterfaceDecl() && + (FD || EncodingProperty || EncodeClassNames)) { + S += '"'; + S += OPT->getInterfaceDecl()->getIdentifier()->getName(); + for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(), + E = OPT->qual_end(); I != E; ++I) { + S += '<'; + S += (*I)->getNameAsString(); + S += '>'; + } + S += '"'; + } + return; + } + + // gcc just blithely ignores member pointers. + // FIXME: we shoul do better than that. 'M' is available. + case Type::MemberPointer: + return; + + case Type::Vector: + case Type::ExtVector: + // This matches gcc's encoding, even though technically it is + // insufficient. + // FIXME. We should do a better job than gcc. + return; + + case Type::Auto: + // We could see an undeduced auto type here during error recovery. + // Just ignore it. 
+ return; + +#define ABSTRACT_TYPE(KIND, BASE) +#define TYPE(KIND, BASE) +#define DEPENDENT_TYPE(KIND, BASE) \ + case Type::KIND: +#define NON_CANONICAL_TYPE(KIND, BASE) \ + case Type::KIND: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ + case Type::KIND: +#include "clang/AST/TypeNodes.def" + llvm_unreachable("@encode for dependent type!"); + } + llvm_unreachable("bad type kind!"); +} + +void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, + std::string &S, + const FieldDecl *FD, + bool includeVBases) const { + assert(RDecl && "Expected non-null RecordDecl"); + assert(!RDecl->isUnion() && "Should not be called for unions"); + if (!RDecl->getDefinition()) + return; + + CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); + std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; + const ASTRecordLayout &layout = getASTRecordLayout(RDecl); + + if (CXXRec) { + for (CXXRecordDecl::base_class_iterator + BI = CXXRec->bases_begin(), + BE = CXXRec->bases_end(); BI != BE; ++BI) { + if (!BI->isVirtual()) { + CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); + if (base->isEmpty()) + continue; + uint64_t offs = toBits(layout.getBaseClassOffset(base)); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, base)); + } + } + } + + unsigned i = 0; + for (RecordDecl::field_iterator Field = RDecl->field_begin(), + FieldEnd = RDecl->field_end(); + Field != FieldEnd; ++Field, ++i) { + uint64_t offs = layout.getFieldOffset(i); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, *Field)); + } + + if (CXXRec && includeVBases) { + for (CXXRecordDecl::base_class_iterator + BI = CXXRec->vbases_begin(), + BE = CXXRec->vbases_end(); BI != BE; ++BI) { + CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl(); + if (base->isEmpty()) + continue; + uint64_t offs = toBits(layout.getVBaseClassOffset(base)); + if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && + 
FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), + std::make_pair(offs, base)); + } + } + + CharUnits size; + if (CXXRec) { + size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); + } else { + size = layout.getSize(); + } + + uint64_t CurOffs = 0; + std::multimap<uint64_t, NamedDecl *>::iterator + CurLayObj = FieldOrBaseOffsets.begin(); + + if (CXXRec && CXXRec->isDynamicClass() && + (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { + if (FD) { + S += "\"_vptr$"; + std::string recname = CXXRec->getNameAsString(); + if (recname.empty()) recname = "?"; + S += recname; + S += '"'; + } + S += "^^?"; + CurOffs += getTypeSize(VoidPtrTy); + } + + if (!RDecl->hasFlexibleArrayMember()) { + // Mark the end of the structure. + uint64_t offs = toBits(size); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, (NamedDecl*)0)); + } + + for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { + assert(CurOffs <= CurLayObj->first); + + if (CurOffs < CurLayObj->first) { + uint64_t padding = CurLayObj->first - CurOffs; + // FIXME: There doesn't seem to be a way to indicate in the encoding that + // packing/alignment of members is different that normal, in which case + // the encoding will be out-of-sync with the real layout. + // If the runtime switches to just consider the size of types without + // taking into account alignment, we could make padding explicit in the + // encoding (e.g. using arrays of chars). The encoding strings would be + // longer then though. + CurOffs += padding; + } + + NamedDecl *dcl = CurLayObj->second; + if (dcl == 0) + break; // reached end of structure. + + if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) { + // We expand the bases without their virtual bases since those are going + // in the initial structure. 
Note that this differs from gcc which + // expands virtual bases each time one is encountered in the hierarchy, + // making the encoding type bigger than it really is. + getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false); + assert(!base->isEmpty()); + CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); + } else { + FieldDecl *field = cast<FieldDecl>(dcl); + if (FD) { + S += '"'; + S += field->getNameAsString(); + S += '"'; + } + + if (field->isBitField()) { + EncodeBitField(this, S, field->getType(), field); + CurOffs += field->getBitWidthValue(*this); + } else { + QualType qt = field->getType(); + getLegacyIntegralTypeEncoding(qt); + getObjCEncodingForTypeImpl(qt, S, false, true, FD, + /*OutermostType*/false, + /*EncodingProperty*/false, + /*StructField*/true); + CurOffs += getTypeSize(field->getType()); + } + } + } +} + +void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, + std::string& S) const { + if (QT & Decl::OBJC_TQ_In) + S += 'n'; + if (QT & Decl::OBJC_TQ_Inout) + S += 'N'; + if (QT & Decl::OBJC_TQ_Out) + S += 'o'; + if (QT & Decl::OBJC_TQ_Bycopy) + S += 'O'; + if (QT & Decl::OBJC_TQ_Byref) + S += 'R'; + if (QT & Decl::OBJC_TQ_Oneway) + S += 'V'; +} + +TypedefDecl *ASTContext::getObjCIdDecl() const { + if (!ObjCIdDecl) { + QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0); + T = getObjCObjectPointerType(T); + TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T); + ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), + getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Idents.get("id"), IdInfo); + } + + return ObjCIdDecl; +} + +TypedefDecl *ASTContext::getObjCSelDecl() const { + if (!ObjCSelDecl) { + QualType SelT = getPointerType(ObjCBuiltinSelTy); + TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT); + ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), + getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Idents.get("SEL"), 
SelInfo); + } + return ObjCSelDecl; +} + +TypedefDecl *ASTContext::getObjCClassDecl() const { + if (!ObjCClassDecl) { + QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0); + T = getObjCObjectPointerType(T); + TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T); + ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this), + getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Idents.get("Class"), ClassInfo); + } + + return ObjCClassDecl; +} + +ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { + if (!ObjCProtocolClassDecl) { + ObjCProtocolClassDecl + = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), + &Idents.get("Protocol"), + /*PrevDecl=*/0, + SourceLocation(), true); + } + + return ObjCProtocolClassDecl; +} + +//===----------------------------------------------------------------------===// +// __builtin_va_list Construction Functions +//===----------------------------------------------------------------------===// + +static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { + // typedef char* __builtin_va_list; + QualType CharPtrType = Context->getPointerType(Context->CharTy); + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(CharPtrType); + + TypedefDecl *VaListTypeDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + TInfo); + return VaListTypeDecl; +} + +static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { + // typedef void* __builtin_va_list; + QualType VoidPtrType = Context->getPointerType(Context->VoidTy); + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(VoidPtrType); + + TypedefDecl *VaListTypeDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + 
TInfo); + return VaListTypeDecl; +} + +static TypedefDecl * +CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { + RecordDecl *VaListTagDecl; + if (Context->getLangOpts().CPlusPlus) { + // namespace std { struct __va_list { + NamespaceDecl *NS; + NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + /*Inline*/false, SourceLocation(), + SourceLocation(), &Context->Idents.get("std"), + /*PrevDecl*/0); + + VaListTagDecl = CXXRecordDecl::Create(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__va_list")); + VaListTagDecl->setDeclContext(NS); + } else { + // struct __va_list + VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + &Context->Idents.get("__va_list")); + } + + VaListTagDecl->startDefinition(); + + const size_t NumFields = 5; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // void *__stack; + FieldTypes[0] = Context->getPointerType(Context->VoidTy); + FieldNames[0] = "__stack"; + + // void *__gr_top; + FieldTypes[1] = Context->getPointerType(Context->VoidTy); + FieldNames[1] = "__gr_top"; + + // void *__vr_top; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "__vr_top"; + + // int __gr_offs; + FieldTypes[3] = Context->IntTy; + FieldNames[3] = "__gr_offs"; + + // int __vr_offs; + FieldTypes[4] = Context->IntTy; + FieldNames[4] = "__vr_offs"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + 
Context->VaListTagTy = VaListTagType; + + // } __builtin_va_list; + TypedefDecl *VaListTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + Context->getTrivialTypeSourceInfo(VaListTagType)); + + return VaListTypedefDecl; +} + +static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + + VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + &Context->Idents.get("__va_list_tag")); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 5; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // unsigned char gpr; + FieldTypes[0] = Context->UnsignedCharTy; + FieldNames[0] = "gpr"; + + // unsigned char fpr; + FieldTypes[1] = Context->UnsignedCharTy; + FieldNames[1] = "fpr"; + + // unsigned short reserved; + FieldTypes[2] = Context->UnsignedShortTy; + FieldNames[2] = "reserved"; + + // void* overflow_arg_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "overflow_arg_area"; + + // void* reg_save_area; + FieldTypes[4] = Context->getPointerType(Context->VoidTy); + FieldNames[4] = "reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + Context->VaListTagTy = VaListTagType; + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + 
Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__va_list_tag"), + Context->getTrivialTypeSourceInfo(VaListTagType)); + QualType VaListTagTypedefType = + Context->getTypedefType(VaListTagTypedefDecl); + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType + = Context->getConstantArrayType(VaListTagTypedefType, + Size, ArrayType::Normal, 0); + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(VaListTagArrayType); + TypedefDecl *VaListTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + TInfo); + + return VaListTypedefDecl; +} + +static TypedefDecl * +CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + &Context->Idents.get("__va_list_tag")); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 4; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // unsigned gp_offset; + FieldTypes[0] = Context->UnsignedIntTy; + FieldNames[0] = "gp_offset"; + + // unsigned fp_offset; + FieldTypes[1] = Context->UnsignedIntTy; + FieldNames[1] = "fp_offset"; + + // void* overflow_arg_area; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "overflow_arg_area"; + + // void* reg_save_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/0, + /*BitWidth=*/0, + 
/*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + Context->VaListTagTy = VaListTagType; + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__va_list_tag"), + Context->getTrivialTypeSourceInfo(VaListTagType)); + QualType VaListTagTypedefType = + Context->getTypedefType(VaListTagTypedefDecl); + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType + = Context->getConstantArrayType(VaListTagTypedefType, + Size, ArrayType::Normal,0); + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(VaListTagArrayType); + TypedefDecl *VaListTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + TInfo); + + return VaListTypedefDecl; +} + +static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef int __builtin_va_list[4]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); + QualType IntArrayType + = Context->getConstantArrayType(Context->IntTy, + Size, ArrayType::Normal, 0); + TypedefDecl *VaListTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + Context->getTrivialTypeSourceInfo(IntArrayType)); + + return VaListTypedefDecl; +} + +static TypedefDecl * +CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { + RecordDecl *VaListDecl; + if (Context->getLangOpts().CPlusPlus) { + // namespace std { struct __va_list { + NamespaceDecl 
*NS; + NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + /*Inline*/false, SourceLocation(), + SourceLocation(), &Context->Idents.get("std"), + /*PrevDecl*/0); + + VaListDecl = CXXRecordDecl::Create(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__va_list")); + + VaListDecl->setDeclContext(NS); + + } else { + // struct __va_list { + VaListDecl = CreateRecordDecl(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + &Context->Idents.get("__va_list")); + } + + VaListDecl->startDefinition(); + + // void * __ap; + FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), + VaListDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get("__ap"), + Context->getPointerType(Context->VoidTy), + /*TInfo=*/0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListDecl->addDecl(Field); + + // }; + VaListDecl->completeDefinition(); + + // typedef struct __va_list __builtin_va_list; + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(Context->getRecordType(VaListDecl)); + + TypedefDecl *VaListTypeDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + TInfo); + + return VaListTypeDecl; +} + +static TypedefDecl * +CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct, + Context->getTranslationUnitDecl(), + &Context->Idents.get("__va_list_tag")); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 4; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // long __gpr; + FieldTypes[0] = Context->LongTy; + FieldNames[0] = "__gpr"; + + // long __fpr; + FieldTypes[1] = Context->LongTy; + 
FieldNames[1] = "__fpr"; + + // void *__overflow_arg_area; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "__overflow_arg_area"; + + // void *__reg_save_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "__reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + Context->VaListTagTy = VaListTagType; + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__va_list_tag"), + Context->getTrivialTypeSourceInfo(VaListTagType)); + QualType VaListTagTypedefType = + Context->getTypedefType(VaListTagTypedefDecl); + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType + = Context->getConstantArrayType(VaListTagTypedefType, + Size, ArrayType::Normal,0); + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(VaListTagArrayType); + TypedefDecl *VaListTypedefDecl + = TypedefDecl::Create(const_cast<ASTContext &>(*Context), + Context->getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), + &Context->Idents.get("__builtin_va_list"), + TInfo); + + return VaListTypedefDecl; +} + +static TypedefDecl *CreateVaListDecl(const ASTContext *Context, + TargetInfo::BuiltinVaListKind Kind) { + switch (Kind) { + case TargetInfo::CharPtrBuiltinVaList: + return CreateCharPtrBuiltinVaListDecl(Context); + case 
TargetInfo::VoidPtrBuiltinVaList: + return CreateVoidPtrBuiltinVaListDecl(Context); + case TargetInfo::AArch64ABIBuiltinVaList: + return CreateAArch64ABIBuiltinVaListDecl(Context); + case TargetInfo::PowerABIBuiltinVaList: + return CreatePowerABIBuiltinVaListDecl(Context); + case TargetInfo::X86_64ABIBuiltinVaList: + return CreateX86_64ABIBuiltinVaListDecl(Context); + case TargetInfo::PNaClABIBuiltinVaList: + return CreatePNaClABIBuiltinVaListDecl(Context); + case TargetInfo::AAPCSABIBuiltinVaList: + return CreateAAPCSABIBuiltinVaListDecl(Context); + case TargetInfo::SystemZBuiltinVaList: + return CreateSystemZBuiltinVaListDecl(Context); + } + + llvm_unreachable("Unhandled __builtin_va_list type kind"); +} + +TypedefDecl *ASTContext::getBuiltinVaListDecl() const { + if (!BuiltinVaListDecl) + BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); + + return BuiltinVaListDecl; +} + +QualType ASTContext::getVaListTagType() const { + // Force the creation of VaListTagTy by building the __builtin_va_list + // declaration. + if (VaListTagTy.isNull()) + (void) getBuiltinVaListDecl(); + + return VaListTagTy; +} + +void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { + assert(ObjCConstantStringType.isNull() && + "'NSConstantString' type already set!"); + + ObjCConstantStringType = getObjCInterfaceType(Decl); +} + +/// \brief Retrieve the template name that corresponds to a non-empty +/// lookup. 
+TemplateName +ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, + UnresolvedSetIterator End) const { + unsigned size = End - Begin; + assert(size > 1 && "set is not overloaded!"); + + void *memory = Allocate(sizeof(OverloadedTemplateStorage) + + size * sizeof(FunctionTemplateDecl*)); + OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size); + + NamedDecl **Storage = OT->getStorage(); + for (UnresolvedSetIterator I = Begin; I != End; ++I) { + NamedDecl *D = *I; + assert(isa<FunctionTemplateDecl>(D) || + (isa<UsingShadowDecl>(D) && + isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); + *Storage++ = D; + } + + return TemplateName(OT); +} + +/// \brief Retrieve the template name that represents a qualified +/// template name such as \c std::vector. +TemplateName +ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, + bool TemplateKeyword, + TemplateDecl *Template) const { + assert(NNS && "Missing nested-name-specifier in qualified template name"); + + // FIXME: Canonicalization? + llvm::FoldingSetNodeID ID; + QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); + + void *InsertPos = 0; + QualifiedTemplateName *QTN = + QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + if (!QTN) { + QTN = new (*this, llvm::alignOf<QualifiedTemplateName>()) + QualifiedTemplateName(NNS, TemplateKeyword, Template); + QualifiedTemplateNames.InsertNode(QTN, InsertPos); + } + + return TemplateName(QTN); +} + +/// \brief Retrieve the template name that represents a dependent +/// template name such as \c MetaFun::template apply. 
+TemplateName +ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, + const IdentifierInfo *Name) const { + assert((!NNS || NNS->isDependent()) && + "Nested name specifier must be dependent"); + + llvm::FoldingSetNodeID ID; + DependentTemplateName::Profile(ID, NNS, Name); + + void *InsertPos = 0; + DependentTemplateName *QTN = + DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + + if (QTN) + return TemplateName(QTN); + + NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); + if (CanonNNS == NNS) { + QTN = new (*this, llvm::alignOf<DependentTemplateName>()) + DependentTemplateName(NNS, Name); + } else { + TemplateName Canon = getDependentTemplateName(CanonNNS, Name); + QTN = new (*this, llvm::alignOf<DependentTemplateName>()) + DependentTemplateName(NNS, Name, Canon); + DependentTemplateName *CheckQTN = + DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + assert(!CheckQTN && "Dependent type name canonicalization broken"); + (void)CheckQTN; + } + + DependentTemplateNames.InsertNode(QTN, InsertPos); + return TemplateName(QTN); +} + +/// \brief Retrieve the template name that represents a dependent +/// template name such as \c MetaFun::template operator+. 
+TemplateName +ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, + OverloadedOperatorKind Operator) const { + assert((!NNS || NNS->isDependent()) && + "Nested name specifier must be dependent"); + + llvm::FoldingSetNodeID ID; + DependentTemplateName::Profile(ID, NNS, Operator); + + void *InsertPos = 0; + DependentTemplateName *QTN + = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + + if (QTN) + return TemplateName(QTN); + + NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); + if (CanonNNS == NNS) { + QTN = new (*this, llvm::alignOf<DependentTemplateName>()) + DependentTemplateName(NNS, Operator); + } else { + TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); + QTN = new (*this, llvm::alignOf<DependentTemplateName>()) + DependentTemplateName(NNS, Operator, Canon); + + DependentTemplateName *CheckQTN + = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + assert(!CheckQTN && "Dependent template name canonicalization broken"); + (void)CheckQTN; + } + + DependentTemplateNames.InsertNode(QTN, InsertPos); + return TemplateName(QTN); +} + +TemplateName +ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, + TemplateName replacement) const { + llvm::FoldingSetNodeID ID; + SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); + + void *insertPos = 0; + SubstTemplateTemplateParmStorage *subst + = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); + + if (!subst) { + subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); + SubstTemplateTemplateParms.InsertNode(subst, insertPos); + } + + return TemplateName(subst); +} + +TemplateName +ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, + const TemplateArgument &ArgPack) const { + ASTContext &Self = const_cast<ASTContext &>(*this); + llvm::FoldingSetNodeID ID; + SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); + + void *InsertPos = 0; + 
SubstTemplateTemplateParmPackStorage *Subst + = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); + + if (!Subst) { + Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, + ArgPack.pack_size(), + ArgPack.pack_begin()); + SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); + } + + return TemplateName(Subst); +} + +/// getFromTargetType - Given one of the integer types provided by +/// TargetInfo, produce the corresponding type. The unsigned @p Type +/// is actually a value of type @c TargetInfo::IntType. +CanQualType ASTContext::getFromTargetType(unsigned Type) const { + switch (Type) { + case TargetInfo::NoInt: return CanQualType(); + case TargetInfo::SignedChar: return SignedCharTy; + case TargetInfo::UnsignedChar: return UnsignedCharTy; + case TargetInfo::SignedShort: return ShortTy; + case TargetInfo::UnsignedShort: return UnsignedShortTy; + case TargetInfo::SignedInt: return IntTy; + case TargetInfo::UnsignedInt: return UnsignedIntTy; + case TargetInfo::SignedLong: return LongTy; + case TargetInfo::UnsignedLong: return UnsignedLongTy; + case TargetInfo::SignedLongLong: return LongLongTy; + case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; + } + + llvm_unreachable("Unhandled TargetInfo::IntType value"); +} + +//===----------------------------------------------------------------------===// +// Type Predicates. +//===----------------------------------------------------------------------===// + +/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's +/// garbage collection attribute. +/// +Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { + if (getLangOpts().getGC() == LangOptions::NonGC) + return Qualifiers::GCNone; + + assert(getLangOpts().ObjC1); + Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); + + // Default behaviour under objective-C's gc is for ObjC pointers + // (or pointers to them) be treated as though they were declared + // as __strong. 
+ if (GCAttrs == Qualifiers::GCNone) { + if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) + return Qualifiers::Strong; + else if (Ty->isPointerType()) + return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType()); + } else { + // It's not valid to set GC attributes on anything that isn't a + // pointer. +#ifndef NDEBUG + QualType CT = Ty->getCanonicalTypeInternal(); + while (const ArrayType *AT = dyn_cast<ArrayType>(CT)) + CT = AT->getElementType(); + assert(CT->isAnyPointerType() || CT->isBlockPointerType()); +#endif + } + return GCAttrs; +} + +//===----------------------------------------------------------------------===// +// Type Compatibility Testing +//===----------------------------------------------------------------------===// + +/// areCompatVectorTypes - Return true if the two specified vector types are +/// compatible. +static bool areCompatVectorTypes(const VectorType *LHS, + const VectorType *RHS) { + assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); + return LHS->getElementType() == RHS->getElementType() && + LHS->getNumElements() == RHS->getNumElements(); +} + +bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, + QualType SecondVec) { + assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); + assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); + + if (hasSameUnqualifiedType(FirstVec, SecondVec)) + return true; + + // Treat Neon vector types and most AltiVec vector types as if they are the + // equivalent GCC vector types. 
+ const VectorType *First = FirstVec->getAs<VectorType>(); + const VectorType *Second = SecondVec->getAs<VectorType>(); + if (First->getNumElements() == Second->getNumElements() && + hasSameType(First->getElementType(), Second->getElementType()) && + First->getVectorKind() != VectorType::AltiVecPixel && + First->getVectorKind() != VectorType::AltiVecBool && + Second->getVectorKind() != VectorType::AltiVecPixel && + Second->getVectorKind() != VectorType::AltiVecBool) + return true; + + return false; +} + +//===----------------------------------------------------------------------===// +// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. +//===----------------------------------------------------------------------===// + +/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the +/// inheritance hierarchy of 'rProto'. +bool +ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, + ObjCProtocolDecl *rProto) const { + if (declaresSameEntity(lProto, rProto)) + return true; + for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(), + E = rProto->protocol_end(); PI != E; ++PI) + if (ProtocolCompatibleWithProtocol(lProto, *PI)) + return true; + return false; +} + +/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and +/// Class<pr1, ...>. 
+bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs, + QualType rhs) { + const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>(); + const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); + assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible"); + + for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), + E = lhsQID->qual_end(); I != E; ++I) { + bool match = false; + ObjCProtocolDecl *lhsProto = *I; + for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), + E = rhsOPT->qual_end(); J != E; ++J) { + ObjCProtocolDecl *rhsProto = *J; + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { + match = true; + break; + } + } + if (!match) + return false; + } + return true; +} + +/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an +/// ObjCQualifiedIDType. +bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs, + bool compare) { + // Allow id<P..> and an 'id' or void* type in all cases. + if (lhs->isVoidPointerType() || + lhs->isObjCIdType() || lhs->isObjCClassType()) + return true; + else if (rhs->isVoidPointerType() || + rhs->isObjCIdType() || rhs->isObjCClassType()) + return true; + + if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) { + const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); + + if (!rhsOPT) return false; + + if (rhsOPT->qual_empty()) { + // If the RHS is a unqualified interface pointer "NSString*", + // make sure we check the class hierarchy. + if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { + for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), + E = lhsQID->qual_end(); I != E; ++I) { + // when comparing an id<P> on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. 
+ if (!rhsID->ClassImplementsProtocol(*I, true)) + return false; + } + } + // If there are no qualifiers and no interface, we have an 'id'. + return true; + } + // Both the right and left sides have qualifiers. + for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), + E = lhsQID->qual_end(); I != E; ++I) { + ObjCProtocolDecl *lhsProto = *I; + bool match = false; + + // when comparing an id<P> on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(), + E = rhsOPT->qual_end(); J != E; ++J) { + ObjCProtocolDecl *rhsProto = *J; + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + // If the RHS is a qualified interface pointer "NSString<P>*", + // make sure we check the class hierarchy. + if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) { + for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(), + E = lhsQID->qual_end(); I != E; ++I) { + // when comparing an id<P> on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + if (rhsID->ClassImplementsProtocol(*I, true)) { + match = true; + break; + } + } + } + if (!match) + return false; + } + + return true; + } + + const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType(); + assert(rhsQID && "One of the LHS/RHS should be id<x>"); + + if (const ObjCObjectPointerType *lhsOPT = + lhs->getAsObjCInterfacePointerType()) { + // If both the right and left sides have qualifiers. 
+ for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(), + E = lhsOPT->qual_end(); I != E; ++I) { + ObjCProtocolDecl *lhsProto = *I; + bool match = false; + + // when comparing an id<P> on rhs with a static type on lhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + // First, lhs protocols in the qualifier list must be found, direct + // or indirect in rhs's qualifier list or it is a mismatch. + for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), + E = rhsQID->qual_end(); J != E; ++J) { + ObjCProtocolDecl *rhsProto = *J; + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + if (!match) + return false; + } + + // Static class's protocols, or its super class or category protocols + // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. + if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) { + llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; + CollectInheritedProtocols(lhsID, LHSInheritedProtocols); + // This is rather dubious but matches gcc's behavior. If lhs has + // no type qualifier and its class has no static protocol(s) + // assume that it is mismatch. 
+ if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty()) + return false; + for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = + LHSInheritedProtocols.begin(), + E = LHSInheritedProtocols.end(); I != E; ++I) { + bool match = false; + ObjCProtocolDecl *lhsProto = (*I); + for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(), + E = rhsQID->qual_end(); J != E; ++J) { + ObjCProtocolDecl *rhsProto = *J; + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + if (!match) + return false; + } + } + return true; + } + return false; +} + +/// canAssignObjCInterfaces - Return true if the two interface types are +/// compatible for assignment from RHS to LHS. This handles validation of any +/// protocol qualifiers on the LHS or RHS. +/// +bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT) { + const ObjCObjectType* LHS = LHSOPT->getObjectType(); + const ObjCObjectType* RHS = RHSOPT->getObjectType(); + + // If either type represents the built-in 'id' or 'Class' types, return true. + if (LHS->isObjCUnqualifiedIdOrClass() || + RHS->isObjCUnqualifiedIdOrClass()) + return true; + + if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) + return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), + QualType(RHSOPT,0), + false); + + if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) + return ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0), + QualType(RHSOPT,0)); + + // If we have 2 user-defined types, fall into that path. + if (LHS->getInterface() && RHS->getInterface()) + return canAssignObjCInterfaces(LHS, RHS); + + return false; +} + +/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written +/// for providing type-safety for objective-c pointers used to pass/return +/// arguments in block literals. 
When passed as arguments, passing 'A*' where +/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is +/// not OK. For the return type, the opposite is not OK. +bool ASTContext::canAssignObjCInterfacesInBlockPointer( + const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT, + bool BlockReturnType) { + if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) + return true; + + if (LHSOPT->isObjCBuiltinType()) { + return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType(); + } + + if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) + return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0), + QualType(RHSOPT,0), + false); + + const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); + const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); + if (LHS && RHS) { // We have 2 user-defined types. + if (LHS != RHS) { + if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) + return BlockReturnType; + if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) + return !BlockReturnType; + } + else + return true; + } + return false; +} + +/// getIntersectionOfProtocols - This routine finds the intersection of set +/// of protocols inherited from two distinct objective-c pointer objects. +/// It is used to build composite qualifier list of the composite type of +/// the conditional expression involving two objective-c pointer objects. 
+static +void getIntersectionOfProtocols(ASTContext &Context, + const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT, + SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) { + + const ObjCObjectType* LHS = LHSOPT->getObjectType(); + const ObjCObjectType* RHS = RHSOPT->getObjectType(); + assert(LHS->getInterface() && "LHS must have an interface base"); + assert(RHS->getInterface() && "RHS must have an interface base"); + + llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet; + unsigned LHSNumProtocols = LHS->getNumProtocols(); + if (LHSNumProtocols > 0) + InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end()); + else { + llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; + Context.CollectInheritedProtocols(LHS->getInterface(), + LHSInheritedProtocols); + InheritedProtocolSet.insert(LHSInheritedProtocols.begin(), + LHSInheritedProtocols.end()); + } + + unsigned RHSNumProtocols = RHS->getNumProtocols(); + if (RHSNumProtocols > 0) { + ObjCProtocolDecl **RHSProtocols = + const_cast<ObjCProtocolDecl **>(RHS->qual_begin()); + for (unsigned i = 0; i < RHSNumProtocols; ++i) + if (InheritedProtocolSet.count(RHSProtocols[i])) + IntersectionOfProtocols.push_back(RHSProtocols[i]); + } else { + llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols; + Context.CollectInheritedProtocols(RHS->getInterface(), + RHSInheritedProtocols); + for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = + RHSInheritedProtocols.begin(), + E = RHSInheritedProtocols.end(); I != E; ++I) + if (InheritedProtocolSet.count((*I))) + IntersectionOfProtocols.push_back((*I)); + } +} + +/// areCommonBaseCompatible - Returns common base class of the two classes if +/// one found. Note that this is O'2 algorithm. But it will be called as the +/// last type comparison in a ?-exp of ObjC pointer types before a +/// warning is issued. So, its invokation is extremely rare. 
+QualType ASTContext::areCommonBaseCompatible( + const ObjCObjectPointerType *Lptr, + const ObjCObjectPointerType *Rptr) { + const ObjCObjectType *LHS = Lptr->getObjectType(); + const ObjCObjectType *RHS = Rptr->getObjectType(); + const ObjCInterfaceDecl* LDecl = LHS->getInterface(); + const ObjCInterfaceDecl* RDecl = RHS->getInterface(); + if (!LDecl || !RDecl || (declaresSameEntity(LDecl, RDecl))) + return QualType(); + + do { + LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl)); + if (canAssignObjCInterfaces(LHS, RHS)) { + SmallVector<ObjCProtocolDecl *, 8> Protocols; + getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols); + + QualType Result = QualType(LHS, 0); + if (!Protocols.empty()) + Result = getObjCObjectType(Result, Protocols.data(), Protocols.size()); + Result = getObjCObjectPointerType(Result); + return Result; + } + } while ((LDecl = LDecl->getSuperClass())); + + return QualType(); +} + +bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, + const ObjCObjectType *RHS) { + assert(LHS->getInterface() && "LHS is not an interface type"); + assert(RHS->getInterface() && "RHS is not an interface type"); + + // Verify that the base decls are compatible: the RHS must be a subclass of + // the LHS. + if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface())) + return false; + + // RHS must have a superset of the protocols in the LHS. If the LHS is not + // protocol qualified at all, then we are good. + if (LHS->getNumProtocols() == 0) + return true; + + // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't, + // more detailed analysis is required. + if (RHS->getNumProtocols() == 0) { + // OK, if LHS is a superclass of RHS *and* + // this superclass is assignment compatible with LHS. + // false otherwise. 
+ bool IsSuperClass = + LHS->getInterface()->isSuperClassOf(RHS->getInterface()); + if (IsSuperClass) { + // OK if conversion of LHS to SuperClass results in narrowing of types + // ; i.e., SuperClass may implement at least one of the protocols + // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. + // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. + llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; + CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); + // If super class has no protocols, it is not a match. + if (SuperClassInheritedProtocols.empty()) + return false; + + for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), + LHSPE = LHS->qual_end(); + LHSPI != LHSPE; LHSPI++) { + bool SuperImplementsProtocol = false; + ObjCProtocolDecl *LHSProto = (*LHSPI); + + for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I = + SuperClassInheritedProtocols.begin(), + E = SuperClassInheritedProtocols.end(); I != E; ++I) { + ObjCProtocolDecl *SuperClassProto = (*I); + if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { + SuperImplementsProtocol = true; + break; + } + } + if (!SuperImplementsProtocol) + return false; + } + return true; + } + return false; + } + + for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(), + LHSPE = LHS->qual_end(); + LHSPI != LHSPE; LHSPI++) { + bool RHSImplementsProtocol = false; + + // If the RHS doesn't implement the protocol on the left, the types + // are incompatible. + for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(), + RHSPE = RHS->qual_end(); + RHSPI != RHSPE; RHSPI++) { + if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) { + RHSImplementsProtocol = true; + break; + } + } + // FIXME: For better diagnostics, consider passing back the protocol name. + if (!RHSImplementsProtocol) + return false; + } + // The RHS implements all protocols listed on the LHS. 
+ return true; +} + +bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { + // get the "pointed to" types + const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); + const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); + + if (!LHSOPT || !RHSOPT) + return false; + + return canAssignObjCInterfaces(LHSOPT, RHSOPT) || + canAssignObjCInterfaces(RHSOPT, LHSOPT); +} + +bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { + return canAssignObjCInterfaces( + getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(), + getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>()); +} + +/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, +/// both shall have the identically qualified version of a compatible type. +/// C99 6.2.7p1: Two types have compatible types if their types are the +/// same. See 6.7.[2,3,5] for additional rules. +bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, + bool CompareUnqualified) { + if (getLangOpts().CPlusPlus) + return hasSameType(LHS, RHS); + + return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); +} + +bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { + return typesAreCompatible(LHS, RHS); +} + +bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { + return !mergeTypes(LHS, RHS, true).isNull(); +} + +/// mergeTransparentUnionType - if T is a transparent union type and a member +/// of T is compatible with SubType, return the merged type, else return +/// QualType() +QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, + bool OfBlockPointer, + bool Unqualified) { + if (const RecordType *UT = T->getAsUnionType()) { + RecordDecl *UD = UT->getDecl(); + if (UD->hasAttr<TransparentUnionAttr>()) { + for (RecordDecl::field_iterator it = UD->field_begin(), + itend = UD->field_end(); it != itend; ++it) { + QualType ET = 
it->getType().getUnqualifiedType(); + QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); + if (!MT.isNull()) + return MT; + } + } + } + + return QualType(); +} + +/// mergeFunctionArgumentTypes - merge two types which appear as function +/// argument types +QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs, + bool OfBlockPointer, + bool Unqualified) { + // GNU extension: two types are compatible if they appear as a function + // argument, one of the types is a transparent union type and the other + // type is compatible with a union member + QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, + Unqualified); + if (!lmerge.isNull()) + return lmerge; + + QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, + Unqualified); + if (!rmerge.isNull()) + return rmerge; + + return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); +} + +QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, + bool OfBlockPointer, + bool Unqualified) { + const FunctionType *lbase = lhs->getAs<FunctionType>(); + const FunctionType *rbase = rhs->getAs<FunctionType>(); + const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase); + const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase); + bool allLTypes = true; + bool allRTypes = true; + + // Check return type + QualType retType; + if (OfBlockPointer) { + QualType RHS = rbase->getResultType(); + QualType LHS = lbase->getResultType(); + bool UnqualifiedResult = Unqualified; + if (!UnqualifiedResult) + UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); + retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); + } + else + retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false, + Unqualified); + if (retType.isNull()) return QualType(); + + if (Unqualified) + retType = retType.getUnqualifiedType(); + + CanQualType LRetType = getCanonicalType(lbase->getResultType()); + CanQualType RRetType = 
getCanonicalType(rbase->getResultType()); + if (Unqualified) { + LRetType = LRetType.getUnqualifiedType(); + RRetType = RRetType.getUnqualifiedType(); + } + + if (getCanonicalType(retType) != LRetType) + allLTypes = false; + if (getCanonicalType(retType) != RRetType) + allRTypes = false; + + // FIXME: double check this + // FIXME: should we error if lbase->getRegParmAttr() != 0 && + // rbase->getRegParmAttr() != 0 && + // lbase->getRegParmAttr() != rbase->getRegParmAttr()? + FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); + FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); + + // Compatible functions must have compatible calling conventions + if (lbaseInfo.getCC() != rbaseInfo.getCC()) + return QualType(); + + // Regparm is part of the calling convention. + if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) + return QualType(); + if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) + return QualType(); + + if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) + return QualType(); + + // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); + + if (lbaseInfo.getNoReturn() != NoReturn) + allLTypes = false; + if (rbaseInfo.getNoReturn() != NoReturn) + allRTypes = false; + + FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); + + if (lproto && rproto) { // two C99 style function prototypes + assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() && + "C++ shouldn't be here"); + unsigned lproto_nargs = lproto->getNumArgs(); + unsigned rproto_nargs = rproto->getNumArgs(); + + // Compatible functions must have the same number of arguments + if (lproto_nargs != rproto_nargs) + return QualType(); + + // Variadic and non-variadic functions aren't compatible + if (lproto->isVariadic() != rproto->isVariadic()) + return QualType(); + + if (lproto->getTypeQuals() != rproto->getTypeQuals()) + return QualType(); + + if (LangOpts.ObjCAutoRefCount && + !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto)) + return QualType(); + + // Check argument compatibility + SmallVector<QualType, 10> types; + for (unsigned i = 0; i < lproto_nargs; i++) { + QualType largtype = lproto->getArgType(i).getUnqualifiedType(); + QualType rargtype = rproto->getArgType(i).getUnqualifiedType(); + QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype, + OfBlockPointer, + Unqualified); + if (argtype.isNull()) return QualType(); + + if (Unqualified) + argtype = argtype.getUnqualifiedType(); + + types.push_back(argtype); + if (Unqualified) { + largtype = largtype.getUnqualifiedType(); + rargtype = rargtype.getUnqualifiedType(); + } + + if (getCanonicalType(argtype) != getCanonicalType(largtype)) + allLTypes = false; + if (getCanonicalType(argtype) != getCanonicalType(rargtype)) + allRTypes = false; + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + + FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); + EPI.ExtInfo = einfo; + return getFunctionType(retType, types, EPI); + } + + if (lproto) allRTypes = false; + if 
(rproto) allLTypes = false; + + const FunctionProtoType *proto = lproto ? lproto : rproto; + if (proto) { + assert(!proto->hasExceptionSpec() && "C++ shouldn't be here"); + if (proto->isVariadic()) return QualType(); + // Check that the types are compatible with the types that + // would result from default argument promotions (C99 6.7.5.3p15). + // The only types actually affected are promotable integer + // types and floats, which would be passed as a different + // type depending on whether the prototype is visible. + unsigned proto_nargs = proto->getNumArgs(); + for (unsigned i = 0; i < proto_nargs; ++i) { + QualType argTy = proto->getArgType(i); + + // Look at the converted type of enum types, since that is the type used + // to pass enum values. + if (const EnumType *Enum = argTy->getAs<EnumType>()) { + argTy = Enum->getDecl()->getIntegerType(); + if (argTy.isNull()) + return QualType(); + } + + if (argTy->isPromotableIntegerType() || + getCanonicalType(argTy).getUnqualifiedType() == FloatTy) + return QualType(); + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + + FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); + EPI.ExtInfo = einfo; + return getFunctionType(retType, proto->getArgTypes(), EPI); + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + return getFunctionNoProtoType(retType, einfo); +} + +/// Given that we have an enum type and a non-enum type, try to merge them. +static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, + QualType other, bool isBlockReturnType) { + // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, + // a signed integer type, or an unsigned integer type. + // Compatibility is based on the underlying type, not the promotion + // type. 
+ QualType underlyingType = ET->getDecl()->getIntegerType(); + if (underlyingType.isNull()) return QualType(); + if (Context.hasSameType(underlyingType, other)) + return other; + + // In block return types, we're more permissive and accept any + // integral type of the same size. + if (isBlockReturnType && other->isIntegerType() && + Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) + return other; + + return QualType(); +} + +QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, + bool OfBlockPointer, + bool Unqualified, bool BlockReturnType) { + // C++ [expr]: If an expression initially has the type "reference to T", the + // type is adjusted to "T" prior to any further analysis, the expression + // designates the object or function denoted by the reference, and the + // expression is an lvalue unless the reference is an rvalue reference and + // the expression is a function call (possibly inside parentheses). + assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?"); + assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?"); + + if (Unqualified) { + LHS = LHS.getUnqualifiedType(); + RHS = RHS.getUnqualifiedType(); + } + + QualType LHSCan = getCanonicalType(LHS), + RHSCan = getCanonicalType(RHS); + + // If two types are identical, they are compatible. + if (LHSCan == RHSCan) + return LHS; + + // If the qualifiers are different, the types aren't compatible... mostly. + Qualifiers LQuals = LHSCan.getLocalQualifiers(); + Qualifiers RQuals = RHSCan.getLocalQualifiers(); + if (LQuals != RQuals) { + // If any of these qualifiers are different, we have a type + // mismatch. 
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || + LQuals.getAddressSpace() != RQuals.getAddressSpace() || + LQuals.getObjCLifetime() != RQuals.getObjCLifetime()) + return QualType(); + + // Exactly one GC qualifier difference is allowed: __strong is + // okay if the other type has no GC qualifier but is an Objective + // C object pointer (i.e. implicitly strong by default). We fix + // this by pretending that the unqualified type was actually + // qualified __strong. + Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); + Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); + assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); + + if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) + return QualType(); + + if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { + return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); + } + if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { + return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); + } + return QualType(); + } + + // Okay, qualifiers are equal. + + Type::TypeClass LHSClass = LHSCan->getTypeClass(); + Type::TypeClass RHSClass = RHSCan->getTypeClass(); + + // We want to consider the two function types to be the same for these + // comparisons, just force one to the other. + if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; + if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; + + // Same as above for arrays + if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) + LHSClass = Type::ConstantArray; + if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) + RHSClass = Type::ConstantArray; + + // ObjCInterfaces are just specialized ObjCObjects. + if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; + if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; + + // Canonicalize ExtVector -> Vector. 
+ if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; + if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; + + // If the canonical type classes don't match. + if (LHSClass != RHSClass) { + // Note that we only have special rules for turning block enum + // returns into block int returns, not vice-versa. + if (const EnumType* ETy = LHS->getAs<EnumType>()) { + return mergeEnumWithInteger(*this, ETy, RHS, false); + } + if (const EnumType* ETy = RHS->getAs<EnumType>()) { + return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); + } + // allow block pointer type to match an 'id' type. + if (OfBlockPointer && !BlockReturnType) { + if (LHS->isObjCIdType() && RHS->isBlockPointerType()) + return LHS; + if (RHS->isObjCIdType() && LHS->isBlockPointerType()) + return RHS; + } + + return QualType(); + } + + // The canonical type classes match. + switch (LHSClass) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.def" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::Auto: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + llvm_unreachable("C++ should never be in mergeTypes"); + + case Type::ObjCInterface: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::FunctionProto: + case Type::ExtVector: + llvm_unreachable("Types are eliminated above"); + + case Type::Pointer: + { + // Merge two pointer types, while trying to preserve typedef info + QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType(); + QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType(); + if (Unqualified) { + LHSPointee = LHSPointee.getUnqualifiedType(); + RHSPointee = RHSPointee.getUnqualifiedType(); + } + QualType ResultType = 
mergeTypes(LHSPointee, RHSPointee, false, + Unqualified); + if (ResultType.isNull()) return QualType(); + if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) + return RHS; + return getPointerType(ResultType); + } + case Type::BlockPointer: + { + // Merge two block pointer types, while trying to preserve typedef info + QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType(); + QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType(); + if (Unqualified) { + LHSPointee = LHSPointee.getUnqualifiedType(); + RHSPointee = RHSPointee.getUnqualifiedType(); + } + QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, + Unqualified); + if (ResultType.isNull()) return QualType(); + if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) + return RHS; + return getBlockPointerType(ResultType); + } + case Type::Atomic: + { + // Merge two pointer types, while trying to preserve typedef info + QualType LHSValue = LHS->getAs<AtomicType>()->getValueType(); + QualType RHSValue = RHS->getAs<AtomicType>()->getValueType(); + if (Unqualified) { + LHSValue = LHSValue.getUnqualifiedType(); + RHSValue = RHSValue.getUnqualifiedType(); + } + QualType ResultType = mergeTypes(LHSValue, RHSValue, false, + Unqualified); + if (ResultType.isNull()) return QualType(); + if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) + return RHS; + return getAtomicType(ResultType); + } + case Type::ConstantArray: + { + const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); + const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); + if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) + return QualType(); + + QualType LHSElem = getAsArrayType(LHS)->getElementType(); + QualType 
RHSElem = getAsArrayType(RHS)->getElementType(); + if (Unqualified) { + LHSElem = LHSElem.getUnqualifiedType(); + RHSElem = RHSElem.getUnqualifiedType(); + } + + QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); + if (ResultType.isNull()) return QualType(); + if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) + return LHS; + if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) + return RHS; + if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(), + ArrayType::ArraySizeModifier(), 0); + if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(), + ArrayType::ArraySizeModifier(), 0); + const VariableArrayType* LVAT = getAsVariableArrayType(LHS); + const VariableArrayType* RVAT = getAsVariableArrayType(RHS); + if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) + return LHS; + if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) + return RHS; + if (LVAT) { + // FIXME: This isn't correct! But tricky to implement because + // the array's size has to be the size of LHS, but the type + // has to be different. + return LHS; + } + if (RVAT) { + // FIXME: This isn't correct! But tricky to implement because + // the array's size has to be the size of RHS, but the type + // has to be different. + return RHS; + } + if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; + if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; + return getIncompleteArrayType(ResultType, + ArrayType::ArraySizeModifier(), 0); + } + case Type::FunctionNoProto: + return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); + case Type::Record: + case Type::Enum: + return QualType(); + case Type::Builtin: + // Only exactly equal builtin types are compatible, which is tested above. + return QualType(); + case Type::Complex: + // Distinct complex types are incompatible. 
+ return QualType(); + case Type::Vector: + // FIXME: The merged type should be an ExtVector! + if (areCompatVectorTypes(LHSCan->getAs<VectorType>(), + RHSCan->getAs<VectorType>())) + return LHS; + return QualType(); + case Type::ObjCObject: { + // Check if the types are assignment compatible. + // FIXME: This should be type compatibility, e.g. whether + // "LHS x; RHS x;" at global scope is legal. + const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>(); + const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>(); + if (canAssignObjCInterfaces(LHSIface, RHSIface)) + return LHS; + + return QualType(); + } + case Type::ObjCObjectPointer: { + if (OfBlockPointer) { + if (canAssignObjCInterfacesInBlockPointer( + LHS->getAs<ObjCObjectPointerType>(), + RHS->getAs<ObjCObjectPointerType>(), + BlockReturnType)) + return LHS; + return QualType(); + } + if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(), + RHS->getAs<ObjCObjectPointerType>())) + return LHS; + + return QualType(); + } + } + + llvm_unreachable("Invalid Type::Class!"); +} + +bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs( + const FunctionProtoType *FromFunctionType, + const FunctionProtoType *ToFunctionType) { + if (FromFunctionType->hasAnyConsumedArgs() != + ToFunctionType->hasAnyConsumedArgs()) + return false; + FunctionProtoType::ExtProtoInfo FromEPI = + FromFunctionType->getExtProtoInfo(); + FunctionProtoType::ExtProtoInfo ToEPI = + ToFunctionType->getExtProtoInfo(); + if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments) + for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs(); + ArgIdx != NumArgs; ++ArgIdx) { + if (FromEPI.ConsumedArguments[ArgIdx] != + ToEPI.ConsumedArguments[ArgIdx]) + return false; + } + return true; +} + +/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and +/// 'RHS' attributes and returns the merged version; including for function +/// return types. 
+QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { + QualType LHSCan = getCanonicalType(LHS), + RHSCan = getCanonicalType(RHS); + // If two types are identical, they are compatible. + if (LHSCan == RHSCan) + return LHS; + if (RHSCan->isFunctionType()) { + if (!LHSCan->isFunctionType()) + return QualType(); + QualType OldReturnType = + cast<FunctionType>(RHSCan.getTypePtr())->getResultType(); + QualType NewReturnType = + cast<FunctionType>(LHSCan.getTypePtr())->getResultType(); + QualType ResReturnType = + mergeObjCGCQualifiers(NewReturnType, OldReturnType); + if (ResReturnType.isNull()) + return QualType(); + if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { + // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); + // In either case, use OldReturnType to build the new function type. + const FunctionType *F = LHS->getAs<FunctionType>(); + if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) { + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + EPI.ExtInfo = getFunctionExtInfo(LHS); + QualType ResultType = + getFunctionType(OldReturnType, FPT->getArgTypes(), EPI); + return ResultType; + } + } + return QualType(); + } + + // If the qualifiers are different, the types can still be merged. + Qualifiers LQuals = LHSCan.getLocalQualifiers(); + Qualifiers RQuals = RHSCan.getLocalQualifiers(); + if (LQuals != RQuals) { + // If any of these qualifiers are different, we have a type mismatch. + if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || + LQuals.getAddressSpace() != RQuals.getAddressSpace()) + return QualType(); + + // Exactly one GC qualifier difference is allowed: __strong is + // okay if the other type has no GC qualifier but is an Objective + // C object pointer (i.e. implicitly strong by default). We fix + // this by pretending that the unqualified type was actually + // qualified __strong. 
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); + Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); + assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); + + if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) + return QualType(); + + if (GC_L == Qualifiers::Strong) + return LHS; + if (GC_R == Qualifiers::Strong) + return RHS; + return QualType(); + } + + if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { + QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType(); + QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType(); + QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); + if (ResQT == LHSBaseQT) + return LHS; + if (ResQT == RHSBaseQT) + return RHS; + } + return QualType(); +} + +//===----------------------------------------------------------------------===// +// Integer Predicates +//===----------------------------------------------------------------------===// + +unsigned ASTContext::getIntWidth(QualType T) const { + if (const EnumType *ET = T->getAs<EnumType>()) + T = ET->getDecl()->getIntegerType(); + if (T->isBooleanType()) + return 1; + // For builtin types, just use the standard type sizing method + return (unsigned)getTypeSize(T); +} + +QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { + assert(T->hasSignedIntegerRepresentation() && "Unexpected type"); + + // Turn <4 x signed int> -> <4 x unsigned int> + if (const VectorType *VTy = T->getAs<VectorType>()) + return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), + VTy->getNumElements(), VTy->getVectorKind()); + + // For enums, we return the unsigned version of the base type. 
+ if (const EnumType *ETy = T->getAs<EnumType>()) + T = ETy->getDecl()->getIntegerType(); + + const BuiltinType *BTy = T->getAs<BuiltinType>(); + assert(BTy && "Unexpected signed integer type"); + switch (BTy->getKind()) { + case BuiltinType::Char_S: + case BuiltinType::SChar: + return UnsignedCharTy; + case BuiltinType::Short: + return UnsignedShortTy; + case BuiltinType::Int: + return UnsignedIntTy; + case BuiltinType::Long: + return UnsignedLongTy; + case BuiltinType::LongLong: + return UnsignedLongLongTy; + case BuiltinType::Int128: + return UnsignedInt128Ty; + default: + llvm_unreachable("Unexpected signed integer type"); + } +} + +ASTMutationListener::~ASTMutationListener() { } + +void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, + QualType ReturnType) {} + +//===----------------------------------------------------------------------===// +// Builtin Type Computation +//===----------------------------------------------------------------------===// + +/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the +/// pointer over the consumed characters. This returns the resultant type. If +/// AllowTypeModifiers is false then modifier like * are not parsed, just basic +/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of +/// a vector of "i*". +/// +/// RequiresICE is filled in on return to indicate whether the value is required +/// to be an Integer Constant Expression. +static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, + ASTContext::GetBuiltinTypeError &Error, + bool &RequiresICE, + bool AllowTypeModifiers) { + // Modifiers. + int HowLong = 0; + bool Signed = false, Unsigned = false; + RequiresICE = false; + + // Read the prefixed modifiers first. 
+ bool Done = false; + while (!Done) { + switch (*Str++) { + default: Done = true; --Str; break; + case 'I': + RequiresICE = true; + break; + case 'S': + assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); + assert(!Signed && "Can't use 'S' modifier multiple times!"); + Signed = true; + break; + case 'U': + assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); + assert(!Unsigned && "Can't use 'S' modifier multiple times!"); + Unsigned = true; + break; + case 'L': + assert(HowLong <= 2 && "Can't have LLLL modifier"); + ++HowLong; + break; + } + } + + QualType Type; + + // Read the base type. + switch (*Str++) { + default: llvm_unreachable("Unknown builtin type letter!"); + case 'v': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'v'!"); + Type = Context.VoidTy; + break; + case 'h': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'f'!"); + Type = Context.HalfTy; + break; + case 'f': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'f'!"); + Type = Context.FloatTy; + break; + case 'd': + assert(HowLong < 2 && !Signed && !Unsigned && + "Bad modifiers used with 'd'!"); + if (HowLong) + Type = Context.LongDoubleTy; + else + Type = Context.DoubleTy; + break; + case 's': + assert(HowLong == 0 && "Bad modifiers used with 's'!"); + if (Unsigned) + Type = Context.UnsignedShortTy; + else + Type = Context.ShortTy; + break; + case 'i': + if (HowLong == 3) + Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; + else if (HowLong == 2) + Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; + else if (HowLong == 1) + Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; + else + Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; + break; + case 'c': + assert(HowLong == 0 && "Bad modifiers used with 'c'!"); + if (Signed) + Type = Context.SignedCharTy; + else if (Unsigned) + Type = Context.UnsignedCharTy; + else + Type = Context.CharTy; + break; + case 'b': // boolean + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); + Type = Context.BoolTy; + break; + case 'z': // size_t. + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); + Type = Context.getSizeType(); + break; + case 'F': + Type = Context.getCFConstantStringType(); + break; + case 'G': + Type = Context.getObjCIdType(); + break; + case 'H': + Type = Context.getObjCSelType(); + break; + case 'M': + Type = Context.getObjCSuperType(); + break; + case 'a': + Type = Context.getBuiltinVaListType(); + assert(!Type.isNull() && "builtin va list type not initialized!"); + break; + case 'A': + // This is a "reference" to a va_list; however, what exactly + // this means depends on how va_list is defined. There are two + // different kinds of va_list: ones passed by value, and ones + // passed by reference. An example of a by-value va_list is + // x86, where va_list is a char*. An example of by-ref va_list + // is x86-64, where va_list is a __va_list_tag[1]. For x86, + // we want this argument to be a char*&; for x86-64, we want + // it to be a __va_list_tag*. + Type = Context.getBuiltinVaListType(); + assert(!Type.isNull() && "builtin va list type not initialized!"); + if (Type->isArrayType()) + Type = Context.getArrayDecayedType(Type); + else + Type = Context.getLValueReferenceType(Type); + break; + case 'V': { + char *End; + unsigned NumElements = strtoul(Str, &End, 10); + assert(End != Str && "Missing vector size"); + Str = End; + + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, + RequiresICE, false); + assert(!RequiresICE && "Can't require vector ICE"); + + // TODO: No way to make AltiVec vectors in builtins yet. 
+ Type = Context.getVectorType(ElementType, NumElements, + VectorType::GenericVector); + break; + } + case 'E': { + char *End; + + unsigned NumElements = strtoul(Str, &End, 10); + assert(End != Str && "Missing vector size"); + + Str = End; + + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, + false); + Type = Context.getExtVectorType(ElementType, NumElements); + break; + } + case 'X': { + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, + false); + assert(!RequiresICE && "Can't require complex ICE"); + Type = Context.getComplexType(ElementType); + break; + } + case 'Y' : { + Type = Context.getPointerDiffType(); + break; + } + case 'P': + Type = Context.getFILEType(); + if (Type.isNull()) { + Error = ASTContext::GE_Missing_stdio; + return QualType(); + } + break; + case 'J': + if (Signed) + Type = Context.getsigjmp_bufType(); + else + Type = Context.getjmp_bufType(); + + if (Type.isNull()) { + Error = ASTContext::GE_Missing_setjmp; + return QualType(); + } + break; + case 'K': + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); + Type = Context.getucontext_tType(); + + if (Type.isNull()) { + Error = ASTContext::GE_Missing_ucontext; + return QualType(); + } + break; + case 'p': + Type = Context.getProcessIDType(); + break; + } + + // If there are modifiers and if we're allowed to parse them, go for it. + Done = !AllowTypeModifiers; + while (!Done) { + switch (char c = *Str++) { + default: Done = true; --Str; break; + case '*': + case '&': { + // Both pointers and references can have their pointee types + // qualified with an address space. 
+ char *End; + unsigned AddrSpace = strtoul(Str, &End, 10); + if (End != Str && AddrSpace != 0) { + Type = Context.getAddrSpaceQualType(Type, AddrSpace); + Str = End; + } + if (c == '*') + Type = Context.getPointerType(Type); + else + Type = Context.getLValueReferenceType(Type); + break; + } + // FIXME: There's no way to have a built-in with an rvalue ref arg. + case 'C': + Type = Type.withConst(); + break; + case 'D': + Type = Context.getVolatileType(Type); + break; + case 'R': + Type = Type.withRestrict(); + break; + } + } + + assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && + "Integer constant 'I' type must be an integer"); + + return Type; +} + +/// GetBuiltinType - Return the type for the specified builtin. +QualType ASTContext::GetBuiltinType(unsigned Id, + GetBuiltinTypeError &Error, + unsigned *IntegerConstantArgs) const { + const char *TypeStr = BuiltinInfo.GetTypeString(Id); + + SmallVector<QualType, 8> ArgTypes; + + bool RequiresICE = false; + Error = GE_None; + QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, + RequiresICE, true); + if (Error != GE_None) + return QualType(); + + assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); + + while (TypeStr[0] && TypeStr[0] != '.') { + QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); + if (Error != GE_None) + return QualType(); + + // If this argument is required to be an IntegerConstantExpression and the + // caller cares, fill in the bitmask we return. + if (RequiresICE && IntegerConstantArgs) + *IntegerConstantArgs |= 1 << ArgTypes.size(); + + // Do array -> pointer decay. The builtin should use the decayed type. + if (Ty->isArrayType()) + Ty = getArrayDecayedType(Ty); + + ArgTypes.push_back(Ty); + } + + assert((TypeStr[0] != '.' || TypeStr[1] == 0) && + "'.' 
should only occur at end of builtin type list!"); + + FunctionType::ExtInfo EI(CC_C); + if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); + + bool Variadic = (TypeStr[0] == '.'); + + // We really shouldn't be making a no-proto type here, especially in C++. + if (ArgTypes.empty() && Variadic) + return getFunctionNoProtoType(ResType, EI); + + FunctionProtoType::ExtProtoInfo EPI; + EPI.ExtInfo = EI; + EPI.Variadic = Variadic; + + return getFunctionType(ResType, ArgTypes, EPI); +} + +GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) { + if (!FD->isExternallyVisible()) + return GVA_Internal; + + GVALinkage External = GVA_StrongExternal; + switch (FD->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + External = GVA_StrongExternal; + break; + + case TSK_ExplicitInstantiationDefinition: + return GVA_ExplicitTemplateInstantiation; + + case TSK_ExplicitInstantiationDeclaration: + case TSK_ImplicitInstantiation: + External = GVA_TemplateInstantiation; + break; + } + + if (!FD->isInlined()) + return External; + + if ((!getLangOpts().CPlusPlus && !getLangOpts().MicrosoftMode) || + FD->hasAttr<GNUInlineAttr>()) { + // GNU or C99 inline semantics. Determine whether this symbol should be + // externally visible. + if (FD->isInlineDefinitionExternallyVisible()) + return External; + + // C99 inline semantics, where the symbol is not externally visible. + return GVA_C99Inline; + } + + // C++0x [temp.explicit]p9: + // [ Note: The intent is that an inline function that is the subject of + // an explicit instantiation declaration will still be implicitly + // instantiated when used so that the body can be considered for + // inlining, but that no out-of-line copy of the inline function would be + // generated in the translation unit. 
-- end note ] + if (FD->getTemplateSpecializationKind() + == TSK_ExplicitInstantiationDeclaration) + return GVA_C99Inline; + + return GVA_CXXInline; +} + +GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { + if (!VD->isExternallyVisible()) + return GVA_Internal; + + switch (VD->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + return GVA_StrongExternal; + + case TSK_ExplicitInstantiationDeclaration: + llvm_unreachable("Variable should not be instantiated"); + // Fall through to treat this like any other instantiation. + + case TSK_ExplicitInstantiationDefinition: + return GVA_ExplicitTemplateInstantiation; + + case TSK_ImplicitInstantiation: + return GVA_TemplateInstantiation; + } + + llvm_unreachable("Invalid Linkage!"); +} + +bool ASTContext::DeclMustBeEmitted(const Decl *D) { + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { + if (!VD->isFileVarDecl()) + return false; + } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + // We never need to emit an uninstantiated function template. + if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) + return false; + } else + return false; + + // If this is a member of a class template, we do not need to emit it. + if (D->getDeclContext()->isDependentContext()) + return false; + + // Weak references don't produce any output by themselves. + if (D->hasAttr<WeakRefAttr>()) + return false; + + // Aliases and used decls are required. + if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) + return true; + + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + // Forward declarations aren't required. + if (!FD->doesThisDeclarationHaveABody()) + return FD->doesDeclarationForceExternallyVisibleDefinition(); + + // Constructors and destructors are required. + if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) + return true; + + // The key function for a class is required. 
This rule only comes + // into play when inline functions can be key functions, though. + if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { + const CXXRecordDecl *RD = MD->getParent(); + if (MD->isOutOfLine() && RD->isDynamicClass()) { + const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); + if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) + return true; + } + } + } + + GVALinkage Linkage = GetGVALinkageForFunction(FD); + + // static, static inline, always_inline, and extern inline functions can + // always be deferred. Normal inline functions can be deferred in C99/C++. + // Implicit template instantiations can also be deferred in C++. + if (Linkage == GVA_Internal || Linkage == GVA_C99Inline || + Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) + return false; + return true; + } + + const VarDecl *VD = cast<VarDecl>(D); + assert(VD->isFileVarDecl() && "Expected file scoped var"); + + if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly) + return false; + + // Variables that can be needed in other TUs are required. + GVALinkage L = GetGVALinkageForVariable(VD); + if (L != GVA_Internal && L != GVA_TemplateInstantiation) + return true; + + // Variables that have destruction with side-effects are required. + if (VD->getType().isDestructedType()) + return true; + + // Variables that have initialization with side-effects are required. + if (VD->getInit() && VD->getInit()->HasSideEffects(*this)) + return true; + + return false; +} + +CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, + bool IsCXXMethod) const { + // Pass through to the C++ ABI object + if (IsCXXMethod) + return ABI->getDefaultMethodCallConv(IsVariadic); + + return (LangOpts.MRTD && !IsVariadic) ? 
CC_X86StdCall : CC_C; +} + +bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { + // Pass through to the C++ ABI object + return ABI->isNearlyEmpty(RD); +} + +MangleContext *ASTContext::createMangleContext() { + switch (Target->getCXXABI().getKind()) { + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::GenericItanium: + case TargetCXXABI::GenericARM: + case TargetCXXABI::iOS: + return ItaniumMangleContext::create(*this, getDiagnostics()); + case TargetCXXABI::Microsoft: + return MicrosoftMangleContext::create(*this, getDiagnostics()); + } + llvm_unreachable("Unsupported ABI"); +} + +CXXABI::~CXXABI() {} + +size_t ASTContext::getSideTableAllocatedMemory() const { + return ASTRecordLayouts.getMemorySize() + + llvm::capacity_in_bytes(ObjCLayouts) + + llvm::capacity_in_bytes(KeyFunctions) + + llvm::capacity_in_bytes(ObjCImpls) + + llvm::capacity_in_bytes(BlockVarCopyInits) + + llvm::capacity_in_bytes(DeclAttrs) + + llvm::capacity_in_bytes(TemplateOrInstantiation) + + llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + + llvm::capacity_in_bytes(OverriddenMethods) + + llvm::capacity_in_bytes(Types) + + llvm::capacity_in_bytes(VariableArrayTypes) + + llvm::capacity_in_bytes(ClassScopeSpecializationPattern); +} + +/// getIntTypeForBitwidth - +/// sets integer QualTy according to specified details: +/// bitwidth, signed/unsigned. +/// Returns empty type if there is no appropriate target types. +QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, + unsigned Signed) const { + TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); + CanQualType QualTy = getFromTargetType(Ty); + if (!QualTy && DestWidth == 128) + return Signed ? Int128Ty : UnsignedInt128Ty; + return QualTy; +} + +/// getRealTypeForBitwidth - +/// sets floating point QualTy according to specified bitwidth. 
+/// Returns empty type if there is no appropriate target types. +QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const { + TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth); + switch (Ty) { + case TargetInfo::Float: + return FloatTy; + case TargetInfo::Double: + return DoubleTy; + case TargetInfo::LongDouble: + return LongDoubleTy; + case TargetInfo::NoFloat: + return QualType(); + } + + llvm_unreachable("Unhandled TargetInfo::RealType value"); +} + +void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { + if (Number > 1) + MangleNumbers[ND] = Number; +} + +unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const { + llvm::DenseMap<const NamedDecl *, unsigned>::const_iterator I = + MangleNumbers.find(ND); + return I != MangleNumbers.end() ? I->second : 1; +} + +MangleNumberingContext & +ASTContext::getManglingNumberContext(const DeclContext *DC) { + assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
+ MangleNumberingContext *&MCtx = MangleNumberingContexts[DC]; + if (!MCtx) + MCtx = createMangleNumberingContext(); + return *MCtx; +} + +MangleNumberingContext *ASTContext::createMangleNumberingContext() const { + return ABI->createMangleNumberingContext(); +} + +void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { + ParamIndices[D] = index; +} + +unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { + ParameterIndexTable::const_iterator I = ParamIndices.find(D); + assert(I != ParamIndices.end() && + "ParmIndices lacks entry set by ParmVarDecl"); + return I->second; +} + +APValue * +ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E, + bool MayCreate) { + assert(E && E->getStorageDuration() == SD_Static && + "don't need to cache the computed value for this temporary"); + if (MayCreate) + return &MaterializedTemporaryValues[E]; + + llvm::DenseMap<const MaterializeTemporaryExpr *, APValue>::iterator I = + MaterializedTemporaryValues.find(E); + return I == MaterializedTemporaryValues.end() ? 0 : &I->second; +} + +bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { + const llvm::Triple &T = getTargetInfo().getTriple(); + if (!T.isOSDarwin()) + return false; + + if (!(T.isiOS() && T.isOSVersionLT(7)) && + !(T.isMacOSX() && T.isOSVersionLT(10, 9))) + return false; + + QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); + CharUnits sizeChars = getTypeSizeInChars(AtomicTy); + uint64_t Size = sizeChars.getQuantity(); + CharUnits alignChars = getTypeAlignInChars(AtomicTy); + unsigned Align = alignChars.getQuantity(); + unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); + return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); +} + +namespace { + + /// \brief A \c RecursiveASTVisitor that builds a map from nodes to their + /// parents as defined by the \c RecursiveASTVisitor. 
+ /// + /// Note that the relationship described here is purely in terms of AST + /// traversal - there are other relationships (for example declaration context) + /// in the AST that are better modeled by special matchers. + /// + /// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes. + class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> { + + public: + /// \brief Builds and returns the translation unit's parent map. + /// + /// The caller takes ownership of the returned \c ParentMap. + static ASTContext::ParentMap *buildMap(TranslationUnitDecl &TU) { + ParentMapASTVisitor Visitor(new ASTContext::ParentMap); + Visitor.TraverseDecl(&TU); + return Visitor.Parents; + } + + private: + typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase; + + ParentMapASTVisitor(ASTContext::ParentMap *Parents) : Parents(Parents) { + } + + bool shouldVisitTemplateInstantiations() const { + return true; + } + bool shouldVisitImplicitCode() const { + return true; + } + // Disables data recursion. We intercept Traverse* methods in the RAV, which + // are not triggered during data recursion. + bool shouldUseDataRecursionFor(clang::Stmt *S) const { + return false; + } + + template <typename T> + bool TraverseNode(T *Node, bool(VisitorBase:: *traverse) (T *)) { + if (Node == NULL) + return true; + if (ParentStack.size() > 0) + // FIXME: Currently we add the same parent multiple times, for example + // when we visit all subexpressions of template instantiations; this is + // suboptimal, bug benign: the only way to visit those is with + // hasAncestor / hasParent, and those do not create new matches. + // The plan is to enable DynTypedNode to be storable in a map or hash + // map. The main problem there is to implement hash functions / + // comparison operators for all types that DynTypedNode supports that + // do not have pointer identity. 
+ (*Parents)[Node].push_back(ParentStack.back()); + ParentStack.push_back(ast_type_traits::DynTypedNode::create(*Node)); + bool Result = (this ->* traverse) (Node); + ParentStack.pop_back(); + return Result; + } + + bool TraverseDecl(Decl *DeclNode) { + return TraverseNode(DeclNode, &VisitorBase::TraverseDecl); + } + + bool TraverseStmt(Stmt *StmtNode) { + return TraverseNode(StmtNode, &VisitorBase::TraverseStmt); + } + + ASTContext::ParentMap *Parents; + llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack; + + friend class RecursiveASTVisitor<ParentMapASTVisitor>; + }; + +} // end namespace + +ASTContext::ParentVector +ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) { + assert(Node.getMemoizationData() && + "Invariant broken: only nodes that support memoization may be " + "used in the parent map."); + if (!AllParents) { + // We always need to run over the whole translation unit, as + // hasAncestor can escape any subtree. + AllParents.reset( + ParentMapASTVisitor::buildMap(*getTranslationUnitDecl())); + } + ParentMap::const_iterator I = AllParents->find(Node.getMemoizationData()); + if (I == AllParents->end()) { + return ParentVector(); + } + return I->second; +} + +bool +ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, + const ObjCMethodDecl *MethodImpl) { + // No point trying to match an unavailable/deprecated mothod. 
+ if (MethodDecl->hasAttr<UnavailableAttr>() + || MethodDecl->hasAttr<DeprecatedAttr>()) + return false; + if (MethodDecl->getObjCDeclQualifier() != + MethodImpl->getObjCDeclQualifier()) + return false; + if (!hasSameType(MethodDecl->getResultType(), + MethodImpl->getResultType())) + return false; + + if (MethodDecl->param_size() != MethodImpl->param_size()) + return false; + + for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), + IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), + EF = MethodDecl->param_end(); + IM != EM && IF != EF; ++IM, ++IF) { + const ParmVarDecl *DeclVar = (*IF); + const ParmVarDecl *ImplVar = (*IM); + if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) + return false; + if (!hasSameType(DeclVar->getType(), ImplVar->getType())) + return false; + } + return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); + +} diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp new file mode 100644 index 000000000000..fce8f64b3328 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp @@ -0,0 +1,1698 @@ +//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a diagnostic formatting hook for AST elements. 
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/ASTDiagnostic.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/TemplateBase.h" +#include "clang/AST/Type.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; + +// Returns a desugared version of the QualType, and marks ShouldAKA as true +// whenever we remove significant sugar from the type. +static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { + QualifierCollector QC; + + while (true) { + const Type *Ty = QC.strip(QT); + + // Don't aka just because we saw an elaborated type... + if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) { + QT = ET->desugar(); + continue; + } + // ... or a paren type ... + if (const ParenType *PT = dyn_cast<ParenType>(Ty)) { + QT = PT->desugar(); + continue; + } + // ...or a substituted template type parameter ... + if (const SubstTemplateTypeParmType *ST = + dyn_cast<SubstTemplateTypeParmType>(Ty)) { + QT = ST->desugar(); + continue; + } + // ...or an attributed type... + if (const AttributedType *AT = dyn_cast<AttributedType>(Ty)) { + QT = AT->desugar(); + continue; + } + // ... or an auto type. + if (const AutoType *AT = dyn_cast<AutoType>(Ty)) { + if (!AT->isSugared()) + break; + QT = AT->desugar(); + continue; + } + + // Don't desugar template specializations, unless it's an alias template. + if (const TemplateSpecializationType *TST + = dyn_cast<TemplateSpecializationType>(Ty)) + if (!TST->isTypeAlias()) + break; + + // Don't desugar magic Objective-C types. + if (QualType(Ty,0) == Context.getObjCIdType() || + QualType(Ty,0) == Context.getObjCClassType() || + QualType(Ty,0) == Context.getObjCSelType() || + QualType(Ty,0) == Context.getObjCProtoType()) + break; + + // Don't desugar va_list. 
+ if (QualType(Ty,0) == Context.getBuiltinVaListType()) + break; + + // Otherwise, do a single-step desugar. + QualType Underlying; + bool IsSugar = false; + switch (Ty->getTypeClass()) { +#define ABSTRACT_TYPE(Class, Base) +#define TYPE(Class, Base) \ +case Type::Class: { \ +const Class##Type *CTy = cast<Class##Type>(Ty); \ +if (CTy->isSugared()) { \ +IsSugar = true; \ +Underlying = CTy->desugar(); \ +} \ +break; \ +} +#include "clang/AST/TypeNodes.def" + } + + // If it wasn't sugared, we're done. + if (!IsSugar) + break; + + // If the desugared type is a vector type, we don't want to expand + // it, it will turn into an attribute mess. People want their "vec4". + if (isa<VectorType>(Underlying)) + break; + + // Don't desugar through the primary typedef of an anonymous type. + if (const TagType *UTT = Underlying->getAs<TagType>()) + if (const TypedefType *QTT = dyn_cast<TypedefType>(QT)) + if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl()) + break; + + // Record that we actually looked through an opaque type here. + ShouldAKA = true; + QT = Underlying; + } + + // If we have a pointer-like type, desugar the pointee as well. + // FIXME: Handle other pointer-like types. + if (const PointerType *Ty = QT->getAs<PointerType>()) { + QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(), + ShouldAKA)); + } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) { + QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(), + ShouldAKA)); + } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) { + QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(), + ShouldAKA)); + } + + return QC.apply(Context, QT); +} + +/// \brief Convert the given type to a string suitable for printing as part of +/// a diagnostic. +/// +/// There are four main criteria when determining whether we should have an +/// a.k.a. 
clause when pretty-printing a type: +/// +/// 1) Some types provide very minimal sugar that doesn't impede the +/// user's understanding --- for example, elaborated type +/// specifiers. If this is all the sugar we see, we don't want an +/// a.k.a. clause. +/// 2) Some types are technically sugared but are much more familiar +/// when seen in their sugared form --- for example, va_list, +/// vector types, and the magic Objective C types. We don't +/// want to desugar these, even if we do produce an a.k.a. clause. +/// 3) Some types may have already been desugared previously in this diagnostic. +/// if this is the case, doing another "aka" would just be clutter. +/// 4) Two different types within the same diagnostic have the same output +/// string. In this case, force an a.k.a with the desugared type when +/// doing so will provide additional information. +/// +/// \param Context the context in which the type was allocated +/// \param Ty the type to print +/// \param QualTypeVals pointer values to QualTypes which are used in the +/// diagnostic message +static std::string +ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, + const DiagnosticsEngine::ArgumentValue *PrevArgs, + unsigned NumPrevArgs, + ArrayRef<intptr_t> QualTypeVals) { + // FIXME: Playing with std::string is really slow. 
+ bool ForceAKA = false; + QualType CanTy = Ty.getCanonicalType(); + std::string S = Ty.getAsString(Context.getPrintingPolicy()); + std::string CanS = CanTy.getAsString(Context.getPrintingPolicy()); + + for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) { + QualType CompareTy = + QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I])); + if (CompareTy.isNull()) + continue; + if (CompareTy == Ty) + continue; // Same types + QualType CompareCanTy = CompareTy.getCanonicalType(); + if (CompareCanTy == CanTy) + continue; // Same canonical types + std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy()); + bool aka; + QualType CompareDesugar = Desugar(Context, CompareTy, aka); + std::string CompareDesugarStr = + CompareDesugar.getAsString(Context.getPrintingPolicy()); + if (CompareS != S && CompareDesugarStr != S) + continue; // The type string is different than the comparison string + // and the desugared comparison string. + std::string CompareCanS = + CompareCanTy.getAsString(Context.getPrintingPolicy()); + + if (CompareCanS == CanS) + continue; // No new info from canonical type + + ForceAKA = true; + break; + } + + // Check to see if we already desugared this type in this + // diagnostic. If so, don't do it again. + bool Repeated = false; + for (unsigned i = 0; i != NumPrevArgs; ++i) { + // TODO: Handle ak_declcontext case. + if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) { + void *Ptr = (void*)PrevArgs[i].second; + QualType PrevTy(QualType::getFromOpaquePtr(Ptr)); + if (PrevTy == Ty) { + Repeated = true; + break; + } + } + } + + // Consider producing an a.k.a. clause if removing all the direct + // sugar gives us something "significantly different". 
+ if (!Repeated) { + bool ShouldAKA = false; + QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA); + if (ShouldAKA || ForceAKA) { + if (DesugaredTy == Ty) { + DesugaredTy = Ty.getCanonicalType(); + } + std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy()); + if (akaStr != S) { + S = "'" + S + "' (aka '" + akaStr + "')"; + return S; + } + } + } + + S = "'" + S + "'"; + return S; +} + +static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType, + QualType ToType, bool PrintTree, + bool PrintFromType, bool ElideType, + bool ShowColors, raw_ostream &OS); + +void clang::FormatASTNodeDiagnosticArgument( + DiagnosticsEngine::ArgumentKind Kind, + intptr_t Val, + const char *Modifier, + unsigned ModLen, + const char *Argument, + unsigned ArgLen, + const DiagnosticsEngine::ArgumentValue *PrevArgs, + unsigned NumPrevArgs, + SmallVectorImpl<char> &Output, + void *Cookie, + ArrayRef<intptr_t> QualTypeVals) { + ASTContext &Context = *static_cast<ASTContext*>(Cookie); + + size_t OldEnd = Output.size(); + llvm::raw_svector_ostream OS(Output); + bool NeedQuotes = true; + + switch (Kind) { + default: llvm_unreachable("unknown ArgumentKind"); + case DiagnosticsEngine::ak_qualtype_pair: { + TemplateDiffTypes &TDT = *reinterpret_cast<TemplateDiffTypes*>(Val); + QualType FromType = + QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.FromType)); + QualType ToType = + QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.ToType)); + + if (FormatTemplateTypeDiff(Context, FromType, ToType, TDT.PrintTree, + TDT.PrintFromType, TDT.ElideType, + TDT.ShowColors, OS)) { + NeedQuotes = !TDT.PrintTree; + TDT.TemplateDiffUsed = true; + break; + } + + // Don't fall-back during tree printing. The caller will handle + // this case. + if (TDT.PrintTree) + return; + + // Attempting to do a template diff on non-templates. Set the variables + // and continue with regular type printing of the appropriate type. + Val = TDT.PrintFromType ? 
TDT.FromType : TDT.ToType; + ModLen = 0; + ArgLen = 0; + // Fall through + } + case DiagnosticsEngine::ak_qualtype: { + assert(ModLen == 0 && ArgLen == 0 && + "Invalid modifier for QualType argument"); + + QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val))); + OS << ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, NumPrevArgs, + QualTypeVals); + NeedQuotes = false; + break; + } + case DiagnosticsEngine::ak_declarationname: { + if (ModLen == 9 && !memcmp(Modifier, "objcclass", 9) && ArgLen == 0) + OS << '+'; + else if (ModLen == 12 && !memcmp(Modifier, "objcinstance", 12) + && ArgLen==0) + OS << '-'; + else + assert(ModLen == 0 && ArgLen == 0 && + "Invalid modifier for DeclarationName argument"); + + OS << DeclarationName::getFromOpaqueInteger(Val); + break; + } + case DiagnosticsEngine::ak_nameddecl: { + bool Qualified; + if (ModLen == 1 && Modifier[0] == 'q' && ArgLen == 0) + Qualified = true; + else { + assert(ModLen == 0 && ArgLen == 0 && + "Invalid modifier for NamedDecl* argument"); + Qualified = false; + } + const NamedDecl *ND = reinterpret_cast<const NamedDecl*>(Val); + ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), Qualified); + break; + } + case DiagnosticsEngine::ak_nestednamespec: { + NestedNameSpecifier *NNS = reinterpret_cast<NestedNameSpecifier*>(Val); + NNS->print(OS, Context.getPrintingPolicy()); + NeedQuotes = false; + break; + } + case DiagnosticsEngine::ak_declcontext: { + DeclContext *DC = reinterpret_cast<DeclContext *> (Val); + assert(DC && "Should never have a null declaration context"); + + if (DC->isTranslationUnit()) { + // FIXME: Get these strings from some localized place + if (Context.getLangOpts().CPlusPlus) + OS << "the global namespace"; + else + OS << "the global scope"; + } else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) { + OS << ConvertTypeToDiagnosticString(Context, + Context.getTypeDeclType(Type), + PrevArgs, NumPrevArgs, + QualTypeVals); + } else { + // FIXME: Get these strings from some 
localized place + NamedDecl *ND = cast<NamedDecl>(DC); + if (isa<NamespaceDecl>(ND)) + OS << "namespace "; + else if (isa<ObjCMethodDecl>(ND)) + OS << "method "; + else if (isa<FunctionDecl>(ND)) + OS << "function "; + + OS << '\''; + ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true); + OS << '\''; + } + NeedQuotes = false; + break; + } + } + + OS.flush(); + + if (NeedQuotes) { + Output.insert(Output.begin()+OldEnd, '\''); + Output.push_back('\''); + } +} + +/// TemplateDiff - A class that constructs a pretty string for a pair of +/// QualTypes. For the pair of types, a diff tree will be created containing +/// all the information about the templates and template arguments. Afterwards, +/// the tree is transformed to a string according to the options passed in. +namespace { +class TemplateDiff { + /// Context - The ASTContext which is used for comparing template arguments. + ASTContext &Context; + + /// Policy - Used during expression printing. + PrintingPolicy Policy; + + /// ElideType - Option to elide identical types. + bool ElideType; + + /// PrintTree - Format output string as a tree. + bool PrintTree; + + /// ShowColor - Diagnostics support color, so bolding will be used. + bool ShowColor; + + /// FromType - When single type printing is selected, this is the type to be + /// be printed. When tree printing is selected, this type will show up first + /// in the tree. + QualType FromType; + + /// ToType - The type that FromType is compared to. Only in tree printing + /// will this type be outputed. + QualType ToType; + + /// OS - The stream used to construct the output strings. + raw_ostream &OS; + + /// IsBold - Keeps track of the bold formatting for the output string. + bool IsBold; + + /// DiffTree - A tree representation the differences between two types. + class DiffTree { + public: + /// DiffKind - The difference in a DiffNode and which fields are used. + enum DiffKind { + /// Incomplete or invalid node. 
+ Invalid, + /// Another level of templates, uses TemplateDecl and Qualifiers + Template, + /// Type difference, uses QualType + Type, + /// Expression difference, uses Expr + Expression, + /// Template argument difference, uses TemplateDecl + TemplateTemplate, + /// Integer difference, uses APSInt and Expr + Integer, + /// Declaration difference, uses ValueDecl + Declaration + }; + private: + /// DiffNode - The root node stores the original type. Each child node + /// stores template arguments of their parents. For templated types, the + /// template decl is also stored. + struct DiffNode { + DiffKind Kind; + + /// NextNode - The index of the next sibling node or 0. + unsigned NextNode; + + /// ChildNode - The index of the first child node or 0. + unsigned ChildNode; + + /// ParentNode - The index of the parent node. + unsigned ParentNode; + + /// FromType, ToType - The type arguments. + QualType FromType, ToType; + + /// FromExpr, ToExpr - The expression arguments. + Expr *FromExpr, *ToExpr; + + /// FromTD, ToTD - The template decl for template template + /// arguments or the type arguments that are templates. + TemplateDecl *FromTD, *ToTD; + + /// FromQual, ToQual - Qualifiers for template types. + Qualifiers FromQual, ToQual; + + /// FromInt, ToInt - APSInt's for integral arguments. + llvm::APSInt FromInt, ToInt; + + /// IsValidFromInt, IsValidToInt - Whether the APSInt's are valid. + bool IsValidFromInt, IsValidToInt; + + /// FromValueDecl, ToValueDecl - Whether the argument is a decl. + ValueDecl *FromValueDecl, *ToValueDecl; + + /// FromAddressOf, ToAddressOf - Whether the ValueDecl needs an address of + /// operator before it. + bool FromAddressOf, ToAddressOf; + + /// FromDefault, ToDefault - Whether the argument is a default argument. + bool FromDefault, ToDefault; + + /// Same - Whether the two arguments evaluate to the same value. 
+ bool Same; + + DiffNode(unsigned ParentNode = 0) + : Kind(Invalid), NextNode(0), ChildNode(0), ParentNode(ParentNode), + FromType(), ToType(), FromExpr(0), ToExpr(0), FromTD(0), ToTD(0), + IsValidFromInt(false), IsValidToInt(false), FromValueDecl(0), + ToValueDecl(0), FromAddressOf(false), ToAddressOf(false), + FromDefault(false), ToDefault(false), Same(false) { } + }; + + /// FlatTree - A flattened tree used to store the DiffNodes. + SmallVector<DiffNode, 16> FlatTree; + + /// CurrentNode - The index of the current node being used. + unsigned CurrentNode; + + /// NextFreeNode - The index of the next unused node. Used when creating + /// child nodes. + unsigned NextFreeNode; + + /// ReadNode - The index of the current node being read. + unsigned ReadNode; + + public: + DiffTree() : + CurrentNode(0), NextFreeNode(1) { + FlatTree.push_back(DiffNode()); + } + + // Node writing functions. + /// SetNode - Sets FromTD and ToTD of the current node. + void SetNode(TemplateDecl *FromTD, TemplateDecl *ToTD) { + FlatTree[CurrentNode].FromTD = FromTD; + FlatTree[CurrentNode].ToTD = ToTD; + } + + /// SetNode - Sets FromType and ToType of the current node. + void SetNode(QualType FromType, QualType ToType) { + FlatTree[CurrentNode].FromType = FromType; + FlatTree[CurrentNode].ToType = ToType; + } + + /// SetNode - Set FromExpr and ToExpr of the current node. + void SetNode(Expr *FromExpr, Expr *ToExpr) { + FlatTree[CurrentNode].FromExpr = FromExpr; + FlatTree[CurrentNode].ToExpr = ToExpr; + } + + /// SetNode - Set FromInt and ToInt of the current node. + void SetNode(llvm::APSInt FromInt, llvm::APSInt ToInt, + bool IsValidFromInt, bool IsValidToInt) { + FlatTree[CurrentNode].FromInt = FromInt; + FlatTree[CurrentNode].ToInt = ToInt; + FlatTree[CurrentNode].IsValidFromInt = IsValidFromInt; + FlatTree[CurrentNode].IsValidToInt = IsValidToInt; + } + + /// SetNode - Set FromQual and ToQual of the current node. 
+ void SetNode(Qualifiers FromQual, Qualifiers ToQual) { + FlatTree[CurrentNode].FromQual = FromQual; + FlatTree[CurrentNode].ToQual = ToQual; + } + + /// SetNode - Set FromValueDecl and ToValueDecl of the current node. + void SetNode(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl, + bool FromAddressOf, bool ToAddressOf) { + FlatTree[CurrentNode].FromValueDecl = FromValueDecl; + FlatTree[CurrentNode].ToValueDecl = ToValueDecl; + FlatTree[CurrentNode].FromAddressOf = FromAddressOf; + FlatTree[CurrentNode].ToAddressOf = ToAddressOf; + } + + /// SetSame - Sets the same flag of the current node. + void SetSame(bool Same) { + FlatTree[CurrentNode].Same = Same; + } + + /// SetDefault - Sets FromDefault and ToDefault flags of the current node. + void SetDefault(bool FromDefault, bool ToDefault) { + FlatTree[CurrentNode].FromDefault = FromDefault; + FlatTree[CurrentNode].ToDefault = ToDefault; + } + + /// SetKind - Sets the current node's type. + void SetKind(DiffKind Kind) { + FlatTree[CurrentNode].Kind = Kind; + } + + /// Up - Changes the node to the parent of the current node. + void Up() { + CurrentNode = FlatTree[CurrentNode].ParentNode; + } + + /// AddNode - Adds a child node to the current node, then sets that node + /// node as the current node. + void AddNode() { + FlatTree.push_back(DiffNode(CurrentNode)); + DiffNode &Node = FlatTree[CurrentNode]; + if (Node.ChildNode == 0) { + // If a child node doesn't exist, add one. + Node.ChildNode = NextFreeNode; + } else { + // If a child node exists, find the last child node and add a + // next node to it. + unsigned i; + for (i = Node.ChildNode; FlatTree[i].NextNode != 0; + i = FlatTree[i].NextNode) { + } + FlatTree[i].NextNode = NextFreeNode; + } + CurrentNode = NextFreeNode; + ++NextFreeNode; + } + + // Node reading functions. + /// StartTraverse - Prepares the tree for recursive traversal. 
+ void StartTraverse() { + ReadNode = 0; + CurrentNode = NextFreeNode; + NextFreeNode = 0; + } + + /// Parent - Move the current read node to its parent. + void Parent() { + ReadNode = FlatTree[ReadNode].ParentNode; + } + + /// GetNode - Gets the FromType and ToType. + void GetNode(QualType &FromType, QualType &ToType) { + FromType = FlatTree[ReadNode].FromType; + ToType = FlatTree[ReadNode].ToType; + } + + /// GetNode - Gets the FromExpr and ToExpr. + void GetNode(Expr *&FromExpr, Expr *&ToExpr) { + FromExpr = FlatTree[ReadNode].FromExpr; + ToExpr = FlatTree[ReadNode].ToExpr; + } + + /// GetNode - Gets the FromTD and ToTD. + void GetNode(TemplateDecl *&FromTD, TemplateDecl *&ToTD) { + FromTD = FlatTree[ReadNode].FromTD; + ToTD = FlatTree[ReadNode].ToTD; + } + + /// GetNode - Gets the FromInt and ToInt. + void GetNode(llvm::APSInt &FromInt, llvm::APSInt &ToInt, + bool &IsValidFromInt, bool &IsValidToInt) { + FromInt = FlatTree[ReadNode].FromInt; + ToInt = FlatTree[ReadNode].ToInt; + IsValidFromInt = FlatTree[ReadNode].IsValidFromInt; + IsValidToInt = FlatTree[ReadNode].IsValidToInt; + } + + /// GetNode - Gets the FromQual and ToQual. + void GetNode(Qualifiers &FromQual, Qualifiers &ToQual) { + FromQual = FlatTree[ReadNode].FromQual; + ToQual = FlatTree[ReadNode].ToQual; + } + + /// GetNode - Gets the FromValueDecl and ToValueDecl. + void GetNode(ValueDecl *&FromValueDecl, ValueDecl *&ToValueDecl, + bool &FromAddressOf, bool &ToAddressOf) { + FromValueDecl = FlatTree[ReadNode].FromValueDecl; + ToValueDecl = FlatTree[ReadNode].ToValueDecl; + FromAddressOf = FlatTree[ReadNode].FromAddressOf; + ToAddressOf = FlatTree[ReadNode].ToAddressOf; + } + + /// NodeIsSame - Returns true the arguments are the same. + bool NodeIsSame() { + return FlatTree[ReadNode].Same; + } + + /// HasChildrend - Returns true if the node has children. + bool HasChildren() { + return FlatTree[ReadNode].ChildNode != 0; + } + + /// MoveToChild - Moves from the current node to its child. 
+ void MoveToChild() { + ReadNode = FlatTree[ReadNode].ChildNode; + } + + /// AdvanceSibling - If there is a next sibling, advance to it and return + /// true. Otherwise, return false. + bool AdvanceSibling() { + if (FlatTree[ReadNode].NextNode == 0) + return false; + + ReadNode = FlatTree[ReadNode].NextNode; + return true; + } + + /// HasNextSibling - Return true if the node has a next sibling. + bool HasNextSibling() { + return FlatTree[ReadNode].NextNode != 0; + } + + /// FromDefault - Return true if the from argument is the default. + bool FromDefault() { + return FlatTree[ReadNode].FromDefault; + } + + /// ToDefault - Return true if the to argument is the default. + bool ToDefault() { + return FlatTree[ReadNode].ToDefault; + } + + /// Empty - Returns true if the tree has no information. + bool Empty() { + return GetKind() == Invalid; + } + + /// GetKind - Returns the current node's type. + DiffKind GetKind() { + return FlatTree[ReadNode].Kind; + } + }; + + DiffTree Tree; + + /// TSTiterator - an iterator that is used to enter a + /// TemplateSpecializationType and read TemplateArguments inside template + /// parameter packs in order with the rest of the TemplateArguments. + struct TSTiterator { + typedef const TemplateArgument& reference; + typedef const TemplateArgument* pointer; + + /// TST - the template specialization whose arguments this iterator + /// traverse over. + const TemplateSpecializationType *TST; + + /// DesugarTST - desugared template specialization used to extract + /// default argument information + const TemplateSpecializationType *DesugarTST; + + /// Index - the index of the template argument in TST. + unsigned Index; + + /// CurrentTA - if CurrentTA is not the same as EndTA, then CurrentTA + /// points to a TemplateArgument within a parameter pack. 
+    TemplateArgument::pack_iterator CurrentTA;
+
+    /// EndTA - the end iterator of a parameter pack.
+    TemplateArgument::pack_iterator EndTA;
+
+    /// TSTiterator - Constructs an iterator and sets it to the first template
+    /// argument.
+    TSTiterator(ASTContext &Context, const TemplateSpecializationType *TST)
+        : TST(TST),
+          DesugarTST(GetTemplateSpecializationType(Context, TST->desugar())),
+          Index(0), CurrentTA(0), EndTA(0) {
+      if (isEnd()) return;
+
+      // Set to first template argument.  If not a parameter pack, done.
+      TemplateArgument TA = TST->getArg(0);
+      if (TA.getKind() != TemplateArgument::Pack) return;
+
+      // Start looking into the parameter pack.
+      CurrentTA = TA.pack_begin();
+      EndTA = TA.pack_end();
+
+      // Found a valid template argument.
+      if (CurrentTA != EndTA) return;
+
+      // Parameter pack is empty, use the increment to get to a valid
+      // template argument.
+      ++(*this);
+    }
+
+    /// isEnd - Returns true if the iterator is one past the end.
+    bool isEnd() const {
+      return Index >= TST->getNumArgs();
+    }
+
+    /// &operator++ - Increment the iterator to the next template argument.
+    TSTiterator &operator++() {
+      // After the end, Index should be the default argument position in
+      // DesugarTST, if it exists.
+      if (isEnd()) {
+        ++Index;
+        return *this;
+      }
+
+      // If in a parameter pack, advance in the parameter pack.
+      if (CurrentTA != EndTA) {
+        ++CurrentTA;
+        if (CurrentTA != EndTA)
+          return *this;
+      }
+
+      // Loop until a template argument is found, or the end is reached.
+      while (true) {
+        // Advance to the next template argument.  Break if reached the end.
+        if (++Index == TST->getNumArgs()) break;
+
+        // If the TemplateArgument is not a parameter pack, done.
+        TemplateArgument TA = TST->getArg(Index);
+        if (TA.getKind() != TemplateArgument::Pack) break;
+
+        // Handle parameter packs.
+        CurrentTA = TA.pack_begin();
+        EndTA = TA.pack_end();
+
+        // If the parameter pack is empty, try to advance again.
+        if (CurrentTA != EndTA) break;
+      }
+      return *this;
+    }
+
+    /// operator* - Returns the appropriate TemplateArgument.
+    reference operator*() const {
+      assert(!isEnd() && "Index exceeds number of arguments.");
+      if (CurrentTA == EndTA)
+        return TST->getArg(Index);
+      else
+        return *CurrentTA;
+    }
+
+    /// operator-> - Allow access to the underlying TemplateArgument.
+    pointer operator->() const {
+      return &operator*();
+    }
+
+    /// getDesugar - Returns the deduced template argument from DesugarTST.
+    reference getDesugar() const {
+      return DesugarTST->getArg(Index);
+    }
+  };
+
+  // These functions build up the template diff tree, including functions to
+  // retrieve and compare template arguments.
+
+  /// GetTemplateSpecializationType - Returns the template specialization for
+  /// Ty, desugaring through a RecordType of a class template specialization
+  /// when needed.  Returns null for non-template types.
+  static const TemplateSpecializationType * GetTemplateSpecializationType(
+      ASTContext &Context, QualType Ty) {
+    if (const TemplateSpecializationType *TST =
+            Ty->getAs<TemplateSpecializationType>())
+      return TST;
+
+    const RecordType *RT = Ty->getAs<RecordType>();
+
+    if (!RT)
+      return 0;
+
+    const ClassTemplateSpecializationDecl *CTSD =
+        dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+
+    if (!CTSD)
+      return 0;
+
+    Ty = Context.getTemplateSpecializationType(
+             TemplateName(CTSD->getSpecializedTemplate()),
+             CTSD->getTemplateArgs().data(),
+             CTSD->getTemplateArgs().size(),
+             Ty.getLocalUnqualifiedType().getCanonicalType());
+
+    return Ty->getAs<TemplateSpecializationType>();
+  }
+
+  /// DiffTemplate - recursively visits template arguments and stores the
+  /// argument info into a tree.
+  void DiffTemplate(const TemplateSpecializationType *FromTST,
+                    const TemplateSpecializationType *ToTST) {
+    // Begin descent into diffing template tree.
+    TemplateParameterList *ParamsFrom =
+        FromTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
+    TemplateParameterList *ParamsTo =
+        ToTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
+    unsigned TotalArgs = 0;
+    // Walk both argument lists in lock-step; each iteration adds one node to
+    // the diff tree.
+    for (TSTiterator FromIter(Context, FromTST), ToIter(Context, ToTST);
+         !FromIter.isEnd() || !ToIter.isEnd(); ++TotalArgs) {
+      Tree.AddNode();
+
+      // Get the parameter at index TotalArgs.  If index is larger
+      // than the total number of parameters, then there is an
+      // argument pack, so re-use the last parameter.
+      unsigned ParamIndex = std::min(TotalArgs, ParamsFrom->size() - 1);
+      NamedDecl *ParamND = ParamsFrom->getParam(ParamIndex);
+
+      // Handle Types
+      if (TemplateTypeParmDecl *DefaultTTPD =
+              dyn_cast<TemplateTypeParmDecl>(ParamND)) {
+        QualType FromType, ToType;
+        FromType = GetType(FromIter, DefaultTTPD);
+        // A forward declaration can have no default arg but the actual class
+        // can, don't mix up iterators and get the original parameter.
+        ToType = GetType(
+            ToIter, cast<TemplateTypeParmDecl>(ParamsTo->getParam(ParamIndex)));
+        Tree.SetNode(FromType, ToType);
+        Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
+                        ToIter.isEnd() && !ToType.isNull());
+        Tree.SetKind(DiffTree::Type);
+        if (!FromType.isNull() && !ToType.isNull()) {
+          if (Context.hasSameType(FromType, ToType)) {
+            Tree.SetSame(true);
+          } else {
+            // Not the same type: if both sides are specializations of the
+            // same template, descend and diff their arguments instead.
+            Qualifiers FromQual = FromType.getQualifiers(),
+                       ToQual = ToType.getQualifiers();
+            const TemplateSpecializationType *FromArgTST =
+                GetTemplateSpecializationType(Context, FromType);
+            const TemplateSpecializationType *ToArgTST =
+                GetTemplateSpecializationType(Context, ToType);
+
+            if (FromArgTST && ToArgTST &&
+                hasSameTemplate(FromArgTST, ToArgTST)) {
+              FromQual -= QualType(FromArgTST, 0).getQualifiers();
+              ToQual -= QualType(ToArgTST, 0).getQualifiers();
+              Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
+                           ToArgTST->getTemplateName().getAsTemplateDecl());
+              Tree.SetNode(FromQual, ToQual);
+              Tree.SetKind(DiffTree::Template);
+              DiffTemplate(FromArgTST, ToArgTST);
+            }
+          }
+        }
+      }
+
+      // Handle Expressions
+      if (NonTypeTemplateParmDecl *DefaultNTTPD =
+              dyn_cast<NonTypeTemplateParmDecl>(ParamND)) {
+        Expr *FromExpr = 0, *ToExpr = 0;
+        llvm::APSInt FromInt, ToInt;
+        ValueDecl *FromValueDecl = 0, *ToValueDecl = 0;
+        unsigned ParamWidth = 128; // Safe default
+        if (DefaultNTTPD->getType()->isIntegralOrEnumerationType())
+          ParamWidth = Context.getIntWidth(DefaultNTTPD->getType());
+        bool HasFromInt = !FromIter.isEnd() &&
+                          FromIter->getKind() == TemplateArgument::Integral;
+        bool HasToInt = !ToIter.isEnd() &&
+                        ToIter->getKind() == TemplateArgument::Integral;
+        bool HasFromValueDecl =
+            !FromIter.isEnd() &&
+            FromIter->getKind() == TemplateArgument::Declaration;
+        bool HasToValueDecl =
+            !ToIter.isEnd() &&
+            ToIter->getKind() == TemplateArgument::Declaration;
+
+        assert(((!HasFromInt && !HasToInt) ||
+                (!HasFromValueDecl && !HasToValueDecl)) &&
+               "Template argument cannot be both integer and declaration");
+
+        if (HasFromInt)
+          FromInt = FromIter->getAsIntegral();
+        else if (HasFromValueDecl)
+          FromValueDecl = FromIter->getAsDecl();
+        else
+          FromExpr = GetExpr(FromIter, DefaultNTTPD);
+
+        if (HasToInt)
+          ToInt = ToIter->getAsIntegral();
+        else if (HasToValueDecl)
+          ToValueDecl = ToIter->getAsDecl();
+        else
+          ToExpr = GetExpr(ToIter, DefaultNTTPD);
+
+        if (!HasFromInt && !HasToInt && !HasFromValueDecl && !HasToValueDecl) {
+          // Both arguments are expressions; compare as integers when the
+          // parameter has integral type, otherwise as expressions.
+          Tree.SetNode(FromExpr, ToExpr);
+          Tree.SetDefault(FromIter.isEnd() && FromExpr,
+                          ToIter.isEnd() && ToExpr);
+          if (DefaultNTTPD->getType()->isIntegralOrEnumerationType()) {
+            if (FromExpr)
+              FromInt = GetInt(FromIter, FromExpr);
+            if (ToExpr)
+              ToInt = GetInt(ToIter, ToExpr);
+            Tree.SetNode(FromInt, ToInt, FromExpr, ToExpr);
+            Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
+            Tree.SetKind(DiffTree::Integer);
+          } else {
+            Tree.SetSame(IsEqualExpr(Context, ParamWidth, FromExpr, ToExpr));
+            Tree.SetKind(DiffTree::Expression);
+          }
+        } else if (HasFromInt || HasToInt) {
+          // At least one side is already an integral argument; evaluate the
+          // other side's expression so both compare as integers.
+          if (!HasFromInt && FromExpr) {
+            FromInt = GetInt(FromIter, FromExpr);
+            HasFromInt = true;
+          }
+          if (!HasToInt && ToExpr) {
+            ToInt = GetInt(ToIter, ToExpr);
+            HasToInt = true;
+          }
+          Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
+          Tree.SetSame(IsSameConvertedInt(ParamWidth, FromInt, ToInt));
+          Tree.SetDefault(FromIter.isEnd() && HasFromInt,
+                          ToIter.isEnd() && HasToInt);
+          Tree.SetKind(DiffTree::Integer);
+        } else {
+          // Declaration arguments.
+          if (!HasFromValueDecl && FromExpr)
+            FromValueDecl = GetValueDecl(FromIter, FromExpr);
+          if (!HasToValueDecl && ToExpr)
+            ToValueDecl = GetValueDecl(ToIter, ToExpr);
+          QualType ArgumentType = DefaultNTTPD->getType();
+          bool FromAddressOf = FromValueDecl &&
+                               !ArgumentType->isReferenceType() &&
+                               !FromValueDecl->getType()->isArrayType();
+          bool ToAddressOf = ToValueDecl &&
+                             !ArgumentType->isReferenceType() &&
+                             !ToValueDecl->getType()->isArrayType();
+          Tree.SetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
+          Tree.SetSame(FromValueDecl && ToValueDecl &&
+                       FromValueDecl->getCanonicalDecl() ==
+                       ToValueDecl->getCanonicalDecl());
+          Tree.SetDefault(FromIter.isEnd() && FromValueDecl,
+                          ToIter.isEnd() && ToValueDecl);
+          Tree.SetKind(DiffTree::Declaration);
+        }
+      }
+
+      // Handle Templates
+      if (TemplateTemplateParmDecl *DefaultTTPD =
+              dyn_cast<TemplateTemplateParmDecl>(ParamND)) {
+        TemplateDecl *FromDecl, *ToDecl;
+        FromDecl = GetTemplateDecl(FromIter, DefaultTTPD);
+        ToDecl = GetTemplateDecl(ToIter, DefaultTTPD);
+        Tree.SetNode(FromDecl, ToDecl);
+        Tree.SetSame(
+            FromDecl && ToDecl &&
+            FromDecl->getCanonicalDecl() == ToDecl->getCanonicalDecl());
+        Tree.SetKind(DiffTree::TemplateTemplate);
+      }
+
+      ++FromIter;
+      ++ToIter;
+      Tree.Up();
+    }
+  }
+
+  /// makeTemplateList - Dump every template alias into the vector.
+  static void makeTemplateList(
+      SmallVectorImpl<const TemplateSpecializationType *> &TemplateList,
+      const TemplateSpecializationType *TST) {
+    while (TST) {
+      TemplateList.push_back(TST);
+      if (!TST->isTypeAlias())
+        return;
+      TST = TST->getAliasedType()->getAs<TemplateSpecializationType>();
+    }
+  }
+
+  /// hasSameBaseTemplate - Returns true when the base templates are the same,
+  /// even if the template arguments are not.
+  static bool hasSameBaseTemplate(const TemplateSpecializationType *FromTST,
+                                  const TemplateSpecializationType *ToTST) {
+    return FromTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl() ==
+           ToTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl();
+  }
+
+  /// hasSameTemplate - Returns true if both types are specialized from the
+  /// same template declaration.  If they come from different template aliases,
+  /// do a parallel ascension search to determine the highest template alias in
+  /// common and set the arguments to them.
+  static bool hasSameTemplate(const TemplateSpecializationType *&FromTST,
+                              const TemplateSpecializationType *&ToTST) {
+    // Check the top templates if they are the same.
+    if (hasSameBaseTemplate(FromTST, ToTST))
+      return true;
+
+    // Create vectors of template aliases.
+    SmallVector<const TemplateSpecializationType*, 1> FromTemplateList,
+                                                      ToTemplateList;
+
+    makeTemplateList(FromTemplateList, FromTST);
+    makeTemplateList(ToTemplateList, ToTST);
+
+    SmallVectorImpl<const TemplateSpecializationType *>::reverse_iterator
+        FromIter = FromTemplateList.rbegin(), FromEnd = FromTemplateList.rend(),
+        ToIter = ToTemplateList.rbegin(), ToEnd = ToTemplateList.rend();
+
+    // Check if the lowest template types are the same.  If not, return.
+    if (!hasSameBaseTemplate(*FromIter, *ToIter))
+      return false;
+
+    // Begin searching up the template aliases.  The bottom most template
+    // matches so move up until one pair does not match.  Use the template
+    // right before that one.
+    for (; FromIter != FromEnd && ToIter != ToEnd; ++FromIter, ++ToIter) {
+      if (!hasSameBaseTemplate(*FromIter, *ToIter))
+        break;
+    }
+
+    // FromIter/ToIter point one past the last matching pair (or the ends);
+    // [-1] is the highest alias pair that still shares a base template.
+    FromTST = FromIter[-1];
+    ToTST = ToIter[-1];
+
+    return true;
+  }
+
+  /// GetType - Retrieves the template type arguments, including default
+  /// arguments.
+  QualType GetType(const TSTiterator &Iter, TemplateTypeParmDecl *DefaultTTPD) {
+    bool isVariadic = DefaultTTPD->isParameterPack();
+
+    if (!Iter.isEnd())
+      return Iter->getAsType();
+    if (isVariadic)
+      return QualType();
+
+    QualType ArgType = DefaultTTPD->getDefaultArgument();
+    // A dependent default argument is resolved through the desugared
+    // specialization instead.
+    if (ArgType->isDependentType())
+      return Iter.getDesugar().getAsType();
+
+    return ArgType;
+  }
+
+  /// GetExpr - Retrieves the template expression argument, including default
+  /// arguments.
+  Expr *GetExpr(const TSTiterator &Iter, NonTypeTemplateParmDecl *DefaultNTTPD) {
+    Expr *ArgExpr = 0;
+    bool isVariadic = DefaultNTTPD->isParameterPack();
+
+    if (!Iter.isEnd())
+      ArgExpr = Iter->getAsExpr();
+    else if (!isVariadic)
+      ArgExpr = DefaultNTTPD->getDefaultArgument();
+
+    // Strip off any substituted-parameter wrappers so the underlying
+    // replacement expressions are compared.
+    if (ArgExpr)
+      while (SubstNonTypeTemplateParmExpr *SNTTPE =
+                 dyn_cast<SubstNonTypeTemplateParmExpr>(ArgExpr))
+        ArgExpr = SNTTPE->getReplacement();
+
+    return ArgExpr;
+  }
+
+  /// GetInt - Retrieves the template integer argument, including evaluating
+  /// default arguments.
+  llvm::APInt GetInt(const TSTiterator &Iter, Expr *ArgExpr) {
+    // Default, value-dependent expressions require fetching
+    // from the desugared TemplateArgument.
+    if (Iter.isEnd() && ArgExpr->isValueDependent())
+      switch (Iter.getDesugar().getKind()) {
+        case TemplateArgument::Integral:
+          return Iter.getDesugar().getAsIntegral();
+        case TemplateArgument::Expression:
+          ArgExpr = Iter.getDesugar().getAsExpr();
+          return ArgExpr->EvaluateKnownConstInt(Context);
+        default:
+          // Was assert(0): with asserts disabled that fell through and
+          // evaluated a value-dependent expression; make the invariant hard.
+          llvm_unreachable("Unexpected template argument kind");
+      }
+    return ArgExpr->EvaluateKnownConstInt(Context);
+  }
+
+  /// GetValueDecl - Retrieves the template Decl argument, including
+  /// default expression argument.
+  ValueDecl *GetValueDecl(const TSTiterator &Iter, Expr *ArgExpr) {
+    // Default, value-dependent expressions require fetching
+    // from the desugared TemplateArgument.
+    if (Iter.isEnd() && ArgExpr->isValueDependent())
+      switch (Iter.getDesugar().getKind()) {
+        case TemplateArgument::Declaration:
+          return Iter.getDesugar().getAsDecl();
+        case TemplateArgument::Expression:
+          ArgExpr = Iter.getDesugar().getAsExpr();
+          return cast<DeclRefExpr>(ArgExpr)->getDecl();
+        default:
+          // Was assert(0): see GetInt above.
+          llvm_unreachable("Unexpected template argument kind");
+      }
+    // The expression is either a DeclRefExpr or a unary operator (e.g. '&')
+    // wrapping one.
+    DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr);
+    if (!DRE) {
+      DRE = cast<DeclRefExpr>(cast<UnaryOperator>(ArgExpr)->getSubExpr());
+    }
+
+    return DRE->getDecl();
+  }
+
+  /// GetTemplateDecl - Retrieves the template template arguments, including
+  /// default arguments.
+  TemplateDecl *GetTemplateDecl(const TSTiterator &Iter,
+                                TemplateTemplateParmDecl *DefaultTTPD) {
+    bool isVariadic = DefaultTTPD->isParameterPack();
+
+    TemplateArgument TA = DefaultTTPD->getDefaultArgument().getArgument();
+    TemplateDecl *DefaultTD = 0;
+    if (TA.getKind() != TemplateArgument::Null)
+      DefaultTD = TA.getAsTemplate().getAsTemplateDecl();
+
+    if (!Iter.isEnd())
+      return Iter->getAsTemplate().getAsTemplateDecl();
+    if (!isVariadic)
+      return DefaultTD;
+
+    return 0;
+  }
+
+  /// IsSameConvertedInt - Returns true if both integers are equal when
+  /// converted to an integer type with the given width.
+  static bool IsSameConvertedInt(unsigned Width, const llvm::APSInt &X,
+                                 const llvm::APSInt &Y) {
+    llvm::APInt ConvertedX = X.extOrTrunc(Width);
+    llvm::APInt ConvertedY = Y.extOrTrunc(Width);
+    return ConvertedX == ConvertedY;
+  }
+
+  /// IsEqualExpr - Returns true if the expressions evaluate to the same value.
+  static bool IsEqualExpr(ASTContext &Context, unsigned ParamWidth,
+                          Expr *FromExpr, Expr *ToExpr) {
+    if (FromExpr == ToExpr)
+      return true;
+
+    if (!FromExpr || !ToExpr)
+      return false;
+
+    FromExpr = FromExpr->IgnoreParens();
+    ToExpr = ToExpr->IgnoreParens();
+
+    // DeclRefExprs are compared by referenced declaration, not by value.
+    DeclRefExpr *FromDRE = dyn_cast<DeclRefExpr>(FromExpr),
+                *ToDRE = dyn_cast<DeclRefExpr>(ToExpr);
+
+    if (FromDRE || ToDRE) {
+      if (!FromDRE || !ToDRE)
+        return false;
+      return FromDRE->getDecl() == ToDRE->getDecl();
+    }
+
+    // Otherwise, constant-evaluate both sides and compare the results.
+    Expr::EvalResult FromResult, ToResult;
+    if (!FromExpr->EvaluateAsRValue(FromResult, Context) ||
+        !ToExpr->EvaluateAsRValue(ToResult, Context))
+      return false;
+
+    APValue &FromVal = FromResult.Val;
+    APValue &ToVal = ToResult.Val;
+
+    if (FromVal.getKind() != ToVal.getKind()) return false;
+
+    switch (FromVal.getKind()) {
+      case APValue::Int:
+        return IsSameConvertedInt(ParamWidth, FromVal.getInt(), ToVal.getInt());
+      case APValue::LValue: {
+        APValue::LValueBase FromBase = FromVal.getLValueBase();
+        APValue::LValueBase ToBase = ToVal.getLValueBase();
+        if (FromBase.isNull() && ToBase.isNull())
+          return true;
+        if (FromBase.isNull() || ToBase.isNull())
+          return false;
+        return FromBase.get<const ValueDecl*>() ==
+               ToBase.get<const ValueDecl*>();
+      }
+      case APValue::MemberPointer:
+        return FromVal.getMemberPointerDecl() == ToVal.getMemberPointerDecl();
+      default:
+        llvm_unreachable("Unknown template argument expression.");
+    }
+  }
+
+  // These functions convert the tree representation of the template
+  // differences into the internal character vector.
+
+  /// TreeToString - Converts the Tree object into a character stream which
+  /// will later be turned into the output string.
+  void TreeToString(int Indent = 1) {
+    if (PrintTree) {
+      OS << '\n';
+      OS.indent(2 * Indent);
+      ++Indent;
+    }
+
+    // Handle cases where the difference is not templates with different
+    // arguments.
+    switch (Tree.GetKind()) {
+      case DiffTree::Invalid:
+        llvm_unreachable("Template diffing failed with bad DiffNode");
+      case DiffTree::Type: {
+        QualType FromType, ToType;
+        Tree.GetNode(FromType, ToType);
+        PrintTypeNames(FromType, ToType, Tree.FromDefault(), Tree.ToDefault(),
+                       Tree.NodeIsSame());
+        return;
+      }
+      case DiffTree::Expression: {
+        Expr *FromExpr, *ToExpr;
+        Tree.GetNode(FromExpr, ToExpr);
+        PrintExpr(FromExpr, ToExpr, Tree.FromDefault(), Tree.ToDefault(),
+                  Tree.NodeIsSame());
+        return;
+      }
+      case DiffTree::TemplateTemplate: {
+        TemplateDecl *FromTD, *ToTD;
+        Tree.GetNode(FromTD, ToTD);
+        PrintTemplateTemplate(FromTD, ToTD, Tree.FromDefault(),
+                              Tree.ToDefault(), Tree.NodeIsSame());
+        return;
+      }
+      case DiffTree::Integer: {
+        llvm::APSInt FromInt, ToInt;
+        Expr *FromExpr, *ToExpr;
+        bool IsValidFromInt, IsValidToInt;
+        Tree.GetNode(FromExpr, ToExpr);
+        Tree.GetNode(FromInt, ToInt, IsValidFromInt, IsValidToInt);
+        PrintAPSInt(FromInt, ToInt, IsValidFromInt, IsValidToInt,
+                    FromExpr, ToExpr, Tree.FromDefault(), Tree.ToDefault(),
+                    Tree.NodeIsSame());
+        return;
+      }
+      case DiffTree::Declaration: {
+        ValueDecl *FromValueDecl, *ToValueDecl;
+        bool FromAddressOf, ToAddressOf;
+        Tree.GetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
+        PrintValueDecl(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf,
+                       Tree.FromDefault(), Tree.ToDefault(), Tree.NodeIsSame());
+        return;
+      }
+      case DiffTree::Template: {
+        // Node is root of template.  Recurse on children.
+        TemplateDecl *FromTD, *ToTD;
+        Tree.GetNode(FromTD, ToTD);
+
+        if (!Tree.HasChildren()) {
+          // If we're dealing with a template specialization with zero
+          // arguments, there are no children; special-case this.
+          OS << FromTD->getNameAsString() << "<>";
+          return;
+        }
+
+        Qualifiers FromQual, ToQual;
+        Tree.GetNode(FromQual, ToQual);
+        PrintQualifiers(FromQual, ToQual);
+
+        OS << FromTD->getNameAsString() << '<';
+        Tree.MoveToChild();
+        unsigned NumElideArgs = 0;
+        do {
+          if (ElideType) {
+            // Runs of identical arguments are collapsed into "[...]".
+            if (Tree.NodeIsSame()) {
+              ++NumElideArgs;
+              continue;
+            }
+            if (NumElideArgs > 0) {
+              PrintElideArgs(NumElideArgs, Indent);
+              NumElideArgs = 0;
+              OS << ", ";
+            }
+          }
+          TreeToString(Indent);
+          if (Tree.HasNextSibling())
+            OS << ", ";
+        } while (Tree.AdvanceSibling());
+        if (NumElideArgs > 0)
+          PrintElideArgs(NumElideArgs, Indent);
+
+        Tree.Parent();
+        OS << ">";
+        return;
+      }
+    }
+  }
+
+  // To signal to the text printer that a certain text needs to be bolded,
+  // a special character is injected into the character stream which the
+  // text printer will later strip out.
+
+  /// Bold - Start bolding text.
+  void Bold() {
+    assert(!IsBold && "Attempting to bold text that is already bold.");
+    IsBold = true;
+    if (ShowColor)
+      OS << ToggleHighlight;
+  }
+
+  /// Unbold - Stop bolding text.
+  void Unbold() {
+    assert(IsBold && "Attempting to remove bold from unbold text.");
+    IsBold = false;
+    if (ShowColor)
+      OS << ToggleHighlight;
+  }
+
+  // Functions to print out the arguments and highlighting the difference.
+
+  /// PrintTypeNames - prints the typenames, bolding differences.  Will detect
+  /// typenames that are the same and attempt to disambiguate them by using
+  /// canonical typenames.
+  void PrintTypeNames(QualType FromType, QualType ToType,
+                      bool FromDefault, bool ToDefault, bool Same) {
+    assert((!FromType.isNull() || !ToType.isNull()) &&
+           "Only one template argument may be missing.");
+
+    if (Same) {
+      OS << FromType.getAsString();
+      return;
+    }
+
+    // Same unqualified type: print the (possibly differing) qualifiers,
+    // then the shared type name once.
+    if (!FromType.isNull() && !ToType.isNull() &&
+        FromType.getLocalUnqualifiedType() ==
+        ToType.getLocalUnqualifiedType()) {
+      // (unused local 'CommonQual' removed)
+      Qualifiers FromQual = FromType.getLocalQualifiers(),
+                 ToQual = ToType.getLocalQualifiers();
+      PrintQualifiers(FromQual, ToQual);
+      FromType.getLocalUnqualifiedType().print(OS, Policy);
+      return;
+    }
+
+    std::string FromTypeStr = FromType.isNull() ? "(no argument)"
+                                                : FromType.getAsString();
+    std::string ToTypeStr = ToType.isNull() ? "(no argument)"
+                                            : ToType.getAsString();
+    // Switch to canonical typename if it is better.
+    // TODO: merge this with other aka printing above.
+    if (FromTypeStr == ToTypeStr) {
+      std::string FromCanTypeStr = FromType.getCanonicalType().getAsString();
+      std::string ToCanTypeStr = ToType.getCanonicalType().getAsString();
+      if (FromCanTypeStr != ToCanTypeStr) {
+        FromTypeStr = FromCanTypeStr;
+        ToTypeStr = ToCanTypeStr;
+      }
+    }
+
+    if (PrintTree) OS << '[';
+    OS << (FromDefault ? "(default) " : "");
+    Bold();
+    OS << FromTypeStr;
+    Unbold();
+    if (PrintTree) {
+      OS << " != " << (ToDefault ? "(default) " : "");
+      Bold();
+      OS << ToTypeStr;
+      Unbold();
+      OS << "]";
+    }
+  }
+
+  /// PrintExpr - Prints out the expr template arguments, highlighting argument
+  /// differences.
+  void PrintExpr(const Expr *FromExpr, const Expr *ToExpr,
+                 bool FromDefault, bool ToDefault, bool Same) {
+    assert((FromExpr || ToExpr) &&
+           "Only one template argument may be missing.");
+    if (Same) {
+      PrintExpr(FromExpr);
+    } else if (!PrintTree) {
+      OS << (FromDefault ? "(default) " : "");
+      Bold();
+      PrintExpr(FromExpr);
+      Unbold();
+    } else {
+      OS << (FromDefault ? "[(default) " : "[");
+      Bold();
+      PrintExpr(FromExpr);
+      Unbold();
+      OS << " != " << (ToDefault ? "(default) " : "");
+      Bold();
+      PrintExpr(ToExpr);
+      Unbold();
+      OS << ']';
+    }
+  }
+
+  /// PrintExpr - Actual formatting and printing of expressions.
+  void PrintExpr(const Expr *E) {
+    if (!E)
+      OS << "(no argument)";
+    else
+      E->printPretty(OS, 0, Policy);
+  }
+
+  /// PrintTemplateTemplate - Handles printing of template template arguments,
+  /// highlighting argument differences.
+  void PrintTemplateTemplate(TemplateDecl *FromTD, TemplateDecl *ToTD,
+                             bool FromDefault, bool ToDefault, bool Same) {
+    assert((FromTD || ToTD) && "Only one template argument may be missing.");
+
+    std::string FromName = FromTD ? FromTD->getName() : "(no argument)";
+    std::string ToName = ToTD ? ToTD->getName() : "(no argument)";
+    // Identical short names: fall back to qualified names to disambiguate.
+    if (FromTD && ToTD && FromName == ToName) {
+      FromName = FromTD->getQualifiedNameAsString();
+      ToName = ToTD->getQualifiedNameAsString();
+    }
+
+    if (Same) {
+      OS << "template " << FromTD->getNameAsString();
+    } else if (!PrintTree) {
+      OS << (FromDefault ? "(default) template " : "template ");
+      Bold();
+      OS << FromName;
+      Unbold();
+    } else {
+      OS << (FromDefault ? "[(default) template " : "[template ");
+      Bold();
+      OS << FromName;
+      Unbold();
+      OS << " != " << (ToDefault ? "(default) template " : "template ");
+      Bold();
+      OS << ToName;
+      Unbold();
+      OS << ']';
+    }
+  }
+
+  /// PrintAPSInt - Handles printing of integral arguments, highlighting
+  /// argument differences.
+  void PrintAPSInt(llvm::APSInt FromInt, llvm::APSInt ToInt,
+                   bool IsValidFromInt, bool IsValidToInt, Expr *FromExpr,
+                   Expr *ToExpr, bool FromDefault, bool ToDefault, bool Same) {
+    assert((IsValidFromInt || IsValidToInt) &&
+           "Only one integral argument may be missing.");
+
+    if (Same) {
+      OS << FromInt.toString(10);
+    } else if (!PrintTree) {
+      OS << (FromDefault ? "(default) " : "");
+      PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
+    } else {
+      OS << (FromDefault ? "[(default) " : "[");
+      PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
+      OS << " != " << (ToDefault ? "(default) " : "");
+      PrintAPSInt(ToInt, ToExpr, IsValidToInt);
+      OS << ']';
+    }
+  }
+
+  /// PrintAPSInt - If valid, print the APSInt.  If the expression gives
+  /// more information, print it too.
+  void PrintAPSInt(llvm::APSInt Val, Expr *E, bool Valid) {
+    Bold();
+    if (Valid) {
+      if (HasExtraInfo(E)) {
+        PrintExpr(E);
+        Unbold();
+        OS << " aka ";
+        Bold();
+      }
+      OS << Val.toString(10);
+    } else {
+      OS << "(no argument)";
+    }
+    Unbold();
+  }
+
+  /// HasExtraInfo - Returns true if E is not an integer literal or the
+  /// negation of an integer literal.
+  bool HasExtraInfo(Expr *E) {
+    if (!E) return false;
+    if (isa<IntegerLiteral>(E)) return false;
+
+    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
+      if (UO->getOpcode() == UO_Minus)
+        if (isa<IntegerLiteral>(UO->getSubExpr()))
+          return false;
+
+    return true;
+  }
+
+  /// PrintValueDecl - Handles printing of Decl arguments, highlighting
+  /// argument differences.
+  void PrintValueDecl(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl,
+                      bool FromAddressOf, bool ToAddressOf, bool FromDefault,
+                      bool ToDefault, bool Same) {
+    assert((FromValueDecl || ToValueDecl) &&
+           "Only one Decl argument may be NULL");
+
+    if (Same) {
+      // NOTE(review): unlike the branches below, this path does not print a
+      // leading '&' when FromAddressOf is set — confirm whether that is
+      // intentional before changing it.
+      OS << FromValueDecl->getName();
+    } else if (!PrintTree) {
+      OS << (FromDefault ? "(default) " : "");
+      Bold();
+      if (FromAddressOf)
+        OS << "&";
+      OS << (FromValueDecl ? FromValueDecl->getName() : "(no argument)");
+      Unbold();
+    } else {
+      OS << (FromDefault ? "[(default) " : "[");
+      Bold();
+      if (FromAddressOf)
+        OS << "&";
+      OS << (FromValueDecl ? FromValueDecl->getName() : "(no argument)");
+      Unbold();
+      OS << " != " << (ToDefault ? "(default) " : "");
+      Bold();
+      if (ToAddressOf)
+        OS << "&";
+      OS << (ToValueDecl ? ToValueDecl->getName() : "(no argument)");
+      Unbold();
+      OS << ']';
+    }
+
+  }
+
+  // Prints the appropriate placeholder for elided template arguments.
+  void PrintElideArgs(unsigned NumElideArgs, unsigned Indent) {
+    if (PrintTree) {
+      OS << '\n';
+      for (unsigned i = 0; i < Indent; ++i)
+        OS << " ";
+    }
+    if (NumElideArgs == 0) return;
+    if (NumElideArgs == 1)
+      OS << "[...]";
+    else
+      OS << "[" << NumElideArgs << " * ...]";
+  }
+
+  // Prints and highlights differences in Qualifiers.
+  void PrintQualifiers(Qualifiers FromQual, Qualifiers ToQual) {
+    // Both types have no qualifiers
+    if (FromQual.empty() && ToQual.empty())
+      return;
+
+    // Both types have same qualifiers
+    if (FromQual == ToQual) {
+      PrintQualifier(FromQual, /*ApplyBold*/false);
+      return;
+    }
+
+    // Find common qualifiers and strip them from FromQual and ToQual.
+    Qualifiers CommonQual = Qualifiers::removeCommonQualifiers(FromQual,
+                                                               ToQual);
+
+    // The qualifiers are printed before the template name.
+    // Inline printing:
+    // The common qualifiers are printed.  Then, qualifiers only in this type
+    // are printed and highlighted.  Finally, qualifiers only in the other
+    // type are printed and highlighted inside parentheses after "missing".
+    // Tree printing:
+    // Qualifiers are printed next to each other, inside brackets, and
+    // separated by "!=".  The printing order is:
+    // common qualifiers, highlighted from qualifiers, "!=",
+    // common qualifiers, highlighted to qualifiers
+    if (PrintTree) {
+      OS << "[";
+      if (CommonQual.empty() && FromQual.empty()) {
+        Bold();
+        OS << "(no qualifiers) ";
+        Unbold();
+      } else {
+        PrintQualifier(CommonQual, /*ApplyBold*/false);
+        PrintQualifier(FromQual, /*ApplyBold*/true);
+      }
+      OS << "!= ";
+      if (CommonQual.empty() && ToQual.empty()) {
+        Bold();
+        OS << "(no qualifiers)";
+        Unbold();
+      } else {
+        PrintQualifier(CommonQual, /*ApplyBold*/false,
+                       /*appendSpaceIfNonEmpty*/!ToQual.empty());
+        PrintQualifier(ToQual, /*ApplyBold*/true,
+                       /*appendSpaceIfNonEmpty*/false);
+      }
+      OS << "] ";
+    } else {
+      PrintQualifier(CommonQual, /*ApplyBold*/false);
+      PrintQualifier(FromQual, /*ApplyBold*/true);
+    }
+  }
+
+  /// PrintQualifier - Prints a single qualifier set, optionally bolded.
+  void PrintQualifier(Qualifiers Q, bool ApplyBold,
+                      bool AppendSpaceIfNonEmpty = true) {
+    if (Q.empty()) return;
+    if (ApplyBold) Bold();
+    Q.print(OS, Policy, AppendSpaceIfNonEmpty);
+    if (ApplyBold) Unbold();
+  }
+
+public:
+
+  TemplateDiff(raw_ostream &OS, ASTContext &Context, QualType FromType,
+               QualType ToType, bool PrintTree, bool PrintFromType,
+               bool ElideType, bool ShowColor)
+    : Context(Context),
+      Policy(Context.getLangOpts()),
+      ElideType(ElideType),
+      PrintTree(PrintTree),
+      ShowColor(ShowColor),
+      // When printing a single type, the FromType is the one printed.
+      FromType(PrintFromType ? FromType : ToType),
+      ToType(PrintFromType ? ToType : FromType),
+      OS(OS),
+      IsBold(false) {
+  }
+
+  /// DiffTemplate - Start the template type diffing.
+  void DiffTemplate() {
+    Qualifiers FromQual = FromType.getQualifiers(),
+               ToQual = ToType.getQualifiers();
+
+    const TemplateSpecializationType *FromOrigTST =
+        GetTemplateSpecializationType(Context, FromType);
+    const TemplateSpecializationType *ToOrigTST =
+        GetTemplateSpecializationType(Context, ToType);
+
+    // Only checking templates.
+    if (!FromOrigTST || !ToOrigTST)
+      return;
+
+    // Different base templates.
+    if (!hasSameTemplate(FromOrigTST, ToOrigTST)) {
+      return;
+    }
+
+    FromQual -= QualType(FromOrigTST, 0).getQualifiers();
+    ToQual -= QualType(ToOrigTST, 0).getQualifiers();
+    Tree.SetNode(FromType, ToType);
+    Tree.SetNode(FromQual, ToQual);
+    Tree.SetKind(DiffTree::Template);
+
+    // Same base template, but different arguments.
+    Tree.SetNode(FromOrigTST->getTemplateName().getAsTemplateDecl(),
+                 ToOrigTST->getTemplateName().getAsTemplateDecl());
+
+    DiffTemplate(FromOrigTST, ToOrigTST);
+  }
+
+  /// Emit - When the two types given are templated types with the same
+  /// base template, a string representation of the type difference will be
+  /// emitted to the stream and true returned.  Otherwise, return false.
+  bool Emit() {
+    Tree.StartTraverse();
+    if (Tree.Empty())
+      return false;
+
+    TreeToString();
+    assert(!IsBold && "Bold is applied to end of string.");
+    return true;
+  }
+}; // end class TemplateDiff
+} // end namespace
+
+/// FormatTemplateTypeDiff - A helper static function to start the template
+/// diff and return the properly formatted string.  Returns true if the diff
+/// is successful.
+static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
+                                   QualType ToType, bool PrintTree,
+                                   bool PrintFromType, bool ElideType,
+                                   bool ShowColors, raw_ostream &OS) {
+  if (PrintTree)
+    PrintFromType = true;
+  TemplateDiff TD(OS, Context, FromType, ToType, PrintTree, PrintFromType,
+                  ElideType, ShowColors);
+  TD.DiffTemplate();
+  return TD.Emit();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
new file mode 100644
index 000000000000..2f402559f4da
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
@@ -0,0 +1,2174 @@
+//===--- ASTDumper.cpp - Dumping implementation for ASTs ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file implements the AST dump methods, which dump out the +// AST in a form that exposes type details and other fields. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CommentVisitor.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclLookups.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclVisitor.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/Module.h" +#include "clang/Basic/SourceManager.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; +using namespace clang::comments; + +//===----------------------------------------------------------------------===// +// ASTDumper Visitor +//===----------------------------------------------------------------------===// + +namespace { + // Colors used for various parts of the AST dump + + struct TerminalColor { + raw_ostream::Colors Color; + bool Bold; + }; + + // Decl kind names (VarDecl, FunctionDecl, etc) + static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true }; + // Attr names (CleanupAttr, GuardedByAttr, etc) + static const TerminalColor AttrColor = { raw_ostream::BLUE, true }; + // Statement names (DeclStmt, ImplicitCastExpr, etc) + static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true }; + // Comment names (FullComment, ParagraphComment, TextComment, etc) + static const TerminalColor CommentColor = { raw_ostream::YELLOW, true }; + + // Type names (int, float, etc, plus user defined types) + static const TerminalColor TypeColor = { raw_ostream::GREEN, false }; + + // Pointer address + static const TerminalColor AddressColor = { raw_ostream::YELLOW, false }; + // Source locations + static const TerminalColor LocationColor = { raw_ostream::YELLOW, false }; + + // lvalue/xvalue + static const TerminalColor ValueKindColor 
= { raw_ostream::CYAN, false }; + // bitfield/objcproperty/objcsubscript/vectorcomponent + static const TerminalColor ObjectKindColor = { raw_ostream::CYAN, false }; + + // Null statements + static const TerminalColor NullColor = { raw_ostream::BLUE, false }; + + // Undeserialized entities + static const TerminalColor UndeserializedColor = { raw_ostream::GREEN, true }; + + // CastKind from CastExpr's + static const TerminalColor CastColor = { raw_ostream::RED, false }; + + // Value of the statement + static const TerminalColor ValueColor = { raw_ostream::CYAN, true }; + // Decl names + static const TerminalColor DeclNameColor = { raw_ostream::CYAN, true }; + + // Indents ( `, -. | ) + static const TerminalColor IndentColor = { raw_ostream::BLUE, false }; + + class ASTDumper + : public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>, + public ConstCommentVisitor<ASTDumper> { + raw_ostream &OS; + const CommandTraits *Traits; + const SourceManager *SM; + bool IsFirstLine; + + // Indicates whether more child are expected at the current tree depth + enum IndentType { IT_Child, IT_LastChild }; + + /// Indents[i] indicates if another child exists at level i. + /// Used by Indent() to print the tree structure. + llvm::SmallVector<IndentType, 32> Indents; + + /// Indicates that more children will be needed at this indent level. + /// If true, prevents lastChild() from marking the node as the last child. + /// This is used when there are multiple collections of children to be + /// dumped as well as during conditional node dumping. + bool MoreChildren; + + /// Keep track of the last location we print out so that we can + /// print out deltas from then on out. + const char *LastLocFilename; + unsigned LastLocLine; + + /// The \c FullComment parent of the comment being dumped. 
+ const FullComment *FC; + + bool ShowColors; + + class IndentScope { + ASTDumper &Dumper; + // Preserve the Dumper's MoreChildren value from the previous IndentScope + bool MoreChildren; + public: + IndentScope(ASTDumper &Dumper) : Dumper(Dumper) { + MoreChildren = Dumper.hasMoreChildren(); + Dumper.setMoreChildren(false); + Dumper.indent(); + } + ~IndentScope() { + Dumper.setMoreChildren(MoreChildren); + Dumper.unindent(); + } + }; + + class ColorScope { + ASTDumper &Dumper; + public: + ColorScope(ASTDumper &Dumper, TerminalColor Color) + : Dumper(Dumper) { + if (Dumper.ShowColors) + Dumper.OS.changeColor(Color.Color, Color.Bold); + } + ~ColorScope() { + if (Dumper.ShowColors) + Dumper.OS.resetColor(); + } + }; + + public: + ASTDumper(raw_ostream &OS, const CommandTraits *Traits, + const SourceManager *SM) + : OS(OS), Traits(Traits), SM(SM), IsFirstLine(true), MoreChildren(false), + LastLocFilename(""), LastLocLine(~0U), FC(0), + ShowColors(SM && SM->getDiagnostics().getShowColors()) { } + + ASTDumper(raw_ostream &OS, const CommandTraits *Traits, + const SourceManager *SM, bool ShowColors) + : OS(OS), Traits(Traits), SM(SM), IsFirstLine(true), MoreChildren(false), + LastLocFilename(""), LastLocLine(~0U), + ShowColors(ShowColors) { } + + ~ASTDumper() { + OS << "\n"; + } + + void dumpDecl(const Decl *D); + void dumpStmt(const Stmt *S); + void dumpFullComment(const FullComment *C); + + // Formatting + void indent(); + void unindent(); + void lastChild(); + bool hasMoreChildren(); + void setMoreChildren(bool Value); + + // Utilities + void dumpPointer(const void *Ptr); + void dumpSourceRange(SourceRange R); + void dumpLocation(SourceLocation Loc); + void dumpBareType(QualType T); + void dumpType(QualType T); + void dumpBareDeclRef(const Decl *Node); + void dumpDeclRef(const Decl *Node, const char *Label = 0); + void dumpName(const NamedDecl *D); + bool hasNodes(const DeclContext *DC); + void dumpDeclContext(const DeclContext *DC); + void dumpLookups(const DeclContext 
*DC); + void dumpAttr(const Attr *A); + + // C++ Utilities + void dumpAccessSpecifier(AccessSpecifier AS); + void dumpCXXCtorInitializer(const CXXCtorInitializer *Init); + void dumpTemplateParameters(const TemplateParameterList *TPL); + void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI); + void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A); + void dumpTemplateArgumentList(const TemplateArgumentList &TAL); + void dumpTemplateArgument(const TemplateArgument &A, + SourceRange R = SourceRange()); + + // Decls + void VisitLabelDecl(const LabelDecl *D); + void VisitTypedefDecl(const TypedefDecl *D); + void VisitEnumDecl(const EnumDecl *D); + void VisitRecordDecl(const RecordDecl *D); + void VisitEnumConstantDecl(const EnumConstantDecl *D); + void VisitIndirectFieldDecl(const IndirectFieldDecl *D); + void VisitFunctionDecl(const FunctionDecl *D); + void VisitFieldDecl(const FieldDecl *D); + void VisitVarDecl(const VarDecl *D); + void VisitFileScopeAsmDecl(const FileScopeAsmDecl *D); + void VisitImportDecl(const ImportDecl *D); + + // C++ Decls + void VisitNamespaceDecl(const NamespaceDecl *D); + void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D); + void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D); + void VisitTypeAliasDecl(const TypeAliasDecl *D); + void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D); + void VisitCXXRecordDecl(const CXXRecordDecl *D); + void VisitStaticAssertDecl(const StaticAssertDecl *D); + void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D); + void VisitClassTemplateDecl(const ClassTemplateDecl *D); + void VisitClassTemplateSpecializationDecl( + const ClassTemplateSpecializationDecl *D); + void VisitClassTemplatePartialSpecializationDecl( + const ClassTemplatePartialSpecializationDecl *D); + void VisitClassScopeFunctionSpecializationDecl( + const ClassScopeFunctionSpecializationDecl *D); + void VisitVarTemplateDecl(const VarTemplateDecl *D); + void VisitVarTemplateSpecializationDecl( + 
const VarTemplateSpecializationDecl *D); + void VisitVarTemplatePartialSpecializationDecl( + const VarTemplatePartialSpecializationDecl *D); + void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D); + void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D); + void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D); + void VisitUsingDecl(const UsingDecl *D); + void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D); + void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D); + void VisitUsingShadowDecl(const UsingShadowDecl *D); + void VisitLinkageSpecDecl(const LinkageSpecDecl *D); + void VisitAccessSpecDecl(const AccessSpecDecl *D); + void VisitFriendDecl(const FriendDecl *D); + + // ObjC Decls + void VisitObjCIvarDecl(const ObjCIvarDecl *D); + void VisitObjCMethodDecl(const ObjCMethodDecl *D); + void VisitObjCCategoryDecl(const ObjCCategoryDecl *D); + void VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D); + void VisitObjCProtocolDecl(const ObjCProtocolDecl *D); + void VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D); + void VisitObjCImplementationDecl(const ObjCImplementationDecl *D); + void VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D); + void VisitObjCPropertyDecl(const ObjCPropertyDecl *D); + void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D); + void VisitBlockDecl(const BlockDecl *D); + + // Stmts. 
+ void VisitStmt(const Stmt *Node); + void VisitDeclStmt(const DeclStmt *Node); + void VisitAttributedStmt(const AttributedStmt *Node); + void VisitLabelStmt(const LabelStmt *Node); + void VisitGotoStmt(const GotoStmt *Node); + void VisitCXXCatchStmt(const CXXCatchStmt *Node); + + // Exprs + void VisitExpr(const Expr *Node); + void VisitCastExpr(const CastExpr *Node); + void VisitDeclRefExpr(const DeclRefExpr *Node); + void VisitPredefinedExpr(const PredefinedExpr *Node); + void VisitCharacterLiteral(const CharacterLiteral *Node); + void VisitIntegerLiteral(const IntegerLiteral *Node); + void VisitFloatingLiteral(const FloatingLiteral *Node); + void VisitStringLiteral(const StringLiteral *Str); + void VisitUnaryOperator(const UnaryOperator *Node); + void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node); + void VisitMemberExpr(const MemberExpr *Node); + void VisitExtVectorElementExpr(const ExtVectorElementExpr *Node); + void VisitBinaryOperator(const BinaryOperator *Node); + void VisitCompoundAssignOperator(const CompoundAssignOperator *Node); + void VisitAddrLabelExpr(const AddrLabelExpr *Node); + void VisitBlockExpr(const BlockExpr *Node); + void VisitOpaqueValueExpr(const OpaqueValueExpr *Node); + + // C++ + void VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node); + void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node); + void VisitCXXThisExpr(const CXXThisExpr *Node); + void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node); + void VisitCXXConstructExpr(const CXXConstructExpr *Node); + void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node); + void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node); + void VisitExprWithCleanups(const ExprWithCleanups *Node); + void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node); + void dumpCXXTemporary(const CXXTemporary *Temporary); + void VisitLambdaExpr(const LambdaExpr *Node) { + VisitExpr(Node); + dumpDecl(Node->getLambdaClass()); + } + + // ObjC 
+ void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node); + void VisitObjCEncodeExpr(const ObjCEncodeExpr *Node); + void VisitObjCMessageExpr(const ObjCMessageExpr *Node); + void VisitObjCBoxedExpr(const ObjCBoxedExpr *Node); + void VisitObjCSelectorExpr(const ObjCSelectorExpr *Node); + void VisitObjCProtocolExpr(const ObjCProtocolExpr *Node); + void VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node); + void VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node); + void VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node); + void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node); + + // Comments. + const char *getCommandName(unsigned CommandID); + void dumpComment(const Comment *C); + + // Inline comments. + void visitTextComment(const TextComment *C); + void visitInlineCommandComment(const InlineCommandComment *C); + void visitHTMLStartTagComment(const HTMLStartTagComment *C); + void visitHTMLEndTagComment(const HTMLEndTagComment *C); + + // Block comments. + void visitBlockCommandComment(const BlockCommandComment *C); + void visitParamCommandComment(const ParamCommandComment *C); + void visitTParamCommandComment(const TParamCommandComment *C); + void visitVerbatimBlockComment(const VerbatimBlockComment *C); + void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C); + void visitVerbatimLineComment(const VerbatimLineComment *C); + }; +} + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +// Print out the appropriate tree structure using the Indents vector. +// Example of tree and the Indents vector at each level. 
// A { }
// |-B { IT_Child }
// | `-C { IT_Child, IT_LastChild }
// `-D { IT_LastChild }
//   |-E { IT_LastChild, IT_Child }
//   `-F { IT_LastChild, IT_LastChild }
// Type          non-last element, last element
// IT_Child      "| "              "|-"
// IT_LastChild  "  "              "`-"
// Starts a new output line, draws the tree prefix for every enclosing level
// recorded in Indents, then pushes a new IT_Child level for this node.
void ASTDumper::indent() {
  if (IsFirstLine)
    IsFirstLine = false;
  else
    OS << "\n";

  ColorScope Color(*this, IndentColor);
  for (SmallVectorImpl<IndentType>::const_iterator I = Indents.begin(),
                                                   E = Indents.end();
       I != E; ++I) {
    switch (*I) {
    case IT_Child:
      // The innermost (last) level draws the branch itself; outer levels draw
      // the continuation bar.
      if (I == E - 1)
        OS << "|-";
      else
        OS << "| ";
      continue;
    case IT_LastChild:
      if (I == E - 1)
        OS << "`-";
      else
        OS << "  ";
      continue;
    }
    llvm_unreachable("Invalid IndentType");
  }
  Indents.push_back(IT_Child);
}

// Pops the level pushed by indent(); paired via IndentScope.
void ASTDumper::unindent() {
  Indents.pop_back();
}

// Call before each potential last child node is to be dumped. If MoreChildren
// is false, then this is the last child, otherwise treat as a regular node.
void ASTDumper::lastChild() {
  if (!hasMoreChildren())
    Indents.back() = IT_LastChild;
}

// MoreChildren should be set before calling another function that may print
// additional nodes to prevent conflicting final child nodes.
bool ASTDumper::hasMoreChildren() {
  return MoreChildren;
}

void ASTDumper::setMoreChildren(bool Value) {
  MoreChildren = Value;
}

// Prints " <addr>" in the address color.
void ASTDumper::dumpPointer(const void *Ptr) {
  ColorScope Color(*this, AddressColor);
  OS << ' ' << Ptr;
}

// Prints a source location, omitting whatever parts (filename, line) are
// unchanged since the last location printed.
// NOTE(review): unlike dumpSourceRange, this dereferences SM without a null
// check; presumably all callers guard on SM first -- confirm before reuse.
void ASTDumper::dumpLocation(SourceLocation Loc) {
  ColorScope Color(*this, LocationColor);
  SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);

  // The general format we print out is filename:line:col, but we drop pieces
  // that haven't changed since the last loc printed.
  PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);

  if (PLoc.isInvalid()) {
    OS << "<invalid sloc>";
    return;
  }

  if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
    // New file: print the full filename:line:col and remember both parts.
    OS << PLoc.getFilename() << ':' << PLoc.getLine()
       << ':' << PLoc.getColumn();
    LastLocFilename = PLoc.getFilename();
    LastLocLine = PLoc.getLine();
  } else if (PLoc.getLine() != LastLocLine) {
    // Same file, new line: print "line:<line>:<col>".
    OS << "line" << ':' << PLoc.getLine()
       << ':' << PLoc.getColumn();
    LastLocLine = PLoc.getLine();
  } else {
    // Same file and line: only the column changed.
    OS << "col" << ':' << PLoc.getColumn();
  }
}

// Prints " <begin[, end]>" for a source range; collapses to a single location
// when begin == end.
void ASTDumper::dumpSourceRange(SourceRange R) {
  // Can't translate locations if a SourceManager isn't available.
  if (!SM)
    return;

  OS << " <";
  dumpLocation(R.getBegin());
  if (R.getBegin() != R.getEnd()) {
    OS << ", ";
    dumpLocation(R.getEnd());
  }
  OS << ">";

  // <t2.c:123:421[blah], t2.c:412:321>

}

// Prints a quoted type name, plus ":'desugared'" when the (shallow)
// desugared spelling differs from the sugared one.
void ASTDumper::dumpBareType(QualType T) {
  ColorScope Color(*this, TypeColor);

  SplitQualType T_split = T.split();
  OS << "'" << QualType::getAsString(T_split) << "'";

  if (!T.isNull()) {
    // If the type is sugared, also dump a (shallow) desugared type.
    SplitQualType D_split = T.getSplitDesugaredType();
    if (T_split != D_split)
      OS << ":'" << QualType::getAsString(D_split) << "'";
  }
}

// Like dumpBareType but with a leading space separator.
void ASTDumper::dumpType(QualType T) {
  OS << ' ';
  dumpBareType(T);
}

// Prints "<KindName> <addr> ['name'] [type]" for a decl, without starting a
// new tree node.
void ASTDumper::dumpBareDeclRef(const Decl *D) {
  {
    ColorScope Color(*this, DeclKindNameColor);
    OS << D->getDeclKindName();
  }
  dumpPointer(D);

  if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
    ColorScope Color(*this, DeclNameColor);
    OS << " '" << ND->getDeclName() << '\'';
  }

  if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
    dumpType(VD->getType());
}

// Dumps a decl reference as its own child node, with an optional label
// prefix. Null decls are silently skipped.
void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
  if (!D)
    return;

  IndentScope Indent(*this);
  if (Label)
    OS << Label << ' ';
  dumpBareDeclRef(D);
}

// Prints " name" for named decls; anonymous decls print nothing.
void ASTDumper::dumpName(const NamedDecl *ND) {
  if (ND->getDeclName()) {
    ColorScope Color(*this, DeclNameColor);
    OS << ' ' << ND->getNameAsString();
  }
}

// Whether dumping DC would produce any child nodes (already-deserialized
// decls, or external storage that would print an <undeserialized> marker).
bool ASTDumper::hasNodes(const DeclContext *DC) {
  if (!DC)
    return false;

  return DC->hasExternalLexicalStorage() ||
         DC->noload_decls_begin() != DC->noload_decls_end();
}

// Dumps each already-deserialized decl in DC as a child node; uses the
// noload iterators so dumping never triggers deserialization, and prints an
// <undeserialized declarations> placeholder when external storage exists.
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
  if (!DC)
    return;
  bool HasUndeserializedDecls = DC->hasExternalLexicalStorage();
  for (DeclContext::decl_iterator I = DC->noload_decls_begin(),
                                  E = DC->noload_decls_end();
       I != E; ++I) {
    DeclContext::decl_iterator Next = I;
    ++Next;
    // Only the final printed node may be marked last; the placeholder below
    // takes that role when it will be printed.
    if (Next == E && !HasUndeserializedDecls)
      lastChild();
    dumpDecl(*I);
  }
  if (HasUndeserializedDecls) {
    lastChild();
    IndentScope Indent(*this);
    ColorScope Color(*this, UndeserializedColor);
    OS << "<undeserialized declarations>";
  }
}

// Dumps DC's lookup table (StoredDeclsMap): one child per DeclarationName,
// each with the decls found for that name. Lookups live on the primary
// context, which is noted when it differs from DC.
void ASTDumper::dumpLookups(const DeclContext *DC) {
  IndentScope Indent(*this);

  OS << "StoredDeclsMap ";
  dumpBareDeclRef(cast<Decl>(DC));

  const DeclContext *Primary = DC->getPrimaryContext();
  if (Primary != DC) {
    OS << " primary";
    dumpPointer(cast<Decl>(Primary));
  }

  bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();

  DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
                                    E = Primary->noload_lookups_end();
  while (I != E) {
    DeclarationName Name = I.getLookupName();
    DeclContextLookupResult R = *I++;
    if (I == E && !HasUndeserializedLookups)
      lastChild();

    IndentScope Indent(*this);
    OS << "DeclarationName ";
    {
      ColorScope Color(*this, DeclNameColor);
      OS << '\'' << Name << '\'';
    }

    for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
         RI != RE; ++RI) {
      if (RI + 1 == RE)
        lastChild();
      dumpDeclRef(*RI);
      if ((*RI)->isHidden())
        OS << " hidden";
    }
  }

  if (HasUndeserializedLookups) {
    lastChild();
    IndentScope Indent(*this);
    ColorScope Color(*this, UndeserializedColor);
    OS << "<undeserialized lookups>";
  }
}

// Dumps one attribute node: "<Kind>Attr <addr> <range>" followed by
// attribute-specific output generated into AttrDump.inc.
void ASTDumper::dumpAttr(const Attr *A) {
  IndentScope Indent(*this);
  {
    ColorScope Color(*this, AttrColor);
    // AttrList.inc expands to one case label per attribute kind.
    switch (A->getKind()) {
#define ATTR(X) case attr::X: OS << #X; break;
#include "clang/Basic/AttrList.inc"
    default: llvm_unreachable("unexpected attribute kind");
    }
    OS << "Attr";
  }
  dumpPointer(A);
  dumpSourceRange(A->getRange());
// Generated per-attribute dumping code (switches on A->getKind()).
#include "clang/AST/AttrDump.inc"
}

// Fallback overload: decls that are neither Mergeable nor Redeclarable have
// no previous-declaration info to print.
static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}

// Mergeable decls print the first declaration in their merge set.
template<typename T>
static void dumpPreviousDeclImpl(raw_ostream &OS, const Mergeable<T> *D) {
  const T *First = D->getFirstDecl();
  if (First != D)
    OS << " first " << First;
}

// Redeclarable decls print their immediate previous declaration.
template<typename T>
static void dumpPreviousDeclImpl(raw_ostream &OS, const Redeclarable<T> *D) {
  const T *Prev = D->getPreviousDecl();
  if (Prev)
    OS << " prev " << Prev;
}

/// Dump the previous declaration in the redeclaration chain for a declaration,
/// if any.
// Dispatches to the dumpPreviousDeclImpl overload matching D's dynamic kind;
// DeclNodes.inc expands to one case per concrete Decl subclass so overload
// resolution sees the most-derived static type.
static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
  switch (D->getKind()) {
#define DECL(DERIVED, BASE) \
  case Decl::DERIVED: \
    return dumpPreviousDeclImpl(OS, cast<DERIVED##Decl>(D));
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  }
  llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
}

//===----------------------------------------------------------------------===//
//  C++ Utilities
//===----------------------------------------------------------------------===//

// Prints the access specifier keyword; AS_none prints nothing.
void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
  switch (AS) {
  case AS_none:
    break;
  case AS_public:
    OS << "public";
    break;
  case AS_protected:
    OS << "protected";
    break;
  case AS_private:
    OS << "private";
    break;
  }
}

// Dumps a constructor initializer node: the member (or base class type)
// being initialized, followed by the initializer expression.
void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
  IndentScope Indent(*this);
  OS << "CXXCtorInitializer";
  if (Init->isAnyMemberInitializer()) {
    OS << ' ';
    dumpBareDeclRef(Init->getAnyMember());
  } else {
    dumpType(QualType(Init->getBaseClass(), 0));
  }
  dumpStmt(Init->getInit());
}

// Dumps each template parameter as a child node; null lists are skipped.
void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
  if (!TPL)
    return;

  for (TemplateParameterList::const_iterator I = TPL->begin(), E = TPL->end();
       I != E; ++I)
    dumpDecl(*I);
}

// Dumps explicitly-written template arguments (with source locations).
void ASTDumper::dumpTemplateArgumentListInfo(
    const TemplateArgumentListInfo &TALI) {
  for (unsigned i = 0, e = TALI.size(); i < e; ++i) {
    if (i + 1 == e)
      lastChild();
    dumpTemplateArgumentLoc(TALI[i]);
  }
}

// Dumps one written template argument, forwarding its source range.
void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
  dumpTemplateArgument(A.getArgument(), A.getSourceRange());
}

// Dumps a deduced/canonical template argument list (no source locations).
void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
  for (unsigned i = 0, e = TAL.size(); i < e; ++i)
    dumpTemplateArgument(TAL[i]);
}

// Dumps one template argument node; the payload printed depends on the
// argument kind (type, decl, integral value, template, expression, pack...).
void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
  IndentScope Indent(*this);
  OS << "TemplateArgument";
  if (R.isValid())
    dumpSourceRange(R);

  switch (A.getKind()) {
  case TemplateArgument::Null:
    OS << " null";
    break;
  case TemplateArgument::Type:
    OS << " type";
    lastChild();
    dumpType(A.getAsType());
    break;
  case TemplateArgument::Declaration:
    OS << " decl";
    lastChild();
    dumpDeclRef(A.getAsDecl());
    break;
  case TemplateArgument::NullPtr:
    OS << " nullptr";
    break;
  case TemplateArgument::Integral:
    OS << " integral " << A.getAsIntegral();
    break;
  case TemplateArgument::Template:
    OS << " template ";
    A.getAsTemplate().dump(OS);
    break;
  case TemplateArgument::TemplateExpansion:
    OS << " template expansion";
    A.getAsTemplateOrTemplatePattern().dump(OS);
    break;
  case TemplateArgument::Expression:
    OS << " expr";
    lastChild();
    dumpStmt(A.getAsExpr());
    break;
  case TemplateArgument::Pack:
    OS << " pack";
    // Each pack element is recursively dumped as a child argument.
    for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
         I != E; ++I) {
      if (I + 1 == E)
        lastChild();
      dumpTemplateArgument(*I);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Decl dumping methods.
+//===----------------------------------------------------------------------===// + +void ASTDumper::dumpDecl(const Decl *D) { + IndentScope Indent(*this); + + if (!D) { + ColorScope Color(*this, NullColor); + OS << "<<<NULL>>>"; + return; + } + + { + ColorScope Color(*this, DeclKindNameColor); + OS << D->getDeclKindName() << "Decl"; + } + dumpPointer(D); + if (D->getLexicalDeclContext() != D->getDeclContext()) + OS << " parent " << cast<Decl>(D->getDeclContext()); + dumpPreviousDecl(OS, D); + dumpSourceRange(D->getSourceRange()); + if (Module *M = D->getOwningModule()) + OS << " in " << M->getFullModuleName(); + if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) + if (ND->isHidden()) + OS << " hidden"; + + bool HasAttrs = D->attr_begin() != D->attr_end(); + const FullComment *Comment = + D->getASTContext().getLocalCommentForDeclUncached(D); + // Decls within functions are visited by the body + bool HasDeclContext = !isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) && + hasNodes(dyn_cast<DeclContext>(D)); + + setMoreChildren(HasAttrs || Comment || HasDeclContext); + ConstDeclVisitor<ASTDumper>::Visit(D); + + setMoreChildren(Comment || HasDeclContext); + for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpAttr(*I); + } + + setMoreChildren(HasDeclContext); + lastChild(); + dumpFullComment(Comment); + + if (D->isInvalidDecl()) + OS << " invalid"; + + setMoreChildren(false); + if (HasDeclContext) + dumpDeclContext(cast<DeclContext>(D)); +} + +void ASTDumper::VisitLabelDecl(const LabelDecl *D) { + dumpName(D); +} + +void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) { + dumpName(D); + dumpType(D->getUnderlyingType()); + if (D->isModulePrivate()) + OS << " __module_private__"; +} + +void ASTDumper::VisitEnumDecl(const EnumDecl *D) { + if (D->isScoped()) { + if (D->isScopedUsingClassTag()) + OS << " class"; + else + OS << " struct"; + } + dumpName(D); + if (D->isModulePrivate()) + OS << " 
__module_private__"; + if (D->isFixed()) + dumpType(D->getIntegerType()); +} + +void ASTDumper::VisitRecordDecl(const RecordDecl *D) { + OS << ' ' << D->getKindName(); + dumpName(D); + if (D->isModulePrivate()) + OS << " __module_private__"; + if (D->isCompleteDefinition()) + OS << " definition"; +} + +void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) { + dumpName(D); + dumpType(D->getType()); + if (const Expr *Init = D->getInitExpr()) { + lastChild(); + dumpStmt(Init); + } +} + +void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) { + dumpName(D); + dumpType(D->getType()); + for (IndirectFieldDecl::chain_iterator I = D->chain_begin(), + E = D->chain_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDeclRef(*I); + } +} + +void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) { + dumpName(D); + dumpType(D->getType()); + + StorageClass SC = D->getStorageClass(); + if (SC != SC_None) + OS << ' ' << VarDecl::getStorageClassSpecifierString(SC); + if (D->isInlineSpecified()) + OS << " inline"; + if (D->isVirtualAsWritten()) + OS << " virtual"; + if (D->isModulePrivate()) + OS << " __module_private__"; + + if (D->isPure()) + OS << " pure"; + else if (D->isDeletedAsWritten()) + OS << " delete"; + + if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) { + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + switch (EPI.ExceptionSpecType) { + default: break; + case EST_Unevaluated: + OS << " noexcept-unevaluated " << EPI.ExceptionSpecDecl; + break; + case EST_Uninstantiated: + OS << " noexcept-uninstantiated " << EPI.ExceptionSpecTemplate; + break; + } + } + + bool OldMoreChildren = hasMoreChildren(); + const FunctionTemplateSpecializationInfo *FTSI = + D->getTemplateSpecializationInfo(); + bool HasTemplateSpecialization = FTSI; + + bool HasNamedDecls = D->getDeclsInPrototypeScope().begin() != + D->getDeclsInPrototypeScope().end(); + + bool HasFunctionDecls = D->param_begin() != D->param_end(); + 
+ const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D); + bool HasCtorInitializers = C && C->init_begin() != C->init_end(); + + bool HasDeclarationBody = D->doesThisDeclarationHaveABody(); + + setMoreChildren(OldMoreChildren || HasNamedDecls || HasFunctionDecls || + HasCtorInitializers || HasDeclarationBody); + if (HasTemplateSpecialization) { + lastChild(); + dumpTemplateArgumentList(*FTSI->TemplateArguments); + } + + setMoreChildren(OldMoreChildren || HasFunctionDecls || + HasCtorInitializers || HasDeclarationBody); + for (ArrayRef<NamedDecl *>::iterator + I = D->getDeclsInPrototypeScope().begin(), + E = D->getDeclsInPrototypeScope().end(); I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDecl(*I); + } + + setMoreChildren(OldMoreChildren || HasCtorInitializers || HasDeclarationBody); + for (FunctionDecl::param_const_iterator I = D->param_begin(), + E = D->param_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDecl(*I); + } + + setMoreChildren(OldMoreChildren || HasDeclarationBody); + if (HasCtorInitializers) + for (CXXConstructorDecl::init_const_iterator I = C->init_begin(), + E = C->init_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpCXXCtorInitializer(*I); + } + + setMoreChildren(OldMoreChildren); + if (HasDeclarationBody) { + lastChild(); + dumpStmt(D->getBody()); + } +} + +void ASTDumper::VisitFieldDecl(const FieldDecl *D) { + dumpName(D); + dumpType(D->getType()); + if (D->isMutable()) + OS << " mutable"; + if (D->isModulePrivate()) + OS << " __module_private__"; + + bool OldMoreChildren = hasMoreChildren(); + bool IsBitField = D->isBitField(); + Expr *Init = D->getInClassInitializer(); + bool HasInit = Init; + + setMoreChildren(OldMoreChildren || HasInit); + if (IsBitField) { + lastChild(); + dumpStmt(D->getBitWidth()); + } + setMoreChildren(OldMoreChildren); + if (HasInit) { + lastChild(); + dumpStmt(Init); + } +} + +void ASTDumper::VisitVarDecl(const VarDecl *D) { + dumpName(D); + dumpType(D->getType()); + 
StorageClass SC = D->getStorageClass(); + if (SC != SC_None) + OS << ' ' << VarDecl::getStorageClassSpecifierString(SC); + switch (D->getTLSKind()) { + case VarDecl::TLS_None: break; + case VarDecl::TLS_Static: OS << " tls"; break; + case VarDecl::TLS_Dynamic: OS << " tls_dynamic"; break; + } + if (D->isModulePrivate()) + OS << " __module_private__"; + if (D->isNRVOVariable()) + OS << " nrvo"; + if (D->hasInit()) { + lastChild(); + dumpStmt(D->getInit()); + } +} + +void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) { + lastChild(); + dumpStmt(D->getAsmString()); +} + +void ASTDumper::VisitImportDecl(const ImportDecl *D) { + OS << ' ' << D->getImportedModule()->getFullModuleName(); +} + +//===----------------------------------------------------------------------===// +// C++ Declarations +//===----------------------------------------------------------------------===// + +void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) { + dumpName(D); + if (D->isInline()) + OS << " inline"; + if (!D->isOriginalNamespace()) + dumpDeclRef(D->getOriginalNamespace(), "original"); +} + +void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) { + OS << ' '; + dumpBareDeclRef(D->getNominatedNamespace()); +} + +void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) { + dumpName(D); + dumpDeclRef(D->getAliasedNamespace()); +} + +void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) { + dumpName(D); + dumpType(D->getUnderlyingType()); +} + +void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) { + dumpName(D); + dumpTemplateParameters(D->getTemplateParameters()); + dumpDecl(D->getTemplatedDecl()); +} + +void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) { + VisitRecordDecl(D); + if (!D->isCompleteDefinition()) + return; + + for (CXXRecordDecl::base_class_const_iterator I = D->bases_begin(), + E = D->bases_end(); + I != E; ++I) { + IndentScope Indent(*this); + if (I->isVirtual()) + OS << "virtual "; + 
dumpAccessSpecifier(I->getAccessSpecifier()); + dumpType(I->getType()); + if (I->isPackExpansion()) + OS << "..."; + } +} + +void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) { + dumpStmt(D->getAssertExpr()); + lastChild(); + dumpStmt(D->getMessage()); +} + +void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) { + dumpName(D); + dumpTemplateParameters(D->getTemplateParameters()); + dumpDecl(D->getTemplatedDecl()); + for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), + E = D->spec_end(); + I != E; ++I) { + FunctionTemplateDecl::spec_iterator Next = I; + ++Next; + if (Next == E) + lastChild(); + switch (I->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ImplicitInstantiation: + case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDefinition: + if (D == D->getCanonicalDecl()) + dumpDecl(*I); + else + dumpDeclRef(*I); + break; + case TSK_ExplicitSpecialization: + dumpDeclRef(*I); + break; + } + } +} + +void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) { + dumpName(D); + dumpTemplateParameters(D->getTemplateParameters()); + + ClassTemplateDecl::spec_iterator I = D->spec_begin(); + ClassTemplateDecl::spec_iterator E = D->spec_end(); + if (I == E) + lastChild(); + dumpDecl(D->getTemplatedDecl()); + for (; I != E; ++I) { + ClassTemplateDecl::spec_iterator Next = I; + ++Next; + if (Next == E) + lastChild(); + switch (I->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ImplicitInstantiation: + if (D == D->getCanonicalDecl()) + dumpDecl(*I); + else + dumpDeclRef(*I); + break; + case TSK_ExplicitSpecialization: + case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDefinition: + dumpDeclRef(*I); + break; + } + } +} + +void ASTDumper::VisitClassTemplateSpecializationDecl( + const ClassTemplateSpecializationDecl *D) { + VisitCXXRecordDecl(D); + dumpTemplateArgumentList(D->getTemplateArgs()); +} + +void 
ASTDumper::VisitClassTemplatePartialSpecializationDecl( + const ClassTemplatePartialSpecializationDecl *D) { + VisitClassTemplateSpecializationDecl(D); + dumpTemplateParameters(D->getTemplateParameters()); +} + +void ASTDumper::VisitClassScopeFunctionSpecializationDecl( + const ClassScopeFunctionSpecializationDecl *D) { + dumpDeclRef(D->getSpecialization()); + if (D->hasExplicitTemplateArgs()) + dumpTemplateArgumentListInfo(D->templateArgs()); +} + +void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) { + dumpName(D); + dumpTemplateParameters(D->getTemplateParameters()); + + VarTemplateDecl::spec_iterator I = D->spec_begin(); + VarTemplateDecl::spec_iterator E = D->spec_end(); + if (I == E) + lastChild(); + dumpDecl(D->getTemplatedDecl()); + for (; I != E; ++I) { + VarTemplateDecl::spec_iterator Next = I; + ++Next; + if (Next == E) + lastChild(); + switch (I->getTemplateSpecializationKind()) { + case TSK_Undeclared: + case TSK_ImplicitInstantiation: + if (D == D->getCanonicalDecl()) + dumpDecl(*I); + else + dumpDeclRef(*I); + break; + case TSK_ExplicitSpecialization: + case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDefinition: + dumpDeclRef(*I); + break; + } + } +} + +void ASTDumper::VisitVarTemplateSpecializationDecl( + const VarTemplateSpecializationDecl *D) { + dumpTemplateArgumentList(D->getTemplateArgs()); + VisitVarDecl(D); +} + +void ASTDumper::VisitVarTemplatePartialSpecializationDecl( + const VarTemplatePartialSpecializationDecl *D) { + dumpTemplateParameters(D->getTemplateParameters()); + VisitVarTemplateSpecializationDecl(D); +} + +void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) { + if (D->wasDeclaredWithTypename()) + OS << " typename"; + else + OS << " class"; + if (D->isParameterPack()) + OS << " ..."; + dumpName(D); + if (D->hasDefaultArgument()) + dumpType(D->getDefaultArgument()); +} + +void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) { + 
dumpType(D->getType()); + if (D->isParameterPack()) + OS << " ..."; + dumpName(D); + if (D->hasDefaultArgument()) + dumpStmt(D->getDefaultArgument()); +} + +void ASTDumper::VisitTemplateTemplateParmDecl( + const TemplateTemplateParmDecl *D) { + if (D->isParameterPack()) + OS << " ..."; + dumpName(D); + dumpTemplateParameters(D->getTemplateParameters()); + if (D->hasDefaultArgument()) + dumpTemplateArgumentLoc(D->getDefaultArgument()); +} + +void ASTDumper::VisitUsingDecl(const UsingDecl *D) { + OS << ' '; + D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy()); + OS << D->getNameAsString(); +} + +void ASTDumper::VisitUnresolvedUsingTypenameDecl( + const UnresolvedUsingTypenameDecl *D) { + OS << ' '; + D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy()); + OS << D->getNameAsString(); +} + +void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) { + OS << ' '; + D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy()); + OS << D->getNameAsString(); + dumpType(D->getType()); +} + +void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) { + OS << ' '; + dumpBareDeclRef(D->getTargetDecl()); +} + +void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) { + switch (D->getLanguage()) { + case LinkageSpecDecl::lang_c: OS << " C"; break; + case LinkageSpecDecl::lang_cxx: OS << " C++"; break; + } +} + +void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) { + OS << ' '; + dumpAccessSpecifier(D->getAccess()); +} + +void ASTDumper::VisitFriendDecl(const FriendDecl *D) { + lastChild(); + if (TypeSourceInfo *T = D->getFriendType()) + dumpType(T->getType()); + else + dumpDecl(D->getFriendDecl()); +} + +//===----------------------------------------------------------------------===// +// Obj-C Declarations +//===----------------------------------------------------------------------===// + +void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) { + dumpName(D); + dumpType(D->getType()); + if 
(D->getSynthesize()) + OS << " synthesize"; + if (D->getBackingIvarReferencedInAccessor()) + OS << " BackingIvarReferencedInAccessor"; + + switch (D->getAccessControl()) { + case ObjCIvarDecl::None: + OS << " none"; + break; + case ObjCIvarDecl::Private: + OS << " private"; + break; + case ObjCIvarDecl::Protected: + OS << " protected"; + break; + case ObjCIvarDecl::Public: + OS << " public"; + break; + case ObjCIvarDecl::Package: + OS << " package"; + break; + } +} + +void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) { + if (D->isInstanceMethod()) + OS << " -"; + else + OS << " +"; + dumpName(D); + dumpType(D->getResultType()); + + bool OldMoreChildren = hasMoreChildren(); + bool IsVariadic = D->isVariadic(); + bool HasBody = D->hasBody(); + + setMoreChildren(OldMoreChildren || IsVariadic || HasBody); + if (D->isThisDeclarationADefinition()) { + lastChild(); + dumpDeclContext(D); + } else { + for (ObjCMethodDecl::param_const_iterator I = D->param_begin(), + E = D->param_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDecl(*I); + } + } + + setMoreChildren(OldMoreChildren || HasBody); + if (IsVariadic) { + lastChild(); + IndentScope Indent(*this); + OS << "..."; + } + + setMoreChildren(OldMoreChildren); + if (HasBody) { + lastChild(); + dumpStmt(D->getBody()); + } +} + +void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) { + dumpName(D); + dumpDeclRef(D->getClassInterface()); + if (D->protocol_begin() == D->protocol_end()) + lastChild(); + dumpDeclRef(D->getImplementation()); + for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(), + E = D->protocol_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDeclRef(*I); + } +} + +void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) { + dumpName(D); + dumpDeclRef(D->getClassInterface()); + lastChild(); + dumpDeclRef(D->getCategoryDecl()); +} + +void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) { + dumpName(D); + for 
(ObjCProtocolDecl::protocol_iterator I = D->protocol_begin(), + E = D->protocol_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDeclRef(*I); + } +} + +void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) { + dumpName(D); + dumpDeclRef(D->getSuperClass(), "super"); + if (D->protocol_begin() == D->protocol_end()) + lastChild(); + dumpDeclRef(D->getImplementation()); + for (ObjCInterfaceDecl::protocol_iterator I = D->protocol_begin(), + E = D->protocol_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDeclRef(*I); + } +} + +void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) { + dumpName(D); + dumpDeclRef(D->getSuperClass(), "super"); + if (D->init_begin() == D->init_end()) + lastChild(); + dumpDeclRef(D->getClassInterface()); + for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(), + E = D->init_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpCXXCtorInitializer(*I); + } +} + +void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) { + dumpName(D); + lastChild(); + dumpDeclRef(D->getClassInterface()); +} + +void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) { + dumpName(D); + dumpType(D->getType()); + + if (D->getPropertyImplementation() == ObjCPropertyDecl::Required) + OS << " required"; + else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional) + OS << " optional"; + + ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes(); + if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) { + if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly) + OS << " readonly"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_assign) + OS << " assign"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite) + OS << " readwrite"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_retain) + OS << " retain"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_copy) + OS << " copy"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) + OS << " nonatomic"; + if (Attrs & 
ObjCPropertyDecl::OBJC_PR_atomic) + OS << " atomic"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_weak) + OS << " weak"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_strong) + OS << " strong"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) + OS << " unsafe_unretained"; + if (Attrs & ObjCPropertyDecl::OBJC_PR_getter) { + if (!(Attrs & ObjCPropertyDecl::OBJC_PR_setter)) + lastChild(); + dumpDeclRef(D->getGetterMethodDecl(), "getter"); + } + if (Attrs & ObjCPropertyDecl::OBJC_PR_setter) { + lastChild(); + dumpDeclRef(D->getSetterMethodDecl(), "setter"); + } + } +} + +void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) { + dumpName(D->getPropertyDecl()); + if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) + OS << " synthesize"; + else + OS << " dynamic"; + dumpDeclRef(D->getPropertyDecl()); + lastChild(); + dumpDeclRef(D->getPropertyIvarDecl()); +} + +void ASTDumper::VisitBlockDecl(const BlockDecl *D) { + for (BlockDecl::param_const_iterator I = D->param_begin(), E = D->param_end(); + I != E; ++I) + dumpDecl(*I); + + if (D->isVariadic()) { + IndentScope Indent(*this); + OS << "..."; + } + + if (D->capturesCXXThis()) { + IndentScope Indent(*this); + OS << "capture this"; + } + for (BlockDecl::capture_iterator I = D->capture_begin(), E = D->capture_end(); + I != E; ++I) { + IndentScope Indent(*this); + OS << "capture"; + if (I->isByRef()) + OS << " byref"; + if (I->isNested()) + OS << " nested"; + if (I->getVariable()) { + OS << ' '; + dumpBareDeclRef(I->getVariable()); + } + if (I->hasCopyExpr()) + dumpStmt(I->getCopyExpr()); + } + lastChild(); + dumpStmt(D->getBody()); +} + +//===----------------------------------------------------------------------===// +// Stmt dumping methods. 
+//===----------------------------------------------------------------------===// + +void ASTDumper::dumpStmt(const Stmt *S) { + IndentScope Indent(*this); + + if (!S) { + ColorScope Color(*this, NullColor); + OS << "<<<NULL>>>"; + return; + } + + if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) { + VisitDeclStmt(DS); + return; + } + + setMoreChildren(!S->children().empty()); + ConstStmtVisitor<ASTDumper>::Visit(S); + setMoreChildren(false); + for (Stmt::const_child_range CI = S->children(); CI; ++CI) { + Stmt::const_child_range Next = CI; + ++Next; + if (!Next) + lastChild(); + dumpStmt(*CI); + } +} + +void ASTDumper::VisitStmt(const Stmt *Node) { + { + ColorScope Color(*this, StmtColor); + OS << Node->getStmtClassName(); + } + dumpPointer(Node); + dumpSourceRange(Node->getSourceRange()); +} + +void ASTDumper::VisitDeclStmt(const DeclStmt *Node) { + VisitStmt(Node); + for (DeclStmt::const_decl_iterator I = Node->decl_begin(), + E = Node->decl_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpDecl(*I); + } +} + +void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) { + VisitStmt(Node); + for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(), + E = Node->getAttrs().end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpAttr(*I); + } +} + +void ASTDumper::VisitLabelStmt(const LabelStmt *Node) { + VisitStmt(Node); + OS << " '" << Node->getName() << "'"; +} + +void ASTDumper::VisitGotoStmt(const GotoStmt *Node) { + VisitStmt(Node); + OS << " '" << Node->getLabel()->getName() << "'"; + dumpPointer(Node->getLabel()); +} + +void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) { + VisitStmt(Node); + dumpDecl(Node->getExceptionDecl()); +} + +//===----------------------------------------------------------------------===// +// Expr dumping methods. 
+//===----------------------------------------------------------------------===// + +void ASTDumper::VisitExpr(const Expr *Node) { + VisitStmt(Node); + dumpType(Node->getType()); + + { + ColorScope Color(*this, ValueKindColor); + switch (Node->getValueKind()) { + case VK_RValue: + break; + case VK_LValue: + OS << " lvalue"; + break; + case VK_XValue: + OS << " xvalue"; + break; + } + } + + { + ColorScope Color(*this, ObjectKindColor); + switch (Node->getObjectKind()) { + case OK_Ordinary: + break; + case OK_BitField: + OS << " bitfield"; + break; + case OK_ObjCProperty: + OS << " objcproperty"; + break; + case OK_ObjCSubscript: + OS << " objcsubscript"; + break; + case OK_VectorComponent: + OS << " vectorcomponent"; + break; + } + } +} + +static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) { + if (Node->path_empty()) + return; + + OS << " ("; + bool First = true; + for (CastExpr::path_const_iterator I = Node->path_begin(), + E = Node->path_end(); + I != E; ++I) { + const CXXBaseSpecifier *Base = *I; + if (!First) + OS << " -> "; + + const CXXRecordDecl *RD = + cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + + if (Base->isVirtual()) + OS << "virtual "; + OS << RD->getName(); + First = false; + } + + OS << ')'; +} + +void ASTDumper::VisitCastExpr(const CastExpr *Node) { + VisitExpr(Node); + OS << " <"; + { + ColorScope Color(*this, CastColor); + OS << Node->getCastKindName(); + } + dumpBasePath(OS, Node); + OS << ">"; +} + +void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) { + VisitExpr(Node); + + OS << " "; + dumpBareDeclRef(Node->getDecl()); + if (Node->getDecl() != Node->getFoundDecl()) { + OS << " ("; + dumpBareDeclRef(Node->getFoundDecl()); + OS << ")"; + } +} + +void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) { + VisitExpr(Node); + OS << " ("; + if (!Node->requiresADL()) + OS << "no "; + OS << "ADL) = '" << Node->getName() << '\''; + + UnresolvedLookupExpr::decls_iterator + I = 
Node->decls_begin(), E = Node->decls_end(); + if (I == E) + OS << " empty"; + for (; I != E; ++I) + dumpPointer(*I); +} + +void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) { + VisitExpr(Node); + + { + ColorScope Color(*this, DeclKindNameColor); + OS << " " << Node->getDecl()->getDeclKindName() << "Decl"; + } + OS << "='" << *Node->getDecl() << "'"; + dumpPointer(Node->getDecl()); + if (Node->isFreeIvar()) + OS << " isFreeIvar"; +} + +void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) { + VisitExpr(Node); + switch (Node->getIdentType()) { + default: llvm_unreachable("unknown case"); + case PredefinedExpr::Func: OS << " __func__"; break; + case PredefinedExpr::Function: OS << " __FUNCTION__"; break; + case PredefinedExpr::FuncDName: OS << " __FUNCDNAME__"; break; + case PredefinedExpr::LFunction: OS << " L__FUNCTION__"; break; + case PredefinedExpr::PrettyFunction: OS << " __PRETTY_FUNCTION__";break; + } +} + +void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) { + VisitExpr(Node); + ColorScope Color(*this, ValueColor); + OS << " " << Node->getValue(); +} + +void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) { + VisitExpr(Node); + + bool isSigned = Node->getType()->isSignedIntegerType(); + ColorScope Color(*this, ValueColor); + OS << " " << Node->getValue().toString(10, isSigned); +} + +void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) { + VisitExpr(Node); + ColorScope Color(*this, ValueColor); + OS << " " << Node->getValueAsApproximateDouble(); +} + +void ASTDumper::VisitStringLiteral(const StringLiteral *Str) { + VisitExpr(Str); + ColorScope Color(*this, ValueColor); + OS << " "; + Str->outputString(OS); +} + +void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) { + VisitExpr(Node); + OS << " " << (Node->isPostfix() ? 
"postfix" : "prefix") + << " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'"; +} + +void ASTDumper::VisitUnaryExprOrTypeTraitExpr( + const UnaryExprOrTypeTraitExpr *Node) { + VisitExpr(Node); + switch(Node->getKind()) { + case UETT_SizeOf: + OS << " sizeof"; + break; + case UETT_AlignOf: + OS << " alignof"; + break; + case UETT_VecStep: + OS << " vec_step"; + break; + } + if (Node->isArgumentType()) + dumpType(Node->getArgumentType()); +} + +void ASTDumper::VisitMemberExpr(const MemberExpr *Node) { + VisitExpr(Node); + OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl(); + dumpPointer(Node->getMemberDecl()); +} + +void ASTDumper::VisitExtVectorElementExpr(const ExtVectorElementExpr *Node) { + VisitExpr(Node); + OS << " " << Node->getAccessor().getNameStart(); +} + +void ASTDumper::VisitBinaryOperator(const BinaryOperator *Node) { + VisitExpr(Node); + OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'"; +} + +void ASTDumper::VisitCompoundAssignOperator( + const CompoundAssignOperator *Node) { + VisitExpr(Node); + OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) + << "' ComputeLHSTy="; + dumpBareType(Node->getComputationLHSType()); + OS << " ComputeResultTy="; + dumpBareType(Node->getComputationResultType()); +} + +void ASTDumper::VisitBlockExpr(const BlockExpr *Node) { + VisitExpr(Node); + dumpDecl(Node->getBlockDecl()); +} + +void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) { + VisitExpr(Node); + + if (Expr *Source = Node->getSourceExpr()) { + lastChild(); + dumpStmt(Source); + } +} + +// GNU extensions. 
+ +void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) { + VisitExpr(Node); + OS << " " << Node->getLabel()->getName(); + dumpPointer(Node->getLabel()); +} + +//===----------------------------------------------------------------------===// +// C++ Expressions +//===----------------------------------------------------------------------===// + +void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) { + VisitExpr(Node); + OS << " " << Node->getCastName() + << "<" << Node->getTypeAsWritten().getAsString() << ">" + << " <" << Node->getCastKindName(); + dumpBasePath(OS, Node); + OS << ">"; +} + +void ASTDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) { + VisitExpr(Node); + OS << " " << (Node->getValue() ? "true" : "false"); +} + +void ASTDumper::VisitCXXThisExpr(const CXXThisExpr *Node) { + VisitExpr(Node); + OS << " this"; +} + +void ASTDumper::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node) { + VisitExpr(Node); + OS << " functional cast to " << Node->getTypeAsWritten().getAsString() + << " <" << Node->getCastKindName() << ">"; +} + +void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) { + VisitExpr(Node); + CXXConstructorDecl *Ctor = Node->getConstructor(); + dumpType(Ctor->getType()); + if (Node->isElidable()) + OS << " elidable"; + if (Node->requiresZeroInitialization()) + OS << " zeroing"; +} + +void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) { + VisitExpr(Node); + OS << " "; + dumpCXXTemporary(Node->getTemporary()); +} + +void +ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) { + VisitExpr(Node); + if (const ValueDecl *VD = Node->getExtendingDecl()) { + OS << " extended by "; + dumpBareDeclRef(VD); + } +} + +void ASTDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) { + VisitExpr(Node); + for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i) + dumpDeclRef(Node->getObject(i), "cleanup"); +} + +void 
ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) { + OS << "(CXXTemporary"; + dumpPointer(Temporary); + OS << ")"; +} + +//===----------------------------------------------------------------------===// +// Obj-C Expressions +//===----------------------------------------------------------------------===// + +void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) { + VisitExpr(Node); + OS << " selector=" << Node->getSelector().getAsString(); + switch (Node->getReceiverKind()) { + case ObjCMessageExpr::Instance: + break; + + case ObjCMessageExpr::Class: + OS << " class="; + dumpBareType(Node->getClassReceiver()); + break; + + case ObjCMessageExpr::SuperInstance: + OS << " super (instance)"; + break; + + case ObjCMessageExpr::SuperClass: + OS << " super (class)"; + break; + } +} + +void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) { + VisitExpr(Node); + OS << " selector=" << Node->getBoxingMethod()->getSelector().getAsString(); +} + +void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) { + VisitStmt(Node); + if (const VarDecl *CatchParam = Node->getCatchParamDecl()) + dumpDecl(CatchParam); + else + OS << " catch all"; +} + +void ASTDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) { + VisitExpr(Node); + dumpType(Node->getEncodedType()); +} + +void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) { + VisitExpr(Node); + + OS << " " << Node->getSelector().getAsString(); +} + +void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) { + VisitExpr(Node); + + OS << ' ' << *Node->getProtocol(); +} + +void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) { + VisitExpr(Node); + if (Node->isImplicitProperty()) { + OS << " Kind=MethodRef Getter=\""; + if (Node->getImplicitPropertyGetter()) + OS << Node->getImplicitPropertyGetter()->getSelector().getAsString(); + else + OS << "(null)"; + + OS << "\" Setter=\""; + if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter()) + OS << 
Setter->getSelector().getAsString(); + else + OS << "(null)"; + OS << "\""; + } else { + OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"'; + } + + if (Node->isSuperReceiver()) + OS << " super"; + + OS << " Messaging="; + if (Node->isMessagingGetter() && Node->isMessagingSetter()) + OS << "Getter&Setter"; + else if (Node->isMessagingGetter()) + OS << "Getter"; + else if (Node->isMessagingSetter()) + OS << "Setter"; +} + +void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) { + VisitExpr(Node); + if (Node->isArraySubscriptRefExpr()) + OS << " Kind=ArraySubscript GetterForArray=\""; + else + OS << " Kind=DictionarySubscript GetterForDictionary=\""; + if (Node->getAtIndexMethodDecl()) + OS << Node->getAtIndexMethodDecl()->getSelector().getAsString(); + else + OS << "(null)"; + + if (Node->isArraySubscriptRefExpr()) + OS << "\" SetterForArray=\""; + else + OS << "\" SetterForDictionary=\""; + if (Node->setAtIndexMethodDecl()) + OS << Node->setAtIndexMethodDecl()->getSelector().getAsString(); + else + OS << "(null)"; +} + +void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) { + VisitExpr(Node); + OS << " " << (Node->getValue() ? 
"__objc_yes" : "__objc_no"); +} + +//===----------------------------------------------------------------------===// +// Comments +//===----------------------------------------------------------------------===// + +const char *ASTDumper::getCommandName(unsigned CommandID) { + if (Traits) + return Traits->getCommandInfo(CommandID)->Name; + const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID); + if (Info) + return Info->Name; + return "<not a builtin command>"; +} + +void ASTDumper::dumpFullComment(const FullComment *C) { + if (!C) + return; + + FC = C; + dumpComment(C); + FC = 0; +} + +void ASTDumper::dumpComment(const Comment *C) { + IndentScope Indent(*this); + + if (!C) { + ColorScope Color(*this, NullColor); + OS << "<<<NULL>>>"; + return; + } + + { + ColorScope Color(*this, CommentColor); + OS << C->getCommentKindName(); + } + dumpPointer(C); + dumpSourceRange(C->getSourceRange()); + ConstCommentVisitor<ASTDumper>::visit(C); + for (Comment::child_iterator I = C->child_begin(), E = C->child_end(); + I != E; ++I) { + if (I + 1 == E) + lastChild(); + dumpComment(*I); + } +} + +void ASTDumper::visitTextComment(const TextComment *C) { + OS << " Text=\"" << C->getText() << "\""; +} + +void ASTDumper::visitInlineCommandComment(const InlineCommandComment *C) { + OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""; + switch (C->getRenderKind()) { + case InlineCommandComment::RenderNormal: + OS << " RenderNormal"; + break; + case InlineCommandComment::RenderBold: + OS << " RenderBold"; + break; + case InlineCommandComment::RenderMonospaced: + OS << " RenderMonospaced"; + break; + case InlineCommandComment::RenderEmphasized: + OS << " RenderEmphasized"; + break; + } + + for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) + OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\""; +} + +void ASTDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) { + OS << " Name=\"" << C->getTagName() << "\""; + if (C->getNumAttrs() != 0) { + OS 
<< " Attrs: "; + for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) { + const HTMLStartTagComment::Attribute &Attr = C->getAttr(i); + OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\""; + } + } + if (C->isSelfClosing()) + OS << " SelfClosing"; +} + +void ASTDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) { + OS << " Name=\"" << C->getTagName() << "\""; +} + +void ASTDumper::visitBlockCommandComment(const BlockCommandComment *C) { + OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""; + for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) + OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\""; +} + +void ASTDumper::visitParamCommandComment(const ParamCommandComment *C) { + OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection()); + + if (C->isDirectionExplicit()) + OS << " explicitly"; + else + OS << " implicitly"; + + if (C->hasParamName()) { + if (C->isParamIndexValid()) + OS << " Param=\"" << C->getParamName(FC) << "\""; + else + OS << " Param=\"" << C->getParamNameAsWritten() << "\""; + } + + if (C->isParamIndexValid()) + OS << " ParamIndex=" << C->getParamIndex(); +} + +void ASTDumper::visitTParamCommandComment(const TParamCommandComment *C) { + if (C->hasParamName()) { + if (C->isPositionValid()) + OS << " Param=\"" << C->getParamName(FC) << "\""; + else + OS << " Param=\"" << C->getParamNameAsWritten() << "\""; + } + + if (C->isPositionValid()) { + OS << " Position=<"; + for (unsigned i = 0, e = C->getDepth(); i != e; ++i) { + OS << C->getIndex(i); + if (i != e - 1) + OS << ", "; + } + OS << ">"; + } +} + +void ASTDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) { + OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"" + " CloseName=\"" << C->getCloseName() << "\""; +} + +void ASTDumper::visitVerbatimBlockLineComment( + const VerbatimBlockLineComment *C) { + OS << " Text=\"" << C->getText() << "\""; +} + +void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) { + OS 
<< " Text=\"" << C->getText() << "\""; +} + +//===----------------------------------------------------------------------===// +// Decl method implementations +//===----------------------------------------------------------------------===// + +void Decl::dump() const { + dump(llvm::errs()); +} + +void Decl::dump(raw_ostream &OS) const { + ASTDumper P(OS, &getASTContext().getCommentCommandTraits(), + &getASTContext().getSourceManager()); + P.dumpDecl(this); +} + +void Decl::dumpColor() const { + ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(), + &getASTContext().getSourceManager(), /*ShowColors*/true); + P.dumpDecl(this); +} + +void DeclContext::dumpLookups() const { + dumpLookups(llvm::errs()); +} + +void DeclContext::dumpLookups(raw_ostream &OS) const { + const DeclContext *DC = this; + while (!DC->isTranslationUnit()) + DC = DC->getParent(); + ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext(); + ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager()); + P.dumpLookups(this); +} + +//===----------------------------------------------------------------------===// +// Stmt method implementations +//===----------------------------------------------------------------------===// + +void Stmt::dump(SourceManager &SM) const { + dump(llvm::errs(), SM); +} + +void Stmt::dump(raw_ostream &OS, SourceManager &SM) const { + ASTDumper P(OS, 0, &SM); + P.dumpStmt(this); +} + +void Stmt::dump() const { + ASTDumper P(llvm::errs(), 0, 0); + P.dumpStmt(this); +} + +void Stmt::dumpColor() const { + ASTDumper P(llvm::errs(), 0, 0, /*ShowColors*/true); + P.dumpStmt(this); +} + +//===----------------------------------------------------------------------===// +// Comment method implementations +//===----------------------------------------------------------------------===// + +void Comment::dump() const { + dump(llvm::errs(), 0, 0); +} + +void Comment::dump(const ASTContext &Context) const { + dump(llvm::errs(), 
&Context.getCommentCommandTraits(), + &Context.getSourceManager()); +} + +void Comment::dump(raw_ostream &OS, const CommandTraits *Traits, + const SourceManager *SM) const { + const FullComment *FC = dyn_cast<FullComment>(this); + ASTDumper D(OS, Traits, SM); + D.dumpFullComment(FC); +} + +void Comment::dumpColor() const { + const FullComment *FC = dyn_cast<FullComment>(this); + ASTDumper D(llvm::errs(), 0, 0, /*ShowColors*/true); + D.dumpFullComment(FC); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp new file mode 100644 index 000000000000..e16015b7c4ed --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp @@ -0,0 +1,5132 @@ +//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ASTImporter class which imports AST nodes from one +// context into another context. 
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/ASTImporter.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTDiagnostic.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclVisitor.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/AST/TypeVisitor.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/SourceManager.h" +#include "llvm/Support/MemoryBuffer.h" +#include <deque> + +namespace clang { + class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>, + public DeclVisitor<ASTNodeImporter, Decl *>, + public StmtVisitor<ASTNodeImporter, Stmt *> { + ASTImporter &Importer; + + public: + explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { } + + using TypeVisitor<ASTNodeImporter, QualType>::Visit; + using DeclVisitor<ASTNodeImporter, Decl *>::Visit; + using StmtVisitor<ASTNodeImporter, Stmt *>::Visit; + + // Importing types + QualType VisitType(const Type *T); + QualType VisitBuiltinType(const BuiltinType *T); + QualType VisitComplexType(const ComplexType *T); + QualType VisitPointerType(const PointerType *T); + QualType VisitBlockPointerType(const BlockPointerType *T); + QualType VisitLValueReferenceType(const LValueReferenceType *T); + QualType VisitRValueReferenceType(const RValueReferenceType *T); + QualType VisitMemberPointerType(const MemberPointerType *T); + QualType VisitConstantArrayType(const ConstantArrayType *T); + QualType VisitIncompleteArrayType(const IncompleteArrayType *T); + QualType VisitVariableArrayType(const VariableArrayType *T); + // FIXME: DependentSizedArrayType + // FIXME: DependentSizedExtVectorType + QualType VisitVectorType(const VectorType *T); + QualType VisitExtVectorType(const ExtVectorType *T); + QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T); + QualType VisitFunctionProtoType(const FunctionProtoType *T); + // FIXME: UnresolvedUsingType + QualType 
VisitParenType(const ParenType *T); + QualType VisitTypedefType(const TypedefType *T); + QualType VisitTypeOfExprType(const TypeOfExprType *T); + // FIXME: DependentTypeOfExprType + QualType VisitTypeOfType(const TypeOfType *T); + QualType VisitDecltypeType(const DecltypeType *T); + QualType VisitUnaryTransformType(const UnaryTransformType *T); + QualType VisitAutoType(const AutoType *T); + // FIXME: DependentDecltypeType + QualType VisitRecordType(const RecordType *T); + QualType VisitEnumType(const EnumType *T); + // FIXME: TemplateTypeParmType + // FIXME: SubstTemplateTypeParmType + QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T); + QualType VisitElaboratedType(const ElaboratedType *T); + // FIXME: DependentNameType + // FIXME: DependentTemplateSpecializationType + QualType VisitObjCInterfaceType(const ObjCInterfaceType *T); + QualType VisitObjCObjectType(const ObjCObjectType *T); + QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T); + + // Importing declarations + bool ImportDeclParts(NamedDecl *D, DeclContext *&DC, + DeclContext *&LexicalDC, DeclarationName &Name, + SourceLocation &Loc); + void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = 0); + void ImportDeclarationNameLoc(const DeclarationNameInfo &From, + DeclarationNameInfo& To); + void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false); + + /// \brief What we should import from the definition. + enum ImportDefinitionKind { + /// \brief Import the default subset of the definition, which might be + /// nothing (if minimal import is set) or might be everything (if minimal + /// import is not set). + IDK_Default, + /// \brief Import everything. + IDK_Everything, + /// \brief Import only the bare bones needed to establish a valid + /// DeclContext. 
+ IDK_Basic + }; + + bool shouldForceImportDeclContext(ImportDefinitionKind IDK) { + return IDK == IDK_Everything || + (IDK == IDK_Default && !Importer.isMinimalImport()); + } + + bool ImportDefinition(RecordDecl *From, RecordDecl *To, + ImportDefinitionKind Kind = IDK_Default); + bool ImportDefinition(VarDecl *From, VarDecl *To, + ImportDefinitionKind Kind = IDK_Default); + bool ImportDefinition(EnumDecl *From, EnumDecl *To, + ImportDefinitionKind Kind = IDK_Default); + bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To, + ImportDefinitionKind Kind = IDK_Default); + bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To, + ImportDefinitionKind Kind = IDK_Default); + TemplateParameterList *ImportTemplateParameterList( + TemplateParameterList *Params); + TemplateArgument ImportTemplateArgument(const TemplateArgument &From); + bool ImportTemplateArguments(const TemplateArgument *FromArgs, + unsigned NumFromArgs, + SmallVectorImpl<TemplateArgument> &ToArgs); + bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord, + bool Complain = true); + bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar, + bool Complain = true); + bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord); + bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC); + bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To); + bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To); + Decl *VisitDecl(Decl *D); + Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D); + Decl *VisitNamespaceDecl(NamespaceDecl *D); + Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias); + Decl *VisitTypedefDecl(TypedefDecl *D); + Decl *VisitTypeAliasDecl(TypeAliasDecl *D); + Decl *VisitEnumDecl(EnumDecl *D); + Decl *VisitRecordDecl(RecordDecl *D); + Decl *VisitEnumConstantDecl(EnumConstantDecl *D); + Decl *VisitFunctionDecl(FunctionDecl *D); + Decl *VisitCXXMethodDecl(CXXMethodDecl *D); + Decl 
*VisitCXXConstructorDecl(CXXConstructorDecl *D); + Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D); + Decl *VisitCXXConversionDecl(CXXConversionDecl *D); + Decl *VisitFieldDecl(FieldDecl *D); + Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D); + Decl *VisitObjCIvarDecl(ObjCIvarDecl *D); + Decl *VisitVarDecl(VarDecl *D); + Decl *VisitImplicitParamDecl(ImplicitParamDecl *D); + Decl *VisitParmVarDecl(ParmVarDecl *D); + Decl *VisitObjCMethodDecl(ObjCMethodDecl *D); + Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D); + Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D); + Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); + Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D); + Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D); + Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D); + Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D); + Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D); + Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D); + Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D); + Decl *VisitClassTemplateDecl(ClassTemplateDecl *D); + Decl *VisitClassTemplateSpecializationDecl( + ClassTemplateSpecializationDecl *D); + Decl *VisitVarTemplateDecl(VarTemplateDecl *D); + Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D); + + // Importing statements + Stmt *VisitStmt(Stmt *S); + + // Importing expressions + Expr *VisitExpr(Expr *E); + Expr *VisitDeclRefExpr(DeclRefExpr *E); + Expr *VisitIntegerLiteral(IntegerLiteral *E); + Expr *VisitCharacterLiteral(CharacterLiteral *E); + Expr *VisitParenExpr(ParenExpr *E); + Expr *VisitUnaryOperator(UnaryOperator *E); + Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E); + Expr *VisitBinaryOperator(BinaryOperator *E); + Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E); + Expr *VisitImplicitCastExpr(ImplicitCastExpr *E); + Expr *VisitCStyleCastExpr(CStyleCastExpr *E); + }; +} +using namespace clang; + 
+//---------------------------------------------------------------------------- +// Structural Equivalence +//---------------------------------------------------------------------------- + +namespace { + struct StructuralEquivalenceContext { + /// \brief AST contexts for which we are checking structural equivalence. + ASTContext &C1, &C2; + + /// \brief The set of "tentative" equivalences between two canonical + /// declarations, mapping from a declaration in the first context to the + /// declaration in the second context that we believe to be equivalent. + llvm::DenseMap<Decl *, Decl *> TentativeEquivalences; + + /// \brief Queue of declarations in the first context whose equivalence + /// with a declaration in the second context still needs to be verified. + std::deque<Decl *> DeclsToCheck; + + /// \brief Declaration (from, to) pairs that are known not to be equivalent + /// (which we have already complained about). + llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls; + + /// \brief Whether we're being strict about the spelling of types when + /// unifying two types. + bool StrictTypeSpelling; + + /// \brief Whether to complain about failures. + bool Complain; + + /// \brief \c true if the last diagnostic came from C2. + bool LastDiagFromC2; + + StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2, + llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls, + bool StrictTypeSpelling = false, + bool Complain = true) + : C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls), + StrictTypeSpelling(StrictTypeSpelling), Complain(Complain), + LastDiagFromC2(false) {} + + /// \brief Determine whether the two declarations are structurally + /// equivalent. + bool IsStructurallyEquivalent(Decl *D1, Decl *D2); + + /// \brief Determine whether the two types are structurally equivalent. + bool IsStructurallyEquivalent(QualType T1, QualType T2); + + private: + /// \brief Finish checking all of the structural equivalences. 
+ /// + /// \returns true if an error occurred, false otherwise. + bool Finish(); + + public: + DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) { + assert(Complain && "Not allowed to complain"); + if (LastDiagFromC2) + C1.getDiagnostics().notePriorDiagnosticFrom(C2.getDiagnostics()); + LastDiagFromC2 = false; + return C1.getDiagnostics().Report(Loc, DiagID); + } + + DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) { + assert(Complain && "Not allowed to complain"); + if (!LastDiagFromC2) + C2.getDiagnostics().notePriorDiagnosticFrom(C1.getDiagnostics()); + LastDiagFromC2 = true; + return C2.getDiagnostics().Report(Loc, DiagID); + } + }; +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + QualType T1, QualType T2); +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + Decl *D1, Decl *D2); + +/// \brief Determine structural equivalence of two expressions. +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + Expr *E1, Expr *E2) { + if (!E1 || !E2) + return E1 == E2; + + // FIXME: Actually perform a structural comparison! + return true; +} + +/// \brief Determine whether two identifiers are equivalent. +static bool IsStructurallyEquivalent(const IdentifierInfo *Name1, + const IdentifierInfo *Name2) { + if (!Name1 || !Name2) + return Name1 == Name2; + + return Name1->getName() == Name2->getName(); +} + +/// \brief Determine whether two nested-name-specifiers are equivalent. +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + NestedNameSpecifier *NNS1, + NestedNameSpecifier *NNS2) { + // FIXME: Implement! + return true; +} + +/// \brief Determine whether two template arguments are equivalent. 
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const TemplateArgument &Arg1, + const TemplateArgument &Arg2) { + if (Arg1.getKind() != Arg2.getKind()) + return false; + + switch (Arg1.getKind()) { + case TemplateArgument::Null: + return true; + + case TemplateArgument::Type: + return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType()); + + case TemplateArgument::Integral: + if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(), + Arg2.getIntegralType())) + return false; + + return llvm::APSInt::isSameValue(Arg1.getAsIntegral(), Arg2.getAsIntegral()); + + case TemplateArgument::Declaration: + return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl()); + + case TemplateArgument::NullPtr: + return true; // FIXME: Is this correct? + + case TemplateArgument::Template: + return IsStructurallyEquivalent(Context, + Arg1.getAsTemplate(), + Arg2.getAsTemplate()); + + case TemplateArgument::TemplateExpansion: + return IsStructurallyEquivalent(Context, + Arg1.getAsTemplateOrTemplatePattern(), + Arg2.getAsTemplateOrTemplatePattern()); + + case TemplateArgument::Expression: + return IsStructurallyEquivalent(Context, + Arg1.getAsExpr(), Arg2.getAsExpr()); + + case TemplateArgument::Pack: + if (Arg1.pack_size() != Arg2.pack_size()) + return false; + + for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I) + if (!IsStructurallyEquivalent(Context, + Arg1.pack_begin()[I], + Arg2.pack_begin()[I])) + return false; + + return true; + } + + llvm_unreachable("Invalid template argument kind"); +} + +/// \brief Determine structural equivalence for the common part of array +/// types. 
+static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context, + const ArrayType *Array1, + const ArrayType *Array2) { + if (!IsStructurallyEquivalent(Context, + Array1->getElementType(), + Array2->getElementType())) + return false; + if (Array1->getSizeModifier() != Array2->getSizeModifier()) + return false; + if (Array1->getIndexTypeQualifiers() != Array2->getIndexTypeQualifiers()) + return false; + + return true; +} + +/// \brief Determine structural equivalence of two types. +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + QualType T1, QualType T2) { + if (T1.isNull() || T2.isNull()) + return T1.isNull() && T2.isNull(); + + if (!Context.StrictTypeSpelling) { + // We aren't being strict about token-to-token equivalence of types, + // so map down to the canonical type. + T1 = Context.C1.getCanonicalType(T1); + T2 = Context.C2.getCanonicalType(T2); + } + + if (T1.getQualifiers() != T2.getQualifiers()) + return false; + + Type::TypeClass TC = T1->getTypeClass(); + + if (T1->getTypeClass() != T2->getTypeClass()) { + // Compare function types with prototypes vs. without prototypes as if + // both did not have prototypes. + if (T1->getTypeClass() == Type::FunctionProto && + T2->getTypeClass() == Type::FunctionNoProto) + TC = Type::FunctionNoProto; + else if (T1->getTypeClass() == Type::FunctionNoProto && + T2->getTypeClass() == Type::FunctionProto) + TC = Type::FunctionNoProto; + else + return false; + } + + switch (TC) { + case Type::Builtin: + // FIXME: Deal with Char_S/Char_U. 
+ if (cast<BuiltinType>(T1)->getKind() != cast<BuiltinType>(T2)->getKind()) + return false; + break; + + case Type::Complex: + if (!IsStructurallyEquivalent(Context, + cast<ComplexType>(T1)->getElementType(), + cast<ComplexType>(T2)->getElementType())) + return false; + break; + + case Type::Decayed: + if (!IsStructurallyEquivalent(Context, + cast<DecayedType>(T1)->getPointeeType(), + cast<DecayedType>(T2)->getPointeeType())) + return false; + break; + + case Type::Pointer: + if (!IsStructurallyEquivalent(Context, + cast<PointerType>(T1)->getPointeeType(), + cast<PointerType>(T2)->getPointeeType())) + return false; + break; + + case Type::BlockPointer: + if (!IsStructurallyEquivalent(Context, + cast<BlockPointerType>(T1)->getPointeeType(), + cast<BlockPointerType>(T2)->getPointeeType())) + return false; + break; + + case Type::LValueReference: + case Type::RValueReference: { + const ReferenceType *Ref1 = cast<ReferenceType>(T1); + const ReferenceType *Ref2 = cast<ReferenceType>(T2); + if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue()) + return false; + if (Ref1->isInnerRef() != Ref2->isInnerRef()) + return false; + if (!IsStructurallyEquivalent(Context, + Ref1->getPointeeTypeAsWritten(), + Ref2->getPointeeTypeAsWritten())) + return false; + break; + } + + case Type::MemberPointer: { + const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1); + const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2); + if (!IsStructurallyEquivalent(Context, + MemPtr1->getPointeeType(), + MemPtr2->getPointeeType())) + return false; + if (!IsStructurallyEquivalent(Context, + QualType(MemPtr1->getClass(), 0), + QualType(MemPtr2->getClass(), 0))) + return false; + break; + } + + case Type::ConstantArray: { + const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1); + const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2); + if (!llvm::APInt::isSameValue(Array1->getSize(), Array2->getSize())) + return false; + + if 
(!IsArrayStructurallyEquivalent(Context, Array1, Array2)) + return false; + break; + } + + case Type::IncompleteArray: + if (!IsArrayStructurallyEquivalent(Context, + cast<ArrayType>(T1), + cast<ArrayType>(T2))) + return false; + break; + + case Type::VariableArray: { + const VariableArrayType *Array1 = cast<VariableArrayType>(T1); + const VariableArrayType *Array2 = cast<VariableArrayType>(T2); + if (!IsStructurallyEquivalent(Context, + Array1->getSizeExpr(), Array2->getSizeExpr())) + return false; + + if (!IsArrayStructurallyEquivalent(Context, Array1, Array2)) + return false; + + break; + } + + case Type::DependentSizedArray: { + const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1); + const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2); + if (!IsStructurallyEquivalent(Context, + Array1->getSizeExpr(), Array2->getSizeExpr())) + return false; + + if (!IsArrayStructurallyEquivalent(Context, Array1, Array2)) + return false; + + break; + } + + case Type::DependentSizedExtVector: { + const DependentSizedExtVectorType *Vec1 + = cast<DependentSizedExtVectorType>(T1); + const DependentSizedExtVectorType *Vec2 + = cast<DependentSizedExtVectorType>(T2); + if (!IsStructurallyEquivalent(Context, + Vec1->getSizeExpr(), Vec2->getSizeExpr())) + return false; + if (!IsStructurallyEquivalent(Context, + Vec1->getElementType(), + Vec2->getElementType())) + return false; + break; + } + + case Type::Vector: + case Type::ExtVector: { + const VectorType *Vec1 = cast<VectorType>(T1); + const VectorType *Vec2 = cast<VectorType>(T2); + if (!IsStructurallyEquivalent(Context, + Vec1->getElementType(), + Vec2->getElementType())) + return false; + if (Vec1->getNumElements() != Vec2->getNumElements()) + return false; + if (Vec1->getVectorKind() != Vec2->getVectorKind()) + return false; + break; + } + + case Type::FunctionProto: { + const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1); + const FunctionProtoType *Proto2 = 
cast<FunctionProtoType>(T2); + if (Proto1->getNumArgs() != Proto2->getNumArgs()) + return false; + for (unsigned I = 0, N = Proto1->getNumArgs(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Proto1->getArgType(I), + Proto2->getArgType(I))) + return false; + } + if (Proto1->isVariadic() != Proto2->isVariadic()) + return false; + if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType()) + return false; + if (Proto1->getExceptionSpecType() == EST_Dynamic) { + if (Proto1->getNumExceptions() != Proto2->getNumExceptions()) + return false; + for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Proto1->getExceptionType(I), + Proto2->getExceptionType(I))) + return false; + } + } else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) { + if (!IsStructurallyEquivalent(Context, + Proto1->getNoexceptExpr(), + Proto2->getNoexceptExpr())) + return false; + } + if (Proto1->getTypeQuals() != Proto2->getTypeQuals()) + return false; + + // Fall through to check the bits common with FunctionNoProtoType. 
+ } + + case Type::FunctionNoProto: { + const FunctionType *Function1 = cast<FunctionType>(T1); + const FunctionType *Function2 = cast<FunctionType>(T2); + if (!IsStructurallyEquivalent(Context, + Function1->getResultType(), + Function2->getResultType())) + return false; + if (Function1->getExtInfo() != Function2->getExtInfo()) + return false; + break; + } + + case Type::UnresolvedUsing: + if (!IsStructurallyEquivalent(Context, + cast<UnresolvedUsingType>(T1)->getDecl(), + cast<UnresolvedUsingType>(T2)->getDecl())) + return false; + + break; + + case Type::Attributed: + if (!IsStructurallyEquivalent(Context, + cast<AttributedType>(T1)->getModifiedType(), + cast<AttributedType>(T2)->getModifiedType())) + return false; + if (!IsStructurallyEquivalent(Context, + cast<AttributedType>(T1)->getEquivalentType(), + cast<AttributedType>(T2)->getEquivalentType())) + return false; + break; + + case Type::Paren: + if (!IsStructurallyEquivalent(Context, + cast<ParenType>(T1)->getInnerType(), + cast<ParenType>(T2)->getInnerType())) + return false; + break; + + case Type::Typedef: + if (!IsStructurallyEquivalent(Context, + cast<TypedefType>(T1)->getDecl(), + cast<TypedefType>(T2)->getDecl())) + return false; + break; + + case Type::TypeOfExpr: + if (!IsStructurallyEquivalent(Context, + cast<TypeOfExprType>(T1)->getUnderlyingExpr(), + cast<TypeOfExprType>(T2)->getUnderlyingExpr())) + return false; + break; + + case Type::TypeOf: + if (!IsStructurallyEquivalent(Context, + cast<TypeOfType>(T1)->getUnderlyingType(), + cast<TypeOfType>(T2)->getUnderlyingType())) + return false; + break; + + case Type::UnaryTransform: + if (!IsStructurallyEquivalent(Context, + cast<UnaryTransformType>(T1)->getUnderlyingType(), + cast<UnaryTransformType>(T1)->getUnderlyingType())) + return false; + break; + + case Type::Decltype: + if (!IsStructurallyEquivalent(Context, + cast<DecltypeType>(T1)->getUnderlyingExpr(), + cast<DecltypeType>(T2)->getUnderlyingExpr())) + return false; + break; + + case 
Type::Auto: + if (!IsStructurallyEquivalent(Context, + cast<AutoType>(T1)->getDeducedType(), + cast<AutoType>(T2)->getDeducedType())) + return false; + break; + + case Type::Record: + case Type::Enum: + if (!IsStructurallyEquivalent(Context, + cast<TagType>(T1)->getDecl(), + cast<TagType>(T2)->getDecl())) + return false; + break; + + case Type::TemplateTypeParm: { + const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1); + const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2); + if (Parm1->getDepth() != Parm2->getDepth()) + return false; + if (Parm1->getIndex() != Parm2->getIndex()) + return false; + if (Parm1->isParameterPack() != Parm2->isParameterPack()) + return false; + + // Names of template type parameters are never significant. + break; + } + + case Type::SubstTemplateTypeParm: { + const SubstTemplateTypeParmType *Subst1 + = cast<SubstTemplateTypeParmType>(T1); + const SubstTemplateTypeParmType *Subst2 + = cast<SubstTemplateTypeParmType>(T2); + if (!IsStructurallyEquivalent(Context, + QualType(Subst1->getReplacedParameter(), 0), + QualType(Subst2->getReplacedParameter(), 0))) + return false; + if (!IsStructurallyEquivalent(Context, + Subst1->getReplacementType(), + Subst2->getReplacementType())) + return false; + break; + } + + case Type::SubstTemplateTypeParmPack: { + const SubstTemplateTypeParmPackType *Subst1 + = cast<SubstTemplateTypeParmPackType>(T1); + const SubstTemplateTypeParmPackType *Subst2 + = cast<SubstTemplateTypeParmPackType>(T2); + if (!IsStructurallyEquivalent(Context, + QualType(Subst1->getReplacedParameter(), 0), + QualType(Subst2->getReplacedParameter(), 0))) + return false; + if (!IsStructurallyEquivalent(Context, + Subst1->getArgumentPack(), + Subst2->getArgumentPack())) + return false; + break; + } + case Type::TemplateSpecialization: { + const TemplateSpecializationType *Spec1 + = cast<TemplateSpecializationType>(T1); + const TemplateSpecializationType *Spec2 + = cast<TemplateSpecializationType>(T2); + if 
(!IsStructurallyEquivalent(Context, + Spec1->getTemplateName(), + Spec2->getTemplateName())) + return false; + if (Spec1->getNumArgs() != Spec2->getNumArgs()) + return false; + for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Spec1->getArg(I), Spec2->getArg(I))) + return false; + } + break; + } + + case Type::Elaborated: { + const ElaboratedType *Elab1 = cast<ElaboratedType>(T1); + const ElaboratedType *Elab2 = cast<ElaboratedType>(T2); + // CHECKME: what if a keyword is ETK_None or ETK_typename ? + if (Elab1->getKeyword() != Elab2->getKeyword()) + return false; + if (!IsStructurallyEquivalent(Context, + Elab1->getQualifier(), + Elab2->getQualifier())) + return false; + if (!IsStructurallyEquivalent(Context, + Elab1->getNamedType(), + Elab2->getNamedType())) + return false; + break; + } + + case Type::InjectedClassName: { + const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1); + const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2); + if (!IsStructurallyEquivalent(Context, + Inj1->getInjectedSpecializationType(), + Inj2->getInjectedSpecializationType())) + return false; + break; + } + + case Type::DependentName: { + const DependentNameType *Typename1 = cast<DependentNameType>(T1); + const DependentNameType *Typename2 = cast<DependentNameType>(T2); + if (!IsStructurallyEquivalent(Context, + Typename1->getQualifier(), + Typename2->getQualifier())) + return false; + if (!IsStructurallyEquivalent(Typename1->getIdentifier(), + Typename2->getIdentifier())) + return false; + + break; + } + + case Type::DependentTemplateSpecialization: { + const DependentTemplateSpecializationType *Spec1 = + cast<DependentTemplateSpecializationType>(T1); + const DependentTemplateSpecializationType *Spec2 = + cast<DependentTemplateSpecializationType>(T2); + if (!IsStructurallyEquivalent(Context, + Spec1->getQualifier(), + Spec2->getQualifier())) + return false; + if 
(!IsStructurallyEquivalent(Spec1->getIdentifier(), + Spec2->getIdentifier())) + return false; + if (Spec1->getNumArgs() != Spec2->getNumArgs()) + return false; + for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Spec1->getArg(I), Spec2->getArg(I))) + return false; + } + break; + } + + case Type::PackExpansion: + if (!IsStructurallyEquivalent(Context, + cast<PackExpansionType>(T1)->getPattern(), + cast<PackExpansionType>(T2)->getPattern())) + return false; + break; + + case Type::ObjCInterface: { + const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1); + const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2); + if (!IsStructurallyEquivalent(Context, + Iface1->getDecl(), Iface2->getDecl())) + return false; + break; + } + + case Type::ObjCObject: { + const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1); + const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2); + if (!IsStructurallyEquivalent(Context, + Obj1->getBaseType(), + Obj2->getBaseType())) + return false; + if (Obj1->getNumProtocols() != Obj2->getNumProtocols()) + return false; + for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Obj1->getProtocol(I), + Obj2->getProtocol(I))) + return false; + } + break; + } + + case Type::ObjCObjectPointer: { + const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1); + const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2); + if (!IsStructurallyEquivalent(Context, + Ptr1->getPointeeType(), + Ptr2->getPointeeType())) + return false; + break; + } + + case Type::Atomic: { + if (!IsStructurallyEquivalent(Context, + cast<AtomicType>(T1)->getValueType(), + cast<AtomicType>(T2)->getValueType())) + return false; + break; + } + + } // end switch + + return true; +} + +/// \brief Determine structural equivalence of two fields. 
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + FieldDecl *Field1, FieldDecl *Field2) { + RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext()); + + // For anonymous structs/unions, match up the anonymous struct/union type + // declarations directly, so that we don't go off searching for anonymous + // types + if (Field1->isAnonymousStructOrUnion() && + Field2->isAnonymousStructOrUnion()) { + RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl(); + RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl(); + return IsStructurallyEquivalent(Context, D1, D2); + } + + // Check for equivalent field names. + IdentifierInfo *Name1 = Field1->getIdentifier(); + IdentifierInfo *Name2 = Field2->getIdentifier(); + if (!::IsStructurallyEquivalent(Name1, Name2)) + return false; + + if (!IsStructurallyEquivalent(Context, + Field1->getType(), Field2->getType())) { + if (Context.Complain) { + Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent) + << Context.C2.getTypeDeclType(Owner2); + Context.Diag2(Field2->getLocation(), diag::note_odr_field) + << Field2->getDeclName() << Field2->getType(); + Context.Diag1(Field1->getLocation(), diag::note_odr_field) + << Field1->getDeclName() << Field1->getType(); + } + return false; + } + + if (Field1->isBitField() != Field2->isBitField()) { + if (Context.Complain) { + Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent) + << Context.C2.getTypeDeclType(Owner2); + if (Field1->isBitField()) { + Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field) + << Field1->getDeclName() << Field1->getType() + << Field1->getBitWidthValue(Context.C1); + Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field) + << Field2->getDeclName(); + } else { + Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field) + << Field2->getDeclName() << Field2->getType() + << Field2->getBitWidthValue(Context.C2); + 
Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field) + << Field1->getDeclName(); + } + } + return false; + } + + if (Field1->isBitField()) { + // Make sure that the bit-fields are the same length. + unsigned Bits1 = Field1->getBitWidthValue(Context.C1); + unsigned Bits2 = Field2->getBitWidthValue(Context.C2); + + if (Bits1 != Bits2) { + if (Context.Complain) { + Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent) + << Context.C2.getTypeDeclType(Owner2); + Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field) + << Field2->getDeclName() << Field2->getType() << Bits2; + Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field) + << Field1->getDeclName() << Field1->getType() << Bits1; + } + return false; + } + } + + return true; +} + +/// \brief Find the index of the given anonymous struct/union within its +/// context. +/// +/// \returns Returns the index of this anonymous struct/union in its context, +/// including the next assigned index (if none of them match). Returns an +/// empty option if the context is not a record, i.e.. if the anonymous +/// struct/union is at namespace or block scope. +static Optional<unsigned> findAnonymousStructOrUnionIndex(RecordDecl *Anon) { + ASTContext &Context = Anon->getASTContext(); + QualType AnonTy = Context.getRecordType(Anon); + + RecordDecl *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext()); + if (!Owner) + return None; + + unsigned Index = 0; + for (DeclContext::decl_iterator D = Owner->noload_decls_begin(), + DEnd = Owner->noload_decls_end(); + D != DEnd; ++D) { + FieldDecl *F = dyn_cast<FieldDecl>(*D); + if (!F || !F->isAnonymousStructOrUnion()) + continue; + + if (Context.hasSameType(F->getType(), AnonTy)) + break; + + ++Index; + } + + return Index; +} + +/// \brief Determine structural equivalence of two records. 
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     RecordDecl *D1, RecordDecl *D2) {
  // A struct/class can never match a union (and vice versa).
  if (D1->isUnion() != D2->isUnion()) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
        << Context.C2.getTypeDeclType(D2);
      Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
        << D1->getDeclName() << (unsigned)D1->getTagKind();
    }
    return false;
  }

  if (D1->isAnonymousStructOrUnion() && D2->isAnonymousStructOrUnion()) {
    // If both anonymous structs/unions are in a record context, make sure
    // they occur in the same location in the context records.
    if (Optional<unsigned> Index1 = findAnonymousStructOrUnionIndex(D1)) {
      if (Optional<unsigned> Index2 = findAnonymousStructOrUnionIndex(D2)) {
        if (*Index1 != *Index2)
          return false;
      }
    }
  }

  // If both declarations are class template specializations, we know
  // the ODR applies, so check the template and template arguments.
  ClassTemplateSpecializationDecl *Spec1
    = dyn_cast<ClassTemplateSpecializationDecl>(D1);
  ClassTemplateSpecializationDecl *Spec2
    = dyn_cast<ClassTemplateSpecializationDecl>(D2);
  if (Spec1 && Spec2) {
    // Check that the specialized templates are the same.
    if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
                                  Spec2->getSpecializedTemplate()))
      return false;

    // Check that the template arguments are the same.
    if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
      return false;

    for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
      if (!IsStructurallyEquivalent(Context,
                                    Spec1->getTemplateArgs().get(I),
                                    Spec2->getTemplateArgs().get(I)))
        return false;
  }
  // If one is a class template specialization and the other is not, these
  // structures are different.
  else if (Spec1 || Spec2)
    return false;

  // Compare the definitions of these two records. If either or both are
  // incomplete, we assume that they are equivalent.
  D1 = D1->getDefinition();
  D2 = D2->getDefinition();
  if (!D1 || !D2)
    return true;

  // For C++ classes, also compare bases (count, types, virtual-ness) before
  // looking at the fields.
  if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
    if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
      if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
        if (Context.Complain) {
          Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
            << Context.C2.getTypeDeclType(D2);
          Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
            << D2CXX->getNumBases();
          Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
            << D1CXX->getNumBases();
        }
        return false;
      }

      // Check the base classes.
      for (CXXRecordDecl::base_class_iterator Base1 = D1CXX->bases_begin(),
                                           BaseEnd1 = D1CXX->bases_end(),
                                              Base2 = D2CXX->bases_begin();
           Base1 != BaseEnd1;
           ++Base1, ++Base2) {
        if (!IsStructurallyEquivalent(Context,
                                      Base1->getType(), Base2->getType())) {
          if (Context.Complain) {
            Context.Diag2(D2->getLocation(),
                          diag::warn_odr_tag_type_inconsistent)
              << Context.C2.getTypeDeclType(D2);
            Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
              << Base2->getType()
              << Base2->getSourceRange();
            Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
              << Base1->getType()
              << Base1->getSourceRange();
          }
          return false;
        }

        // Check virtual vs. non-virtual inheritance mismatch.
        if (Base1->isVirtual() != Base2->isVirtual()) {
          if (Context.Complain) {
            Context.Diag2(D2->getLocation(),
                          diag::warn_odr_tag_type_inconsistent)
              << Context.C2.getTypeDeclType(D2);
            Context.Diag2(Base2->getLocStart(),
                          diag::note_odr_virtual_base)
              << Base2->isVirtual() << Base2->getSourceRange();
            Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
              << Base1->isVirtual()
              << Base1->getSourceRange();
          }
          return false;
        }
      }
    } else if (D1CXX->getNumBases() > 0) {
      // D2 is not a CXXRecordDecl but D1 has bases: mismatch.
      if (Context.Complain) {
        Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
          << Context.C2.getTypeDeclType(D2);
        const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
        Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
          << Base1->getType()
          << Base1->getSourceRange();
        Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
      }
      return false;
    }
  }

  // Check the fields for consistency. The two iterators are advanced in
  // lock-step; running out of fields on either side is a mismatch.
  RecordDecl::field_iterator Field2 = D2->field_begin(),
                             Field2End = D2->field_end();
  for (RecordDecl::field_iterator Field1 = D1->field_begin(),
                                  Field1End = D1->field_end();
       Field1 != Field1End;
       ++Field1, ++Field2) {
    if (Field2 == Field2End) {
      if (Context.Complain) {
        Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
          << Context.C2.getTypeDeclType(D2);
        Context.Diag1(Field1->getLocation(), diag::note_odr_field)
          << Field1->getDeclName() << Field1->getType();
        Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
      }
      return false;
    }

    if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
      return false;
  }

  // D2 has more fields than D1.
  if (Field2 != Field2End) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
        << Context.C2.getTypeDeclType(D2);
      Context.Diag2(Field2->getLocation(), diag::note_odr_field)
        << Field2->getDeclName() << Field2->getType();
      Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
    }
    return false;
  }

  return true;
}

/// \brief Determine structural equivalence of two enums.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     EnumDecl *D1, EnumDecl *D2) {
  // Walk the enumerators of both enums in lock-step; they must agree in
  // count, name and value.
  EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
                             EC2End = D2->enumerator_end();
  for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
                                  EC1End = D1->enumerator_end();
       EC1 != EC1End; ++EC1, ++EC2) {
    if (EC2 == EC2End) {
      if (Context.Complain) {
        Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
          << Context.C2.getTypeDeclType(D2);
        Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
          << EC1->getDeclName()
          << EC1->getInitVal().toString(10);
        Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
      }
      return false;
    }

    llvm::APSInt Val1 = EC1->getInitVal();
    llvm::APSInt Val2 = EC2->getInitVal();
    if (!llvm::APSInt::isSameValue(Val1, Val2) ||
        !IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
      if (Context.Complain) {
        Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
          << Context.C2.getTypeDeclType(D2);
        Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
          << EC2->getDeclName()
          << EC2->getInitVal().toString(10);
        Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
          << EC1->getDeclName()
          << EC1->getInitVal().toString(10);
      }
      return false;
    }
  }

  // D2 has more enumerators than D1.
  if (EC2 != EC2End) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
        << Context.C2.getTypeDeclType(D2);
      Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
        << EC2->getDeclName()
        << EC2->getInitVal().toString(10);
      Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
    }
    return false;
  }

  return true;
}

/// \brief Determine structural equivalence of two template parameter lists:
/// same arity and pairwise-equivalent parameters of the same kind.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     TemplateParameterList *Params1,
                                     TemplateParameterList *Params2) {
  if (Params1->size() != Params2->size()) {
    if (Context.Complain) {
      Context.Diag2(Params2->getTemplateLoc(),
                    diag::err_odr_different_num_template_parameters)
        << Params1->size() << Params2->size();
      Context.Diag1(Params1->getTemplateLoc(),
                    diag::note_odr_template_parameter_list);
    }
    return false;
  }

  for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
    if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
      if (Context.Complain) {
        Context.Diag2(Params2->getParam(I)->getLocation(),
                      diag::err_odr_different_template_parameter_kind);
        Context.Diag1(Params1->getParam(I)->getLocation(),
                      diag::note_odr_template_parameter_here);
      }
      return false;
    }

    if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
                                          Params2->getParam(I))) {

      return false;
    }
  }

  return true;
}

/// \brief Determine structural equivalence of two template type parameters.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     TemplateTypeParmDecl *D1,
                                     TemplateTypeParmDecl *D2) {
  // Only the pack-ness matters; parameter names are not significant.
  if (D1->isParameterPack() != D2->isParameterPack()) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
        << D2->isParameterPack();
      Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
        << D1->isParameterPack();
    }
    return false;
  }

  return true;
}

/// \brief Determine structural equivalence of two non-type template
/// parameters.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     NonTypeTemplateParmDecl *D1,
                                     NonTypeTemplateParmDecl *D2) {
  if (D1->isParameterPack() != D2->isParameterPack()) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
        << D2->isParameterPack();
      Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
        << D1->isParameterPack();
    }
    return false;
  }

  // Check types.
  if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(),
                    diag::err_odr_non_type_parameter_type_inconsistent)
        << D2->getType() << D1->getType();
      Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
        << D1->getType();
    }
    return false;
  }

  return true;
}

/// \brief Determine structural equivalence of two template template
/// parameters.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     TemplateTemplateParmDecl *D1,
                                     TemplateTemplateParmDecl *D2) {
  if (D1->isParameterPack() != D2->isParameterPack()) {
    if (Context.Complain) {
      Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
        << D2->isParameterPack();
      Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
        << D1->isParameterPack();
    }
    return false;
  }

  // Check template parameter lists.
  return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
                                  D2->getTemplateParameters());
}

/// \brief Determine structural equivalence of two class templates: their
/// parameter lists and their templated class declarations must both match.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     ClassTemplateDecl *D1,
                                     ClassTemplateDecl *D2) {
  // Check template parameters.
  if (!IsStructurallyEquivalent(Context,
                                D1->getTemplateParameters(),
                                D2->getTemplateParameters()))
    return false;

  // Check the templated declaration.
  return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
                                          D2->getTemplatedDecl());
}

/// \brief Determine structural equivalence of two declarations.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
                                     Decl *D1, Decl *D2) {
  // FIXME: Check for known structural equivalences via a callback of some sort.

  // Check whether we already know that these two declarations are not
  // structurally equivalent.
  if (Context.NonEquivalentDecls.count(std::make_pair(D1->getCanonicalDecl(),
                                                      D2->getCanonicalDecl())))
    return false;

  // Determine whether we've already produced a tentative equivalence for D1.
  // Determine whether we've already produced a tentative equivalence for D1.
  // If so, D1 and D2 are equivalent exactly when the earlier pairing named D2.
  Decl *&EquivToD1 = Context.TentativeEquivalences[D1->getCanonicalDecl()];
  if (EquivToD1)
    return EquivToD1 == D2->getCanonicalDecl();

  // Produce a tentative equivalence D1 <-> D2, which will be checked later.
  EquivToD1 = D2->getCanonicalDecl();
  Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
  return true;
}

/// Check structural equivalence of two declarations, then drain the queue of
/// tentative equivalences that the check recorded. Returns true only if every
/// queued pair also checks out.
bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
                                                            Decl *D2) {
  if (!::IsStructurallyEquivalent(*this, D1, D2))
    return false;

  return !Finish();
}

/// Check structural equivalence of two types, then drain the queue of
/// tentative equivalences that the check recorded.
bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
                                                            QualType T2) {
  if (!::IsStructurallyEquivalent(*this, T1, T2))
    return false;

  return !Finish();
}

/// Verify every declaration pair queued as tentatively equivalent.
/// Returns true if any pair turns out NOT to be equivalent (callers negate
/// this result).
bool StructuralEquivalenceContext::Finish() {
  while (!DeclsToCheck.empty()) {
    // Check the next declaration.
    Decl *D1 = DeclsToCheck.front();
    DeclsToCheck.pop_front();

    // D2 is the tentative partner recorded when D1 was queued.
    Decl *D2 = TentativeEquivalences[D1];
    assert(D2 && "Unrecorded tentative equivalence?");

    bool Equivalent = true;

    // FIXME: Switch on all declaration kinds. For now, we're just going to
    // check the obvious ones.
    if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
      if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
        // Check for equivalent structure names. An anonymous record adopts
        // the name of the typedef that declares it, if any.
        IdentifierInfo *Name1 = Record1->getIdentifier();
        if (!Name1 && Record1->getTypedefNameForAnonDecl())
          Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
        IdentifierInfo *Name2 = Record2->getIdentifier();
        if (!Name2 && Record2->getTypedefNameForAnonDecl())
          Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
        if (!::IsStructurallyEquivalent(Name1, Name2) ||
            !::IsStructurallyEquivalent(*this, Record1, Record2))
          Equivalent = false;
      } else {
        // Record/non-record mismatch.
        Equivalent = false;
      }
    } else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
      if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
        // Check for equivalent enum names; anonymous enums take the name of
        // their declaring typedef, as above.
        IdentifierInfo *Name1 = Enum1->getIdentifier();
        if (!Name1 && Enum1->getTypedefNameForAnonDecl())
          Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
        IdentifierInfo *Name2 = Enum2->getIdentifier();
        if (!Name2 && Enum2->getTypedefNameForAnonDecl())
          Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
        if (!::IsStructurallyEquivalent(Name1, Name2) ||
            !::IsStructurallyEquivalent(*this, Enum1, Enum2))
          Equivalent = false;
      } else {
        // Enum/non-enum mismatch
        Equivalent = false;
      }
    } else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
      if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
        // Typedefs must agree in name and underlying type.
        if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
                                        Typedef2->getIdentifier()) ||
            !::IsStructurallyEquivalent(*this,
                                        Typedef1->getUnderlyingType(),
                                        Typedef2->getUnderlyingType()))
          Equivalent = false;
      } else {
        // Typedef/non-typedef mismatch.
        Equivalent = false;
      }
    } else if (ClassTemplateDecl *ClassTemplate1
                                        = dyn_cast<ClassTemplateDecl>(D1)) {
      if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
        if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
                                        ClassTemplate2->getIdentifier()) ||
            !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
          Equivalent = false;
      } else {
        // Class template/non-class-template mismatch.
        Equivalent = false;
      }
    } else if (TemplateTypeParmDecl *TTP1= dyn_cast<TemplateTypeParmDecl>(D1)) {
      if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
        if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
          Equivalent = false;
      } else {
        // Kind mismatch.
        Equivalent = false;
      }
    } else if (NonTypeTemplateParmDecl *NTTP1
                                     = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
      if (NonTypeTemplateParmDecl *NTTP2
                                      = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
        if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
          Equivalent = false;
      } else {
        // Kind mismatch.
+ Equivalent = false; + } + } else if (TemplateTemplateParmDecl *TTP1 + = dyn_cast<TemplateTemplateParmDecl>(D1)) { + if (TemplateTemplateParmDecl *TTP2 + = dyn_cast<TemplateTemplateParmDecl>(D2)) { + if (!::IsStructurallyEquivalent(*this, TTP1, TTP2)) + Equivalent = false; + } else { + // Kind mismatch. + Equivalent = false; + } + } + + if (!Equivalent) { + // Note that these two declarations are not equivalent (and we already + // know about it). + NonEquivalentDecls.insert(std::make_pair(D1->getCanonicalDecl(), + D2->getCanonicalDecl())); + return true; + } + // FIXME: Check other declaration kinds! + } + + return false; +} + +//---------------------------------------------------------------------------- +// Import Types +//---------------------------------------------------------------------------- + +QualType ASTNodeImporter::VisitType(const Type *T) { + Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node) + << T->getTypeClassName(); + return QualType(); +} + +QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) { + switch (T->getKind()) { +#define SHARED_SINGLETON_TYPE(Expansion) +#define BUILTIN_TYPE(Id, SingletonId) \ + case BuiltinType::Id: return Importer.getToContext().SingletonId; +#include "clang/AST/BuiltinTypes.def" + + // FIXME: for Char16, Char32, and NullPtr, make sure that the "to" + // context supports C++. + + // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to" + // context supports ObjC. + + case BuiltinType::Char_U: + // The context we're importing from has an unsigned 'char'. If we're + // importing into a context with a signed 'char', translate to + // 'unsigned char' instead. + if (Importer.getToContext().getLangOpts().CharIsSigned) + return Importer.getToContext().UnsignedCharTy; + + return Importer.getToContext().CharTy; + + case BuiltinType::Char_S: + // The context we're importing from has an unsigned 'char'. 
If we're + // importing into a context with a signed 'char', translate to + // 'unsigned char' instead. + if (!Importer.getToContext().getLangOpts().CharIsSigned) + return Importer.getToContext().SignedCharTy; + + return Importer.getToContext().CharTy; + + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + // FIXME: If not in C++, shall we translate to the C equivalent of + // wchar_t? + return Importer.getToContext().WCharTy; + } + + llvm_unreachable("Invalid BuiltinType Kind!"); +} + +QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + return Importer.getToContext().getComplexType(ToElementType); +} + +QualType ASTNodeImporter::VisitPointerType(const PointerType *T) { + QualType ToPointeeType = Importer.Import(T->getPointeeType()); + if (ToPointeeType.isNull()) + return QualType(); + + return Importer.getToContext().getPointerType(ToPointeeType); +} + +QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) { + // FIXME: Check for blocks support in "to" context. + QualType ToPointeeType = Importer.Import(T->getPointeeType()); + if (ToPointeeType.isNull()) + return QualType(); + + return Importer.getToContext().getBlockPointerType(ToPointeeType); +} + +QualType +ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) { + // FIXME: Check for C++ support in "to" context. + QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten()); + if (ToPointeeType.isNull()) + return QualType(); + + return Importer.getToContext().getLValueReferenceType(ToPointeeType); +} + +QualType +ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) { + // FIXME: Check for C++0x support in "to" context. 
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten()); + if (ToPointeeType.isNull()) + return QualType(); + + return Importer.getToContext().getRValueReferenceType(ToPointeeType); +} + +QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) { + // FIXME: Check for C++ support in "to" context. + QualType ToPointeeType = Importer.Import(T->getPointeeType()); + if (ToPointeeType.isNull()) + return QualType(); + + QualType ClassType = Importer.Import(QualType(T->getClass(), 0)); + return Importer.getToContext().getMemberPointerType(ToPointeeType, + ClassType.getTypePtr()); +} + +QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + return Importer.getToContext().getConstantArrayType(ToElementType, + T->getSize(), + T->getSizeModifier(), + T->getIndexTypeCVRQualifiers()); +} + +QualType +ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + return Importer.getToContext().getIncompleteArrayType(ToElementType, + T->getSizeModifier(), + T->getIndexTypeCVRQualifiers()); +} + +QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + Expr *Size = Importer.Import(T->getSizeExpr()); + if (!Size) + return QualType(); + + SourceRange Brackets = Importer.Import(T->getBracketsRange()); + return Importer.getToContext().getVariableArrayType(ToElementType, Size, + T->getSizeModifier(), + T->getIndexTypeCVRQualifiers(), + Brackets); +} + +QualType ASTNodeImporter::VisitVectorType(const VectorType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + return 
Importer.getToContext().getVectorType(ToElementType, + T->getNumElements(), + T->getVectorKind()); +} + +QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) { + QualType ToElementType = Importer.Import(T->getElementType()); + if (ToElementType.isNull()) + return QualType(); + + return Importer.getToContext().getExtVectorType(ToElementType, + T->getNumElements()); +} + +QualType +ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) { + // FIXME: What happens if we're importing a function without a prototype + // into C++? Should we make it variadic? + QualType ToResultType = Importer.Import(T->getResultType()); + if (ToResultType.isNull()) + return QualType(); + + return Importer.getToContext().getFunctionNoProtoType(ToResultType, + T->getExtInfo()); +} + +QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) { + QualType ToResultType = Importer.Import(T->getResultType()); + if (ToResultType.isNull()) + return QualType(); + + // Import argument types + SmallVector<QualType, 4> ArgTypes; + for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(), + AEnd = T->arg_type_end(); + A != AEnd; ++A) { + QualType ArgType = Importer.Import(*A); + if (ArgType.isNull()) + return QualType(); + ArgTypes.push_back(ArgType); + } + + // Import exception types + SmallVector<QualType, 4> ExceptionTypes; + for (FunctionProtoType::exception_iterator E = T->exception_begin(), + EEnd = T->exception_end(); + E != EEnd; ++E) { + QualType ExceptionType = Importer.Import(*E); + if (ExceptionType.isNull()) + return QualType(); + ExceptionTypes.push_back(ExceptionType); + } + + FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo(); + FunctionProtoType::ExtProtoInfo ToEPI; + + ToEPI.ExtInfo = FromEPI.ExtInfo; + ToEPI.Variadic = FromEPI.Variadic; + ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn; + ToEPI.TypeQuals = FromEPI.TypeQuals; + ToEPI.RefQualifier = FromEPI.RefQualifier; + ToEPI.NumExceptions = 
ExceptionTypes.size(); + ToEPI.Exceptions = ExceptionTypes.data(); + ToEPI.ConsumedArguments = FromEPI.ConsumedArguments; + ToEPI.ExceptionSpecType = FromEPI.ExceptionSpecType; + ToEPI.NoexceptExpr = Importer.Import(FromEPI.NoexceptExpr); + ToEPI.ExceptionSpecDecl = cast_or_null<FunctionDecl>( + Importer.Import(FromEPI.ExceptionSpecDecl)); + ToEPI.ExceptionSpecTemplate = cast_or_null<FunctionDecl>( + Importer.Import(FromEPI.ExceptionSpecTemplate)); + + return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI); +} + +QualType ASTNodeImporter::VisitParenType(const ParenType *T) { + QualType ToInnerType = Importer.Import(T->getInnerType()); + if (ToInnerType.isNull()) + return QualType(); + + return Importer.getToContext().getParenType(ToInnerType); +} + +QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) { + TypedefNameDecl *ToDecl + = dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl())); + if (!ToDecl) + return QualType(); + + return Importer.getToContext().getTypeDeclType(ToDecl); +} + +QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) { + Expr *ToExpr = Importer.Import(T->getUnderlyingExpr()); + if (!ToExpr) + return QualType(); + + return Importer.getToContext().getTypeOfExprType(ToExpr); +} + +QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) { + QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType()); + if (ToUnderlyingType.isNull()) + return QualType(); + + return Importer.getToContext().getTypeOfType(ToUnderlyingType); +} + +QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) { + // FIXME: Make sure that the "to" context supports C++0x! 
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr()); + if (!ToExpr) + return QualType(); + + QualType UnderlyingType = Importer.Import(T->getUnderlyingType()); + if (UnderlyingType.isNull()) + return QualType(); + + return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType); +} + +QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) { + QualType ToBaseType = Importer.Import(T->getBaseType()); + QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType()); + if (ToBaseType.isNull() || ToUnderlyingType.isNull()) + return QualType(); + + return Importer.getToContext().getUnaryTransformType(ToBaseType, + ToUnderlyingType, + T->getUTTKind()); +} + +QualType ASTNodeImporter::VisitAutoType(const AutoType *T) { + // FIXME: Make sure that the "to" context supports C++11! + QualType FromDeduced = T->getDeducedType(); + QualType ToDeduced; + if (!FromDeduced.isNull()) { + ToDeduced = Importer.Import(FromDeduced); + if (ToDeduced.isNull()) + return QualType(); + } + + return Importer.getToContext().getAutoType(ToDeduced, T->isDecltypeAuto(), + /*IsDependent*/false); +} + +QualType ASTNodeImporter::VisitRecordType(const RecordType *T) { + RecordDecl *ToDecl + = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl())); + if (!ToDecl) + return QualType(); + + return Importer.getToContext().getTagDeclType(ToDecl); +} + +QualType ASTNodeImporter::VisitEnumType(const EnumType *T) { + EnumDecl *ToDecl + = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl())); + if (!ToDecl) + return QualType(); + + return Importer.getToContext().getTagDeclType(ToDecl); +} + +QualType ASTNodeImporter::VisitTemplateSpecializationType( + const TemplateSpecializationType *T) { + TemplateName ToTemplate = Importer.Import(T->getTemplateName()); + if (ToTemplate.isNull()) + return QualType(); + + SmallVector<TemplateArgument, 2> ToTemplateArgs; + if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs)) + return QualType(); + + 
QualType ToCanonType; + if (!QualType(T, 0).isCanonical()) { + QualType FromCanonType + = Importer.getFromContext().getCanonicalType(QualType(T, 0)); + ToCanonType =Importer.Import(FromCanonType); + if (ToCanonType.isNull()) + return QualType(); + } + return Importer.getToContext().getTemplateSpecializationType(ToTemplate, + ToTemplateArgs.data(), + ToTemplateArgs.size(), + ToCanonType); +} + +QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) { + NestedNameSpecifier *ToQualifier = 0; + // Note: the qualifier in an ElaboratedType is optional. + if (T->getQualifier()) { + ToQualifier = Importer.Import(T->getQualifier()); + if (!ToQualifier) + return QualType(); + } + + QualType ToNamedType = Importer.Import(T->getNamedType()); + if (ToNamedType.isNull()) + return QualType(); + + return Importer.getToContext().getElaboratedType(T->getKeyword(), + ToQualifier, ToNamedType); +} + +QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) { + ObjCInterfaceDecl *Class + = dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl())); + if (!Class) + return QualType(); + + return Importer.getToContext().getObjCInterfaceType(Class); +} + +QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) { + QualType ToBaseType = Importer.Import(T->getBaseType()); + if (ToBaseType.isNull()) + return QualType(); + + SmallVector<ObjCProtocolDecl *, 4> Protocols; + for (ObjCObjectType::qual_iterator P = T->qual_begin(), + PEnd = T->qual_end(); + P != PEnd; ++P) { + ObjCProtocolDecl *Protocol + = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P)); + if (!Protocol) + return QualType(); + Protocols.push_back(Protocol); + } + + return Importer.getToContext().getObjCObjectType(ToBaseType, + Protocols.data(), + Protocols.size()); +} + +QualType +ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) { + QualType ToPointeeType = Importer.Import(T->getPointeeType()); + if (ToPointeeType.isNull()) + return 
QualType(); + + return Importer.getToContext().getObjCObjectPointerType(ToPointeeType); +} + +//---------------------------------------------------------------------------- +// Import Declarations +//---------------------------------------------------------------------------- +bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC, + DeclContext *&LexicalDC, + DeclarationName &Name, + SourceLocation &Loc) { + // Import the context of this declaration. + DC = Importer.ImportContext(D->getDeclContext()); + if (!DC) + return true; + + LexicalDC = DC; + if (D->getDeclContext() != D->getLexicalDeclContext()) { + LexicalDC = Importer.ImportContext(D->getLexicalDeclContext()); + if (!LexicalDC) + return true; + } + + // Import the name of this declaration. + Name = Importer.Import(D->getDeclName()); + if (D->getDeclName() && !Name) + return true; + + // Import the location of this declaration. + Loc = Importer.Import(D->getLocation()); + return false; +} + +void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) { + if (!FromD) + return; + + if (!ToD) { + ToD = Importer.Import(FromD); + if (!ToD) + return; + } + + if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) { + if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) { + if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) { + ImportDefinition(FromRecord, ToRecord); + } + } + return; + } + + if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) { + if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) { + if (FromEnum->getDefinition() && !ToEnum->getDefinition()) { + ImportDefinition(FromEnum, ToEnum); + } + } + return; + } +} + +void +ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From, + DeclarationNameInfo& To) { + // NOTE: To.Name and To.Loc are already imported. + // We only have to import To.LocInfo. 
+ switch (To.getName().getNameKind()) { + case DeclarationName::Identifier: + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::CXXUsingDirective: + return; + + case DeclarationName::CXXOperatorName: { + SourceRange Range = From.getCXXOperatorNameRange(); + To.setCXXOperatorNameRange(Importer.Import(Range)); + return; + } + case DeclarationName::CXXLiteralOperatorName: { + SourceLocation Loc = From.getCXXLiteralOperatorNameLoc(); + To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc)); + return; + } + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: { + TypeSourceInfo *FromTInfo = From.getNamedTypeInfo(); + To.setNamedTypeInfo(Importer.Import(FromTInfo)); + return; + } + } + llvm_unreachable("Unknown name kind."); +} + +void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { + if (Importer.isMinimalImport() && !ForceImport) { + Importer.ImportContext(FromDC); + return; + } + + for (DeclContext::decl_iterator From = FromDC->decls_begin(), + FromEnd = FromDC->decls_end(); + From != FromEnd; + ++From) + Importer.Import(*From); +} + +bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To, + ImportDefinitionKind Kind) { + if (To->getDefinition() || To->isBeingDefined()) { + if (Kind == IDK_Everything) + ImportDeclContext(From, /*ForceImport=*/true); + + return false; + } + + To->startDefinition(); + + // Add base classes. 
+ if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) { + CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From); + + struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data(); + struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data(); + ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor; + ToData.UserDeclaredSpecialMembers = FromData.UserDeclaredSpecialMembers; + ToData.Aggregate = FromData.Aggregate; + ToData.PlainOldData = FromData.PlainOldData; + ToData.Empty = FromData.Empty; + ToData.Polymorphic = FromData.Polymorphic; + ToData.Abstract = FromData.Abstract; + ToData.IsStandardLayout = FromData.IsStandardLayout; + ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases; + ToData.HasPrivateFields = FromData.HasPrivateFields; + ToData.HasProtectedFields = FromData.HasProtectedFields; + ToData.HasPublicFields = FromData.HasPublicFields; + ToData.HasMutableFields = FromData.HasMutableFields; + ToData.HasOnlyCMembers = FromData.HasOnlyCMembers; + ToData.HasInClassInitializer = FromData.HasInClassInitializer; + ToData.HasUninitializedReferenceMember + = FromData.HasUninitializedReferenceMember; + ToData.NeedOverloadResolutionForMoveConstructor + = FromData.NeedOverloadResolutionForMoveConstructor; + ToData.NeedOverloadResolutionForMoveAssignment + = FromData.NeedOverloadResolutionForMoveAssignment; + ToData.NeedOverloadResolutionForDestructor + = FromData.NeedOverloadResolutionForDestructor; + ToData.DefaultedMoveConstructorIsDeleted + = FromData.DefaultedMoveConstructorIsDeleted; + ToData.DefaultedMoveAssignmentIsDeleted + = FromData.DefaultedMoveAssignmentIsDeleted; + ToData.DefaultedDestructorIsDeleted = FromData.DefaultedDestructorIsDeleted; + ToData.HasTrivialSpecialMembers = FromData.HasTrivialSpecialMembers; + ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor; + ToData.HasConstexprNonCopyMoveConstructor + = FromData.HasConstexprNonCopyMoveConstructor; + ToData.DefaultedDefaultConstructorIsConstexpr + = 
FromData.DefaultedDefaultConstructorIsConstexpr; + ToData.HasConstexprDefaultConstructor + = FromData.HasConstexprDefaultConstructor; + ToData.HasNonLiteralTypeFieldsOrBases + = FromData.HasNonLiteralTypeFieldsOrBases; + // ComputedVisibleConversions not imported. + ToData.UserProvidedDefaultConstructor + = FromData.UserProvidedDefaultConstructor; + ToData.DeclaredSpecialMembers = FromData.DeclaredSpecialMembers; + ToData.ImplicitCopyConstructorHasConstParam + = FromData.ImplicitCopyConstructorHasConstParam; + ToData.ImplicitCopyAssignmentHasConstParam + = FromData.ImplicitCopyAssignmentHasConstParam; + ToData.HasDeclaredCopyConstructorWithConstParam + = FromData.HasDeclaredCopyConstructorWithConstParam; + ToData.HasDeclaredCopyAssignmentWithConstParam + = FromData.HasDeclaredCopyAssignmentWithConstParam; + ToData.IsLambda = FromData.IsLambda; + + SmallVector<CXXBaseSpecifier *, 4> Bases; + for (CXXRecordDecl::base_class_iterator + Base1 = FromCXX->bases_begin(), + FromBaseEnd = FromCXX->bases_end(); + Base1 != FromBaseEnd; + ++Base1) { + QualType T = Importer.Import(Base1->getType()); + if (T.isNull()) + return true; + + SourceLocation EllipsisLoc; + if (Base1->isPackExpansion()) + EllipsisLoc = Importer.Import(Base1->getEllipsisLoc()); + + // Ensure that we have a definition for the base. 
+ ImportDefinitionIfNeeded(Base1->getType()->getAsCXXRecordDecl()); + + Bases.push_back( + new (Importer.getToContext()) + CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()), + Base1->isVirtual(), + Base1->isBaseOfClass(), + Base1->getAccessSpecifierAsWritten(), + Importer.Import(Base1->getTypeSourceInfo()), + EllipsisLoc)); + } + if (!Bases.empty()) + ToCXX->setBases(Bases.data(), Bases.size()); + } + + if (shouldForceImportDeclContext(Kind)) + ImportDeclContext(From, /*ForceImport=*/true); + + To->completeDefinition(); + return false; +} + +bool ASTNodeImporter::ImportDefinition(VarDecl *From, VarDecl *To, + ImportDefinitionKind Kind) { + if (To->getDefinition()) + return false; + + // FIXME: Can we really import any initializer? Alternatively, we could force + // ourselves to import every declaration of a variable and then only use + // getInit() here. + To->setInit(Importer.Import(const_cast<Expr *>(From->getAnyInitializer()))); + + // FIXME: Other bits to merge? + + return false; +} + +bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To, + ImportDefinitionKind Kind) { + if (To->getDefinition() || To->isBeingDefined()) { + if (Kind == IDK_Everything) + ImportDeclContext(From, /*ForceImport=*/true); + return false; + } + + To->startDefinition(); + + QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From)); + if (T.isNull()) + return true; + + QualType ToPromotionType = Importer.Import(From->getPromotionType()); + if (ToPromotionType.isNull()) + return true; + + if (shouldForceImportDeclContext(Kind)) + ImportDeclContext(From, /*ForceImport=*/true); + + // FIXME: we might need to merge the number of positive or negative bits + // if the enumerator lists don't match. 
// Tail of an enum-definition import whose beginning lies above this chunk:
// complete the destination enum with the imported underlying/promotion types
// and the bit counts computed on the source enum. Returns false (= success)
// per the ImportDefinition error convention.
  To->completeDefinition(T, ToPromotionType,
                         From->getNumPositiveBits(),
                         From->getNumNegativeBits());
  return false;
}

/// Import a template parameter list wholesale, one parameter at a time.
/// Returns null if any individual parameter fails to import.
TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
                                                TemplateParameterList *Params) {
  SmallVector<NamedDecl *, 4> ToParams;
  ToParams.reserve(Params->size());
  for (TemplateParameterList::iterator P = Params->begin(),
                                    PEnd = Params->end();
       P != PEnd; ++P) {
    Decl *To = Importer.Import(*P);
    if (!To)
      return 0;

    ToParams.push_back(cast<NamedDecl>(To));
  }

  // Angle-bracket locations are imported too so diagnostics in the "to"
  // context point at sensible places.
  return TemplateParameterList::Create(Importer.getToContext(),
                                 Importer.Import(Params->getTemplateLoc()),
                                 Importer.Import(Params->getLAngleLoc()),
                                 ToParams.data(), ToParams.size(),
                                 Importer.Import(Params->getRAngleLoc()));
}

/// Import a single template argument into the "to" context.
/// On failure every case returns a default-constructed (Null-kind)
/// TemplateArgument; callers that need to distinguish a genuine Null
/// argument from a failure compare against the source kind (see
/// ImportTemplateArguments below).
TemplateArgument
ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
  switch (From.getKind()) {
  case TemplateArgument::Null:
    return TemplateArgument();

  case TemplateArgument::Type: {
    QualType ToType = Importer.Import(From.getAsType());
    if (ToType.isNull())
      return TemplateArgument();
    return TemplateArgument(ToType);
  }

  case TemplateArgument::Integral: {
    // The integral value itself is reused from 'From'; only its type needs
    // importing.
    QualType ToType = Importer.Import(From.getIntegralType());
    if (ToType.isNull())
      return TemplateArgument();
    return TemplateArgument(From, ToType);
  }

  case TemplateArgument::Declaration: {
    ValueDecl *FromD = From.getAsDecl();
    if (ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(FromD)))
      return TemplateArgument(To, From.isDeclForReferenceParam());
    return TemplateArgument();
  }

  case TemplateArgument::NullPtr: {
    QualType ToType = Importer.Import(From.getNullPtrType());
    if (ToType.isNull())
      return TemplateArgument();
    return TemplateArgument(ToType, /*isNullPtr*/true);
  }

  case TemplateArgument::Template: {
    TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
    if (ToTemplate.isNull())
      return TemplateArgument();

    return TemplateArgument(ToTemplate);
  }

  case TemplateArgument::TemplateExpansion: {
    // A pack-expansion of a template template argument: import the pattern
    // and preserve the (possibly unknown) expansion count.
    TemplateName ToTemplate
      = Importer.Import(From.getAsTemplateOrTemplatePattern());
    if (ToTemplate.isNull())
      return TemplateArgument();

    return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
  }

  case TemplateArgument::Expression:
    if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
      return TemplateArgument(ToExpr);
    return TemplateArgument();

  case TemplateArgument::Pack: {
    SmallVector<TemplateArgument, 2> ToPack;
    ToPack.reserve(From.pack_size());
    if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
      return TemplateArgument();

    // Pack storage must live in the "to" ASTContext, hence the placement
    // allocation below.
    TemplateArgument *ToArgs
      = new (Importer.getToContext()) TemplateArgument[ToPack.size()];
    std::copy(ToPack.begin(), ToPack.end(), ToArgs);
    return TemplateArgument(ToArgs, ToPack.size());
  }
  }

  llvm_unreachable("Invalid template argument kind");
}

/// Import an array of template arguments into ToArgs.
/// Returns true on error (note the inverted-boolean convention used by the
/// Import* helpers in this file).
bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
                                              unsigned NumFromArgs,
                              SmallVectorImpl<TemplateArgument> &ToArgs) {
  for (unsigned I = 0; I != NumFromArgs; ++I) {
    TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
    // A Null result only signals failure if the source argument was not
    // itself Null.
    if (To.isNull() && !FromArgs[I].isNull())
      return true;

    ToArgs.push_back(To);
  }

  return false;
}

/// Check whether two record declarations are structurally equivalent.
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
                                        RecordDecl *ToRecord, bool Complain) {
  // Eliminate a potential failure point where we attempt to re-import
  // something we're trying to import while completing ToRecord.
  Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord);
  if (ToOrigin) {
    RecordDecl *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
    if (ToOriginRecord)
      ToRecord = ToOriginRecord;
  }

  // Note: the "to" side context comes from ToRecord itself, which may be the
  // original declaration substituted above.
  StructuralEquivalenceContext Ctx(Importer.getFromContext(),
                                   ToRecord->getASTContext(),
                                   Importer.getNonEquivalentDecls(),
                                   false, Complain);
  return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
}

/// Check whether two variable declarations are structurally equivalent.
bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
                                        bool Complain) {
  StructuralEquivalenceContext Ctx(
      Importer.getFromContext(), Importer.getToContext(),
      Importer.getNonEquivalentDecls(), false, Complain);
  return Ctx.IsStructurallyEquivalent(FromVar, ToVar);
}

/// Check whether two enum declarations are structurally equivalent.
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
  StructuralEquivalenceContext Ctx(Importer.getFromContext(),
                                   Importer.getToContext(),
                                   Importer.getNonEquivalentDecls());
  return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
}

/// Two enumerators match when their initializer values agree exactly
/// (signedness, bit width, and value). No structural-equivalence context is
/// needed for this comparison.
bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
                                        EnumConstantDecl *ToEC)
{
  const llvm::APSInt &FromVal = FromEC->getInitVal();
  const llvm::APSInt &ToVal = ToEC->getInitVal();

  return FromVal.isSigned() == ToVal.isSigned() &&
         FromVal.getBitWidth() == ToVal.getBitWidth() &&
         FromVal == ToVal;
}

/// Check whether two class template declarations are structurally equivalent.
bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
                                        ClassTemplateDecl *To) {
  StructuralEquivalenceContext Ctx(Importer.getFromContext(),
                                   Importer.getToContext(),
                                   Importer.getNonEquivalentDecls());
  return Ctx.IsStructurallyEquivalent(From, To);
}

/// Check whether two variable template declarations are structurally
/// equivalent.
bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
                                        VarTemplateDecl *To) {
  StructuralEquivalenceContext Ctx(Importer.getFromContext(),
                                   Importer.getToContext(),
                                   Importer.getNonEquivalentDecls());
  return Ctx.IsStructurallyEquivalent(From, To);
}

/// Fallback visitor: any declaration kind without a dedicated Visit* method
/// is reported as unsupported and the import fails (returns null).
Decl *ASTNodeImporter::VisitDecl(Decl *D) {
  Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
    << D->getDeclKindName();
  return 0;
}

/// The translation unit declaration always exists in the "to" context, so
/// importing one is just a matter of recording the mapping.
Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
  TranslationUnitDecl *ToD =
    Importer.getToContext().getTranslationUnitDecl();

  Importer.Imported(D, ToD);

  return ToD;
}

/// Import a namespace, merging with an existing namespace of the same name
/// (or the context's anonymous namespace) when possible.
Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
  // Import the major distinguishing characteristics of this namespace.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  NamespaceDecl *MergeWithNamespace = 0;
  if (!Name) {
    // This is an anonymous namespace. Adopt an existing anonymous
    // namespace if we can.
    // FIXME: Not testable.
    if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
      MergeWithNamespace = TU->getAnonymousNamespace();
    else
      MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
  } else {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(Name, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
        continue;

      // The first namespace found wins; any non-namespace hits collected so
      // far are discarded since namespaces extend rather than conflict.
      if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
        MergeWithNamespace = FoundNS;
        ConflictingDecls.clear();
        break;
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
    }
  }

  // Create the "to" namespace, if needed.
  NamespaceDecl *ToNamespace = MergeWithNamespace;
  if (!ToNamespace) {
    ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
                                        D->isInline(),
                                        Importer.Import(D->getLocStart()),
                                        Loc, Name.getAsIdentifierInfo(),
                                        /*PrevDecl=*/0);
    ToNamespace->setLexicalDeclContext(LexicalDC);
    LexicalDC->addDeclInternal(ToNamespace);

    // If this is an anonymous namespace, register it as the anonymous
    // namespace within its context.
    if (!Name) {
      if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
        TU->setAnonymousNamespace(ToNamespace);
      else
        cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
    }
  }
  Importer.Imported(D, ToNamespace);

  // Eagerly import everything declared inside the namespace.
  ImportDeclContext(D);

  return ToNamespace;
}

/// Shared implementation for typedefs and C++11 alias declarations;
/// \p IsAlias selects which node kind is created.
Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
  // Import the major distinguishing characteristics of this typedef.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // If this typedef is not in block scope, determine whether we've
  // seen a typedef with the same name (that we can merge with) or any
  // other entity by that name (which name lookup could conflict with).
  if (!DC->isFunctionOrMethod()) {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    unsigned IDNS = Decl::IDNS_Ordinary;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(Name, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;
      // A pre-existing typedef with a structurally-equivalent underlying
      // type is reused instead of creating a duplicate.
      if (TypedefNameDecl *FoundTypedef =
            dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
        if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
                                            FoundTypedef->getUnderlyingType()))
          return Importer.Imported(D, FoundTypedef);
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
      if (!Name)
        return 0;
    }
  }

  // Import the underlying type of this typedef.
  QualType T = Importer.Import(D->getUnderlyingType());
  if (T.isNull())
    return 0;

  // Create the new typedef node.
  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
  SourceLocation StartL = Importer.Import(D->getLocStart());
  TypedefNameDecl *ToTypedef;
  if (IsAlias)
    ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
                                      StartL, Loc,
                                      Name.getAsIdentifierInfo(),
                                      TInfo);
  else
    ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
                                    StartL, Loc,
                                    Name.getAsIdentifierInfo(),
                                    TInfo);

  ToTypedef->setAccess(D->getAccess());
  ToTypedef->setLexicalDeclContext(LexicalDC);
  Importer.Imported(D, ToTypedef);
  LexicalDC->addDeclInternal(ToTypedef);

  return ToTypedef;
}

/// Import a C-style typedef.
Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
  return VisitTypedefNameDecl(D, /*IsAlias=*/false);
}

/// Import a C++11 'using X = T;' alias declaration.
Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
  return VisitTypedefNameDecl(D, /*IsAlias=*/true);
}

Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
  // Import the major distinguishing characteristics of this enum.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Figure out what enum name we're looking for. An anonymous enum declared
  // via 'typedef enum {...} E;' is looked up under its typedef name in the
  // ordinary namespace instead of the tag namespace.
  unsigned IDNS = Decl::IDNS_Tag;
  DeclarationName SearchName = Name;
  if (!SearchName && D->getTypedefNameForAnonDecl()) {
    SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
    IDNS = Decl::IDNS_Ordinary;
  } else if (Importer.getToContext().getLangOpts().CPlusPlus)
    IDNS |= Decl::IDNS_Ordinary;

  // We may already have an enum of the same name; try to find and match it.
  if (!DC->isFunctionOrMethod() && SearchName) {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(SearchName, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;

      // Look through a typedef to the tag it names, so 'typedef enum {...} E'
      // can match against an existing enum.
      Decl *Found = FoundDecls[I];
      if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
        if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
          Found = Tag->getDecl();
      }

      if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
        if (IsStructuralMatch(D, FoundEnum))
          return Importer.Imported(D, FoundEnum);
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
    }
  }

  // Create the enum declaration.
  EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
                                  Importer.Import(D->getLocStart()),
                                  Loc, Name.getAsIdentifierInfo(), 0,
                                  D->isScoped(), D->isScopedUsingClassTag(),
                                  D->isFixed());
  // Import the qualifier, if any.
  D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
  D2->setAccess(D->getAccess());
  D2->setLexicalDeclContext(LexicalDC);
  // Map D -> D2 before importing the body so recursive references resolve.
  Importer.Imported(D, D2);
  LexicalDC->addDeclInternal(D2);

  // Import the integer type.
  QualType ToIntegerType = Importer.Import(D->getIntegerType());
  if (ToIntegerType.isNull())
    return 0;
  D2->setIntegerType(ToIntegerType);

  // Import the definition
  if (D->isCompleteDefinition() && ImportDefinition(D, D2))
    return 0;

  return D2;
}

/// Import a struct/union/class declaration, preferring to merge with an
/// existing structurally-equivalent record in the "to" context.
Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
  // If this record has a definition in the translation unit we're coming from,
  // but this particular declaration is not that definition, import the
  // definition and map to that.
  TagDecl *Definition = D->getDefinition();
  if (Definition && Definition != D) {
    Decl *ImportedDef = Importer.Import(Definition);
    if (!ImportedDef)
      return 0;

    return Importer.Imported(D, ImportedDef);
  }

  // Import the major distinguishing characteristics of this record.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Figure out what structure name we're looking for. As with enums, an
  // anonymous record named only through a typedef is searched for under the
  // typedef's name in the ordinary namespace.
  unsigned IDNS = Decl::IDNS_Tag;
  DeclarationName SearchName = Name;
  if (!SearchName && D->getTypedefNameForAnonDecl()) {
    SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
    IDNS = Decl::IDNS_Ordinary;
  } else if (Importer.getToContext().getLangOpts().CPlusPlus)
    IDNS |= Decl::IDNS_Ordinary;

  // We may already have a record of the same name; try to find and match it.
  RecordDecl *AdoptDecl = 0;
  if (!DC->isFunctionOrMethod()) {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    SmallVector<NamedDecl *, 2> FoundDecls;
    // NOTE(review): unlike VisitEnumDecl this lookup runs even when
    // SearchName is empty (anonymous record with no typedef name); empty-name
    // lookup is presumably how anonymous members are found -- verify.
    DC->localUncachedLookup(SearchName, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;

      // Look through typedefs to the underlying tag declaration.
      Decl *Found = FoundDecls[I];
      if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
        if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
          Found = Tag->getDecl();
      }

      if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
        if (D->isAnonymousStructOrUnion() &&
            FoundRecord->isAnonymousStructOrUnion()) {
          // If both anonymous structs/unions are in a record context, make sure
          // they occur in the same location in the context records.
          if (Optional<unsigned> Index1
                                   = findAnonymousStructOrUnionIndex(D)) {
            if (Optional<unsigned> Index2 =
                    findAnonymousStructOrUnionIndex(FoundRecord)) {
              if (*Index1 != *Index2)
                continue;
            }
          }
        }

        if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
          if ((SearchName && !D->isCompleteDefinition())
              || (D->isCompleteDefinition() &&
                  D->isAnonymousStructOrUnion()
                    == FoundDef->isAnonymousStructOrUnion() &&
                  IsStructuralMatch(D, FoundDef))) {
            // The record types structurally match, or the "from" translation
            // unit only had a forward declaration anyway; call it the same
            // function.
            // FIXME: For C++, we should also merge methods here.
            return Importer.Imported(D, FoundDef);
          }
        } else if (!D->isCompleteDefinition()) {
          // We have a forward declaration of this type, so adopt that forward
          // declaration rather than building a new one.
          AdoptDecl = FoundRecord;
          continue;
        } else if (!SearchName) {
          // Anonymous records never conflict by name.
          continue;
        }
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty() && SearchName) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
    }
  }

  // Create the record declaration.
  RecordDecl *D2 = AdoptDecl;
  SourceLocation StartLoc = Importer.Import(D->getLocStart());
  if (!D2) {
    if (isa<CXXRecordDecl>(D)) {
      CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
                                                   D->getTagKind(),
                                                   DC, StartLoc, Loc,
                                                   Name.getAsIdentifierInfo());
      D2 = D2CXX;
      D2->setAccess(D->getAccess());
    } else {
      D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
                              DC, StartLoc, Loc, Name.getAsIdentifierInfo());
    }

    D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
    D2->setLexicalDeclContext(LexicalDC);
    LexicalDC->addDeclInternal(D2);
    if (D->isAnonymousStructOrUnion())
      D2->setAnonymousStructOrUnion(true);
  }

  // Record the mapping before importing the definition so self-references
  // inside the record resolve to D2.
  Importer.Imported(D, D2);

  if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
    return 0;

  return D2;
}

/// Import an enumerator, merging with an existing enumerator of the same
/// name and value where possible.
Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
  // Import the major distinguishing characteristics of this enumerator.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  QualType T = Importer.Import(D->getType());
  if (T.isNull())
    return 0;

  // Determine whether there are any other declarations with the same name and
  // in the same context.
  if (!LexicalDC->isFunctionOrMethod()) {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    unsigned IDNS = Decl::IDNS_Ordinary;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(Name, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;

      // Reuse an existing enumerator whose value matches exactly.
      if (EnumConstantDecl *FoundEnumConstant
            = dyn_cast<EnumConstantDecl>(FoundDecls[I])) {
        if (IsStructuralMatch(D, FoundEnumConstant))
          return Importer.Imported(D, FoundEnumConstant);
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
      if (!Name)
        return 0;
    }
  }

  // Import the initializer expression, if any; a null result for a non-null
  // source initializer signals failure.
  Expr *Init = Importer.Import(D->getInitExpr());
  if (D->getInitExpr() && !Init)
    return 0;

  EnumConstantDecl *ToEnumerator
    = EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
                               Name.getAsIdentifierInfo(), T,
                               Init, D->getInitVal());
  ToEnumerator->setAccess(D->getAccess());
  ToEnumerator->setLexicalDeclContext(LexicalDC);
  Importer.Imported(D, ToEnumerator);
  LexicalDC->addDeclInternal(ToEnumerator);
  return ToEnumerator;
}

/// Import a function (or, via the subclass visitors below, a C++ method,
/// constructor, destructor, or conversion function).
Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
  // Import the major distinguishing characteristics of this function.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Try to find a function in our own ("to") context with the same name, same
  // type, and in the same context as the function we're importing.
  if (!LexicalDC->isFunctionOrMethod()) {
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    unsigned IDNS = Decl::IDNS_Ordinary;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(Name, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;

      if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
        // Only externally-visible functions on both sides are candidates for
        // merging.
        if (FoundFunction->hasExternalFormalLinkage() &&
            D->hasExternalFormalLinkage()) {
          if (Importer.IsStructurallyEquivalent(D->getType(),
                                                FoundFunction->getType())) {
            // FIXME: Actually try to merge the body and other attributes.
            return Importer.Imported(D, FoundFunction);
          }

          // FIXME: Check for overloading more carefully, e.g., by boosting
          // Sema::IsOverload out to the AST library.

          // Function overloading is okay in C++.
          if (Importer.getToContext().getLangOpts().CPlusPlus)
            continue;

          // Complain about inconsistent function types.
          Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
            << Name << D->getType() << FoundFunction->getType();
          Importer.ToDiag(FoundFunction->getLocation(),
                          diag::note_odr_value_here)
            << FoundFunction->getType();
        }
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
      if (!Name)
        return 0;
    }
  }

  DeclarationNameInfo NameInfo(Name, Loc);
  // Import additional name location/type info.
  ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);

  QualType FromTy = D->getType();
  bool usedDifferentExceptionSpec = false;

  if (const FunctionProtoType *
        FromFPT = D->getType()->getAs<FunctionProtoType>()) {
    FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo();
    // FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the
    // FunctionDecl that we are importing the FunctionProtoType for.
    // To avoid an infinite recursion when importing, create the FunctionDecl
    // with a simplified function type and update it afterwards.
    if (FromEPI.ExceptionSpecDecl || FromEPI.ExceptionSpecTemplate ||
        FromEPI.NoexceptExpr) {
      FunctionProtoType::ExtProtoInfo DefaultEPI;
      FromTy = Importer.getFromContext().getFunctionType(
          FromFPT->getResultType(), FromFPT->getArgTypes(), DefaultEPI);
      usedDifferentExceptionSpec = true;
    }
  }

  // Import the type.
  QualType T = Importer.Import(FromTy);
  if (T.isNull())
    return 0;

  // Import the function parameters.
  SmallVector<ParmVarDecl *, 8> Parameters;
  for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
       P != PEnd; ++P) {
    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*P));
    if (!ToP)
      return 0;

    Parameters.push_back(ToP);
  }

  // Create the imported function, choosing the node kind that matches the
  // source declaration (constructor/destructor/conversion/method/function).
  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
  FunctionDecl *ToFunction = 0;
  if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
    ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
                                            cast<CXXRecordDecl>(DC),
                                            D->getInnerLocStart(),
                                            NameInfo, T, TInfo,
                                            FromConstructor->isExplicit(),
                                            D->isInlineSpecified(),
                                            D->isImplicit(),
                                            D->isConstexpr());
  } else if (isa<CXXDestructorDecl>(D)) {
    ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
                                           cast<CXXRecordDecl>(DC),
                                           D->getInnerLocStart(),
                                           NameInfo, T, TInfo,
                                           D->isInlineSpecified(),
                                           D->isImplicit());
  } else if (CXXConversionDecl *FromConversion
                                           = dyn_cast<CXXConversionDecl>(D)) {
    ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
                                           cast<CXXRecordDecl>(DC),
                                           D->getInnerLocStart(),
                                           NameInfo, T, TInfo,
                                           D->isInlineSpecified(),
                                           FromConversion->isExplicit(),
                                           D->isConstexpr(),
                                           Importer.Import(D->getLocEnd()));
  } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
    ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
                                       cast<CXXRecordDecl>(DC),
                                       D->getInnerLocStart(),
                                       NameInfo, T, TInfo,
                                       Method->getStorageClass(),
                                       Method->isInlineSpecified(),
                                       D->isConstexpr(),
                                       Importer.Import(D->getLocEnd()));
  } else {
    ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
                                      D->getInnerLocStart(),
                                      NameInfo, T, TInfo, D->getStorageClass(),
                                      D->isInlineSpecified(),
                                      D->hasWrittenPrototype(),
                                      D->isConstexpr());
  }

  // Import the qualifier, if any.
  ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
  ToFunction->setAccess(D->getAccess());
  ToFunction->setLexicalDeclContext(LexicalDC);
  ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
  ToFunction->setTrivial(D->isTrivial());
  ToFunction->setPure(D->isPure());
  // Map D -> ToFunction before importing anything that might refer back to it.
  Importer.Imported(D, ToFunction);

  // Set the parameters.
  for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
    Parameters[I]->setOwningFunction(ToFunction);
    ToFunction->addDeclInternal(Parameters[I]);
  }
  ToFunction->setParams(Parameters);

  if (usedDifferentExceptionSpec) {
    // Update FunctionProtoType::ExtProtoInfo: now that the decl exists, the
    // full type (including the exception specification) can be imported
    // without recursing.
    QualType T = Importer.Import(D->getType());
    if (T.isNull())
      return 0;
    ToFunction->setType(T);
  }

  // FIXME: Other bits to merge?

  // Add this function to the lexical context.
  LexicalDC->addDeclInternal(ToFunction);

  return ToFunction;
}

/// C++ method kinds share VisitFunctionDecl's logic; the node kind is
/// re-derived there via dyn_cast.
Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
  return VisitFunctionDecl(D);
}

Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
  return VisitCXXMethodDecl(D);
}

Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
  return VisitCXXMethodDecl(D);
}

Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
  return VisitCXXMethodDecl(D);
}

/// Return the 1-based position of field \p F among the (indirect) fields of
/// its owning record, counted over the no-load declaration sequence; returns
/// 0 if F's context is not a record. Used to match anonymous fields by
/// position.
static unsigned getFieldIndex(Decl *F) {
  RecordDecl *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
  if (!Owner)
    return 0;

  unsigned Index = 1;
  for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
                               DEnd = Owner->noload_decls_end();
       D != DEnd; ++D) {
    if (*D == F)
      return Index;

    if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
      ++Index;
  }

  return Index;
}

/// Import a (direct) field of a record.
Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
  // Import the major distinguishing characteristics of a variable.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Determine whether we've already imported this field.
  SmallVector<NamedDecl *, 2> FoundDecls;
  DC->localUncachedLookup(Name, FoundDecls);
  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
    if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
      // For anonymous fields, match up by index.
      if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
        continue;

      if (Importer.IsStructurallyEquivalent(D->getType(),
                                            FoundField->getType())) {
        Importer.Imported(D, FoundField);
        return FoundField;
      }

      // Same name/position but different type: ODR violation.
      Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
        << Name << D->getType() << FoundField->getType();
      Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
        << FoundField->getType();
      return 0;
    }
  }

  // Import the type.
  QualType T = Importer.Import(D->getType());
  if (T.isNull())
    return 0;

  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
  Expr *BitWidth = Importer.Import(D->getBitWidth());
  if (!BitWidth && D->getBitWidth())
    return 0;

  FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
                                         Importer.Import(D->getInnerLocStart()),
                                         Loc, Name.getAsIdentifierInfo(),
                                         T, TInfo, BitWidth, D->isMutable(),
                                         D->getInClassInitStyle());
  ToField->setAccess(D->getAccess());
  ToField->setLexicalDeclContext(LexicalDC);
  // NOTE(review): the in-class initializer expression is taken straight from
  // the source AST without Importer.Import(), unlike the bit-width above --
  // verify whether it should be imported into the "to" context first.
  if (ToField->hasInClassInitializer())
    ToField->setInClassInitializer(D->getInClassInitializer());
  ToField->setImplicit(D->isImplicit());
  Importer.Imported(D, ToField);
  LexicalDC->addDeclInternal(ToField);
  return ToField;
}

/// Import an indirect field (a member reachable through anonymous
/// structs/unions), including its chain of intermediate declarations.
Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
  // Import the major distinguishing characteristics of a variable.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Determine whether we've already imported this field.
  SmallVector<NamedDecl *, 2> FoundDecls;
  DC->localUncachedLookup(Name, FoundDecls);
  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
    if (IndirectFieldDecl *FoundField
                                = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
      // For anonymous indirect fields, match up by index.
      if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
        continue;

      if (Importer.IsStructurallyEquivalent(D->getType(),
                                            FoundField->getType(),
                                            !Name.isEmpty())) {
        Importer.Imported(D, FoundField);
        return FoundField;
      }

      // If there are more anonymous fields to check, continue.
      if (!Name && I < N-1)
        continue;

      Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
        << Name << D->getType() << FoundField->getType();
      Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
        << FoundField->getType();
      return 0;
    }
  }

  // Import the type.
  QualType T = Importer.Import(D->getType());
  if (T.isNull())
    return 0;

  // The chain array is owned by the "to" ASTContext.
  NamedDecl **NamedChain =
    new (Importer.getToContext())NamedDecl*[D->getChainingSize()];

  unsigned i = 0;
  for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
       PE = D->chain_end(); PI != PE; ++PI) {
    // Note: this local 'D' intentionally shadows the parameter within the
    // loop body.
    Decl* D = Importer.Import(*PI);
    if (!D)
      return 0;
    NamedChain[i++] = cast<NamedDecl>(D);
  }

  IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
                                         Importer.getToContext(), DC,
                                         Loc, Name.getAsIdentifierInfo(), T,
                                         NamedChain, D->getChainingSize());
  ToIndirectField->setAccess(D->getAccess());
  ToIndirectField->setLexicalDeclContext(LexicalDC);
  Importer.Imported(D, ToIndirectField);
  LexicalDC->addDeclInternal(ToIndirectField);
  return ToIndirectField;
}

/// Import an Objective-C instance variable.
Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
  // Import the major distinguishing characteristics of an ivar.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Determine whether we've already imported this ivar
  SmallVector<NamedDecl *, 2> FoundDecls;
  DC->localUncachedLookup(Name, FoundDecls);
  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
    if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
      if (Importer.IsStructurallyEquivalent(D->getType(),
                                            FoundIvar->getType())) {
        Importer.Imported(D, FoundIvar);
        return FoundIvar;
      }

      // Same-named ivar with a different type: report the mismatch and fail.
      Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
        << Name << D->getType() << FoundIvar->getType();
      Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
        << FoundIvar->getType();
      return 0;
    }
  }

  // Import the type.
  QualType T = Importer.Import(D->getType());
  if (T.isNull())
    return 0;

  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
  Expr *BitWidth = Importer.Import(D->getBitWidth());
  if (!BitWidth && D->getBitWidth())
    return 0;

  ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
                                              cast<ObjCContainerDecl>(DC),
                                       Importer.Import(D->getInnerLocStart()),
                                              Loc, Name.getAsIdentifierInfo(),
                                              T, TInfo, D->getAccessControl(),
                                              BitWidth, D->getSynthesize(),
                                              D->getBackingIvarReferencedInAccessor());
  ToIvar->setLexicalDeclContext(LexicalDC);
  Importer.Imported(D, ToIvar);
  LexicalDC->addDeclInternal(ToIvar);
  return ToIvar;

}

/// Import a variable, merging file-scope variables with an existing
/// externally-linked variable of equivalent type (including reconciling
/// complete vs. incomplete array types).
Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
  // Import the major distinguishing characteristics of a variable.
  DeclContext *DC, *LexicalDC;
  DeclarationName Name;
  SourceLocation Loc;
  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
    return 0;

  // Try to find a variable in our own ("to") context with the same name and
  // in the same context as the variable we're importing.
  if (D->isFileVarDecl()) {
    VarDecl *MergeWithVar = 0;
    SmallVector<NamedDecl *, 4> ConflictingDecls;
    unsigned IDNS = Decl::IDNS_Ordinary;
    SmallVector<NamedDecl *, 2> FoundDecls;
    DC->localUncachedLookup(Name, FoundDecls);
    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
      if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
        continue;

      if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
        // We have found a variable that we may need to merge with. Check it.
        if (FoundVar->hasExternalFormalLinkage() &&
            D->hasExternalFormalLinkage()) {
          if (Importer.IsStructurallyEquivalent(D->getType(),
                                                FoundVar->getType())) {
            MergeWithVar = FoundVar;
            break;
          }

          // Special case: one side declares 'T x[]' and the other 'T x[N]'.
          // The complete (constant-size) type wins.
          const ArrayType *FoundArray
            = Importer.getToContext().getAsArrayType(FoundVar->getType());
          const ArrayType *TArray
            = Importer.getToContext().getAsArrayType(D->getType());
          if (FoundArray && TArray) {
            if (isa<IncompleteArrayType>(FoundArray) &&
                isa<ConstantArrayType>(TArray)) {
              // Import the type.
              QualType T = Importer.Import(D->getType());
              if (T.isNull())
                return 0;

              FoundVar->setType(T);
              MergeWithVar = FoundVar;
              break;
            } else if (isa<IncompleteArrayType>(TArray) &&
                       isa<ConstantArrayType>(FoundArray)) {
              MergeWithVar = FoundVar;
              break;
            }
          }

          Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
            << Name << D->getType() << FoundVar->getType();
          Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
            << FoundVar->getType();
        }
      }

      ConflictingDecls.push_back(FoundDecls[I]);
    }

    if (MergeWithVar) {
      // An equivalent variable with external linkage has been found. Link
      // the two declarations, then merge them.
      Importer.Imported(D, MergeWithVar);

      if (VarDecl *DDef = D->getDefinition()) {
        if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
          // Both sides define the variable: ODR violation.
          Importer.ToDiag(ExistingDef->getLocation(),
                          diag::err_odr_variable_multiple_def)
            << Name;
          Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
        } else {
          // Only the "from" side has a definition; graft its initializer
          // (and any cached ICE evaluation) onto the merged variable.
          Expr *Init = Importer.Import(DDef->getInit());
          MergeWithVar->setInit(Init);
          if (DDef->isInitKnownICE()) {
            EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
            Eval->CheckedICE = true;
            Eval->IsICE = DDef->isInitICE();
          }
        }
      }

      return MergeWithVar;
    }

    if (!ConflictingDecls.empty()) {
      Name = Importer.HandleNameConflict(Name, DC, IDNS,
                                         ConflictingDecls.data(),
                                         ConflictingDecls.size());
      if (!Name)
        return 0;
    }
  }

  // Import the type.
  QualType T = Importer.Import(D->getType());
  if (T.isNull())
    return 0;

  // Create the imported variable.
  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
  VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
                                   Importer.Import(D->getInnerLocStart()),
                                   Loc, Name.getAsIdentifierInfo(),
                                   T, TInfo,
                                   D->getStorageClass());
  ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
  ToVar->setAccess(D->getAccess());
  ToVar->setLexicalDeclContext(LexicalDC);
  Importer.Imported(D, ToVar);
  LexicalDC->addDeclInternal(ToVar);

  // Merge the initializer.
  if (ImportDefinition(D, ToVar))
    return 0;

  return ToVar;
}

/// Import an implicit parameter (e.g. 'self'/'_cmd').
Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
  // Parameters are created in the translation unit's context, then moved
  // into the function declaration's context afterward.
  DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();

  // Import the name of this declaration.
  DeclarationName Name = Importer.Import(D->getDeclName());
  if (D->getDeclName() && !Name)
    return 0;

  // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation()); + + // Import the parameter's type. + QualType T = Importer.Import(D->getType()); + if (T.isNull()) + return 0; + + // Create the imported parameter. + ImplicitParamDecl *ToParm + = ImplicitParamDecl::Create(Importer.getToContext(), DC, + Loc, Name.getAsIdentifierInfo(), + T); + return Importer.Imported(D, ToParm); +} + +Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) { + // Parameters are created in the translation unit's context, then moved + // into the function declaration's context afterward. + DeclContext *DC = Importer.getToContext().getTranslationUnitDecl(); + + // Import the name of this declaration. + DeclarationName Name = Importer.Import(D->getDeclName()); + if (D->getDeclName() && !Name) + return 0; + + // Import the location of this declaration. + SourceLocation Loc = Importer.Import(D->getLocation()); + + // Import the parameter's type. + QualType T = Importer.Import(D->getType()); + if (T.isNull()) + return 0; + + // Create the imported parameter. + TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo()); + ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC, + Importer.Import(D->getInnerLocStart()), + Loc, Name.getAsIdentifierInfo(), + T, TInfo, D->getStorageClass(), + /*FIXME: Default argument*/ 0); + ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg()); + return Importer.Imported(D, ToParm); +} + +Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { + // Import the major distinguishing characteristics of a method. 
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  // Look for an existing method with this selector in the destination
+  // context; if one is found and is structurally equivalent, merge with it,
+  // otherwise diagnose the mismatch and fail the import.
+  SmallVector<NamedDecl *, 2> FoundDecls;
+  DC->localUncachedLookup(Name, FoundDecls);
+  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+    if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
+      if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
+        continue;
+
+      // Check return types.
+      if (!Importer.IsStructurallyEquivalent(D->getResultType(),
+                                             FoundMethod->getResultType())) {
+        Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+          << D->isInstanceMethod() << Name
+          << D->getResultType() << FoundMethod->getResultType();
+        Importer.ToDiag(FoundMethod->getLocation(),
+                        diag::note_odr_objc_method_here)
+          << D->isInstanceMethod() << Name;
+        return 0;
+      }
+
+      // Check the number of parameters.
+      if (D->param_size() != FoundMethod->param_size()) {
+        Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+          << D->isInstanceMethod() << Name
+          << D->param_size() << FoundMethod->param_size();
+        Importer.ToDiag(FoundMethod->getLocation(),
+                        diag::note_odr_objc_method_here)
+          << D->isInstanceMethod() << Name;
+        return 0;
+      }
+
+      // Check parameter types.
+      for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+             PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
+           P != PEnd; ++P, ++FoundP) {
+        if (!Importer.IsStructurallyEquivalent((*P)->getType(),
+                                               (*FoundP)->getType())) {
+          Importer.FromDiag((*P)->getLocation(),
+                            diag::err_odr_objc_method_param_type_inconsistent)
+            << D->isInstanceMethod() << Name
+            << (*P)->getType() << (*FoundP)->getType();
+          Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
+            << (*FoundP)->getType();
+          return 0;
+        }
+      }
+
+      // Check variadic/non-variadic.
+      // Check the number of parameters.
+      if (D->isVariadic() != FoundMethod->isVariadic()) {
+        Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+          << D->isInstanceMethod() << Name;
+        Importer.ToDiag(FoundMethod->getLocation(),
+                        diag::note_odr_objc_method_here)
+          << D->isInstanceMethod() << Name;
+        return 0;
+      }
+
+      // FIXME: Any other bits we need to merge?
+      return Importer.Imported(D, FoundMethod);
+    }
+  }
+
+  // No equivalent method found: import this one from scratch.
+  // Import the result type.
+  QualType ResultTy = Importer.Import(D->getResultType());
+  if (ResultTy.isNull())
+    return 0;
+
+  TypeSourceInfo *ResultTInfo = Importer.Import(D->getResultTypeSourceInfo());
+
+  ObjCMethodDecl *ToMethod
+    = ObjCMethodDecl::Create(Importer.getToContext(),
+                             Loc,
+                             Importer.Import(D->getLocEnd()),
+                             Name.getObjCSelector(),
+                             ResultTy, ResultTInfo, DC,
+                             D->isInstanceMethod(),
+                             D->isVariadic(),
+                             D->isPropertyAccessor(),
+                             D->isImplicit(),
+                             D->isDefined(),
+                             D->getImplementationControl(),
+                             D->hasRelatedResultType());
+
+  // FIXME: When we decide to merge method definitions, we'll need to
+  // deal with implicit parameters.
+
+  // Import the parameters
+  SmallVector<ParmVarDecl *, 5> ToParams;
+  for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
+                                      FromPEnd = D->param_end();
+       FromP != FromPEnd;
+       ++FromP) {
+    ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+    if (!ToP)
+      return 0;
+
+    ToParams.push_back(ToP);
+  }
+
+  // Set the parameters.
+  // Parameters were imported into the translation-unit context (see
+  // VisitParmVarDecl); re-parent them into the new method here.
+  for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
+    ToParams[I]->setOwningFunction(ToMethod);
+    ToMethod->addDeclInternal(ToParams[I]);
+  }
+  SmallVector<SourceLocation, 12> SelLocs;
+  D->getSelectorLocs(SelLocs);
+  ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+
+  ToMethod->setLexicalDeclContext(LexicalDC);
+  Importer.Imported(D, ToMethod);
+  LexicalDC->addDeclInternal(ToMethod);
+  return ToMethod;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+  // Import the major distinguishing characteristics of a category.
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  ObjCInterfaceDecl *ToInterface
+    = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+  if (!ToInterface)
+    return 0;
+
+  // Determine if we've already encountered this category.
+  ObjCCategoryDecl *MergeWithCategory
+    = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
+  ObjCCategoryDecl *ToCategory = MergeWithCategory;
+  if (!ToCategory) {
+    ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
+                                          Importer.Import(D->getAtStartLoc()),
+                                          Loc,
+                                       Importer.Import(D->getCategoryNameLoc()),
+                                          Name.getAsIdentifierInfo(),
+                                          ToInterface,
+                                       Importer.Import(D->getIvarLBraceLoc()),
+                                       Importer.Import(D->getIvarRBraceLoc()));
+    ToCategory->setLexicalDeclContext(LexicalDC);
+    LexicalDC->addDeclInternal(ToCategory);
+    Importer.Imported(D, ToCategory);
+
+    // Import protocols
+    SmallVector<ObjCProtocolDecl *, 4> Protocols;
+    SmallVector<SourceLocation, 4> ProtocolLocs;
+    ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
+      = D->protocol_loc_begin();
+    for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
+                                          FromProtoEnd = D->protocol_end();
+         FromProto != FromProtoEnd;
+         ++FromProto, ++FromProtoLoc) {
+      ObjCProtocolDecl *ToProto
+        = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+      if (!ToProto)
+        return 0;
+      Protocols.push_back(ToProto);
+      ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+    }
+
+    // FIXME: If we're merging, make sure that the protocol list is the same.
+    ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
+                                ProtocolLocs.data(), Importer.getToContext());
+
+  } else {
+    Importer.Imported(D, ToCategory);
+  }
+
+  // Import all of the members of this category.
+  ImportDeclContext(D);
+
+  // If we have an implementation, import it as well.
+  if (D->getImplementation()) {
+    ObjCCategoryImplDecl *Impl
+      = cast_or_null<ObjCCategoryImplDecl>(
+                                       Importer.Import(D->getImplementation()));
+    if (!Impl)
+      return 0;
+
+    ToCategory->setImplementation(Impl);
+  }
+
+  return ToCategory;
+}
+
+/// Import the definition (protocol list and members) of a protocol into an
+/// already-created "to" declaration. Returns true on error, following the
+/// importer's bool-error convention.
+bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
+                                       ObjCProtocolDecl *To,
+                                       ImportDefinitionKind Kind) {
+  if (To->getDefinition()) {
+    // Already defined in the destination; only force members if requested.
+    if (shouldForceImportDeclContext(Kind))
+      ImportDeclContext(From);
+    return false;
+  }
+
+  // Start the protocol definition
+  To->startDefinition();
+
+  // Import protocols
+  SmallVector<ObjCProtocolDecl *, 4> Protocols;
+  SmallVector<SourceLocation, 4> ProtocolLocs;
+  ObjCProtocolDecl::protocol_loc_iterator
+  FromProtoLoc = From->protocol_loc_begin();
+  for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
+                                        FromProtoEnd = From->protocol_end();
+       FromProto != FromProtoEnd;
+       ++FromProto, ++FromProtoLoc) {
+    ObjCProtocolDecl *ToProto
+      = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+    if (!ToProto)
+      return true;
+    Protocols.push_back(ToProto);
+    ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+  }
+
+  // FIXME: If we're merging, make sure that the protocol list is the same.
+  To->setProtocolList(Protocols.data(), Protocols.size(),
+                      ProtocolLocs.data(), Importer.getToContext());
+
+  if (shouldForceImportDeclContext(Kind)) {
+    // Import all of the members of this protocol.
+    ImportDeclContext(From, /*ForceImport=*/true);
+  }
+  return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+  // If this protocol has a definition in the translation unit we're coming
+  // from, but this particular declaration is not that definition, import the
+  // definition and map to that.
+ ObjCProtocolDecl *Definition = D->getDefinition(); + if (Definition && Definition != D) { + Decl *ImportedDef = Importer.Import(Definition); + if (!ImportedDef) + return 0; + + return Importer.Imported(D, ImportedDef); + } + + // Import the major distinguishing characteristics of a protocol. + DeclContext *DC, *LexicalDC; + DeclarationName Name; + SourceLocation Loc; + if (ImportDeclParts(D, DC, LexicalDC, Name, Loc)) + return 0; + + ObjCProtocolDecl *MergeWithProtocol = 0; + SmallVector<NamedDecl *, 2> FoundDecls; + DC->localUncachedLookup(Name, FoundDecls); + for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) { + if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol)) + continue; + + if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I]))) + break; + } + + ObjCProtocolDecl *ToProto = MergeWithProtocol; + if (!ToProto) { + ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC, + Name.getAsIdentifierInfo(), Loc, + Importer.Import(D->getAtStartLoc()), + /*PrevDecl=*/0); + ToProto->setLexicalDeclContext(LexicalDC); + LexicalDC->addDeclInternal(ToProto); + } + + Importer.Imported(D, ToProto); + + if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto)) + return 0; + + return ToProto; +} + +bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From, + ObjCInterfaceDecl *To, + ImportDefinitionKind Kind) { + if (To->getDefinition()) { + // Check consistency of superclass. 
+    ObjCInterfaceDecl *FromSuper = From->getSuperClass();
+    if (FromSuper) {
+      FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
+      if (!FromSuper)
+        return true;
+    }
+
+    // Diagnose an ODR violation when one side has a superclass and the other
+    // does not, or when the superclasses are different entities. Note this
+    // only diagnoses; the import still proceeds below.
+    ObjCInterfaceDecl *ToSuper = To->getSuperClass();
+    if ((bool)FromSuper != (bool)ToSuper ||
+        (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
+      Importer.ToDiag(To->getLocation(),
+                      diag::err_odr_objc_superclass_inconsistent)
+        << To->getDeclName();
+      if (ToSuper)
+        Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
+          << To->getSuperClass()->getDeclName();
+      else
+        Importer.ToDiag(To->getLocation(),
+                        diag::note_odr_objc_missing_superclass);
+      if (From->getSuperClass())
+        Importer.FromDiag(From->getSuperClassLoc(),
+                          diag::note_odr_objc_superclass)
+          << From->getSuperClass()->getDeclName();
+      else
+        Importer.FromDiag(From->getLocation(),
+                          diag::note_odr_objc_missing_superclass);
+    }
+
+    if (shouldForceImportDeclContext(Kind))
+      ImportDeclContext(From);
+    return false;
+  }
+
+  // Start the definition.
+  To->startDefinition();
+
+  // If this class has a superclass, import it.
+  if (From->getSuperClass()) {
+    ObjCInterfaceDecl *Super = cast_or_null<ObjCInterfaceDecl>(
+                                 Importer.Import(From->getSuperClass()));
+    if (!Super)
+      return true;
+
+    To->setSuperClass(Super);
+    To->setSuperClassLoc(Importer.Import(From->getSuperClassLoc()));
+  }
+
+  // Import protocols
+  SmallVector<ObjCProtocolDecl *, 4> Protocols;
+  SmallVector<SourceLocation, 4> ProtocolLocs;
+  ObjCInterfaceDecl::protocol_loc_iterator
+  FromProtoLoc = From->protocol_loc_begin();
+
+  for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
+                                         FromProtoEnd = From->protocol_end();
+       FromProto != FromProtoEnd;
+       ++FromProto, ++FromProtoLoc) {
+    ObjCProtocolDecl *ToProto
+      = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+    if (!ToProto)
+      return true;
+    Protocols.push_back(ToProto);
+    ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+  }
+
+  // FIXME: If we're merging, make sure that the protocol list is the same.
+  To->setProtocolList(Protocols.data(), Protocols.size(),
+                      ProtocolLocs.data(), Importer.getToContext());
+
+  // Import categories. When the categories themselves are imported, they'll
+  // hook themselves into this interface.
+  for (ObjCInterfaceDecl::known_categories_iterator
+         Cat = From->known_categories_begin(),
+         CatEnd = From->known_categories_end();
+       Cat != CatEnd; ++Cat) {
+    Importer.Import(*Cat);
+  }
+
+  // If we have an @implementation, import it as well.
+  if (From->getImplementation()) {
+    ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+                                     Importer.Import(From->getImplementation()));
+    if (!Impl)
+      return true;
+
+    To->setImplementation(Impl);
+  }
+
+  if (shouldForceImportDeclContext(Kind)) {
+    // Import all of the members of this class.
+    ImportDeclContext(From, /*ForceImport=*/true);
+  }
+  return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+  // If this class has a definition in the translation unit we're coming from,
+  // but this particular declaration is not that definition, import the
+  // definition and map to that.
+  ObjCInterfaceDecl *Definition = D->getDefinition();
+  if (Definition && Definition != D) {
+    Decl *ImportedDef = Importer.Import(Definition);
+    if (!ImportedDef)
+      return 0;
+
+    return Importer.Imported(D, ImportedDef);
+  }
+
+  // Import the major distinguishing characteristics of an @interface.
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  // Look for an existing interface with the same name.
+  ObjCInterfaceDecl *MergeWithIface = 0;
+  SmallVector<NamedDecl *, 2> FoundDecls;
+  DC->localUncachedLookup(Name, FoundDecls);
+  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+    if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+      continue;
+
+    if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
+      break;
+  }
+
+  // Create an interface declaration, if one does not already exist.
+ ObjCInterfaceDecl *ToIface = MergeWithIface; + if (!ToIface) { + ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC, + Importer.Import(D->getAtStartLoc()), + Name.getAsIdentifierInfo(), + /*PrevDecl=*/0,Loc, + D->isImplicitInterfaceDecl()); + ToIface->setLexicalDeclContext(LexicalDC); + LexicalDC->addDeclInternal(ToIface); + } + Importer.Imported(D, ToIface); + + if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface)) + return 0; + + return ToIface; +} + +Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) { + ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>( + Importer.Import(D->getCategoryDecl())); + if (!Category) + return 0; + + ObjCCategoryImplDecl *ToImpl = Category->getImplementation(); + if (!ToImpl) { + DeclContext *DC = Importer.ImportContext(D->getDeclContext()); + if (!DC) + return 0; + + SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc()); + ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC, + Importer.Import(D->getIdentifier()), + Category->getClassInterface(), + Importer.Import(D->getLocation()), + Importer.Import(D->getAtStartLoc()), + CategoryNameLoc); + + DeclContext *LexicalDC = DC; + if (D->getDeclContext() != D->getLexicalDeclContext()) { + LexicalDC = Importer.ImportContext(D->getLexicalDeclContext()); + if (!LexicalDC) + return 0; + + ToImpl->setLexicalDeclContext(LexicalDC); + } + + LexicalDC->addDeclInternal(ToImpl); + Category->setImplementation(ToImpl); + } + + Importer.Imported(D, ToImpl); + ImportDeclContext(D); + return ToImpl; +} + +Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { + // Find the corresponding interface. + ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>( + Importer.Import(D->getClassInterface())); + if (!Iface) + return 0; + + // Import the superclass, if any. 
+  ObjCInterfaceDecl *Super = 0;
+  if (D->getSuperClass()) {
+    Super = cast_or_null<ObjCInterfaceDecl>(
+                                          Importer.Import(D->getSuperClass()));
+    if (!Super)
+      return 0;
+  }
+
+  ObjCImplementationDecl *Impl = Iface->getImplementation();
+  if (!Impl) {
+    // We haven't imported an implementation yet. Create a new @implementation
+    // now.
+    Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
+                                  Importer.ImportContext(D->getDeclContext()),
+                                          Iface, Super,
+                                          Importer.Import(D->getLocation()),
+                                          Importer.Import(D->getAtStartLoc()),
+                                          Importer.Import(D->getSuperClassLoc()),
+                                          Importer.Import(D->getIvarLBraceLoc()),
+                                          Importer.Import(D->getIvarRBraceLoc()));
+
+    if (D->getDeclContext() != D->getLexicalDeclContext()) {
+      DeclContext *LexicalDC
+        = Importer.ImportContext(D->getLexicalDeclContext());
+      if (!LexicalDC)
+        return 0;
+      Impl->setLexicalDeclContext(LexicalDC);
+    }
+
+    // Associate the implementation with the class it implements.
+    Iface->setImplementation(Impl);
+    Importer.Imported(D, Iface->getImplementation());
+  } else {
+    Importer.Imported(D, Iface->getImplementation());
+
+    // Verify that the existing @implementation has the same superclass.
+    if ((Super && !Impl->getSuperClass()) ||
+        (!Super && Impl->getSuperClass()) ||
+        (Super && Impl->getSuperClass() &&
+         !declaresSameEntity(Super->getCanonicalDecl(),
+                             Impl->getSuperClass()))) {
+      Importer.ToDiag(Impl->getLocation(),
+                      diag::err_odr_objc_superclass_inconsistent)
+        << Iface->getDeclName();
+      // FIXME: It would be nice to have the location of the superclass
+      // below.
+      if (Impl->getSuperClass())
+        Importer.ToDiag(Impl->getLocation(),
+                        diag::note_odr_objc_superclass)
+          << Impl->getSuperClass()->getDeclName();
+      else
+        Importer.ToDiag(Impl->getLocation(),
+                        diag::note_odr_objc_missing_superclass);
+      if (D->getSuperClass())
+        Importer.FromDiag(D->getLocation(),
+                          diag::note_odr_objc_superclass)
+          << D->getSuperClass()->getDeclName();
+      else
+        Importer.FromDiag(D->getLocation(),
+                          diag::note_odr_objc_missing_superclass);
+      return 0;
+    }
+  }
+
+  // Import all of the members of this @implementation.
+  ImportDeclContext(D);
+
+  return Impl;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+  // Import the major distinguishing characteristics of an @property.
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  // Check whether we have already imported this property.
+  SmallVector<NamedDecl *, 2> FoundDecls;
+  DC->localUncachedLookup(Name, FoundDecls);
+  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+    if (ObjCPropertyDecl *FoundProp
+                                = dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
+      // Check property types.
+      if (!Importer.IsStructurallyEquivalent(D->getType(),
+                                             FoundProp->getType())) {
+        Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+          << Name << D->getType() << FoundProp->getType();
+        Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
+          << FoundProp->getType();
+        return 0;
+      }
+
+      // FIXME: Check property attributes, getters, setters, etc.?
+
+      // Consider these properties to be equivalent.
+      Importer.Imported(D, FoundProp);
+      return FoundProp;
+    }
+  }
+
+  // Import the type.
+  TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo());
+  if (!T)
+    return 0;
+
+  // Create the new property.
+  ObjCPropertyDecl *ToProperty
+    = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
+                               Name.getAsIdentifierInfo(),
+                               Importer.Import(D->getAtLoc()),
+                               Importer.Import(D->getLParenLoc()),
+                               T,
+                               D->getPropertyImplementation());
+  Importer.Imported(D, ToProperty);
+  ToProperty->setLexicalDeclContext(LexicalDC);
+  LexicalDC->addDeclInternal(ToProperty);
+
+  // Copy the property's attributes, accessor selectors/methods and backing
+  // ivar across, importing each piece through the ASTImporter.
+  ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+  ToProperty->setPropertyAttributesAsWritten(
+                                      D->getPropertyAttributesAsWritten());
+  ToProperty->setGetterName(Importer.Import(D->getGetterName()));
+  ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+  ToProperty->setGetterMethodDecl(
+     cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
+  ToProperty->setSetterMethodDecl(
+     cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
+  ToProperty->setPropertyIvarDecl(
+       cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+  return ToProperty;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+  ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
+                                        Importer.Import(D->getPropertyDecl()));
+  if (!Property)
+    return 0;
+
+  DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+  if (!DC)
+    return 0;
+
+  // Import the lexical declaration context.
+  DeclContext *LexicalDC = DC;
+  if (D->getDeclContext() != D->getLexicalDeclContext()) {
+    LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+    if (!LexicalDC)
+      return 0;
+  }
+
+  ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+  if (!InImpl)
+    return 0;
+
+  // Import the ivar (for an @synthesize).
+  ObjCIvarDecl *Ivar = 0;
+  if (D->getPropertyIvarDecl()) {
+    Ivar = cast_or_null<ObjCIvarDecl>(
+                                    Importer.Import(D->getPropertyIvarDecl()));
+    if (!Ivar)
+      return 0;
+  }
+
+  ObjCPropertyImplDecl *ToImpl
+    = InImpl->FindPropertyImplDecl(Property->getIdentifier());
+  if (!ToImpl) {
+    ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
+                                          Importer.Import(D->getLocStart()),
+                                          Importer.Import(D->getLocation()),
+                                          Property,
+                                          D->getPropertyImplementation(),
+                                          Ivar,
+                                  Importer.Import(D->getPropertyIvarDeclLoc()));
+    ToImpl->setLexicalDeclContext(LexicalDC);
+    Importer.Imported(D, ToImpl);
+    LexicalDC->addDeclInternal(ToImpl);
+  } else {
+    // Check that we have the same kind of property implementation (@synthesize
+    // vs. @dynamic).
+    if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
+      Importer.ToDiag(ToImpl->getLocation(),
+                      diag::err_odr_objc_property_impl_kind_inconsistent)
+        << Property->getDeclName()
+        << (ToImpl->getPropertyImplementation()
+                                              == ObjCPropertyImplDecl::Dynamic);
+      Importer.FromDiag(D->getLocation(),
+                        diag::note_odr_objc_property_impl_kind)
+        << D->getPropertyDecl()->getDeclName()
+        << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+      return 0;
+    }
+
+    // For @synthesize, check that we have the same
+    if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
+        Ivar != ToImpl->getPropertyIvarDecl()) {
+      Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
+                      diag::err_odr_objc_synthesize_ivar_inconsistent)
+        << Property->getDeclName()
+        << ToImpl->getPropertyIvarDecl()->getDeclName()
+        << Ivar->getDeclName();
+      Importer.FromDiag(D->getPropertyIvarDeclLoc(),
+                        diag::note_odr_objc_synthesize_ivar_here)
+        << D->getPropertyIvarDecl()->getDeclName();
+      return 0;
+    }
+
+    // Merge the existing implementation with the new implementation.
+    Importer.Imported(D, ToImpl);
+  }
+
+  return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+  // For template arguments, we adopt the translation unit as our declaration
+  // context. This context will be fixed when the actual template declaration
+  // is created.
+
+  // FIXME: Import default argument.
+  return TemplateTypeParmDecl::Create(Importer.getToContext(),
+                              Importer.getToContext().getTranslationUnitDecl(),
+                                      Importer.Import(D->getLocStart()),
+                                      Importer.Import(D->getLocation()),
+                                      D->getDepth(),
+                                      D->getIndex(),
+                                      Importer.Import(D->getIdentifier()),
+                                      D->wasDeclaredWithTypename(),
+                                      D->isParameterPack());
+}
+
+Decl *
+ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+  // Import the name of this declaration.
+  DeclarationName Name = Importer.Import(D->getDeclName());
+  if (D->getDeclName() && !Name)
+    return 0;
+
+  // Import the location of this declaration.
+  SourceLocation Loc = Importer.Import(D->getLocation());
+
+  // Import the type of this declaration.
+  QualType T = Importer.Import(D->getType());
+  if (T.isNull())
+    return 0;
+
+  // Import type-source information.
+  TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+  if (D->getTypeSourceInfo() && !TInfo)
+    return 0;
+
+  // FIXME: Import default argument.
+
+  return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
+                              Importer.getToContext().getTranslationUnitDecl(),
+                                         Importer.Import(D->getInnerLocStart()),
+                                         Loc, D->getDepth(), D->getPosition(),
+                                         Name.getAsIdentifierInfo(),
+                                         T, D->isParameterPack(), TInfo);
+}
+
+Decl *
+ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+  // Import the name of this declaration.
+  DeclarationName Name = Importer.Import(D->getDeclName());
+  if (D->getDeclName() && !Name)
+    return 0;
+
+  // Import the location of this declaration.
+  SourceLocation Loc = Importer.Import(D->getLocation());
+
+  // Import template parameters.
+  TemplateParameterList *TemplateParams
+    = ImportTemplateParameterList(D->getTemplateParameters());
+  if (!TemplateParams)
+    return 0;
+
+  // FIXME: Import default argument.
+
+  return TemplateTemplateParmDecl::Create(Importer.getToContext(),
+                              Importer.getToContext().getTranslationUnitDecl(),
+                                          Loc, D->getDepth(), D->getPosition(),
+                                          D->isParameterPack(),
+                                          Name.getAsIdentifierInfo(),
+                                          TemplateParams);
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+  // If this record has a definition in the translation unit we're coming from,
+  // but this particular declaration is not that definition, import the
+  // definition and map to that.
+  CXXRecordDecl *Definition
+    = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+  if (Definition && Definition != D->getTemplatedDecl()) {
+    Decl *ImportedDef
+      = Importer.Import(Definition->getDescribedClassTemplate());
+    if (!ImportedDef)
+      return 0;
+
+    return Importer.Imported(D, ImportedDef);
+  }
+
+  // Import the major distinguishing characteristics of this class template.
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  // We may already have a template of the same name; try to find and match it.
+  if (!DC->isFunctionOrMethod()) {
+    SmallVector<NamedDecl *, 4> ConflictingDecls;
+    SmallVector<NamedDecl *, 2> FoundDecls;
+    DC->localUncachedLookup(Name, FoundDecls);
+    for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+      if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+        continue;
+
+      Decl *Found = FoundDecls[I];
+      if (ClassTemplateDecl *FoundTemplate
+                                        = dyn_cast<ClassTemplateDecl>(Found)) {
+        if (IsStructuralMatch(D, FoundTemplate)) {
+          // The class templates structurally match; call it the same template.
+          // FIXME: We may be filling in a forward declaration here. Handle
+          // this case!
+          // Map both the template and its templated record to the existing
+          // destination declarations.
+          Importer.Imported(D->getTemplatedDecl(),
+                            FoundTemplate->getTemplatedDecl());
+          return Importer.Imported(D, FoundTemplate);
+        }
+      }
+
+      ConflictingDecls.push_back(FoundDecls[I]);
+    }
+
+    if (!ConflictingDecls.empty()) {
+      Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+                                         ConflictingDecls.data(),
+                                         ConflictingDecls.size());
+    }
+
+    if (!Name)
+      return 0;
+  }
+
+  CXXRecordDecl *DTemplated = D->getTemplatedDecl();
+
+  // Create the declaration that is being templated.
+  SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+  SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+  CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
+                                                     DTemplated->getTagKind(),
+                                                     DC, StartLoc, IdLoc,
+                                                   Name.getAsIdentifierInfo());
+  D2Templated->setAccess(DTemplated->getAccess());
+  D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+  D2Templated->setLexicalDeclContext(LexicalDC);
+
+  // Create the class template declaration itself.
+  TemplateParameterList *TemplateParams
+    = ImportTemplateParameterList(D->getTemplateParameters());
+  if (!TemplateParams)
+    return 0;
+
+  ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
+                                                    Loc, Name, TemplateParams,
+                                                    D2Templated,
+                                                    /*PrevDecl=*/0);
+  D2Templated->setDescribedClassTemplate(D2);
+
+  D2->setAccess(D->getAccess());
+  D2->setLexicalDeclContext(LexicalDC);
+  LexicalDC->addDeclInternal(D2);
+
+  // Note the relationship between the class templates.
+  Importer.Imported(D, D2);
+  Importer.Imported(DTemplated, D2Templated);
+
+  if (DTemplated->isCompleteDefinition() &&
+      !D2Templated->isCompleteDefinition()) {
+    // FIXME: Import definition!
+  }
+
+  return D2;
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+                                     ClassTemplateSpecializationDecl *D) {
+  // If this record has a definition in the translation unit we're coming from,
+  // but this particular declaration is not that definition, import the
+  // definition and map to that.
+  TagDecl *Definition = D->getDefinition();
+  if (Definition && Definition != D) {
+    Decl *ImportedDef = Importer.Import(Definition);
+    if (!ImportedDef)
+      return 0;
+
+    return Importer.Imported(D, ImportedDef);
+  }
+
+  ClassTemplateDecl *ClassTemplate
+    = cast_or_null<ClassTemplateDecl>(Importer.Import(
+                                                 D->getSpecializedTemplate()));
+  if (!ClassTemplate)
+    return 0;
+
+  // Import the context of this declaration.
+  DeclContext *DC = ClassTemplate->getDeclContext();
+  if (!DC)
+    return 0;
+
+  DeclContext *LexicalDC = DC;
+  if (D->getDeclContext() != D->getLexicalDeclContext()) {
+    LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+    if (!LexicalDC)
+      return 0;
+  }
+
+  // Import the location of this declaration.
+  SourceLocation StartLoc = Importer.Import(D->getLocStart());
+  SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+  // Import template arguments.
+  SmallVector<TemplateArgument, 2> TemplateArgs;
+  if (ImportTemplateArguments(D->getTemplateArgs().data(),
+                              D->getTemplateArgs().size(),
+                              TemplateArgs))
+    return 0;
+
+  // Try to find an existing specialization with these template arguments.
+  void *InsertPos = 0;
+  ClassTemplateSpecializationDecl *D2
+    = ClassTemplate->findSpecialization(TemplateArgs.data(),
+                                        TemplateArgs.size(), InsertPos);
+  if (D2) {
+    // We already have a class template specialization with these template
+    // arguments.
+
+    // FIXME: Check for specialization vs. instantiation errors.
+
+    if (RecordDecl *FoundDef = D2->getDefinition()) {
+      if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+        // The record types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; call it the same
+        // function.
+        return Importer.Imported(D, FoundDef);
+      }
+    }
+  } else {
+    // Create a new specialization.
+    D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+                                                 D->getTagKind(), DC,
+                                                 StartLoc, IdLoc,
+                                                 ClassTemplate,
+                                                 TemplateArgs.data(),
+                                                 TemplateArgs.size(),
+                                                 /*PrevDecl=*/0);
+    D2->setSpecializationKind(D->getSpecializationKind());
+
+    // Add this specialization to the class template.
+    ClassTemplate->AddSpecialization(D2, InsertPos);
+
+    // Import the qualifier, if any.
+    D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+    // Add the specialization to this context.
+    D2->setLexicalDeclContext(LexicalDC);
+    LexicalDC->addDeclInternal(D2);
+  }
+  Importer.Imported(D, D2);
+
+  if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+    return 0;
+
+  return D2;
+}
+
+Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
+  // If this variable has a definition in the translation unit we're coming
+  // from,
+  // but this particular declaration is not that definition, import the
+  // definition and map to that.
+  VarDecl *Definition =
+      cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
+  if (Definition && Definition != D->getTemplatedDecl()) {
+    Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
+    if (!ImportedDef)
+      return 0;
+
+    return Importer.Imported(D, ImportedDef);
+  }
+
+  // Import the major distinguishing characteristics of this variable template.
+  DeclContext *DC, *LexicalDC;
+  DeclarationName Name;
+  SourceLocation Loc;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+    return 0;
+
+  // We may already have a template of the same name; try to find and match it.
+  assert(!DC->isFunctionOrMethod() &&
+         "Variable templates cannot be declared at function scope");
+  SmallVector<NamedDecl *, 4> ConflictingDecls;
+  SmallVector<NamedDecl *, 2> FoundDecls;
+  DC->localUncachedLookup(Name, FoundDecls);
+  for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+    if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+      continue;
+
+    Decl *Found = FoundDecls[I];
+    if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+      if (IsStructuralMatch(D, FoundTemplate)) {
+        // The variable templates structurally match; call it the same template.
+        Importer.Imported(D->getTemplatedDecl(),
+                          FoundTemplate->getTemplatedDecl());
+        return Importer.Imported(D, FoundTemplate);
+      }
+    }
+
+    ConflictingDecls.push_back(FoundDecls[I]);
+  }
+
+  if (!ConflictingDecls.empty()) {
+    Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+                                       ConflictingDecls.data(),
+                                       ConflictingDecls.size());
+  }
+
+  if (!Name)
+    return 0;
+
+  VarDecl *DTemplated = D->getTemplatedDecl();
+
+  // Import the type.
+  QualType T = Importer.Import(DTemplated->getType());
+  if (T.isNull())
+    return 0;
+
+  // Create the declaration that is being templated.
+  SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+  SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+  TypeSourceInfo *TInfo = Importer.Import(DTemplated->getTypeSourceInfo());
+  VarDecl *D2Templated = VarDecl::Create(Importer.getToContext(), DC, StartLoc,
+                                         IdLoc, Name.getAsIdentifierInfo(), T,
+                                         TInfo, DTemplated->getStorageClass());
+  D2Templated->setAccess(DTemplated->getAccess());
+  D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+  D2Templated->setLexicalDeclContext(LexicalDC);
+
+  // Importer.Imported(DTemplated, D2Templated);
+  // LexicalDC->addDeclInternal(D2Templated);
+
+  // Merge the initializer.
+  if (ImportDefinition(DTemplated, D2Templated))
+    return 0;
+
+  // Create the variable template declaration itself.
+  TemplateParameterList *TemplateParams =
+      ImportTemplateParameterList(D->getTemplateParameters());
+  if (!TemplateParams)
+    return 0;
+
+  VarTemplateDecl *D2 = VarTemplateDecl::Create(
+      Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated,
+      /*PrevDecl=*/0);
+  D2Templated->setDescribedVarTemplate(D2);
+
+  D2->setAccess(D->getAccess());
+  D2->setLexicalDeclContext(LexicalDC);
+  LexicalDC->addDeclInternal(D2);
+
+  // Note the relationship between the variable templates.
+  Importer.Imported(D, D2);
+  Importer.Imported(DTemplated, D2Templated);
+
+  if (DTemplated->isThisDeclarationADefinition() &&
+      !D2Templated->isThisDeclarationADefinition()) {
+    // FIXME: Import definition!
+  }
+
+  return D2;
+}
+
+Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
+    VarTemplateSpecializationDecl *D) {
+  // If this record has a definition in the translation unit we're coming from,
+  // but this particular declaration is not that definition, import the
+  // definition and map to that.
+  VarDecl *Definition = D->getDefinition();
+  if (Definition && Definition != D) {
+    Decl *ImportedDef = Importer.Import(Definition);
+    if (!ImportedDef)
+      return 0;
+
+    return Importer.Imported(D, ImportedDef);
+  }
+
+  VarTemplateDecl *VarTemplate = cast_or_null<VarTemplateDecl>(
+      Importer.Import(D->getSpecializedTemplate()));
+  if (!VarTemplate)
+    return 0;
+
+  // Import the context of this declaration.
+  DeclContext *DC = VarTemplate->getDeclContext();
+  if (!DC)
+    return 0;
+
+  DeclContext *LexicalDC = DC;
+  if (D->getDeclContext() != D->getLexicalDeclContext()) {
+    LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+    if (!LexicalDC)
+      return 0;
+  }
+
+  // Import the location of this declaration.
+  SourceLocation StartLoc = Importer.Import(D->getLocStart());
+  SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+  // Import template arguments.
+  SmallVector<TemplateArgument, 2> TemplateArgs;
+  if (ImportTemplateArguments(D->getTemplateArgs().data(),
+                              D->getTemplateArgs().size(), TemplateArgs))
+    return 0;
+
+  // Try to find an existing specialization with these template arguments.
+  void *InsertPos = 0;
+  VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization(
+      TemplateArgs.data(), TemplateArgs.size(), InsertPos);
+  if (D2) {
+    // We already have a variable template specialization with these template
+    // arguments.
+
+    // FIXME: Check for specialization vs. instantiation errors.
+
+    if (VarDecl *FoundDef = D2->getDefinition()) {
+      if (!D->isThisDeclarationADefinition() ||
+          IsStructuralMatch(D, FoundDef)) {
+        // The record types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; call it the same
+        // variable.
+        return Importer.Imported(D, FoundDef);
+      }
+    }
+  } else {
+
+    // Import the type.
+    QualType T = Importer.Import(D->getType());
+    if (T.isNull())
+      return 0;
+    TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+
+    // Create a new specialization.
+    D2 = VarTemplateSpecializationDecl::Create(
+        Importer.getToContext(), DC, StartLoc, IdLoc, VarTemplate, T, TInfo,
+        D->getStorageClass(), TemplateArgs.data(), TemplateArgs.size());
+    D2->setSpecializationKind(D->getSpecializationKind());
+    D2->setTemplateArgsInfo(D->getTemplateArgsInfo());
+
+    // Add this specialization to the class template.
+    VarTemplate->AddSpecialization(D2, InsertPos);
+
+    // Import the qualifier, if any.
+    D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+    // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC); + LexicalDC->addDeclInternal(D2); + } + Importer.Imported(D, D2); + + if (D->isThisDeclarationADefinition() && ImportDefinition(D, D2)) + return 0; + + return D2; +} + +//---------------------------------------------------------------------------- +// Import Statements +//---------------------------------------------------------------------------- + +Stmt *ASTNodeImporter::VisitStmt(Stmt *S) { + Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node) + << S->getStmtClassName(); + return 0; +} + +//---------------------------------------------------------------------------- +// Import Expressions +//---------------------------------------------------------------------------- +Expr *ASTNodeImporter::VisitExpr(Expr *E) { + Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node) + << E->getStmtClassName(); + return 0; +} + +Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) { + ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl())); + if (!ToD) + return 0; + + NamedDecl *FoundD = 0; + if (E->getDecl() != E->getFoundDecl()) { + FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl())); + if (!FoundD) + return 0; + } + + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(), + Importer.Import(E->getQualifierLoc()), + Importer.Import(E->getTemplateKeywordLoc()), + ToD, + E->refersToEnclosingLocal(), + Importer.Import(E->getLocation()), + T, E->getValueKind(), + FoundD, + /*FIXME:TemplateArgs=*/0); + if (E->hadMultipleCandidates()) + DRE->setHadMultipleCandidates(true); + return DRE; +} + +Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + return IntegerLiteral::Create(Importer.getToContext(), + E->getValue(), T, + Importer.Import(E->getLocation())); +} + +Expr 
*ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + return new (Importer.getToContext()) CharacterLiteral(E->getValue(), + E->getKind(), T, + Importer.Import(E->getLocation())); +} + +Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) { + Expr *SubExpr = Importer.Import(E->getSubExpr()); + if (!SubExpr) + return 0; + + return new (Importer.getToContext()) + ParenExpr(Importer.Import(E->getLParen()), + Importer.Import(E->getRParen()), + SubExpr); +} + +Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + Expr *SubExpr = Importer.Import(E->getSubExpr()); + if (!SubExpr) + return 0; + + return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(), + T, E->getValueKind(), + E->getObjectKind(), + Importer.Import(E->getOperatorLoc())); +} + +Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr( + UnaryExprOrTypeTraitExpr *E) { + QualType ResultType = Importer.Import(E->getType()); + + if (E->isArgumentType()) { + TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo()); + if (!TInfo) + return 0; + + return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(), + TInfo, ResultType, + Importer.Import(E->getOperatorLoc()), + Importer.Import(E->getRParenLoc())); + } + + Expr *SubExpr = Importer.Import(E->getArgumentExpr()); + if (!SubExpr) + return 0; + + return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(), + SubExpr, ResultType, + Importer.Import(E->getOperatorLoc()), + Importer.Import(E->getRParenLoc())); +} + +Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + Expr *LHS = Importer.Import(E->getLHS()); + if (!LHS) + return 0; + + Expr *RHS = Importer.Import(E->getRHS()); + if (!RHS) + return 0; + + return new (Importer.getToContext()) 
BinaryOperator(LHS, RHS, E->getOpcode(), + T, E->getValueKind(), + E->getObjectKind(), + Importer.Import(E->getOperatorLoc()), + E->isFPContractable()); +} + +Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + QualType CompLHSType = Importer.Import(E->getComputationLHSType()); + if (CompLHSType.isNull()) + return 0; + + QualType CompResultType = Importer.Import(E->getComputationResultType()); + if (CompResultType.isNull()) + return 0; + + Expr *LHS = Importer.Import(E->getLHS()); + if (!LHS) + return 0; + + Expr *RHS = Importer.Import(E->getRHS()); + if (!RHS) + return 0; + + return new (Importer.getToContext()) + CompoundAssignOperator(LHS, RHS, E->getOpcode(), + T, E->getValueKind(), + E->getObjectKind(), + CompLHSType, CompResultType, + Importer.Import(E->getOperatorLoc()), + E->isFPContractable()); +} + +static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) { + if (E->path_empty()) return false; + + // TODO: import cast paths + return true; +} + +Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + Expr *SubExpr = Importer.Import(E->getSubExpr()); + if (!SubExpr) + return 0; + + CXXCastPath BasePath; + if (ImportCastPath(E, BasePath)) + return 0; + + return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(), + SubExpr, &BasePath, E->getValueKind()); +} + +Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) { + QualType T = Importer.Import(E->getType()); + if (T.isNull()) + return 0; + + Expr *SubExpr = Importer.Import(E->getSubExpr()); + if (!SubExpr) + return 0; + + TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten()); + if (!TInfo && E->getTypeInfoAsWritten()) + return 0; + + CXXCastPath BasePath; + if (ImportCastPath(E, BasePath)) + return 0; + + return CStyleCastExpr::Create(Importer.getToContext(), T, + 
E->getValueKind(), E->getCastKind(), + SubExpr, &BasePath, TInfo, + Importer.Import(E->getLParenLoc()), + Importer.Import(E->getRParenLoc())); +} + +ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager, + ASTContext &FromContext, FileManager &FromFileManager, + bool MinimalImport) + : ToContext(ToContext), FromContext(FromContext), + ToFileManager(ToFileManager), FromFileManager(FromFileManager), + Minimal(MinimalImport), LastDiagFromFrom(false) +{ + ImportedDecls[FromContext.getTranslationUnitDecl()] + = ToContext.getTranslationUnitDecl(); +} + +ASTImporter::~ASTImporter() { } + +QualType ASTImporter::Import(QualType FromT) { + if (FromT.isNull()) + return QualType(); + + const Type *fromTy = FromT.getTypePtr(); + + // Check whether we've already imported this type. + llvm::DenseMap<const Type *, const Type *>::iterator Pos + = ImportedTypes.find(fromTy); + if (Pos != ImportedTypes.end()) + return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers()); + + // Import the type + ASTNodeImporter Importer(*this); + QualType ToT = Importer.Visit(fromTy); + if (ToT.isNull()) + return ToT; + + // Record the imported type. + ImportedTypes[fromTy] = ToT.getTypePtr(); + + return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers()); +} + +TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) { + if (!FromTSI) + return FromTSI; + + // FIXME: For now we just create a "trivial" type source info based + // on the type and a single location. Implement a real version of this. + QualType T = Import(FromTSI->getType()); + if (T.isNull()) + return 0; + + return ToContext.getTrivialTypeSourceInfo(T, + FromTSI->getTypeLoc().getLocStart()); +} + +Decl *ASTImporter::Import(Decl *FromD) { + if (!FromD) + return 0; + + ASTNodeImporter Importer(*this); + + // Check whether we've already imported this declaration. 
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD); + if (Pos != ImportedDecls.end()) { + Decl *ToD = Pos->second; + Importer.ImportDefinitionIfNeeded(FromD, ToD); + return ToD; + } + + // Import the type + Decl *ToD = Importer.Visit(FromD); + if (!ToD) + return 0; + + // Record the imported declaration. + ImportedDecls[FromD] = ToD; + + if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) { + // Keep track of anonymous tags that have an associated typedef. + if (FromTag->getTypedefNameForAnonDecl()) + AnonTagsWithPendingTypedefs.push_back(FromTag); + } else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) { + // When we've finished transforming a typedef, see whether it was the + // typedef for an anonymous tag. + for (SmallVectorImpl<TagDecl *>::iterator + FromTag = AnonTagsWithPendingTypedefs.begin(), + FromTagEnd = AnonTagsWithPendingTypedefs.end(); + FromTag != FromTagEnd; ++FromTag) { + if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) { + if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) { + // We found the typedef for an anonymous tag; link them. + ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD)); + AnonTagsWithPendingTypedefs.erase(FromTag); + break; + } + } + } + } + + return ToD; +} + +DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) { + if (!FromDC) + return FromDC; + + DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC))); + if (!ToDC) + return 0; + + // When we're using a record/enum/Objective-C class/protocol as a context, we + // need it to have a definition. + if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) { + RecordDecl *FromRecord = cast<RecordDecl>(FromDC); + if (ToRecord->isCompleteDefinition()) { + // Do nothing. 
+ } else if (FromRecord->isCompleteDefinition()) { + ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord, + ASTNodeImporter::IDK_Basic); + } else { + CompleteDecl(ToRecord); + } + } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) { + EnumDecl *FromEnum = cast<EnumDecl>(FromDC); + if (ToEnum->isCompleteDefinition()) { + // Do nothing. + } else if (FromEnum->isCompleteDefinition()) { + ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum, + ASTNodeImporter::IDK_Basic); + } else { + CompleteDecl(ToEnum); + } + } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) { + ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC); + if (ToClass->getDefinition()) { + // Do nothing. + } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) { + ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass, + ASTNodeImporter::IDK_Basic); + } else { + CompleteDecl(ToClass); + } + } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) { + ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC); + if (ToProto->getDefinition()) { + // Do nothing. + } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) { + ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto, + ASTNodeImporter::IDK_Basic); + } else { + CompleteDecl(ToProto); + } + } + + return ToDC; +} + +Expr *ASTImporter::Import(Expr *FromE) { + if (!FromE) + return 0; + + return cast_or_null<Expr>(Import(cast<Stmt>(FromE))); +} + +Stmt *ASTImporter::Import(Stmt *FromS) { + if (!FromS) + return 0; + + // Check whether we've already imported this declaration. + llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS); + if (Pos != ImportedStmts.end()) + return Pos->second; + + // Import the type + ASTNodeImporter Importer(*this); + Stmt *ToS = Importer.Visit(FromS); + if (!ToS) + return 0; + + // Record the imported declaration. 
+ ImportedStmts[FromS] = ToS; + return ToS; +} + +NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) { + if (!FromNNS) + return 0; + + NestedNameSpecifier *prefix = Import(FromNNS->getPrefix()); + + switch (FromNNS->getKind()) { + case NestedNameSpecifier::Identifier: + if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) { + return NestedNameSpecifier::Create(ToContext, prefix, II); + } + return 0; + + case NestedNameSpecifier::Namespace: + if (NamespaceDecl *NS = + cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) { + return NestedNameSpecifier::Create(ToContext, prefix, NS); + } + return 0; + + case NestedNameSpecifier::NamespaceAlias: + if (NamespaceAliasDecl *NSAD = + cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) { + return NestedNameSpecifier::Create(ToContext, prefix, NSAD); + } + return 0; + + case NestedNameSpecifier::Global: + return NestedNameSpecifier::GlobalSpecifier(ToContext); + + case NestedNameSpecifier::TypeSpec: + case NestedNameSpecifier::TypeSpecWithTemplate: { + QualType T = Import(QualType(FromNNS->getAsType(), 0u)); + if (!T.isNull()) { + bool bTemplate = FromNNS->getKind() == + NestedNameSpecifier::TypeSpecWithTemplate; + return NestedNameSpecifier::Create(ToContext, prefix, + bTemplate, T.getTypePtr()); + } + } + return 0; + } + + llvm_unreachable("Invalid nested name specifier kind"); +} + +NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) { + // FIXME: Implement! 
+ return NestedNameSpecifierLoc(); +} + +TemplateName ASTImporter::Import(TemplateName From) { + switch (From.getKind()) { + case TemplateName::Template: + if (TemplateDecl *ToTemplate + = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl()))) + return TemplateName(ToTemplate); + + return TemplateName(); + + case TemplateName::OverloadedTemplate: { + OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate(); + UnresolvedSet<2> ToTemplates; + for (OverloadedTemplateStorage::iterator I = FromStorage->begin(), + E = FromStorage->end(); + I != E; ++I) { + if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I))) + ToTemplates.addDecl(To); + else + return TemplateName(); + } + return ToContext.getOverloadedTemplateName(ToTemplates.begin(), + ToTemplates.end()); + } + + case TemplateName::QualifiedTemplate: { + QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName(); + NestedNameSpecifier *Qualifier = Import(QTN->getQualifier()); + if (!Qualifier) + return TemplateName(); + + if (TemplateDecl *ToTemplate + = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl()))) + return ToContext.getQualifiedTemplateName(Qualifier, + QTN->hasTemplateKeyword(), + ToTemplate); + + return TemplateName(); + } + + case TemplateName::DependentTemplate: { + DependentTemplateName *DTN = From.getAsDependentTemplateName(); + NestedNameSpecifier *Qualifier = Import(DTN->getQualifier()); + if (!Qualifier) + return TemplateName(); + + if (DTN->isIdentifier()) { + return ToContext.getDependentTemplateName(Qualifier, + Import(DTN->getIdentifier())); + } + + return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator()); + } + + case TemplateName::SubstTemplateTemplateParm: { + SubstTemplateTemplateParmStorage *subst + = From.getAsSubstTemplateTemplateParm(); + TemplateTemplateParmDecl *param + = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter())); + if (!param) + return TemplateName(); + + TemplateName replacement = 
Import(subst->getReplacement()); + if (replacement.isNull()) return TemplateName(); + + return ToContext.getSubstTemplateTemplateParm(param, replacement); + } + + case TemplateName::SubstTemplateTemplateParmPack: { + SubstTemplateTemplateParmPackStorage *SubstPack + = From.getAsSubstTemplateTemplateParmPack(); + TemplateTemplateParmDecl *Param + = cast_or_null<TemplateTemplateParmDecl>( + Import(SubstPack->getParameterPack())); + if (!Param) + return TemplateName(); + + ASTNodeImporter Importer(*this); + TemplateArgument ArgPack + = Importer.ImportTemplateArgument(SubstPack->getArgumentPack()); + if (ArgPack.isNull()) + return TemplateName(); + + return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack); + } + } + + llvm_unreachable("Invalid template name kind"); +} + +SourceLocation ASTImporter::Import(SourceLocation FromLoc) { + if (FromLoc.isInvalid()) + return SourceLocation(); + + SourceManager &FromSM = FromContext.getSourceManager(); + + // For now, map everything down to its spelling location, so that we + // don't have to import macro expansions. + // FIXME: Import macro expansions! 
+ FromLoc = FromSM.getSpellingLoc(FromLoc); + std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc); + SourceManager &ToSM = ToContext.getSourceManager(); + return ToSM.getLocForStartOfFile(Import(Decomposed.first)) + .getLocWithOffset(Decomposed.second); +} + +SourceRange ASTImporter::Import(SourceRange FromRange) { + return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd())); +} + +FileID ASTImporter::Import(FileID FromID) { + llvm::DenseMap<FileID, FileID>::iterator Pos + = ImportedFileIDs.find(FromID); + if (Pos != ImportedFileIDs.end()) + return Pos->second; + + SourceManager &FromSM = FromContext.getSourceManager(); + SourceManager &ToSM = ToContext.getSourceManager(); + const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID); + assert(FromSLoc.isFile() && "Cannot handle macro expansions yet"); + + // Include location of this file. + SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc()); + + // Map the FileID for to the "to" source manager. + FileID ToID; + const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache(); + if (Cache->OrigEntry) { + // FIXME: We probably want to use getVirtualFile(), so we don't hit the + // disk again + // FIXME: We definitely want to re-use the existing MemoryBuffer, rather + // than mmap the files several times. + const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName()); + ToID = ToSM.createFileID(Entry, ToIncludeLoc, + FromSLoc.getFile().getFileCharacteristic()); + } else { + // FIXME: We want to re-use the existing MemoryBuffer! 
+ const llvm::MemoryBuffer * + FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM); + llvm::MemoryBuffer *ToBuf + = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(), + FromBuf->getBufferIdentifier()); + ToID = ToSM.createFileIDForMemBuffer(ToBuf, + FromSLoc.getFile().getFileCharacteristic()); + } + + + ImportedFileIDs[FromID] = ToID; + return ToID; +} + +void ASTImporter::ImportDefinition(Decl *From) { + Decl *To = Import(From); + if (!To) + return; + + if (DeclContext *FromDC = cast<DeclContext>(From)) { + ASTNodeImporter Importer(*this); + + if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) { + if (!ToRecord->getDefinition()) { + Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord, + ASTNodeImporter::IDK_Everything); + return; + } + } + + if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) { + if (!ToEnum->getDefinition()) { + Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum, + ASTNodeImporter::IDK_Everything); + return; + } + } + + if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) { + if (!ToIFace->getDefinition()) { + Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace, + ASTNodeImporter::IDK_Everything); + return; + } + } + + if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) { + if (!ToProto->getDefinition()) { + Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto, + ASTNodeImporter::IDK_Everything); + return; + } + } + + Importer.ImportDeclContext(FromDC, true); + } +} + +DeclarationName ASTImporter::Import(DeclarationName FromName) { + if (!FromName) + return DeclarationName(); + + switch (FromName.getNameKind()) { + case DeclarationName::Identifier: + return Import(FromName.getAsIdentifierInfo()); + + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + return Import(FromName.getObjCSelector()); + + case DeclarationName::CXXConstructorName: { + QualType T = 
Import(FromName.getCXXNameType()); + if (T.isNull()) + return DeclarationName(); + + return ToContext.DeclarationNames.getCXXConstructorName( + ToContext.getCanonicalType(T)); + } + + case DeclarationName::CXXDestructorName: { + QualType T = Import(FromName.getCXXNameType()); + if (T.isNull()) + return DeclarationName(); + + return ToContext.DeclarationNames.getCXXDestructorName( + ToContext.getCanonicalType(T)); + } + + case DeclarationName::CXXConversionFunctionName: { + QualType T = Import(FromName.getCXXNameType()); + if (T.isNull()) + return DeclarationName(); + + return ToContext.DeclarationNames.getCXXConversionFunctionName( + ToContext.getCanonicalType(T)); + } + + case DeclarationName::CXXOperatorName: + return ToContext.DeclarationNames.getCXXOperatorName( + FromName.getCXXOverloadedOperator()); + + case DeclarationName::CXXLiteralOperatorName: + return ToContext.DeclarationNames.getCXXLiteralOperatorName( + Import(FromName.getCXXLiteralIdentifier())); + + case DeclarationName::CXXUsingDirective: + // FIXME: STATICS! 
+ return DeclarationName::getUsingDirectiveName(); + } + + llvm_unreachable("Invalid DeclarationName Kind!"); +} + +IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) { + if (!FromId) + return 0; + + return &ToContext.Idents.get(FromId->getName()); +} + +Selector ASTImporter::Import(Selector FromSel) { + if (FromSel.isNull()) + return Selector(); + + SmallVector<IdentifierInfo *, 4> Idents; + Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0))); + for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I) + Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I))); + return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data()); +} + +DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name, + DeclContext *DC, + unsigned IDNS, + NamedDecl **Decls, + unsigned NumDecls) { + return Name; +} + +DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) { + if (LastDiagFromFrom) + ToContext.getDiagnostics().notePriorDiagnosticFrom( + FromContext.getDiagnostics()); + LastDiagFromFrom = false; + return ToContext.getDiagnostics().Report(Loc, DiagID); +} + +DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) { + if (!LastDiagFromFrom) + FromContext.getDiagnostics().notePriorDiagnosticFrom( + ToContext.getDiagnostics()); + LastDiagFromFrom = true; + return FromContext.getDiagnostics().Report(Loc, DiagID); +} + +void ASTImporter::CompleteDecl (Decl *D) { + if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) { + if (!ID->getDefinition()) + ID->startDefinition(); + } + else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) { + if (!PD->getDefinition()) + PD->startDefinition(); + } + else if (TagDecl *TD = dyn_cast<TagDecl>(D)) { + if (!TD->getDefinition() && !TD->isBeingDefined()) { + TD->startDefinition(); + TD->setCompleteDefinition(true); + } + } + else { + assert (0 && "CompleteDecl called on a Decl that can't be completed"); + } +} + +Decl 
*ASTImporter::Imported(Decl *From, Decl *To) { + ImportedDecls[From] = To; + return To; +} + +bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To, + bool Complain) { + llvm::DenseMap<const Type *, const Type *>::iterator Pos + = ImportedTypes.find(From.getTypePtr()); + if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To)) + return true; + + StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls, + false, Complain); + return Ctx.IsStructurallyEquivalent(From, To); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp new file mode 100644 index 000000000000..ae47ea98882b --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ASTTypeTraits.cpp @@ -0,0 +1,105 @@ +//===--- ASTTypeTraits.cpp --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Provides a dynamic type identifier and a dynamically typed node container +// that can be used to store an AST base node at runtime in the same storage in +// a type safe way. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTTypeTraits.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" + +namespace clang { +namespace ast_type_traits { + +const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = { + { NKI_None, "<None>" }, + { NKI_None, "CXXCtorInitializer" }, + { NKI_None, "TemplateArgument" }, + { NKI_None, "NestedNameSpecifier" }, + { NKI_None, "NestedNameSpecifierLoc" }, + { NKI_None, "QualType" }, + { NKI_None, "TypeLoc" }, + { NKI_None, "Decl" }, +#define DECL(DERIVED, BASE) { NKI_##BASE, #DERIVED "Decl" }, +#include "clang/AST/DeclNodes.inc" + { NKI_None, "Stmt" }, +#define STMT(DERIVED, BASE) { NKI_##BASE, #DERIVED }, +#include "clang/AST/StmtNodes.inc" + { NKI_None, "Type" }, +#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" }, +#include "clang/AST/TypeNodes.def" +}; + +bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const { + return isBaseOf(KindId, Other.KindId); +} + +bool ASTNodeKind::isSame(ASTNodeKind Other) const { + return KindId != NKI_None && KindId == Other.KindId; +} + +bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) { + if (Base == NKI_None || Derived == NKI_None) return false; + while (Derived != Base && Derived != NKI_None) + Derived = AllKindInfo[Derived].ParentId; + return Derived == Base; +} + +StringRef ASTNodeKind::asStringRef() const { return AllKindInfo[KindId].Name; } + +void DynTypedNode::print(llvm::raw_ostream &OS, + const PrintingPolicy &PP) const { + if (const TemplateArgument *TA = get<TemplateArgument>()) + TA->print(PP, OS); + else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>()) + NNS->print(OS, PP); + else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>()) + NNSL->getNestedNameSpecifier()->print(OS, PP); + else if (const QualType *QT = get<QualType>()) + QT->print(OS, PP); + else if (const TypeLoc *TL = get<TypeLoc>()) + TL->getType().print(OS, PP); + else if 
(const Decl *D = get<Decl>()) + D->print(OS, PP); + else if (const Stmt *S = get<Stmt>()) + S->printPretty(OS, 0, PP); + else if (const Type *T = get<Type>()) + QualType(T, 0).print(OS, PP); + else + OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n"; +} + +void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const { + if (const Decl *D = get<Decl>()) + D->dump(OS); + else if (const Stmt *S = get<Stmt>()) + S->dump(OS, SM); + else + OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n"; +} + +SourceRange DynTypedNode::getSourceRange() const { + if (const CXXCtorInitializer *CCI = get<CXXCtorInitializer>()) + return CCI->getSourceRange(); + if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>()) + return NNSL->getSourceRange(); + if (const TypeLoc *TL = get<TypeLoc>()) + return TL->getSourceRange(); + if (const Decl *D = get<Decl>()) + return D->getSourceRange(); + if (const Stmt *S = get<Stmt>()) + return S->getSourceRange(); + return SourceRange(); +} + +} // end namespace ast_type_traits +} // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp new file mode 100644 index 000000000000..7af3c8b16263 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp @@ -0,0 +1,29 @@ +//===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains out-of-line virtual methods for Attr classes. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Attr.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Expr.h" +#include "clang/AST/Type.h" +#include "llvm/ADT/StringSwitch.h" +using namespace clang; + +Attr::~Attr() { } + +void InheritableAttr::anchor() { } + +void InheritableParamAttr::anchor() { } + +void MSInheritanceAttr::anchor() { } + +#include "clang/AST/AttrImpl.inc" diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h new file mode 100644 index 000000000000..89203f18ca77 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h @@ -0,0 +1,52 @@ +//===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for C++ AST support. Concrete +// subclasses of this implement AST support for specific C++ ABIs. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_CXXABI_H +#define LLVM_CLANG_AST_CXXABI_H + +#include "clang/AST/Type.h" + +namespace clang { + +class ASTContext; +class MemberPointerType; +class MangleNumberingContext; + +/// Implements C++ ABI-specific semantic analysis functions. +class CXXABI { +public: + virtual ~CXXABI(); + + /// Returns the width and alignment of a member pointer in bits. + virtual std::pair<uint64_t, unsigned> + getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const = 0; + + /// Returns the default calling convention for C++ methods. + virtual CallingConv getDefaultMethodCallConv(bool isVariadic) const = 0; + + /// Returns whether the given class is nearly empty, with just virtual + /// pointers and no data except possibly virtual bases. 
+ virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0; + + /// Returns a new mangling number context for this C++ ABI. + virtual MangleNumberingContext *createMangleNumberingContext() const = 0; +}; + +/// Creates an instance of a C++ ABI class. +CXXABI *CreateARMCXXABI(ASTContext &Ctx); +CXXABI *CreateItaniumCXXABI(ASTContext &Ctx); +CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx); +} + +#endif diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp new file mode 100644 index 000000000000..b51014b7428f --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp @@ -0,0 +1,741 @@ +//===------ CXXInheritance.cpp - C++ Inheritance ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides routines that help analyzing C++ inheritance hierarchies. +// +//===----------------------------------------------------------------------===// +#include "clang/AST/CXXInheritance.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/ADT/SetVector.h" +#include <algorithm> +#include <set> + +using namespace clang; + +/// \brief Computes the set of declarations referenced by these base +/// paths. 
+void CXXBasePaths::ComputeDeclsFound() { + assert(NumDeclsFound == 0 && !DeclsFound && + "Already computed the set of declarations"); + + llvm::SetVector<NamedDecl *, SmallVector<NamedDecl *, 8> > Decls; + for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path) + Decls.insert(Path->Decls.front()); + + NumDeclsFound = Decls.size(); + DeclsFound = new NamedDecl * [NumDeclsFound]; + std::copy(Decls.begin(), Decls.end(), DeclsFound); +} + +CXXBasePaths::decl_iterator CXXBasePaths::found_decls_begin() { + if (NumDeclsFound == 0) + ComputeDeclsFound(); + return DeclsFound; +} + +CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() { + if (NumDeclsFound == 0) + ComputeDeclsFound(); + return DeclsFound + NumDeclsFound; +} + +/// isAmbiguous - Determines whether the set of paths provided is +/// ambiguous, i.e., there are two or more paths that refer to +/// different base class subobjects of the same type. BaseType must be +/// an unqualified, canonical class type. +bool CXXBasePaths::isAmbiguous(CanQualType BaseType) { + BaseType = BaseType.getUnqualifiedType(); + std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType]; + return Subobjects.second + (Subobjects.first? 1 : 0) > 1; +} + +/// clear - Clear out all prior path information. +void CXXBasePaths::clear() { + Paths.clear(); + ClassSubobjects.clear(); + ScratchPath.clear(); + DetectedVirtual = 0; +} + +/// @brief Swaps the contents of this CXXBasePaths structure with the +/// contents of Other. 
+void CXXBasePaths::swap(CXXBasePaths &Other) { + std::swap(Origin, Other.Origin); + Paths.swap(Other.Paths); + ClassSubobjects.swap(Other.ClassSubobjects); + std::swap(FindAmbiguities, Other.FindAmbiguities); + std::swap(RecordPaths, Other.RecordPaths); + std::swap(DetectVirtual, Other.DetectVirtual); + std::swap(DetectedVirtual, Other.DetectedVirtual); +} + +bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const { + CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false, + /*DetectVirtual=*/false); + return isDerivedFrom(Base, Paths); +} + +bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base, + CXXBasePaths &Paths) const { + if (getCanonicalDecl() == Base->getCanonicalDecl()) + return false; + + Paths.setOrigin(const_cast<CXXRecordDecl*>(this)); + return lookupInBases(&FindBaseClass, + const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()), + Paths); +} + +bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const { + if (!getNumVBases()) + return false; + + CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false, + /*DetectVirtual=*/false); + + if (getCanonicalDecl() == Base->getCanonicalDecl()) + return false; + + Paths.setOrigin(const_cast<CXXRecordDecl*>(this)); + + const void *BasePtr = static_cast<const void*>(Base->getCanonicalDecl()); + return lookupInBases(&FindVirtualBaseClass, + const_cast<void *>(BasePtr), + Paths); +} + +static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) { + // OpaqueTarget is a CXXRecordDecl*. 
+ return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget; +} + +bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const { + return forallBases(BaseIsNot, + const_cast<CXXRecordDecl *>(Base->getCanonicalDecl())); +} + +bool +CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const { + assert(isDependentContext()); + + for (; !CurContext->isFileContext(); CurContext = CurContext->getParent()) + if (CurContext->Equals(this)) + return true; + + return false; +} + +bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches, + void *OpaqueData, + bool AllowShortCircuit) const { + SmallVector<const CXXRecordDecl*, 8> Queue; + + const CXXRecordDecl *Record = this; + bool AllMatches = true; + while (true) { + for (CXXRecordDecl::base_class_const_iterator + I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) { + const RecordType *Ty = I->getType()->getAs<RecordType>(); + if (!Ty) { + if (AllowShortCircuit) return false; + AllMatches = false; + continue; + } + + CXXRecordDecl *Base = + cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition()); + if (!Base || + (Base->isDependentContext() && + !Base->isCurrentInstantiation(Record))) { + if (AllowShortCircuit) return false; + AllMatches = false; + continue; + } + + Queue.push_back(Base); + if (!BaseMatches(Base, OpaqueData)) { + if (AllowShortCircuit) return false; + AllMatches = false; + continue; + } + } + + if (Queue.empty()) + break; + Record = Queue.pop_back_val(); // not actually a queue. + } + + return AllMatches; +} + +bool CXXBasePaths::lookupInBases(ASTContext &Context, + const CXXRecordDecl *Record, + CXXRecordDecl::BaseMatchesCallback *BaseMatches, + void *UserData) { + bool FoundPath = false; + + // The access of the path down to this record. 
+ AccessSpecifier AccessToHere = ScratchPath.Access; + bool IsFirstStep = ScratchPath.empty(); + + for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(), + BaseSpecEnd = Record->bases_end(); + BaseSpec != BaseSpecEnd; + ++BaseSpec) { + // Find the record of the base class subobjects for this type. + QualType BaseType = Context.getCanonicalType(BaseSpec->getType()) + .getUnqualifiedType(); + + // C++ [temp.dep]p3: + // In the definition of a class template or a member of a class template, + // if a base class of the class template depends on a template-parameter, + // the base class scope is not examined during unqualified name lookup + // either at the point of definition of the class template or member or + // during an instantiation of the class tem- plate or member. + if (BaseType->isDependentType()) + continue; + + // Determine whether we need to visit this base class at all, + // updating the count of subobjects appropriately. + std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType]; + bool VisitBase = true; + bool SetVirtual = false; + if (BaseSpec->isVirtual()) { + VisitBase = !Subobjects.first; + Subobjects.first = true; + if (isDetectingVirtual() && DetectedVirtual == 0) { + // If this is the first virtual we find, remember it. If it turns out + // there is no base path here, we'll reset it later. + DetectedVirtual = BaseType->getAs<RecordType>(); + SetVirtual = true; + } + } else + ++Subobjects.second; + + if (isRecordingPaths()) { + // Add this base specifier to the current path. + CXXBasePathElement Element; + Element.Base = &*BaseSpec; + Element.Class = Record; + if (BaseSpec->isVirtual()) + Element.SubobjectNumber = 0; + else + Element.SubobjectNumber = Subobjects.second; + ScratchPath.push_back(Element); + + // Calculate the "top-down" access to this base class. + // The spec actually describes this bottom-up, but top-down is + // equivalent because the definition works out as follows: + // 1. 
Write down the access along each step in the inheritance + // chain, followed by the access of the decl itself. + // For example, in + // class A { public: int foo; }; + // class B : protected A {}; + // class C : public B {}; + // class D : private C {}; + // we would write: + // private public protected public + // 2. If 'private' appears anywhere except far-left, access is denied. + // 3. Otherwise, overall access is determined by the most restrictive + // access in the sequence. + if (IsFirstStep) + ScratchPath.Access = BaseSpec->getAccessSpecifier(); + else + ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere, + BaseSpec->getAccessSpecifier()); + } + + // Track whether there's a path involving this specific base. + bool FoundPathThroughBase = false; + + if (BaseMatches(BaseSpec, ScratchPath, UserData)) { + // We've found a path that terminates at this base. + FoundPath = FoundPathThroughBase = true; + if (isRecordingPaths()) { + // We have a path. Make a copy of it before moving on. + Paths.push_back(ScratchPath); + } else if (!isFindingAmbiguities()) { + // We found a path and we don't care about ambiguities; + // return immediately. + return FoundPath; + } + } else if (VisitBase) { + CXXRecordDecl *BaseRecord + = cast<CXXRecordDecl>(BaseSpec->getType()->castAs<RecordType>() + ->getDecl()); + if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) { + // C++ [class.member.lookup]p2: + // A member name f in one sub-object B hides a member name f in + // a sub-object A if A is a base class sub-object of B. Any + // declarations that are so hidden are eliminated from + // consideration. + + // There is a path to a base class that meets the criteria. If we're + // not collecting paths or finding ambiguities, we're done. + FoundPath = FoundPathThroughBase = true; + if (!isFindingAmbiguities()) + return FoundPath; + } + } + + // Pop this base specifier off the current path (if we're + // collecting paths). 
+ if (isRecordingPaths()) { + ScratchPath.pop_back(); + } + + // If we set a virtual earlier, and this isn't a path, forget it again. + if (SetVirtual && !FoundPathThroughBase) { + DetectedVirtual = 0; + } + } + + // Reset the scratch path access. + ScratchPath.Access = AccessToHere; + + return FoundPath; +} + +bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches, + void *UserData, + CXXBasePaths &Paths) const { + // If we didn't find anything, report that. + if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData)) + return false; + + // If we're not recording paths or we won't ever find ambiguities, + // we're done. + if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities()) + return true; + + // C++ [class.member.lookup]p6: + // When virtual base classes are used, a hidden declaration can be + // reached along a path through the sub-object lattice that does + // not pass through the hiding declaration. This is not an + // ambiguity. The identical use with nonvirtual base classes is an + // ambiguity; in that case there is no unique instance of the name + // that hides all the others. + // + // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy + // way to make it any faster. + for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end(); + P != PEnd; /* increment in loop */) { + bool Hidden = false; + + for (CXXBasePath::iterator PE = P->begin(), PEEnd = P->end(); + PE != PEEnd && !Hidden; ++PE) { + if (PE->Base->isVirtual()) { + CXXRecordDecl *VBase = 0; + if (const RecordType *Record = PE->Base->getType()->getAs<RecordType>()) + VBase = cast<CXXRecordDecl>(Record->getDecl()); + if (!VBase) + break; + + // The declaration(s) we found along this path were found in a + // subobject of a virtual base. Check whether this virtual + // base is a subobject of any other path; if so, then the + // declaration in this path are hidden by that patch. 
+ for (CXXBasePaths::paths_iterator HidingP = Paths.begin(), + HidingPEnd = Paths.end(); + HidingP != HidingPEnd; + ++HidingP) { + CXXRecordDecl *HidingClass = 0; + if (const RecordType *Record + = HidingP->back().Base->getType()->getAs<RecordType>()) + HidingClass = cast<CXXRecordDecl>(Record->getDecl()); + if (!HidingClass) + break; + + if (HidingClass->isVirtuallyDerivedFrom(VBase)) { + Hidden = true; + break; + } + } + } + } + + if (Hidden) + P = Paths.Paths.erase(P); + else + ++P; + } + + return true; +} + +bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, + void *BaseRecord) { + assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord && + "User data for FindBaseClass is not canonical!"); + return Specifier->getType()->castAs<RecordType>()->getDecl() + ->getCanonicalDecl() == BaseRecord; +} + +bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, + void *BaseRecord) { + assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord && + "User data for FindBaseClass is not canonical!"); + return Specifier->isVirtual() && + Specifier->getType()->castAs<RecordType>()->getDecl() + ->getCanonicalDecl() == BaseRecord; +} + +bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, + void *Name) { + RecordDecl *BaseRecord = + Specifier->getType()->castAs<RecordType>()->getDecl(); + + DeclarationName N = DeclarationName::getFromOpaquePtr(Name); + for (Path.Decls = BaseRecord->lookup(N); + !Path.Decls.empty(); + Path.Decls = Path.Decls.slice(1)) { + if (Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag)) + return true; + } + + return false; +} + +bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, + void *Name) { + RecordDecl *BaseRecord = + Specifier->getType()->castAs<RecordType>()->getDecl(); + + const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member; + DeclarationName N = 
DeclarationName::getFromOpaquePtr(Name); + for (Path.Decls = BaseRecord->lookup(N); + !Path.Decls.empty(); + Path.Decls = Path.Decls.slice(1)) { + if (Path.Decls.front()->isInIdentifierNamespace(IDNS)) + return true; + } + + return false; +} + +bool CXXRecordDecl:: +FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, + void *Name) { + RecordDecl *BaseRecord = + Specifier->getType()->castAs<RecordType>()->getDecl(); + + DeclarationName N = DeclarationName::getFromOpaquePtr(Name); + for (Path.Decls = BaseRecord->lookup(N); + !Path.Decls.empty(); + Path.Decls = Path.Decls.slice(1)) { + // FIXME: Refactor the "is it a nested-name-specifier?" check + if (isa<TypedefNameDecl>(Path.Decls.front()) || + Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag)) + return true; + } + + return false; +} + +void OverridingMethods::add(unsigned OverriddenSubobject, + UniqueVirtualMethod Overriding) { + SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides + = Overrides[OverriddenSubobject]; + if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(), + Overriding) == SubobjectOverrides.end()) + SubobjectOverrides.push_back(Overriding); +} + +void OverridingMethods::add(const OverridingMethods &Other) { + for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) { + for (overriding_const_iterator M = I->second.begin(), + MEnd = I->second.end(); + M != MEnd; + ++M) + add(I->first, *M); + } +} + +void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) { + for (iterator I = begin(), IEnd = end(); I != IEnd; ++I) { + I->second.clear(); + I->second.push_back(Overriding); + } +} + + +namespace { + class FinalOverriderCollector { + /// \brief The number of subobjects of a given class type that + /// occur within the class hierarchy. + llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount; + + /// \brief Overriders for each virtual base subobject. 
+ llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders; + + CXXFinalOverriderMap FinalOverriders; + + public: + ~FinalOverriderCollector(); + + void Collect(const CXXRecordDecl *RD, bool VirtualBase, + const CXXRecordDecl *InVirtualSubobject, + CXXFinalOverriderMap &Overriders); + }; +} + +void FinalOverriderCollector::Collect(const CXXRecordDecl *RD, + bool VirtualBase, + const CXXRecordDecl *InVirtualSubobject, + CXXFinalOverriderMap &Overriders) { + unsigned SubobjectNumber = 0; + if (!VirtualBase) + SubobjectNumber + = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())]; + + for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(), + BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) { + if (const RecordType *RT = Base->getType()->getAs<RecordType>()) { + const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl()); + if (!BaseDecl->isPolymorphic()) + continue; + + if (Overriders.empty() && !Base->isVirtual()) { + // There are no other overriders of virtual member functions, + // so let the base class fill in our overriders for us. + Collect(BaseDecl, false, InVirtualSubobject, Overriders); + continue; + } + + // Collect all of the overridders from the base class subobject + // and merge them into the set of overridders for this class. + // For virtual base classes, populate or use the cached virtual + // overrides so that we do not walk the virtual base class (and + // its base classes) more than once. + CXXFinalOverriderMap ComputedBaseOverriders; + CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders; + if (Base->isVirtual()) { + CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl]; + BaseOverriders = MyVirtualOverriders; + if (!MyVirtualOverriders) { + MyVirtualOverriders = new CXXFinalOverriderMap; + + // Collect may cause VirtualOverriders to reallocate, invalidating the + // MyVirtualOverriders reference. Set BaseOverriders to the right + // value now. 
+ BaseOverriders = MyVirtualOverriders; + + Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders); + } + } else + Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders); + + // Merge the overriders from this base class into our own set of + // overriders. + for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(), + OMEnd = BaseOverriders->end(); + OM != OMEnd; + ++OM) { + const CXXMethodDecl *CanonOM + = cast<CXXMethodDecl>(OM->first->getCanonicalDecl()); + Overriders[CanonOM].add(OM->second); + } + } + } + + for (CXXRecordDecl::method_iterator M = RD->method_begin(), + MEnd = RD->method_end(); + M != MEnd; + ++M) { + // We only care about virtual methods. + if (!M->isVirtual()) + continue; + + CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl()); + + if (CanonM->begin_overridden_methods() + == CanonM->end_overridden_methods()) { + // This is a new virtual function that does not override any + // other virtual function. Add it to the map of virtual + // functions for which we are tracking overridders. + + // C++ [class.virtual]p2: + // For convenience we say that any virtual function overrides itself. + Overriders[CanonM].add(SubobjectNumber, + UniqueVirtualMethod(CanonM, SubobjectNumber, + InVirtualSubobject)); + continue; + } + + // This virtual method overrides other virtual methods, so it does + // not add any new slots into the set of overriders. Instead, we + // replace entries in the set of overriders with the new + // overrider. To do so, we dig down to the original virtual + // functions using data recursion and update all of the methods it + // overrides. 
+ typedef std::pair<CXXMethodDecl::method_iterator, + CXXMethodDecl::method_iterator> OverriddenMethods; + SmallVector<OverriddenMethods, 4> Stack; + Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(), + CanonM->end_overridden_methods())); + while (!Stack.empty()) { + OverriddenMethods OverMethods = Stack.back(); + Stack.pop_back(); + + for (; OverMethods.first != OverMethods.second; ++OverMethods.first) { + const CXXMethodDecl *CanonOM + = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl()); + + // C++ [class.virtual]p2: + // A virtual member function C::vf of a class object S is + // a final overrider unless the most derived class (1.8) + // of which S is a base class subobject (if any) declares + // or inherits another member function that overrides vf. + // + // Treating this object like the most derived class, we + // replace any overrides from base classes with this + // overriding virtual function. + Overriders[CanonOM].replaceAll( + UniqueVirtualMethod(CanonM, SubobjectNumber, + InVirtualSubobject)); + + if (CanonOM->begin_overridden_methods() + == CanonOM->end_overridden_methods()) + continue; + + // Continue recursion to the methods that this virtual method + // overrides. + Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(), + CanonOM->end_overridden_methods())); + } + } + + // C++ [class.virtual]p2: + // For convenience we say that any virtual function overrides itself. 
+ Overriders[CanonM].add(SubobjectNumber, + UniqueVirtualMethod(CanonM, SubobjectNumber, + InVirtualSubobject)); + } +} + +FinalOverriderCollector::~FinalOverriderCollector() { + for (llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *>::iterator + VO = VirtualOverriders.begin(), VOEnd = VirtualOverriders.end(); + VO != VOEnd; + ++VO) + delete VO->second; +} + +void +CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const { + FinalOverriderCollector Collector; + Collector.Collect(this, false, 0, FinalOverriders); + + // Weed out any final overriders that come from virtual base class + // subobjects that were hidden by other subobjects along any path. + // This is the final-overrider variant of C++ [class.member.lookup]p10. + for (CXXFinalOverriderMap::iterator OM = FinalOverriders.begin(), + OMEnd = FinalOverriders.end(); + OM != OMEnd; + ++OM) { + for (OverridingMethods::iterator SO = OM->second.begin(), + SOEnd = OM->second.end(); + SO != SOEnd; + ++SO) { + SmallVectorImpl<UniqueVirtualMethod> &Overriding = SO->second; + if (Overriding.size() < 2) + continue; + + for (SmallVectorImpl<UniqueVirtualMethod>::iterator + Pos = Overriding.begin(), PosEnd = Overriding.end(); + Pos != PosEnd; + /* increment in loop */) { + if (!Pos->InVirtualSubobject) { + ++Pos; + continue; + } + + // We have an overriding method in a virtual base class + // subobject (or non-virtual base class subobject thereof); + // determine whether there exists an other overriding method + // in a base class subobject that hides the virtual base class + // subobject. 
+ bool Hidden = false; + for (SmallVectorImpl<UniqueVirtualMethod>::iterator + OP = Overriding.begin(), OPEnd = Overriding.end(); + OP != OPEnd && !Hidden; + ++OP) { + if (Pos == OP) + continue; + + if (OP->Method->getParent()->isVirtuallyDerivedFrom( + const_cast<CXXRecordDecl *>(Pos->InVirtualSubobject))) + Hidden = true; + } + + if (Hidden) { + // The current overriding function is hidden by another + // overriding function; remove this one. + Pos = Overriding.erase(Pos); + PosEnd = Overriding.end(); + } else { + ++Pos; + } + } + } + } +} + +static void +AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context, + CXXIndirectPrimaryBaseSet& Bases) { + // If the record has a virtual primary base class, add it to our set. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + if (Layout.isPrimaryBaseVirtual()) + Bases.insert(Layout.getPrimaryBase()); + + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + assert(!I->getType()->isDependentType() && + "Cannot get indirect primary bases for class with dependent bases."); + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl()); + + // Only bases with virtual bases participate in computing the + // indirect primary virtual base classes. 
+ if (BaseDecl->getNumVBases()) + AddIndirectPrimaryBases(BaseDecl, Context, Bases); + } + +} + +void +CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const { + ASTContext &Context = getASTContext(); + + if (!getNumVBases()) + return; + + for (CXXRecordDecl::base_class_const_iterator I = bases_begin(), + E = bases_end(); I != E; ++I) { + assert(!I->getType()->isDependentType() && + "Cannot get indirect primary bases for class with dependent bases."); + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl()); + + // Only bases with virtual bases participate in computing the + // indirect primary virtual base classes. + if (BaseDecl->getNumVBases()) + AddIndirectPrimaryBases(BaseDecl, Context, Bases); + } +} diff --git a/contrib/llvm/tools/clang/lib/AST/Comment.cpp b/contrib/llvm/tools/clang/lib/AST/Comment.cpp new file mode 100644 index 000000000000..f24a23d34c57 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Comment.cpp @@ -0,0 +1,318 @@ +//===--- Comment.cpp - Comment AST node implementation --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Comment.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/Basic/CharInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +namespace clang { +namespace comments { + +const char *Comment::getCommentKindName() const { + switch (getCommentKind()) { + case NoCommentKind: return "NoCommentKind"; +#define ABSTRACT_COMMENT(COMMENT) +#define COMMENT(CLASS, PARENT) \ + case CLASS##Kind: \ + return #CLASS; +#include "clang/AST/CommentNodes.inc" +#undef COMMENT +#undef ABSTRACT_COMMENT + } + llvm_unreachable("Unknown comment kind!"); +} + +namespace { +struct good {}; +struct bad {}; + +template <typename T> +good implements_child_begin_end(Comment::child_iterator (T::*)() const) { + return good(); +} + +LLVM_ATTRIBUTE_UNUSED +static inline bad implements_child_begin_end( + Comment::child_iterator (Comment::*)() const) { + return bad(); +} + +#define ASSERT_IMPLEMENTS_child_begin(function) \ + (void) good(implements_child_begin_end(function)) + +LLVM_ATTRIBUTE_UNUSED +static inline void CheckCommentASTNodes() { +#define ABSTRACT_COMMENT(COMMENT) +#define COMMENT(CLASS, PARENT) \ + ASSERT_IMPLEMENTS_child_begin(&CLASS::child_begin); \ + ASSERT_IMPLEMENTS_child_begin(&CLASS::child_end); +#include "clang/AST/CommentNodes.inc" +#undef COMMENT +#undef ABSTRACT_COMMENT +} + +#undef ASSERT_IMPLEMENTS_child_begin + +} // end unnamed namespace + +Comment::child_iterator Comment::child_begin() const { + switch (getCommentKind()) { + case NoCommentKind: llvm_unreachable("comment without a kind"); +#define ABSTRACT_COMMENT(COMMENT) +#define COMMENT(CLASS, PARENT) \ + case CLASS##Kind: \ + return static_cast<const CLASS *>(this)->child_begin(); +#include "clang/AST/CommentNodes.inc" +#undef COMMENT +#undef ABSTRACT_COMMENT + } + 
llvm_unreachable("Unknown comment kind!"); +} + +Comment::child_iterator Comment::child_end() const { + switch (getCommentKind()) { + case NoCommentKind: llvm_unreachable("comment without a kind"); +#define ABSTRACT_COMMENT(COMMENT) +#define COMMENT(CLASS, PARENT) \ + case CLASS##Kind: \ + return static_cast<const CLASS *>(this)->child_end(); +#include "clang/AST/CommentNodes.inc" +#undef COMMENT +#undef ABSTRACT_COMMENT + } + llvm_unreachable("Unknown comment kind!"); +} + +bool TextComment::isWhitespaceNoCache() const { + for (StringRef::const_iterator I = Text.begin(), E = Text.end(); + I != E; ++I) { + if (!clang::isWhitespace(*I)) + return false; + } + return true; +} + +bool ParagraphComment::isWhitespaceNoCache() const { + for (child_iterator I = child_begin(), E = child_end(); I != E; ++I) { + if (const TextComment *TC = dyn_cast<TextComment>(*I)) { + if (!TC->isWhitespace()) + return false; + } else + return false; + } + return true; +} + +const char *ParamCommandComment::getDirectionAsString(PassDirection D) { + switch (D) { + case ParamCommandComment::In: + return "[in]"; + case ParamCommandComment::Out: + return "[out]"; + case ParamCommandComment::InOut: + return "[in,out]"; + } + llvm_unreachable("unknown PassDirection"); +} + +void DeclInfo::fill() { + assert(!IsFilled); + + // Set defaults. + Kind = OtherKind; + TemplateKind = NotTemplate; + IsObjCMethod = false; + IsInstanceMethod = false; + IsClassMethod = false; + ParamVars = None; + TemplateParameters = NULL; + + if (!CommentDecl) { + // If there is no declaration, the defaults is our only guess. + IsFilled = true; + return; + } + CurrentDecl = CommentDecl; + + Decl::Kind K = CommentDecl->getKind(); + switch (K) { + default: + // Defaults are should be good for declarations we don't handle explicitly. 
+ break; + case Decl::Function: + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + case Decl::CXXConversion: { + const FunctionDecl *FD = cast<FunctionDecl>(CommentDecl); + Kind = FunctionKind; + ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(), + FD->getNumParams()); + ResultType = FD->getResultType(); + unsigned NumLists = FD->getNumTemplateParameterLists(); + if (NumLists != 0) { + TemplateKind = TemplateSpecialization; + TemplateParameters = + FD->getTemplateParameterList(NumLists - 1); + } + + if (K == Decl::CXXMethod || K == Decl::CXXConstructor || + K == Decl::CXXDestructor || K == Decl::CXXConversion) { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(CommentDecl); + IsInstanceMethod = MD->isInstance(); + IsClassMethod = !IsInstanceMethod; + } + break; + } + case Decl::ObjCMethod: { + const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(CommentDecl); + Kind = FunctionKind; + ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(), + MD->param_size()); + ResultType = MD->getResultType(); + IsObjCMethod = true; + IsInstanceMethod = MD->isInstanceMethod(); + IsClassMethod = !IsInstanceMethod; + break; + } + case Decl::FunctionTemplate: { + const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(CommentDecl); + Kind = FunctionKind; + TemplateKind = Template; + const FunctionDecl *FD = FTD->getTemplatedDecl(); + ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(), + FD->getNumParams()); + ResultType = FD->getResultType(); + TemplateParameters = FTD->getTemplateParameters(); + break; + } + case Decl::ClassTemplate: { + const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(CommentDecl); + Kind = ClassKind; + TemplateKind = Template; + TemplateParameters = CTD->getTemplateParameters(); + break; + } + case Decl::ClassTemplatePartialSpecialization: { + const ClassTemplatePartialSpecializationDecl *CTPSD = + cast<ClassTemplatePartialSpecializationDecl>(CommentDecl); + Kind = ClassKind; + TemplateKind = 
TemplatePartialSpecialization; + TemplateParameters = CTPSD->getTemplateParameters(); + break; + } + case Decl::ClassTemplateSpecialization: + Kind = ClassKind; + TemplateKind = TemplateSpecialization; + break; + case Decl::Record: + case Decl::CXXRecord: + Kind = ClassKind; + break; + case Decl::Var: + case Decl::Field: + case Decl::EnumConstant: + case Decl::ObjCIvar: + case Decl::ObjCAtDefsField: + Kind = VariableKind; + break; + case Decl::Namespace: + Kind = NamespaceKind; + break; + case Decl::Typedef: { + Kind = TypedefKind; + // If this is a typedef to something we consider a function, extract + // arguments and return type. + const TypedefDecl *TD = cast<TypedefDecl>(CommentDecl); + const TypeSourceInfo *TSI = TD->getTypeSourceInfo(); + if (!TSI) + break; + TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc(); + while (true) { + TL = TL.IgnoreParens(); + // Look through qualified types. + if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) { + TL = QualifiedTL.getUnqualifiedLoc(); + continue; + } + // Look through pointer types. + if (PointerTypeLoc PointerTL = TL.getAs<PointerTypeLoc>()) { + TL = PointerTL.getPointeeLoc().getUnqualifiedLoc(); + continue; + } + if (BlockPointerTypeLoc BlockPointerTL = + TL.getAs<BlockPointerTypeLoc>()) { + TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc(); + continue; + } + if (MemberPointerTypeLoc MemberPointerTL = + TL.getAs<MemberPointerTypeLoc>()) { + TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc(); + continue; + } + // Is this a typedef for a function type? 
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) { + Kind = FunctionKind; + ArrayRef<ParmVarDecl *> Params = FTL.getParams(); + ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(), + Params.size()); + ResultType = FTL.getResultLoc().getType(); + break; + } + break; + } + break; + } + case Decl::TypeAlias: + Kind = TypedefKind; + break; + case Decl::TypeAliasTemplate: { + const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl); + Kind = TypedefKind; + TemplateKind = Template; + TemplateParameters = TAT->getTemplateParameters(); + break; + } + case Decl::Enum: + Kind = EnumKind; + break; + } + + IsFilled = true; +} + +StringRef ParamCommandComment::getParamName(const FullComment *FC) const { + assert(isParamIndexValid()); + if (isVarArgParam()) + return "..."; + return FC->getDeclInfo()->ParamVars[getParamIndex()]->getName(); +} + +StringRef TParamCommandComment::getParamName(const FullComment *FC) const { + assert(isPositionValid()); + const TemplateParameterList *TPL = FC->getDeclInfo()->TemplateParameters; + for (unsigned i = 0, e = getDepth(); i != e; ++i) { + if (i == e-1) + return TPL->getParam(getIndex(i))->getName(); + const NamedDecl *Param = TPL->getParam(getIndex(i)); + if (const TemplateTemplateParmDecl *TTP = + dyn_cast<TemplateTemplateParmDecl>(Param)) + TPL = TTP->getTemplateParameters(); + } + return ""; +} + +} // end namespace comments +} // end namespace clang + diff --git a/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp new file mode 100644 index 000000000000..090b9211d4c1 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CommentBriefParser.cpp @@ -0,0 +1,155 @@ +//===--- CommentBriefParser.cpp - Dumb comment parser ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CommentBriefParser.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+namespace comments {
+
+namespace {
+/// Returns true for the six ASCII whitespace characters.  Deliberately
+/// locale-independent (unlike std::isspace).
+inline bool isWhitespace(char C) {
+  return C == ' ' || C == '\n' || C == '\r' ||
+         C == '\t' || C == '\f' || C == '\v';
+}
+
+/// Convert all whitespace into spaces, remove leading and trailing spaces,
+/// compress multiple spaces into one.
+///
+/// Rewrites \p S in place with a read iterator \c I and a write iterator
+/// \c O, then shrinks the string to the compacted length.
+void cleanupBrief(std::string &S) {
+  // Starting with PrevWasSpace == true drops any leading whitespace.
+  bool PrevWasSpace = true;
+  std::string::iterator O = S.begin();
+  for (std::string::iterator I = S.begin(), E = S.end();
+       I != E; ++I) {
+    const char C = *I;
+    if (isWhitespace(C)) {
+      // Emit a single space for any run of whitespace.
+      if (!PrevWasSpace) {
+        *O++ = ' ';
+        PrevWasSpace = true;
+      }
+      continue;
+    } else {
+      *O++ = C;
+      PrevWasSpace = false;
+    }
+  }
+  // By construction at most one trailing space can remain; drop it.
+  if (O != S.begin() && *(O - 1) == ' ')
+    --O;
+
+  S.resize(O - S.begin());
+}
+
+/// Returns true if \p Text consists entirely of whitespace (or is empty).
+bool isWhitespace(StringRef Text) {
+  for (StringRef::const_iterator I = Text.begin(), E = Text.end();
+       I != E; ++I) {
+    if (!isWhitespace(*I))
+      return false;
+  }
+  return true;
+}
+} // unnamed namespace
+
+BriefParser::BriefParser(Lexer &L, const CommandTraits &Traits) :
+    L(L), Traits(Traits) {
+  // Get lookahead token.
+  ConsumeToken();
+}
+
+/// Extract the "brief" description of a comment: the paragraph introduced by
+/// an explicit \\brief (or equivalent) command if present, otherwise the
+/// first paragraph of the comment.  If neither yields non-empty text, fall
+/// back to the \\returns paragraph (prefixed with "Returns ").
+std::string BriefParser::Parse() {
+  std::string FirstParagraphOrBrief;
+  std::string ReturnsParagraph;
+  bool InFirstParagraph = true;
+  bool InBrief = false;
+  bool InReturns = false;
+
+  while (Tok.isNot(tok::eof)) {
+    if (Tok.is(tok::text)) {
+      if (InFirstParagraph || InBrief)
+        FirstParagraphOrBrief += Tok.getText();
+      else if (InReturns)
+        ReturnsParagraph += Tok.getText();
+      ConsumeToken();
+      continue;
+    }
+
+    if (Tok.is(tok::backslash_command) || Tok.is(tok::at_command)) {
+      const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
+      if (Info->IsBriefCommand) {
+        // An explicit \brief restarts the collected text from scratch.
+        FirstParagraphOrBrief.clear();
+        InBrief = true;
+        ConsumeToken();
+        continue;
+      }
+      if (Info->IsReturnsCommand) {
+        InReturns = true;
+        InBrief = false;
+        InFirstParagraph = false;
+        ReturnsParagraph += "Returns ";
+        ConsumeToken();
+        continue;
+      }
+      // Block commands implicitly start a new paragraph.
+      if (Info->IsBlockCommand) {
+        // We found an implicit paragraph end.
+        InFirstParagraph = false;
+        if (InBrief)
+          break;
+      }
+    }
+
+    if (Tok.is(tok::newline)) {
+      if (InFirstParagraph || InBrief)
+        FirstParagraphOrBrief += ' ';
+      else if (InReturns)
+        ReturnsParagraph += ' ';
+      ConsumeToken();
+
+      // If the next token is a whitespace only text, ignore it.  Thus we allow
+      // two paragraphs to be separated by line that has only whitespace in it.
+      //
+      // We don't need to add a space to the parsed text because we just added
+      // a space for the newline.
+      if (Tok.is(tok::text)) {
+        if (isWhitespace(Tok.getText()))
+          ConsumeToken();
+      }
+
+      if (Tok.is(tok::newline)) {
+        ConsumeToken();
+        // We found a paragraph end.  This ends the brief description if
+        // \\brief command or its equivalent was explicitly used.
+        // Stop scanning text because an explicit \\brief paragraph is the
+        // preferred one.
+        if (InBrief)
+          break;
+        // End first paragraph if we found some non-whitespace text.
+        if (InFirstParagraph && !isWhitespace(FirstParagraphOrBrief))
+          InFirstParagraph = false;
+        // End the \\returns paragraph because we found the paragraph end.
+        InReturns = false;
+      }
+      continue;
+    }
+
+    // We didn't handle this token, so just drop it.
+    ConsumeToken();
+  }
+
+  cleanupBrief(FirstParagraphOrBrief);
+  if (!FirstParagraphOrBrief.empty())
+    return FirstParagraphOrBrief;
+
+  // No explicit \brief and no usable first paragraph; fall back to the
+  // \returns paragraph (possibly empty).
+  cleanupBrief(ReturnsParagraph);
+  return ReturnsParagraph;
+}
+
+} // end namespace comments
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
new file mode 100644
index 000000000000..01bd12e5fefa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CommentCommandTraits.cpp
@@ -0,0 +1,140 @@
+//===--- CommentCommandTraits.cpp - Comment command properties --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/CommentCommandTraits.h" +#include "llvm/ADT/STLExtras.h" + +namespace clang { +namespace comments { + +#include "clang/AST/CommentCommandInfo.inc" + +CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator, + const CommentOptions &CommentOptions) : + NextID(llvm::array_lengthof(Commands)), Allocator(Allocator) { + registerCommentOptions(CommentOptions); +} + +void CommandTraits::registerCommentOptions( + const CommentOptions &CommentOptions) { + for (CommentOptions::BlockCommandNamesTy::const_iterator + I = CommentOptions.BlockCommandNames.begin(), + E = CommentOptions.BlockCommandNames.end(); + I != E; I++) { + registerBlockCommand(*I); + } +} + +const CommandInfo *CommandTraits::getCommandInfoOrNULL(StringRef Name) const { + if (const CommandInfo *Info = getBuiltinCommandInfo(Name)) + return Info; + return getRegisteredCommandInfo(Name); +} + +const CommandInfo *CommandTraits::getCommandInfo(unsigned CommandID) const { + if (const CommandInfo *Info = getBuiltinCommandInfo(CommandID)) + return Info; + return getRegisteredCommandInfo(CommandID); +} + +static void +HelperTypoCorrectCommandInfo(SmallVectorImpl<const CommandInfo *> &BestCommand, + StringRef Typo, const CommandInfo *Command) { + const unsigned MaxEditDistance = 1; + unsigned BestEditDistance = MaxEditDistance + 1; + StringRef Name = Command->Name; + + unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size()); + if (MinPossibleEditDistance > 0 && + Typo.size() / MinPossibleEditDistance < 1) + return; + unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance); + if (EditDistance > MaxEditDistance) + return; + if (EditDistance == BestEditDistance) + BestCommand.push_back(Command); + else if (EditDistance < BestEditDistance) { + BestCommand.clear(); + BestCommand.push_back(Command); + BestEditDistance = EditDistance; + } +} + +const CommandInfo * 
+CommandTraits::getTypoCorrectCommandInfo(StringRef Typo) const {
+  // Single-character "commands" such as \t or \n are far more likely to be
+  // intentional escape sequences than misspelled commands, so they must not
+  // go through the fixit logic.
+  if (Typo.size() <= 1)
+    return NULL;
+
+  SmallVector<const CommandInfo *, 2> BestCommand;
+
+  // Consider every builtin command...
+  const int NumOfCommands = llvm::array_lengthof(Commands);
+  for (int i = 0; i < NumOfCommands; i++)
+    HelperTypoCorrectCommandInfo(BestCommand, Typo, &Commands[i]);
+
+  // ...and every runtime-registered command that is not a placeholder for an
+  // unknown command.
+  for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i)
+    if (!RegisteredCommands[i]->IsUnknownCommand)
+      HelperTypoCorrectCommandInfo(BestCommand, Typo, RegisteredCommands[i]);
+
+  // Only suggest a correction when it is unambiguous.
+  return (BestCommand.size() != 1) ? NULL : BestCommand[0];
+}
+
+/// Allocate a new zero-initialized CommandInfo whose name is a NUL-terminated
+/// copy of \p CommandName in the arena allocator, assign it the next free ID,
+/// and append it to RegisteredCommands.
+CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
+  char *Name = Allocator.Allocate<char>(CommandName.size() + 1);
+  memcpy(Name, CommandName.data(), CommandName.size());
+  Name[CommandName.size()] = '\0';
+
+  // Value-initialize (=zero-initialize in this case) a new CommandInfo.
+  CommandInfo *Info = new (Allocator) CommandInfo();
+  Info->Name = Name;
+  Info->ID = NextID++;
+
+  RegisteredCommands.push_back(Info);
+
+  return Info;
+}
+
+const CommandInfo *CommandTraits::registerUnknownCommand(
+    StringRef CommandName) {
+  CommandInfo *Info = createCommandInfoWithName(CommandName);
+  Info->IsUnknownCommand = true;
+  return Info;
+}
+
+const CommandInfo *CommandTraits::registerBlockCommand(StringRef CommandName) {
+  CommandInfo *Info = createCommandInfoWithName(CommandName);
+  Info->IsBlockCommand = true;
+  return Info;
+}
+
+/// Returns the builtin command with the given ID, or NULL when the ID belongs
+/// to a runtime-registered command (IDs past the builtin table).
+const CommandInfo *CommandTraits::getBuiltinCommandInfo(
+    unsigned CommandID) {
+  if (CommandID < llvm::array_lengthof(Commands))
+    return &Commands[CommandID];
+  return NULL;
+}
+
+/// Linear search over the runtime-registered commands; NULL when not found.
+const CommandInfo *CommandTraits::getRegisteredCommandInfo(
+    StringRef Name) const {
+  for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i) {
+    if (RegisteredCommands[i]->Name == Name)
+      return RegisteredCommands[i];
+  }
+  return NULL;
+}
+
+const
CommandInfo *CommandTraits::getRegisteredCommandInfo( + unsigned CommandID) const { + return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)]; +} + +} // end namespace comments +} // end namespace clang + diff --git a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp new file mode 100644 index 000000000000..01ed3ce80a66 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp @@ -0,0 +1,832 @@ +#include "clang/AST/CommentLexer.h" +#include "clang/AST/CommentCommandTraits.h" +#include "clang/AST/CommentDiagnostic.h" +#include "clang/Basic/CharInfo.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/Support/ConvertUTF.h" +#include "llvm/Support/ErrorHandling.h" + +namespace clang { +namespace comments { + +void Token::dump(const Lexer &L, const SourceManager &SM) const { + llvm::errs() << "comments::Token Kind=" << Kind << " "; + Loc.dump(SM); + llvm::errs() << " " << Length << " \"" << L.getSpelling(*this, SM) << "\"\n"; +} + +static inline bool isHTMLNamedCharacterReferenceCharacter(char C) { + return isLetter(C); +} + +static inline bool isHTMLDecimalCharacterReferenceCharacter(char C) { + return isDigit(C); +} + +static inline bool isHTMLHexCharacterReferenceCharacter(char C) { + return isHexDigit(C); +} + +static inline StringRef convertCodePointToUTF8( + llvm::BumpPtrAllocator &Allocator, + unsigned CodePoint) { + char *Resolved = Allocator.Allocate<char>(UNI_MAX_UTF8_BYTES_PER_CODE_POINT); + char *ResolvedPtr = Resolved; + if (llvm::ConvertCodePointToUTF8(CodePoint, ResolvedPtr)) + return StringRef(Resolved, ResolvedPtr - Resolved); + else + return StringRef(); +} + +namespace { + +#include "clang/AST/CommentHTMLTags.inc" +#include "clang/AST/CommentHTMLNamedCharacterReferences.inc" + +} // unnamed namespace + +StringRef Lexer::resolveHTMLNamedCharacterReference(StringRef Name) const { + // Fast path, first check a few most widely used named 
character references. + return llvm::StringSwitch<StringRef>(Name) + .Case("amp", "&") + .Case("lt", "<") + .Case("gt", ">") + .Case("quot", "\"") + .Case("apos", "\'") + // Slow path. + .Default(translateHTMLNamedCharacterReferenceToUTF8(Name)); +} + +StringRef Lexer::resolveHTMLDecimalCharacterReference(StringRef Name) const { + unsigned CodePoint = 0; + for (unsigned i = 0, e = Name.size(); i != e; ++i) { + assert(isHTMLDecimalCharacterReferenceCharacter(Name[i])); + CodePoint *= 10; + CodePoint += Name[i] - '0'; + } + return convertCodePointToUTF8(Allocator, CodePoint); +} + +StringRef Lexer::resolveHTMLHexCharacterReference(StringRef Name) const { + unsigned CodePoint = 0; + for (unsigned i = 0, e = Name.size(); i != e; ++i) { + CodePoint *= 16; + const char C = Name[i]; + assert(isHTMLHexCharacterReferenceCharacter(C)); + CodePoint += llvm::hexDigitValue(C); + } + return convertCodePointToUTF8(Allocator, CodePoint); +} + +void Lexer::skipLineStartingDecorations() { + // This function should be called only for C comments + assert(CommentState == LCS_InsideCComment); + + if (BufferPtr == CommentEnd) + return; + + switch (*BufferPtr) { + case ' ': + case '\t': + case '\f': + case '\v': { + const char *NewBufferPtr = BufferPtr; + NewBufferPtr++; + if (NewBufferPtr == CommentEnd) + return; + + char C = *NewBufferPtr; + while (isHorizontalWhitespace(C)) { + NewBufferPtr++; + if (NewBufferPtr == CommentEnd) + return; + C = *NewBufferPtr; + } + if (C == '*') + BufferPtr = NewBufferPtr + 1; + break; + } + case '*': + BufferPtr++; + break; + } +} + +namespace { +/// Returns pointer to the first newline character in the string. 
+const char *findNewline(const char *BufferPtr, const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (isVerticalWhitespace(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Skips a single newline: "\n", "\r", or the two-character sequence "\r\n".
+/// Returns the pointer just past it (or \p BufferPtr unchanged when the
+/// buffer is empty).
+const char *skipNewline(const char *BufferPtr, const char *BufferEnd) {
+  if (BufferPtr == BufferEnd)
+    return BufferPtr;
+
+  if (*BufferPtr == '\n')
+    BufferPtr++;
+  else {
+    assert(*BufferPtr == '\r');
+    BufferPtr++;
+    // Treat "\r\n" as a single newline.
+    if (BufferPtr != BufferEnd && *BufferPtr == '\n')
+      BufferPtr++;
+  }
+  return BufferPtr;
+}
+
+/// Returns a pointer to the first character that cannot appear in an HTML
+/// named character reference (i.e. a non-letter), or BufferEnd.
+const char *skipNamedCharacterReference(const char *BufferPtr,
+                                        const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isHTMLNamedCharacterReferenceCharacter(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Returns a pointer to the first non-decimal-digit character, or BufferEnd.
+const char *skipDecimalCharacterReference(const char *BufferPtr,
+                                          const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isHTMLDecimalCharacterReferenceCharacter(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Returns a pointer to the first non-hex-digit character, or BufferEnd.
+const char *skipHexCharacterReference(const char *BufferPtr,
+                                      const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isHTMLHexCharacterReferenceCharacter(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+// HTML tag/attribute identifiers: must start with a letter, may continue
+// with letters and digits.
+bool isHTMLIdentifierStartingCharacter(char C) {
+  return isLetter(C);
+}
+
+bool isHTMLIdentifierCharacter(char C) {
+  return isAlphanumeric(C);
+}
+
+const char *skipHTMLIdentifier(const char *BufferPtr, const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isHTMLIdentifierCharacter(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Skip HTML string quoted in single or double quotes.  Escaping quotes inside
+/// string allowed.
+///
+/// Returns pointer to closing quote.
+const char *skipHTMLQuotedString(const char *BufferPtr, const char *BufferEnd)
+{
+  const char Quote = *BufferPtr;
+  assert(Quote == '\"' || Quote == '\'');
+
+  BufferPtr++; // Skip the opening quote.
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    const char C = *BufferPtr;
+    // A quote preceded by a backslash is escaped and does not terminate the
+    // string.
+    if (C == Quote && BufferPtr[-1] != '\\')
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Returns a pointer to the first non-whitespace character, or BufferEnd.
+const char *skipWhitespace(const char *BufferPtr, const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isWhitespace(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Returns true if [BufferPtr, BufferEnd) is all whitespace (or empty).
+bool isWhitespace(const char *BufferPtr, const char *BufferEnd) {
+  return skipWhitespace(BufferPtr, BufferEnd) == BufferEnd;
+}
+
+// Command names: must start with a letter, may continue with letters and
+// digits.
+bool isCommandNameStartCharacter(char C) {
+  return isLetter(C);
+}
+
+bool isCommandNameCharacter(char C) {
+  return isAlphanumeric(C);
+}
+
+const char *skipCommandName(const char *BufferPtr, const char *BufferEnd) {
+  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
+    if (!isCommandNameCharacter(*BufferPtr))
+      return BufferPtr;
+  }
+  return BufferEnd;
+}
+
+/// Return the one past end pointer for BCPL comments.
+/// Handles newlines escaped with a backslash or with the "??/" trigraph for
+/// backslash.
+const char *findBCPLCommentEnd(const char *BufferPtr, const char *BufferEnd) {
+  const char *CurPtr = BufferPtr;
+  while (CurPtr != BufferEnd) {
+    while (!isVerticalWhitespace(*CurPtr)) {
+      CurPtr++;
+      if (CurPtr == BufferEnd)
+        return BufferEnd;
+    }
+    // We found a newline, check if it is escaped.
+    const char *EscapePtr = CurPtr - 1;
+    // NOTE(review): this back-scan has no lower bound; if everything before
+    // the newline were horizontal whitespace it would read before BufferPtr.
+    // Presumably the comment marker characters make that impossible here --
+    // confirm against the callers.
+    while(isHorizontalWhitespace(*EscapePtr))
+      EscapePtr--;
+
+    if (*EscapePtr == '\\' ||
+        (EscapePtr - 2 >= BufferPtr && EscapePtr[0] == '/' &&
+         EscapePtr[-1] == '?' && EscapePtr[-2] == '?')) {
+      // We found an escaped newline.
+      CurPtr = skipNewline(CurPtr, BufferEnd);
+    } else
+      return CurPtr; // Not an escaped newline.
+  }
+  return BufferEnd;
+}
+
+/// Return the one past end pointer for C comments.
+/// Very dumb, does not handle escaped newlines or trigraphs.
+const char *findCCommentEnd(const char *BufferPtr, const char *BufferEnd) { + for ( ; BufferPtr != BufferEnd; ++BufferPtr) { + if (*BufferPtr == '*') { + assert(BufferPtr + 1 != BufferEnd); + if (*(BufferPtr + 1) == '/') + return BufferPtr; + } + } + llvm_unreachable("buffer end hit before '*/' was seen"); +} + +} // unnamed namespace + +void Lexer::lexCommentText(Token &T) { + assert(CommentState == LCS_InsideBCPLComment || + CommentState == LCS_InsideCComment); + + switch (State) { + case LS_Normal: + break; + case LS_VerbatimBlockFirstLine: + lexVerbatimBlockFirstLine(T); + return; + case LS_VerbatimBlockBody: + lexVerbatimBlockBody(T); + return; + case LS_VerbatimLineText: + lexVerbatimLineText(T); + return; + case LS_HTMLStartTag: + lexHTMLStartTag(T); + return; + case LS_HTMLEndTag: + lexHTMLEndTag(T); + return; + } + + assert(State == LS_Normal); + + const char *TokenPtr = BufferPtr; + assert(TokenPtr < CommentEnd); + while (TokenPtr != CommentEnd) { + switch(*TokenPtr) { + case '\\': + case '@': { + // Commands that start with a backslash and commands that start with + // 'at' have equivalent semantics. But we keep information about the + // exact syntax in AST for comments. + tok::TokenKind CommandKind = + (*TokenPtr == '@') ? tok::at_command : tok::backslash_command; + TokenPtr++; + if (TokenPtr == CommentEnd) { + formTextToken(T, TokenPtr); + return; + } + char C = *TokenPtr; + switch (C) { + default: + break; + + case '\\': case '@': case '&': case '$': + case '#': case '<': case '>': case '%': + case '\"': case '.': case ':': + // This is one of \\ \@ \& \$ etc escape sequences. + TokenPtr++; + if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') { + // This is the \:: escape sequence. + TokenPtr++; + } + StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1)); + formTokenWithChars(T, TokenPtr, tok::text); + T.setText(UnescapedText); + return; + } + + // Don't make zero-length commands. 
+ if (!isCommandNameStartCharacter(*TokenPtr)) { + formTextToken(T, TokenPtr); + return; + } + + TokenPtr = skipCommandName(TokenPtr, CommentEnd); + unsigned Length = TokenPtr - (BufferPtr + 1); + + // Hardcoded support for lexing LaTeX formula commands + // \f$ \f[ \f] \f{ \f} as a single command. + if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) { + C = *TokenPtr; + if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') { + TokenPtr++; + Length++; + } + } + + const StringRef CommandName(BufferPtr + 1, Length); + + const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName); + if (!Info) { + if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) { + StringRef CorrectedName = Info->Name; + SourceLocation Loc = getSourceLocation(BufferPtr); + SourceRange CommandRange(Loc.getLocWithOffset(1), + getSourceLocation(TokenPtr)); + Diag(Loc, diag::warn_correct_comment_command_name) + << CommandName << CorrectedName + << FixItHint::CreateReplacement(CommandRange, CorrectedName); + } else { + formTokenWithChars(T, TokenPtr, tok::unknown_command); + T.setUnknownCommandName(CommandName); + Diag(T.getLocation(), diag::warn_unknown_comment_command_name); + return; + } + } + if (Info->IsVerbatimBlockCommand) { + setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info); + return; + } + if (Info->IsVerbatimLineCommand) { + setupAndLexVerbatimLine(T, TokenPtr, Info); + return; + } + formTokenWithChars(T, TokenPtr, CommandKind); + T.setCommandID(Info->getID()); + return; + } + + case '&': + lexHTMLCharacterReference(T); + return; + + case '<': { + TokenPtr++; + if (TokenPtr == CommentEnd) { + formTextToken(T, TokenPtr); + return; + } + const char C = *TokenPtr; + if (isHTMLIdentifierStartingCharacter(C)) + setupAndLexHTMLStartTag(T); + else if (C == '/') + setupAndLexHTMLEndTag(T); + else + formTextToken(T, TokenPtr); + + return; + } + + case '\n': + case '\r': + TokenPtr = skipNewline(TokenPtr, CommentEnd); + formTokenWithChars(T, TokenPtr, 
tok::newline); + + if (CommentState == LCS_InsideCComment) + skipLineStartingDecorations(); + return; + + default: { + size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr). + find_first_of("\n\r\\@&<"); + if (End != StringRef::npos) + TokenPtr += End; + else + TokenPtr = CommentEnd; + formTextToken(T, TokenPtr); + return; + } + } + } +} + +void Lexer::setupAndLexVerbatimBlock(Token &T, + const char *TextBegin, + char Marker, const CommandInfo *Info) { + assert(Info->IsVerbatimBlockCommand); + + VerbatimBlockEndCommandName.clear(); + VerbatimBlockEndCommandName.append(Marker == '\\' ? "\\" : "@"); + VerbatimBlockEndCommandName.append(Info->EndCommandName); + + formTokenWithChars(T, TextBegin, tok::verbatim_block_begin); + T.setVerbatimBlockID(Info->getID()); + + // If there is a newline following the verbatim opening command, skip the + // newline so that we don't create an tok::verbatim_block_line with empty + // text content. + if (BufferPtr != CommentEnd && + isVerticalWhitespace(*BufferPtr)) { + BufferPtr = skipNewline(BufferPtr, CommentEnd); + State = LS_VerbatimBlockBody; + return; + } + + State = LS_VerbatimBlockFirstLine; +} + +void Lexer::lexVerbatimBlockFirstLine(Token &T) { +again: + assert(BufferPtr < CommentEnd); + + // FIXME: It would be better to scan the text once, finding either the block + // end command or newline. + // + // Extract current line. + const char *Newline = findNewline(BufferPtr, CommentEnd); + StringRef Line(BufferPtr, Newline - BufferPtr); + + // Look for end command in current line. + size_t Pos = Line.find(VerbatimBlockEndCommandName); + const char *TextEnd; + const char *NextLine; + if (Pos == StringRef::npos) { + // Current line is completely verbatim. + TextEnd = Newline; + NextLine = skipNewline(Newline, CommentEnd); + } else if (Pos == 0) { + // Current line contains just an end command. 
+ const char *End = BufferPtr + VerbatimBlockEndCommandName.size(); + StringRef Name(BufferPtr + 1, End - (BufferPtr + 1)); + formTokenWithChars(T, End, tok::verbatim_block_end); + T.setVerbatimBlockID(Traits.getCommandInfo(Name)->getID()); + State = LS_Normal; + return; + } else { + // There is some text, followed by end command. Extract text first. + TextEnd = BufferPtr + Pos; + NextLine = TextEnd; + // If there is only whitespace before end command, skip whitespace. + if (isWhitespace(BufferPtr, TextEnd)) { + BufferPtr = TextEnd; + goto again; + } + } + + StringRef Text(BufferPtr, TextEnd - BufferPtr); + formTokenWithChars(T, NextLine, tok::verbatim_block_line); + T.setVerbatimBlockText(Text); + + State = LS_VerbatimBlockBody; +} + +void Lexer::lexVerbatimBlockBody(Token &T) { + assert(State == LS_VerbatimBlockBody); + + if (CommentState == LCS_InsideCComment) + skipLineStartingDecorations(); + + lexVerbatimBlockFirstLine(T); +} + +void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin, + const CommandInfo *Info) { + assert(Info->IsVerbatimLineCommand); + formTokenWithChars(T, TextBegin, tok::verbatim_line_name); + T.setVerbatimLineID(Info->getID()); + + State = LS_VerbatimLineText; +} + +void Lexer::lexVerbatimLineText(Token &T) { + assert(State == LS_VerbatimLineText); + + // Extract current line. 
+ const char *Newline = findNewline(BufferPtr, CommentEnd); + const StringRef Text(BufferPtr, Newline - BufferPtr); + formTokenWithChars(T, Newline, tok::verbatim_line_text); + T.setVerbatimLineText(Text); + + State = LS_Normal; +} + +void Lexer::lexHTMLCharacterReference(Token &T) { + const char *TokenPtr = BufferPtr; + assert(*TokenPtr == '&'); + TokenPtr++; + if (TokenPtr == CommentEnd) { + formTextToken(T, TokenPtr); + return; + } + const char *NamePtr; + bool isNamed = false; + bool isDecimal = false; + char C = *TokenPtr; + if (isHTMLNamedCharacterReferenceCharacter(C)) { + NamePtr = TokenPtr; + TokenPtr = skipNamedCharacterReference(TokenPtr, CommentEnd); + isNamed = true; + } else if (C == '#') { + TokenPtr++; + if (TokenPtr == CommentEnd) { + formTextToken(T, TokenPtr); + return; + } + C = *TokenPtr; + if (isHTMLDecimalCharacterReferenceCharacter(C)) { + NamePtr = TokenPtr; + TokenPtr = skipDecimalCharacterReference(TokenPtr, CommentEnd); + isDecimal = true; + } else if (C == 'x' || C == 'X') { + TokenPtr++; + NamePtr = TokenPtr; + TokenPtr = skipHexCharacterReference(TokenPtr, CommentEnd); + } else { + formTextToken(T, TokenPtr); + return; + } + } else { + formTextToken(T, TokenPtr); + return; + } + if (NamePtr == TokenPtr || TokenPtr == CommentEnd || + *TokenPtr != ';') { + formTextToken(T, TokenPtr); + return; + } + StringRef Name(NamePtr, TokenPtr - NamePtr); + TokenPtr++; // Skip semicolon. 
+ StringRef Resolved; + if (isNamed) + Resolved = resolveHTMLNamedCharacterReference(Name); + else if (isDecimal) + Resolved = resolveHTMLDecimalCharacterReference(Name); + else + Resolved = resolveHTMLHexCharacterReference(Name); + + if (Resolved.empty()) { + formTextToken(T, TokenPtr); + return; + } + formTokenWithChars(T, TokenPtr, tok::text); + T.setText(Resolved); + return; +} + +void Lexer::setupAndLexHTMLStartTag(Token &T) { + assert(BufferPtr[0] == '<' && + isHTMLIdentifierStartingCharacter(BufferPtr[1])); + const char *TagNameEnd = skipHTMLIdentifier(BufferPtr + 2, CommentEnd); + StringRef Name(BufferPtr + 1, TagNameEnd - (BufferPtr + 1)); + if (!isHTMLTagName(Name)) { + formTextToken(T, TagNameEnd); + return; + } + + formTokenWithChars(T, TagNameEnd, tok::html_start_tag); + T.setHTMLTagStartName(Name); + + BufferPtr = skipWhitespace(BufferPtr, CommentEnd); + + const char C = *BufferPtr; + if (BufferPtr != CommentEnd && + (C == '>' || C == '/' || isHTMLIdentifierStartingCharacter(C))) + State = LS_HTMLStartTag; +} + +void Lexer::lexHTMLStartTag(Token &T) { + assert(State == LS_HTMLStartTag); + + const char *TokenPtr = BufferPtr; + char C = *TokenPtr; + if (isHTMLIdentifierCharacter(C)) { + TokenPtr = skipHTMLIdentifier(TokenPtr, CommentEnd); + StringRef Ident(BufferPtr, TokenPtr - BufferPtr); + formTokenWithChars(T, TokenPtr, tok::html_ident); + T.setHTMLIdent(Ident); + } else { + switch (C) { + case '=': + TokenPtr++; + formTokenWithChars(T, TokenPtr, tok::html_equals); + break; + case '\"': + case '\'': { + const char *OpenQuote = TokenPtr; + TokenPtr = skipHTMLQuotedString(TokenPtr, CommentEnd); + const char *ClosingQuote = TokenPtr; + if (TokenPtr != CommentEnd) // Skip closing quote. 
+ TokenPtr++; + formTokenWithChars(T, TokenPtr, tok::html_quoted_string); + T.setHTMLQuotedString(StringRef(OpenQuote + 1, + ClosingQuote - (OpenQuote + 1))); + break; + } + case '>': + TokenPtr++; + formTokenWithChars(T, TokenPtr, tok::html_greater); + State = LS_Normal; + return; + case '/': + TokenPtr++; + if (TokenPtr != CommentEnd && *TokenPtr == '>') { + TokenPtr++; + formTokenWithChars(T, TokenPtr, tok::html_slash_greater); + } else + formTextToken(T, TokenPtr); + + State = LS_Normal; + return; + } + } + + // Now look ahead and return to normal state if we don't see any HTML tokens + // ahead. + BufferPtr = skipWhitespace(BufferPtr, CommentEnd); + if (BufferPtr == CommentEnd) { + State = LS_Normal; + return; + } + + C = *BufferPtr; + if (!isHTMLIdentifierStartingCharacter(C) && + C != '=' && C != '\"' && C != '\'' && C != '>') { + State = LS_Normal; + return; + } +} + +void Lexer::setupAndLexHTMLEndTag(Token &T) { + assert(BufferPtr[0] == '<' && BufferPtr[1] == '/'); + + const char *TagNameBegin = skipWhitespace(BufferPtr + 2, CommentEnd); + const char *TagNameEnd = skipHTMLIdentifier(TagNameBegin, CommentEnd); + StringRef Name(TagNameBegin, TagNameEnd - TagNameBegin); + if (!isHTMLTagName(Name)) { + formTextToken(T, TagNameEnd); + return; + } + + const char *End = skipWhitespace(TagNameEnd, CommentEnd); + + formTokenWithChars(T, End, tok::html_end_tag); + T.setHTMLTagEndName(Name); + + if (BufferPtr != CommentEnd && *BufferPtr == '>') + State = LS_HTMLEndTag; +} + +void Lexer::lexHTMLEndTag(Token &T) { + assert(BufferPtr != CommentEnd && *BufferPtr == '>'); + + formTokenWithChars(T, BufferPtr + 1, tok::html_greater); + State = LS_Normal; +} + +Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, DiagnosticsEngine &Diags, + const CommandTraits &Traits, + SourceLocation FileLoc, + const char *BufferStart, const char *BufferEnd): + Allocator(Allocator), Diags(Diags), Traits(Traits), + BufferStart(BufferStart), BufferEnd(BufferEnd), + FileLoc(FileLoc), 
BufferPtr(BufferStart), + CommentState(LCS_BeforeComment), State(LS_Normal) { +} + +void Lexer::lex(Token &T) { +again: + switch (CommentState) { + case LCS_BeforeComment: + if (BufferPtr == BufferEnd) { + formTokenWithChars(T, BufferPtr, tok::eof); + return; + } + + assert(*BufferPtr == '/'); + BufferPtr++; // Skip first slash. + switch(*BufferPtr) { + case '/': { // BCPL comment. + BufferPtr++; // Skip second slash. + + if (BufferPtr != BufferEnd) { + // Skip Doxygen magic marker, if it is present. + // It might be missing because of a typo //< or /*<, or because we + // merged this non-Doxygen comment into a bunch of Doxygen comments + // around it: /** ... */ /* ... */ /** ... */ + const char C = *BufferPtr; + if (C == '/' || C == '!') + BufferPtr++; + } + + // Skip less-than symbol that marks trailing comments. + // Skip it even if the comment is not a Doxygen one, because //< and /*< + // are frequent typos. + if (BufferPtr != BufferEnd && *BufferPtr == '<') + BufferPtr++; + + CommentState = LCS_InsideBCPLComment; + if (State != LS_VerbatimBlockBody && State != LS_VerbatimBlockFirstLine) + State = LS_Normal; + CommentEnd = findBCPLCommentEnd(BufferPtr, BufferEnd); + goto again; + } + case '*': { // C comment. + BufferPtr++; // Skip star. + + // Skip Doxygen magic marker. + const char C = *BufferPtr; + if ((C == '*' && *(BufferPtr + 1) != '/') || C == '!') + BufferPtr++; + + // Skip less-than symbol that marks trailing comments. + if (BufferPtr != BufferEnd && *BufferPtr == '<') + BufferPtr++; + + CommentState = LCS_InsideCComment; + State = LS_Normal; + CommentEnd = findCCommentEnd(BufferPtr, BufferEnd); + goto again; + } + default: + llvm_unreachable("second character of comment should be '/' or '*'"); + } + + case LCS_BetweenComments: { + // Consecutive comments are extracted only if there is only whitespace + // between them. So we can search for the start of the next comment. 
+ const char *EndWhitespace = BufferPtr; + while(EndWhitespace != BufferEnd && *EndWhitespace != '/') + EndWhitespace++; + + // Turn any whitespace between comments (and there is only whitespace + // between them -- guaranteed by comment extraction) into a newline. We + // have two newlines between C comments in total (first one was synthesized + // after a comment). + formTokenWithChars(T, EndWhitespace, tok::newline); + + CommentState = LCS_BeforeComment; + break; + } + + case LCS_InsideBCPLComment: + case LCS_InsideCComment: + if (BufferPtr != CommentEnd) { + lexCommentText(T); + break; + } else { + // Skip C comment closing sequence. + if (CommentState == LCS_InsideCComment) { + assert(BufferPtr[0] == '*' && BufferPtr[1] == '/'); + BufferPtr += 2; + assert(BufferPtr <= BufferEnd); + + // Synthenize newline just after the C comment, regardless if there is + // actually a newline. + formTokenWithChars(T, BufferPtr, tok::newline); + + CommentState = LCS_BetweenComments; + break; + } else { + // Don't synthesized a newline after BCPL comment. 
+ CommentState = LCS_BetweenComments; + goto again; + } + } + } +} + +StringRef Lexer::getSpelling(const Token &Tok, + const SourceManager &SourceMgr, + bool *Invalid) const { + SourceLocation Loc = Tok.getLocation(); + std::pair<FileID, unsigned> LocInfo = SourceMgr.getDecomposedLoc(Loc); + + bool InvalidTemp = false; + StringRef File = SourceMgr.getBufferData(LocInfo.first, &InvalidTemp); + if (InvalidTemp) { + *Invalid = true; + return StringRef(); + } + + const char *Begin = File.data() + LocInfo.second; + return StringRef(Begin, Tok.getLength()); +} + +} // end namespace comments +} // end namespace clang + diff --git a/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp new file mode 100644 index 000000000000..03e01015b954 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CommentParser.cpp @@ -0,0 +1,776 @@ +//===--- CommentParser.cpp - Doxygen comment parser -----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/CommentParser.h" +#include "clang/AST/CommentCommandTraits.h" +#include "clang/AST/CommentDiagnostic.h" +#include "clang/AST/CommentSema.h" +#include "clang/Basic/CharInfo.h" +#include "clang/Basic/SourceManager.h" +#include "llvm/Support/ErrorHandling.h" + +namespace clang { + +static inline bool isWhitespace(llvm::StringRef S) { + for (StringRef::const_iterator I = S.begin(), E = S.end(); I != E; ++I) { + if (!isWhitespace(*I)) + return false; + } + return true; +} + +namespace comments { + +/// Re-lexes a sequence of tok::text tokens. +class TextTokenRetokenizer { + llvm::BumpPtrAllocator &Allocator; + Parser &P; + + /// This flag is set when there are no more tokens we can fetch from lexer. 
+ bool NoMoreInterestingTokens; + + /// Token buffer: tokens we have processed and lookahead. + SmallVector<Token, 16> Toks; + + /// A position in \c Toks. + struct Position { + unsigned CurToken; + const char *BufferStart; + const char *BufferEnd; + const char *BufferPtr; + SourceLocation BufferStartLoc; + }; + + /// Current position in Toks. + Position Pos; + + bool isEnd() const { + return Pos.CurToken >= Toks.size(); + } + + /// Sets up the buffer pointers to point to current token. + void setupBuffer() { + assert(!isEnd()); + const Token &Tok = Toks[Pos.CurToken]; + + Pos.BufferStart = Tok.getText().begin(); + Pos.BufferEnd = Tok.getText().end(); + Pos.BufferPtr = Pos.BufferStart; + Pos.BufferStartLoc = Tok.getLocation(); + } + + SourceLocation getSourceLocation() const { + const unsigned CharNo = Pos.BufferPtr - Pos.BufferStart; + return Pos.BufferStartLoc.getLocWithOffset(CharNo); + } + + char peek() const { + assert(!isEnd()); + assert(Pos.BufferPtr != Pos.BufferEnd); + return *Pos.BufferPtr; + } + + void consumeChar() { + assert(!isEnd()); + assert(Pos.BufferPtr != Pos.BufferEnd); + Pos.BufferPtr++; + if (Pos.BufferPtr == Pos.BufferEnd) { + Pos.CurToken++; + if (isEnd() && !addToken()) + return; + + assert(!isEnd()); + setupBuffer(); + } + } + + /// Add a token. + /// Returns true on success, false if there are no interesting tokens to + /// fetch from lexer. + bool addToken() { + if (NoMoreInterestingTokens) + return false; + + if (P.Tok.is(tok::newline)) { + // If we see a single newline token between text tokens, skip it. 
+ Token Newline = P.Tok; + P.consumeToken(); + if (P.Tok.isNot(tok::text)) { + P.putBack(Newline); + NoMoreInterestingTokens = true; + return false; + } + } + if (P.Tok.isNot(tok::text)) { + NoMoreInterestingTokens = true; + return false; + } + + Toks.push_back(P.Tok); + P.consumeToken(); + if (Toks.size() == 1) + setupBuffer(); + return true; + } + + void consumeWhitespace() { + while (!isEnd()) { + if (isWhitespace(peek())) + consumeChar(); + else + break; + } + } + + void formTokenWithChars(Token &Result, + SourceLocation Loc, + const char *TokBegin, + unsigned TokLength, + StringRef Text) { + Result.setLocation(Loc); + Result.setKind(tok::text); + Result.setLength(TokLength); +#ifndef NDEBUG + Result.TextPtr = "<UNSET>"; + Result.IntVal = 7; +#endif + Result.setText(Text); + } + +public: + TextTokenRetokenizer(llvm::BumpPtrAllocator &Allocator, Parser &P): + Allocator(Allocator), P(P), NoMoreInterestingTokens(false) { + Pos.CurToken = 0; + addToken(); + } + + /// Extract a word -- sequence of non-whitespace characters. 
+ bool lexWord(Token &Tok) { + if (isEnd()) + return false; + + Position SavedPos = Pos; + + consumeWhitespace(); + SmallString<32> WordText; + const char *WordBegin = Pos.BufferPtr; + SourceLocation Loc = getSourceLocation(); + while (!isEnd()) { + const char C = peek(); + if (!isWhitespace(C)) { + WordText.push_back(C); + consumeChar(); + } else + break; + } + const unsigned Length = WordText.size(); + if (Length == 0) { + Pos = SavedPos; + return false; + } + + char *TextPtr = Allocator.Allocate<char>(Length + 1); + + memcpy(TextPtr, WordText.c_str(), Length + 1); + StringRef Text = StringRef(TextPtr, Length); + + formTokenWithChars(Tok, Loc, WordBegin, Length, Text); + return true; + } + + bool lexDelimitedSeq(Token &Tok, char OpenDelim, char CloseDelim) { + if (isEnd()) + return false; + + Position SavedPos = Pos; + + consumeWhitespace(); + SmallString<32> WordText; + const char *WordBegin = Pos.BufferPtr; + SourceLocation Loc = getSourceLocation(); + bool Error = false; + if (!isEnd()) { + const char C = peek(); + if (C == OpenDelim) { + WordText.push_back(C); + consumeChar(); + } else + Error = true; + } + char C = '\0'; + while (!Error && !isEnd()) { + C = peek(); + WordText.push_back(C); + consumeChar(); + if (C == CloseDelim) + break; + } + if (!Error && C != CloseDelim) + Error = true; + + if (Error) { + Pos = SavedPos; + return false; + } + + const unsigned Length = WordText.size(); + char *TextPtr = Allocator.Allocate<char>(Length + 1); + + memcpy(TextPtr, WordText.c_str(), Length + 1); + StringRef Text = StringRef(TextPtr, Length); + + formTokenWithChars(Tok, Loc, WordBegin, + Pos.BufferPtr - WordBegin, Text); + return true; + } + + /// Put back tokens that we didn't consume. 
+ void putBackLeftoverTokens() { + if (isEnd()) + return; + + bool HavePartialTok = false; + Token PartialTok; + if (Pos.BufferPtr != Pos.BufferStart) { + formTokenWithChars(PartialTok, getSourceLocation(), + Pos.BufferPtr, Pos.BufferEnd - Pos.BufferPtr, + StringRef(Pos.BufferPtr, + Pos.BufferEnd - Pos.BufferPtr)); + HavePartialTok = true; + Pos.CurToken++; + } + + P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end())); + Pos.CurToken = Toks.size(); + + if (HavePartialTok) + P.putBack(PartialTok); + } +}; + +Parser::Parser(Lexer &L, Sema &S, llvm::BumpPtrAllocator &Allocator, + const SourceManager &SourceMgr, DiagnosticsEngine &Diags, + const CommandTraits &Traits): + L(L), S(S), Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), + Traits(Traits) { + consumeToken(); +} + +void Parser::parseParamCommandArgs(ParamCommandComment *PC, + TextTokenRetokenizer &Retokenizer) { + Token Arg; + // Check if argument looks like direction specification: [dir] + // e.g., [in], [out], [in,out] + if (Retokenizer.lexDelimitedSeq(Arg, '[', ']')) + S.actOnParamCommandDirectionArg(PC, + Arg.getLocation(), + Arg.getEndLocation(), + Arg.getText()); + + if (Retokenizer.lexWord(Arg)) + S.actOnParamCommandParamNameArg(PC, + Arg.getLocation(), + Arg.getEndLocation(), + Arg.getText()); +} + +void Parser::parseTParamCommandArgs(TParamCommandComment *TPC, + TextTokenRetokenizer &Retokenizer) { + Token Arg; + if (Retokenizer.lexWord(Arg)) + S.actOnTParamCommandParamNameArg(TPC, + Arg.getLocation(), + Arg.getEndLocation(), + Arg.getText()); +} + +void Parser::parseBlockCommandArgs(BlockCommandComment *BC, + TextTokenRetokenizer &Retokenizer, + unsigned NumArgs) { + typedef BlockCommandComment::Argument Argument; + Argument *Args = + new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs]; + unsigned ParsedArgs = 0; + Token Arg; + while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) { + Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(), + 
Arg.getEndLocation()), + Arg.getText()); + ParsedArgs++; + } + + S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs)); +} + +BlockCommandComment *Parser::parseBlockCommand() { + assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command)); + + ParamCommandComment *PC = 0; + TParamCommandComment *TPC = 0; + BlockCommandComment *BC = 0; + const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID()); + CommandMarkerKind CommandMarker = + Tok.is(tok::backslash_command) ? CMK_Backslash : CMK_At; + if (Info->IsParamCommand) { + PC = S.actOnParamCommandStart(Tok.getLocation(), + Tok.getEndLocation(), + Tok.getCommandID(), + CommandMarker); + } else if (Info->IsTParamCommand) { + TPC = S.actOnTParamCommandStart(Tok.getLocation(), + Tok.getEndLocation(), + Tok.getCommandID(), + CommandMarker); + } else { + BC = S.actOnBlockCommandStart(Tok.getLocation(), + Tok.getEndLocation(), + Tok.getCommandID(), + CommandMarker); + } + consumeToken(); + + if (isTokBlockCommand()) { + // Block command ahead. We can't nest block commands, so pretend that this + // command has an empty argument. + ParagraphComment *Paragraph = S.actOnParagraphComment(None); + if (PC) { + S.actOnParamCommandFinish(PC, Paragraph); + return PC; + } else if (TPC) { + S.actOnTParamCommandFinish(TPC, Paragraph); + return TPC; + } else { + S.actOnBlockCommandFinish(BC, Paragraph); + return BC; + } + } + + if (PC || TPC || Info->NumArgs > 0) { + // In order to parse command arguments we need to retokenize a few + // following text tokens. + TextTokenRetokenizer Retokenizer(Allocator, *this); + + if (PC) + parseParamCommandArgs(PC, Retokenizer); + else if (TPC) + parseTParamCommandArgs(TPC, Retokenizer); + else + parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs); + + Retokenizer.putBackLeftoverTokens(); + } + + // If there's a block command ahead, we will attach an empty paragraph to + // this command. 
+ bool EmptyParagraph = false; + if (isTokBlockCommand()) + EmptyParagraph = true; + else if (Tok.is(tok::newline)) { + Token PrevTok = Tok; + consumeToken(); + EmptyParagraph = isTokBlockCommand(); + putBack(PrevTok); + } + + ParagraphComment *Paragraph; + if (EmptyParagraph) + Paragraph = S.actOnParagraphComment(None); + else { + BlockContentComment *Block = parseParagraphOrBlockCommand(); + // Since we have checked for a block command, we should have parsed a + // paragraph. + Paragraph = cast<ParagraphComment>(Block); + } + + if (PC) { + S.actOnParamCommandFinish(PC, Paragraph); + return PC; + } else if (TPC) { + S.actOnTParamCommandFinish(TPC, Paragraph); + return TPC; + } else { + S.actOnBlockCommandFinish(BC, Paragraph); + return BC; + } +} + +InlineCommandComment *Parser::parseInlineCommand() { + assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command)); + + const Token CommandTok = Tok; + consumeToken(); + + TextTokenRetokenizer Retokenizer(Allocator, *this); + + Token ArgTok; + bool ArgTokValid = Retokenizer.lexWord(ArgTok); + + InlineCommandComment *IC; + if (ArgTokValid) { + IC = S.actOnInlineCommand(CommandTok.getLocation(), + CommandTok.getEndLocation(), + CommandTok.getCommandID(), + ArgTok.getLocation(), + ArgTok.getEndLocation(), + ArgTok.getText()); + } else { + IC = S.actOnInlineCommand(CommandTok.getLocation(), + CommandTok.getEndLocation(), + CommandTok.getCommandID()); + } + + Retokenizer.putBackLeftoverTokens(); + + return IC; +} + +HTMLStartTagComment *Parser::parseHTMLStartTag() { + assert(Tok.is(tok::html_start_tag)); + HTMLStartTagComment *HST = + S.actOnHTMLStartTagStart(Tok.getLocation(), + Tok.getHTMLTagStartName()); + consumeToken(); + + SmallVector<HTMLStartTagComment::Attribute, 2> Attrs; + while (true) { + switch (Tok.getKind()) { + case tok::html_ident: { + Token Ident = Tok; + consumeToken(); + if (Tok.isNot(tok::html_equals)) { + Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(), + 
Ident.getHTMLIdent())); + continue; + } + Token Equals = Tok; + consumeToken(); + if (Tok.isNot(tok::html_quoted_string)) { + Diag(Tok.getLocation(), + diag::warn_doc_html_start_tag_expected_quoted_string) + << SourceRange(Equals.getLocation()); + Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(), + Ident.getHTMLIdent())); + while (Tok.is(tok::html_equals) || + Tok.is(tok::html_quoted_string)) + consumeToken(); + continue; + } + Attrs.push_back(HTMLStartTagComment::Attribute( + Ident.getLocation(), + Ident.getHTMLIdent(), + Equals.getLocation(), + SourceRange(Tok.getLocation(), + Tok.getEndLocation()), + Tok.getHTMLQuotedString())); + consumeToken(); + continue; + } + + case tok::html_greater: + S.actOnHTMLStartTagFinish(HST, + S.copyArray(llvm::makeArrayRef(Attrs)), + Tok.getLocation(), + /* IsSelfClosing = */ false); + consumeToken(); + return HST; + + case tok::html_slash_greater: + S.actOnHTMLStartTagFinish(HST, + S.copyArray(llvm::makeArrayRef(Attrs)), + Tok.getLocation(), + /* IsSelfClosing = */ true); + consumeToken(); + return HST; + + case tok::html_equals: + case tok::html_quoted_string: + Diag(Tok.getLocation(), + diag::warn_doc_html_start_tag_expected_ident_or_greater); + while (Tok.is(tok::html_equals) || + Tok.is(tok::html_quoted_string)) + consumeToken(); + if (Tok.is(tok::html_ident) || + Tok.is(tok::html_greater) || + Tok.is(tok::html_slash_greater)) + continue; + + S.actOnHTMLStartTagFinish(HST, + S.copyArray(llvm::makeArrayRef(Attrs)), + SourceLocation(), + /* IsSelfClosing = */ false); + return HST; + + default: + // Not a token from an HTML start tag. Thus HTML tag prematurely ended. 
+ S.actOnHTMLStartTagFinish(HST, + S.copyArray(llvm::makeArrayRef(Attrs)), + SourceLocation(), + /* IsSelfClosing = */ false); + bool StartLineInvalid; + const unsigned StartLine = SourceMgr.getPresumedLineNumber( + HST->getLocation(), + &StartLineInvalid); + bool EndLineInvalid; + const unsigned EndLine = SourceMgr.getPresumedLineNumber( + Tok.getLocation(), + &EndLineInvalid); + if (StartLineInvalid || EndLineInvalid || StartLine == EndLine) + Diag(Tok.getLocation(), + diag::warn_doc_html_start_tag_expected_ident_or_greater) + << HST->getSourceRange(); + else { + Diag(Tok.getLocation(), + diag::warn_doc_html_start_tag_expected_ident_or_greater); + Diag(HST->getLocation(), diag::note_doc_html_tag_started_here) + << HST->getSourceRange(); + } + return HST; + } + } +} + +HTMLEndTagComment *Parser::parseHTMLEndTag() { + assert(Tok.is(tok::html_end_tag)); + Token TokEndTag = Tok; + consumeToken(); + SourceLocation Loc; + if (Tok.is(tok::html_greater)) { + Loc = Tok.getLocation(); + consumeToken(); + } + + return S.actOnHTMLEndTag(TokEndTag.getLocation(), + Loc, + TokEndTag.getHTMLTagEndName()); +} + +BlockContentComment *Parser::parseParagraphOrBlockCommand() { + SmallVector<InlineContentComment *, 8> Content; + + while (true) { + switch (Tok.getKind()) { + case tok::verbatim_block_begin: + case tok::verbatim_line_name: + case tok::eof: + assert(Content.size() != 0); + break; // Block content or EOF ahead, finish this parapgaph. + + case tok::unknown_command: + Content.push_back(S.actOnUnknownCommand(Tok.getLocation(), + Tok.getEndLocation(), + Tok.getUnknownCommandName())); + consumeToken(); + continue; + + case tok::backslash_command: + case tok::at_command: { + const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID()); + if (Info->IsBlockCommand) { + if (Content.size() == 0) + return parseBlockCommand(); + break; // Block command ahead, finish this parapgaph. 
+ } + if (Info->IsVerbatimBlockEndCommand) { + Diag(Tok.getLocation(), + diag::warn_verbatim_block_end_without_start) + << Tok.is(tok::at_command) + << Info->Name + << SourceRange(Tok.getLocation(), Tok.getEndLocation()); + consumeToken(); + continue; + } + if (Info->IsUnknownCommand) { + Content.push_back(S.actOnUnknownCommand(Tok.getLocation(), + Tok.getEndLocation(), + Info->getID())); + consumeToken(); + continue; + } + assert(Info->IsInlineCommand); + Content.push_back(parseInlineCommand()); + continue; + } + + case tok::newline: { + consumeToken(); + if (Tok.is(tok::newline) || Tok.is(tok::eof)) { + consumeToken(); + break; // Two newlines -- end of paragraph. + } + // Also allow [tok::newline, tok::text, tok::newline] if the middle + // tok::text is just whitespace. + if (Tok.is(tok::text) && isWhitespace(Tok.getText())) { + Token WhitespaceTok = Tok; + consumeToken(); + if (Tok.is(tok::newline) || Tok.is(tok::eof)) { + consumeToken(); + break; + } + // We have [tok::newline, tok::text, non-newline]. Put back tok::text. + putBack(WhitespaceTok); + } + if (Content.size() > 0) + Content.back()->addTrailingNewline(); + continue; + } + + // Don't deal with HTML tag soup now. 
+ case tok::html_start_tag: + Content.push_back(parseHTMLStartTag()); + continue; + + case tok::html_end_tag: + Content.push_back(parseHTMLEndTag()); + continue; + + case tok::text: + Content.push_back(S.actOnText(Tok.getLocation(), + Tok.getEndLocation(), + Tok.getText())); + consumeToken(); + continue; + + case tok::verbatim_block_line: + case tok::verbatim_block_end: + case tok::verbatim_line_text: + case tok::html_ident: + case tok::html_equals: + case tok::html_quoted_string: + case tok::html_greater: + case tok::html_slash_greater: + llvm_unreachable("should not see this token"); + } + break; + } + + return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content))); +} + +VerbatimBlockComment *Parser::parseVerbatimBlock() { + assert(Tok.is(tok::verbatim_block_begin)); + + VerbatimBlockComment *VB = + S.actOnVerbatimBlockStart(Tok.getLocation(), + Tok.getVerbatimBlockID()); + consumeToken(); + + // Don't create an empty line if verbatim opening command is followed + // by a newline. + if (Tok.is(tok::newline)) + consumeToken(); + + SmallVector<VerbatimBlockLineComment *, 8> Lines; + while (Tok.is(tok::verbatim_block_line) || + Tok.is(tok::newline)) { + VerbatimBlockLineComment *Line; + if (Tok.is(tok::verbatim_block_line)) { + Line = S.actOnVerbatimBlockLine(Tok.getLocation(), + Tok.getVerbatimBlockText()); + consumeToken(); + if (Tok.is(tok::newline)) { + consumeToken(); + } + } else { + // Empty line, just a tok::newline. 
+ Line = S.actOnVerbatimBlockLine(Tok.getLocation(), ""); + consumeToken(); + } + Lines.push_back(Line); + } + + if (Tok.is(tok::verbatim_block_end)) { + const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID()); + S.actOnVerbatimBlockFinish(VB, Tok.getLocation(), + Info->Name, + S.copyArray(llvm::makeArrayRef(Lines))); + consumeToken(); + } else { + // Unterminated \\verbatim block + S.actOnVerbatimBlockFinish(VB, SourceLocation(), "", + S.copyArray(llvm::makeArrayRef(Lines))); + } + + return VB; +} + +VerbatimLineComment *Parser::parseVerbatimLine() { + assert(Tok.is(tok::verbatim_line_name)); + + Token NameTok = Tok; + consumeToken(); + + SourceLocation TextBegin; + StringRef Text; + // Next token might not be a tok::verbatim_line_text if verbatim line + // starting command comes just before a newline or comment end. + if (Tok.is(tok::verbatim_line_text)) { + TextBegin = Tok.getLocation(); + Text = Tok.getVerbatimLineText(); + } else { + TextBegin = NameTok.getEndLocation(); + Text = ""; + } + + VerbatimLineComment *VL = S.actOnVerbatimLine(NameTok.getLocation(), + NameTok.getVerbatimLineID(), + TextBegin, + Text); + consumeToken(); + return VL; +} + +BlockContentComment *Parser::parseBlockContent() { + switch (Tok.getKind()) { + case tok::text: + case tok::unknown_command: + case tok::backslash_command: + case tok::at_command: + case tok::html_start_tag: + case tok::html_end_tag: + return parseParagraphOrBlockCommand(); + + case tok::verbatim_block_begin: + return parseVerbatimBlock(); + + case tok::verbatim_line_name: + return parseVerbatimLine(); + + case tok::eof: + case tok::newline: + case tok::verbatim_block_line: + case tok::verbatim_block_end: + case tok::verbatim_line_text: + case tok::html_ident: + case tok::html_equals: + case tok::html_quoted_string: + case tok::html_greater: + case tok::html_slash_greater: + llvm_unreachable("should not see this token"); + } + llvm_unreachable("bogus token kind"); +} + +FullComment 
*Parser::parseFullComment() { + // Skip newlines at the beginning of the comment. + while (Tok.is(tok::newline)) + consumeToken(); + + SmallVector<BlockContentComment *, 8> Blocks; + while (Tok.isNot(tok::eof)) { + Blocks.push_back(parseBlockContent()); + + // Skip extra newlines after paragraph end. + while (Tok.is(tok::newline)) + consumeToken(); + } + return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(Blocks))); +} + +} // end namespace comments +} // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp new file mode 100644 index 000000000000..1c6222f9ec02 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/CommentSema.cpp @@ -0,0 +1,1065 @@ +//===--- CommentSema.cpp - Doxygen comment semantic analysis --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/CommentSema.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CommentCommandTraits.h" +#include "clang/AST/CommentDiagnostic.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Lex/Preprocessor.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringSwitch.h" + +namespace clang { +namespace comments { + +namespace { +#include "clang/AST/CommentHTMLTagsProperties.inc" +} // unnamed namespace + +Sema::Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr, + DiagnosticsEngine &Diags, CommandTraits &Traits, + const Preprocessor *PP) : + Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), Traits(Traits), + PP(PP), ThisDeclInfo(NULL), BriefCommand(NULL), HeaderfileCommand(NULL) { +} + +void Sema::setDecl(const Decl *D) { + if (!D) + return; + + ThisDeclInfo = new (Allocator) DeclInfo; + 
ThisDeclInfo->CommentDecl = D; + ThisDeclInfo->IsFilled = false; +} + +ParagraphComment *Sema::actOnParagraphComment( + ArrayRef<InlineContentComment *> Content) { + return new (Allocator) ParagraphComment(Content); +} + +BlockCommandComment *Sema::actOnBlockCommandStart( + SourceLocation LocBegin, + SourceLocation LocEnd, + unsigned CommandID, + CommandMarkerKind CommandMarker) { + BlockCommandComment *BC = new (Allocator) BlockCommandComment(LocBegin, LocEnd, + CommandID, + CommandMarker); + checkContainerDecl(BC); + return BC; +} + +void Sema::actOnBlockCommandArgs(BlockCommandComment *Command, + ArrayRef<BlockCommandComment::Argument> Args) { + Command->setArgs(Args); +} + +void Sema::actOnBlockCommandFinish(BlockCommandComment *Command, + ParagraphComment *Paragraph) { + Command->setParagraph(Paragraph); + checkBlockCommandEmptyParagraph(Command); + checkBlockCommandDuplicate(Command); + checkReturnsCommand(Command); + checkDeprecatedCommand(Command); +} + +ParamCommandComment *Sema::actOnParamCommandStart( + SourceLocation LocBegin, + SourceLocation LocEnd, + unsigned CommandID, + CommandMarkerKind CommandMarker) { + ParamCommandComment *Command = + new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID, + CommandMarker); + + if (!isFunctionDecl()) + Diag(Command->getLocation(), + diag::warn_doc_param_not_attached_to_a_function_decl) + << CommandMarker + << Command->getCommandNameRange(Traits); + + return Command; +} + +void Sema::checkFunctionDeclVerbatimLine(const BlockCommandComment *Comment) { + const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID()); + if (!Info->IsFunctionDeclarationCommand) + return; + + unsigned DiagSelect; + switch (Comment->getCommandID()) { + case CommandTraits::KCI_function: + DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 1 : 0; + break; + case CommandTraits::KCI_functiongroup: + DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 
2 : 0; + break; + case CommandTraits::KCI_method: + DiagSelect = !isObjCMethodDecl() ? 3 : 0; + break; + case CommandTraits::KCI_methodgroup: + DiagSelect = !isObjCMethodDecl() ? 4 : 0; + break; + case CommandTraits::KCI_callback: + DiagSelect = !isFunctionPointerVarDecl() ? 5 : 0; + break; + default: + DiagSelect = 0; + break; + } + if (DiagSelect) + Diag(Comment->getLocation(), diag::warn_doc_function_method_decl_mismatch) + << Comment->getCommandMarker() + << (DiagSelect-1) << (DiagSelect-1) + << Comment->getSourceRange(); +} + +void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) { + const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID()); + if (!Info->IsRecordLikeDeclarationCommand) + return; + unsigned DiagSelect; + switch (Comment->getCommandID()) { + case CommandTraits::KCI_class: + DiagSelect = (!isClassOrStructDecl() && !isClassTemplateDecl()) ? 1 : 0; + // Allow @class command on @interface declarations. + // FIXME. Currently, \class and @class are indistinguishable. So, + // \class is also allowed on an @interface declaration + if (DiagSelect && Comment->getCommandMarker() && isObjCInterfaceDecl()) + DiagSelect = 0; + break; + case CommandTraits::KCI_interface: + DiagSelect = !isObjCInterfaceDecl() ? 2 : 0; + break; + case CommandTraits::KCI_protocol: + DiagSelect = !isObjCProtocolDecl() ? 3 : 0; + break; + case CommandTraits::KCI_struct: + DiagSelect = !isClassOrStructDecl() ? 4 : 0; + break; + case CommandTraits::KCI_union: + DiagSelect = !isUnionDecl() ? 
5 : 0; + break; + default: + DiagSelect = 0; + break; + } + if (DiagSelect) + Diag(Comment->getLocation(), diag::warn_doc_api_container_decl_mismatch) + << Comment->getCommandMarker() + << (DiagSelect-1) << (DiagSelect-1) + << Comment->getSourceRange(); +} + +void Sema::checkContainerDecl(const BlockCommandComment *Comment) { + const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID()); + if (!Info->IsRecordLikeDetailCommand || isRecordLikeDecl()) + return; + unsigned DiagSelect; + switch (Comment->getCommandID()) { + case CommandTraits::KCI_classdesign: + DiagSelect = 1; + break; + case CommandTraits::KCI_coclass: + DiagSelect = 2; + break; + case CommandTraits::KCI_dependency: + DiagSelect = 3; + break; + case CommandTraits::KCI_helper: + DiagSelect = 4; + break; + case CommandTraits::KCI_helperclass: + DiagSelect = 5; + break; + case CommandTraits::KCI_helps: + DiagSelect = 6; + break; + case CommandTraits::KCI_instancesize: + DiagSelect = 7; + break; + case CommandTraits::KCI_ownership: + DiagSelect = 8; + break; + case CommandTraits::KCI_performance: + DiagSelect = 9; + break; + case CommandTraits::KCI_security: + DiagSelect = 10; + break; + case CommandTraits::KCI_superclass: + DiagSelect = 11; + break; + default: + DiagSelect = 0; + break; + } + if (DiagSelect) + Diag(Comment->getLocation(), diag::warn_doc_container_decl_mismatch) + << Comment->getCommandMarker() + << (DiagSelect-1) + << Comment->getSourceRange(); +} + +/// \brief Turn a string into the corresponding PassDirection or -1 if it's not +/// valid. 
+static int getParamPassDirection(StringRef Arg) { + return llvm::StringSwitch<int>(Arg) + .Case("[in]", ParamCommandComment::In) + .Case("[out]", ParamCommandComment::Out) + .Cases("[in,out]", "[out,in]", ParamCommandComment::InOut) + .Default(-1); +} + +void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command, + SourceLocation ArgLocBegin, + SourceLocation ArgLocEnd, + StringRef Arg) { + std::string ArgLower = Arg.lower(); + int Direction = getParamPassDirection(ArgLower); + + if (Direction == -1) { + // Try again with whitespace removed. + ArgLower.erase( + std::remove_if(ArgLower.begin(), ArgLower.end(), clang::isWhitespace), + ArgLower.end()); + Direction = getParamPassDirection(ArgLower); + + SourceRange ArgRange(ArgLocBegin, ArgLocEnd); + if (Direction != -1) { + const char *FixedName = ParamCommandComment::getDirectionAsString( + (ParamCommandComment::PassDirection)Direction); + Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction) + << ArgRange << FixItHint::CreateReplacement(ArgRange, FixedName); + } else { + Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction) << ArgRange; + Direction = ParamCommandComment::In; // Sane fall back. + } + } + Command->setDirection((ParamCommandComment::PassDirection)Direction, + /*Explicit=*/true); +} + +void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command, + SourceLocation ArgLocBegin, + SourceLocation ArgLocEnd, + StringRef Arg) { + // Parser will not feed us more arguments than needed. + assert(Command->getNumArgs() == 0); + + if (!Command->isDirectionExplicit()) { + // User didn't provide a direction argument. 
+ Command->setDirection(ParamCommandComment::In, /* Explicit = */ false); + } + typedef BlockCommandComment::Argument Argument; + Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, + ArgLocEnd), + Arg); + Command->setArgs(llvm::makeArrayRef(A, 1)); +} + +void Sema::actOnParamCommandFinish(ParamCommandComment *Command, + ParagraphComment *Paragraph) { + Command->setParagraph(Paragraph); + checkBlockCommandEmptyParagraph(Command); +} + +TParamCommandComment *Sema::actOnTParamCommandStart( + SourceLocation LocBegin, + SourceLocation LocEnd, + unsigned CommandID, + CommandMarkerKind CommandMarker) { + TParamCommandComment *Command = + new (Allocator) TParamCommandComment(LocBegin, LocEnd, CommandID, + CommandMarker); + + if (!isTemplateOrSpecialization()) + Diag(Command->getLocation(), + diag::warn_doc_tparam_not_attached_to_a_template_decl) + << CommandMarker + << Command->getCommandNameRange(Traits); + + return Command; +} + +void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command, + SourceLocation ArgLocBegin, + SourceLocation ArgLocEnd, + StringRef Arg) { + // Parser will not feed us more arguments than needed. + assert(Command->getNumArgs() == 0); + + typedef BlockCommandComment::Argument Argument; + Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, + ArgLocEnd), + Arg); + Command->setArgs(llvm::makeArrayRef(A, 1)); + + if (!isTemplateOrSpecialization()) { + // We already warned that this \\tparam is not attached to a template decl. 
+ return; + } + + const TemplateParameterList *TemplateParameters = + ThisDeclInfo->TemplateParameters; + SmallVector<unsigned, 2> Position; + if (resolveTParamReference(Arg, TemplateParameters, &Position)) { + Command->setPosition(copyArray(llvm::makeArrayRef(Position))); + TParamCommandComment *&PrevCommand = TemplateParameterDocs[Arg]; + if (PrevCommand) { + SourceRange ArgRange(ArgLocBegin, ArgLocEnd); + Diag(ArgLocBegin, diag::warn_doc_tparam_duplicate) + << Arg << ArgRange; + Diag(PrevCommand->getLocation(), diag::note_doc_tparam_previous) + << PrevCommand->getParamNameRange(); + } + PrevCommand = Command; + return; + } + + SourceRange ArgRange(ArgLocBegin, ArgLocEnd); + Diag(ArgLocBegin, diag::warn_doc_tparam_not_found) + << Arg << ArgRange; + + if (!TemplateParameters || TemplateParameters->size() == 0) + return; + + StringRef CorrectedName; + if (TemplateParameters->size() == 1) { + const NamedDecl *Param = TemplateParameters->getParam(0); + const IdentifierInfo *II = Param->getIdentifier(); + if (II) + CorrectedName = II->getName(); + } else { + CorrectedName = correctTypoInTParamReference(Arg, TemplateParameters); + } + + if (!CorrectedName.empty()) { + Diag(ArgLocBegin, diag::note_doc_tparam_name_suggestion) + << CorrectedName + << FixItHint::CreateReplacement(ArgRange, CorrectedName); + } + + return; +} + +void Sema::actOnTParamCommandFinish(TParamCommandComment *Command, + ParagraphComment *Paragraph) { + Command->setParagraph(Paragraph); + checkBlockCommandEmptyParagraph(Command); +} + +InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin, + SourceLocation CommandLocEnd, + unsigned CommandID) { + ArrayRef<InlineCommandComment::Argument> Args; + StringRef CommandName = Traits.getCommandInfo(CommandID)->Name; + return new (Allocator) InlineCommandComment( + CommandLocBegin, + CommandLocEnd, + CommandID, + getInlineCommandRenderKind(CommandName), + Args); +} + +InlineCommandComment *Sema::actOnInlineCommand(SourceLocation 
CommandLocBegin, + SourceLocation CommandLocEnd, + unsigned CommandID, + SourceLocation ArgLocBegin, + SourceLocation ArgLocEnd, + StringRef Arg) { + typedef InlineCommandComment::Argument Argument; + Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, + ArgLocEnd), + Arg); + StringRef CommandName = Traits.getCommandInfo(CommandID)->Name; + + return new (Allocator) InlineCommandComment( + CommandLocBegin, + CommandLocEnd, + CommandID, + getInlineCommandRenderKind(CommandName), + llvm::makeArrayRef(A, 1)); +} + +InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin, + SourceLocation LocEnd, + StringRef CommandName) { + unsigned CommandID = Traits.registerUnknownCommand(CommandName)->getID(); + return actOnUnknownCommand(LocBegin, LocEnd, CommandID); +} + +InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin, + SourceLocation LocEnd, + unsigned CommandID) { + ArrayRef<InlineCommandComment::Argument> Args; + return new (Allocator) InlineCommandComment( + LocBegin, LocEnd, CommandID, + InlineCommandComment::RenderNormal, + Args); +} + +TextComment *Sema::actOnText(SourceLocation LocBegin, + SourceLocation LocEnd, + StringRef Text) { + return new (Allocator) TextComment(LocBegin, LocEnd, Text); +} + +VerbatimBlockComment *Sema::actOnVerbatimBlockStart(SourceLocation Loc, + unsigned CommandID) { + StringRef CommandName = Traits.getCommandInfo(CommandID)->Name; + return new (Allocator) VerbatimBlockComment( + Loc, + Loc.getLocWithOffset(1 + CommandName.size()), + CommandID); +} + +VerbatimBlockLineComment *Sema::actOnVerbatimBlockLine(SourceLocation Loc, + StringRef Text) { + return new (Allocator) VerbatimBlockLineComment(Loc, Text); +} + +void Sema::actOnVerbatimBlockFinish( + VerbatimBlockComment *Block, + SourceLocation CloseNameLocBegin, + StringRef CloseName, + ArrayRef<VerbatimBlockLineComment *> Lines) { + Block->setCloseName(CloseName, CloseNameLocBegin); + Block->setLines(Lines); +} + +VerbatimLineComment 
*Sema::actOnVerbatimLine(SourceLocation LocBegin, + unsigned CommandID, + SourceLocation TextBegin, + StringRef Text) { + VerbatimLineComment *VL = new (Allocator) VerbatimLineComment( + LocBegin, + TextBegin.getLocWithOffset(Text.size()), + CommandID, + TextBegin, + Text); + checkFunctionDeclVerbatimLine(VL); + checkContainerDeclVerbatimLine(VL); + return VL; +} + +HTMLStartTagComment *Sema::actOnHTMLStartTagStart(SourceLocation LocBegin, + StringRef TagName) { + return new (Allocator) HTMLStartTagComment(LocBegin, TagName); +} + +void Sema::actOnHTMLStartTagFinish( + HTMLStartTagComment *Tag, + ArrayRef<HTMLStartTagComment::Attribute> Attrs, + SourceLocation GreaterLoc, + bool IsSelfClosing) { + Tag->setAttrs(Attrs); + Tag->setGreaterLoc(GreaterLoc); + if (IsSelfClosing) + Tag->setSelfClosing(); + else if (!isHTMLEndTagForbidden(Tag->getTagName())) + HTMLOpenTags.push_back(Tag); +} + +HTMLEndTagComment *Sema::actOnHTMLEndTag(SourceLocation LocBegin, + SourceLocation LocEnd, + StringRef TagName) { + HTMLEndTagComment *HET = + new (Allocator) HTMLEndTagComment(LocBegin, LocEnd, TagName); + if (isHTMLEndTagForbidden(TagName)) { + Diag(HET->getLocation(), diag::warn_doc_html_end_forbidden) + << TagName << HET->getSourceRange(); + return HET; + } + + bool FoundOpen = false; + for (SmallVectorImpl<HTMLStartTagComment *>::const_reverse_iterator + I = HTMLOpenTags.rbegin(), E = HTMLOpenTags.rend(); + I != E; ++I) { + if ((*I)->getTagName() == TagName) { + FoundOpen = true; + break; + } + } + if (!FoundOpen) { + Diag(HET->getLocation(), diag::warn_doc_html_end_unbalanced) + << HET->getSourceRange(); + return HET; + } + + while (!HTMLOpenTags.empty()) { + const HTMLStartTagComment *HST = HTMLOpenTags.pop_back_val(); + StringRef LastNotClosedTagName = HST->getTagName(); + if (LastNotClosedTagName == TagName) + break; + + if (isHTMLEndTagOptional(LastNotClosedTagName)) + continue; + + bool OpenLineInvalid; + const unsigned OpenLine = SourceMgr.getPresumedLineNumber( + 
HST->getLocation(), + &OpenLineInvalid); + bool CloseLineInvalid; + const unsigned CloseLine = SourceMgr.getPresumedLineNumber( + HET->getLocation(), + &CloseLineInvalid); + + if (OpenLineInvalid || CloseLineInvalid || OpenLine == CloseLine) + Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch) + << HST->getTagName() << HET->getTagName() + << HST->getSourceRange() << HET->getSourceRange(); + else { + Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch) + << HST->getTagName() << HET->getTagName() + << HST->getSourceRange(); + Diag(HET->getLocation(), diag::note_doc_html_end_tag) + << HET->getSourceRange(); + } + } + + return HET; +} + +FullComment *Sema::actOnFullComment( + ArrayRef<BlockContentComment *> Blocks) { + FullComment *FC = new (Allocator) FullComment(Blocks, ThisDeclInfo); + resolveParamCommandIndexes(FC); + return FC; +} + +void Sema::checkBlockCommandEmptyParagraph(BlockCommandComment *Command) { + if (Traits.getCommandInfo(Command->getCommandID())->IsEmptyParagraphAllowed) + return; + + ParagraphComment *Paragraph = Command->getParagraph(); + if (Paragraph->isWhitespace()) { + SourceLocation DiagLoc; + if (Command->getNumArgs() > 0) + DiagLoc = Command->getArgRange(Command->getNumArgs() - 1).getEnd(); + if (!DiagLoc.isValid()) + DiagLoc = Command->getCommandNameRange(Traits).getEnd(); + Diag(DiagLoc, diag::warn_doc_block_command_empty_paragraph) + << Command->getCommandMarker() + << Command->getCommandName(Traits) + << Command->getSourceRange(); + } +} + +void Sema::checkReturnsCommand(const BlockCommandComment *Command) { + if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand) + return; + if (isFunctionDecl()) { + if (ThisDeclInfo->ResultType->isVoidType()) { + unsigned DiagKind; + switch (ThisDeclInfo->CommentDecl->getKind()) { + default: + if (ThisDeclInfo->IsObjCMethod) + DiagKind = 3; + else + DiagKind = 0; + break; + case Decl::CXXConstructor: + DiagKind = 1; + break; + case Decl::CXXDestructor: + 
DiagKind = 2; + break; + } + Diag(Command->getLocation(), + diag::warn_doc_returns_attached_to_a_void_function) + << Command->getCommandMarker() + << Command->getCommandName(Traits) + << DiagKind + << Command->getSourceRange(); + } + return; + } + else if (isObjCPropertyDecl()) + return; + + Diag(Command->getLocation(), + diag::warn_doc_returns_not_attached_to_a_function_decl) + << Command->getCommandMarker() + << Command->getCommandName(Traits) + << Command->getSourceRange(); +} + +void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) { + const CommandInfo *Info = Traits.getCommandInfo(Command->getCommandID()); + const BlockCommandComment *PrevCommand = NULL; + if (Info->IsBriefCommand) { + if (!BriefCommand) { + BriefCommand = Command; + return; + } + PrevCommand = BriefCommand; + } else if (Info->IsHeaderfileCommand) { + if (!HeaderfileCommand) { + HeaderfileCommand = Command; + return; + } + PrevCommand = HeaderfileCommand; + } else { + // We don't want to check this command for duplicates. 
+ return; + } + StringRef CommandName = Command->getCommandName(Traits); + StringRef PrevCommandName = PrevCommand->getCommandName(Traits); + Diag(Command->getLocation(), diag::warn_doc_block_command_duplicate) + << Command->getCommandMarker() + << CommandName + << Command->getSourceRange(); + if (CommandName == PrevCommandName) + Diag(PrevCommand->getLocation(), diag::note_doc_block_command_previous) + << PrevCommand->getCommandMarker() + << PrevCommandName + << PrevCommand->getSourceRange(); + else + Diag(PrevCommand->getLocation(), + diag::note_doc_block_command_previous_alias) + << PrevCommand->getCommandMarker() + << PrevCommandName + << CommandName; +} + +void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) { + if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand) + return; + + const Decl *D = ThisDeclInfo->CommentDecl; + if (!D) + return; + + if (D->hasAttr<DeprecatedAttr>() || + D->hasAttr<AvailabilityAttr>() || + D->hasAttr<UnavailableAttr>()) + return; + + Diag(Command->getLocation(), + diag::warn_doc_deprecated_not_sync) + << Command->getSourceRange(); + + // Try to emit a fixit with a deprecation attribute. + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + // Don't emit a Fix-It for non-member function definitions. GCC does not + // accept attributes on them. 
+ const DeclContext *Ctx = FD->getDeclContext(); + if ((!Ctx || !Ctx->isRecord()) && + FD->doesThisDeclarationHaveABody()) + return; + + StringRef AttributeSpelling = "__attribute__((deprecated))"; + if (PP) { + TokenValue Tokens[] = { + tok::kw___attribute, tok::l_paren, tok::l_paren, + PP->getIdentifierInfo("deprecated"), + tok::r_paren, tok::r_paren + }; + StringRef MacroName = PP->getLastMacroWithSpelling(FD->getLocation(), + Tokens); + if (!MacroName.empty()) + AttributeSpelling = MacroName; + } + + SmallString<64> TextToInsert(" "); + TextToInsert += AttributeSpelling; + Diag(FD->getLocEnd(), + diag::note_add_deprecation_attr) + << FixItHint::CreateInsertion(FD->getLocEnd().getLocWithOffset(1), + TextToInsert); + } +} + +void Sema::resolveParamCommandIndexes(const FullComment *FC) { + if (!isFunctionDecl()) { + // We already warned that \\param commands are not attached to a function + // decl. + return; + } + + SmallVector<ParamCommandComment *, 8> UnresolvedParamCommands; + + // Comment AST nodes that correspond to \c ParamVars for which we have + // found a \\param command or NULL if no documentation was found so far. + SmallVector<ParamCommandComment *, 8> ParamVarDocs; + + ArrayRef<const ParmVarDecl *> ParamVars = getParamVars(); + ParamVarDocs.resize(ParamVars.size(), NULL); + + // First pass over all \\param commands: resolve all parameter names. + for (Comment::child_iterator I = FC->child_begin(), E = FC->child_end(); + I != E; ++I) { + ParamCommandComment *PCC = dyn_cast<ParamCommandComment>(*I); + if (!PCC || !PCC->hasParamName()) + continue; + StringRef ParamName = PCC->getParamNameAsWritten(); + + // Check that referenced parameter name is in the function decl. 
+ const unsigned ResolvedParamIndex = resolveParmVarReference(ParamName, + ParamVars); + if (ResolvedParamIndex == ParamCommandComment::VarArgParamIndex) { + PCC->setIsVarArgParam(); + continue; + } + if (ResolvedParamIndex == ParamCommandComment::InvalidParamIndex) { + UnresolvedParamCommands.push_back(PCC); + continue; + } + PCC->setParamIndex(ResolvedParamIndex); + if (ParamVarDocs[ResolvedParamIndex]) { + SourceRange ArgRange = PCC->getParamNameRange(); + Diag(ArgRange.getBegin(), diag::warn_doc_param_duplicate) + << ParamName << ArgRange; + ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex]; + Diag(PrevCommand->getLocation(), diag::note_doc_param_previous) + << PrevCommand->getParamNameRange(); + } + ParamVarDocs[ResolvedParamIndex] = PCC; + } + + // Find parameter declarations that have no corresponding \\param. + SmallVector<const ParmVarDecl *, 8> OrphanedParamDecls; + for (unsigned i = 0, e = ParamVarDocs.size(); i != e; ++i) { + if (!ParamVarDocs[i]) + OrphanedParamDecls.push_back(ParamVars[i]); + } + + // Second pass over unresolved \\param commands: do typo correction. + // Suggest corrections from a set of parameter declarations that have no + // corresponding \\param. + for (unsigned i = 0, e = UnresolvedParamCommands.size(); i != e; ++i) { + const ParamCommandComment *PCC = UnresolvedParamCommands[i]; + + SourceRange ArgRange = PCC->getParamNameRange(); + StringRef ParamName = PCC->getParamNameAsWritten(); + Diag(ArgRange.getBegin(), diag::warn_doc_param_not_found) + << ParamName << ArgRange; + + // All parameters documented -- can't suggest a correction. + if (OrphanedParamDecls.size() == 0) + continue; + + unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex; + if (OrphanedParamDecls.size() == 1) { + // If one parameter is not documented then that parameter is the only + // possible suggestion. + CorrectedParamIndex = 0; + } else { + // Do typo correction. 
+ CorrectedParamIndex = correctTypoInParmVarReference(ParamName, + OrphanedParamDecls); + } + if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) { + const ParmVarDecl *CorrectedPVD = OrphanedParamDecls[CorrectedParamIndex]; + if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier()) + Diag(ArgRange.getBegin(), diag::note_doc_param_name_suggestion) + << CorrectedII->getName() + << FixItHint::CreateReplacement(ArgRange, CorrectedII->getName()); + } + } +} + +bool Sema::isFunctionDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->getKind() == DeclInfo::FunctionKind; +} + +bool Sema::isAnyFunctionDecl() { + return isFunctionDecl() && ThisDeclInfo->CurrentDecl && + isa<FunctionDecl>(ThisDeclInfo->CurrentDecl); +} + +bool Sema::isFunctionOrMethodVariadic() { + if (!isAnyFunctionDecl() && !isObjCMethodDecl()) + return false; + if (const FunctionDecl *FD = + dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl)) + return FD->isVariadic(); + if (const ObjCMethodDecl *MD = + dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl)) + return MD->isVariadic(); + return false; +} + +bool Sema::isObjCMethodDecl() { + return isFunctionDecl() && ThisDeclInfo->CurrentDecl && + isa<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl); +} + +bool Sema::isFunctionPointerVarDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + if (ThisDeclInfo->getKind() == DeclInfo::VariableKind) { + if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDeclInfo->CurrentDecl)) { + QualType QT = VD->getType(); + return QT->isFunctionPointerType(); + } + } + return false; +} + +bool Sema::isObjCPropertyDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl->getKind() == Decl::ObjCProperty; +} + +bool Sema::isTemplateOrSpecialization() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) 
+ inspectThisDecl(); + return ThisDeclInfo->getTemplateKind() != DeclInfo::NotTemplate; +} + +bool Sema::isRecordLikeDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return isUnionDecl() || isClassOrStructDecl() + || isObjCInterfaceDecl() || isObjCProtocolDecl(); +} + +bool Sema::isUnionDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + if (const RecordDecl *RD = + dyn_cast_or_null<RecordDecl>(ThisDeclInfo->CurrentDecl)) + return RD->isUnion(); + return false; +} + +bool Sema::isClassOrStructDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl && + isa<RecordDecl>(ThisDeclInfo->CurrentDecl) && + !isUnionDecl(); +} + +bool Sema::isClassTemplateDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl && + (isa<ClassTemplateDecl>(ThisDeclInfo->CurrentDecl)); +} + +bool Sema::isFunctionTemplateDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl && + (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl)); +} + +bool Sema::isObjCInterfaceDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl && + isa<ObjCInterfaceDecl>(ThisDeclInfo->CurrentDecl); +} + +bool Sema::isObjCProtocolDecl() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->CurrentDecl && + isa<ObjCProtocolDecl>(ThisDeclInfo->CurrentDecl); +} + +ArrayRef<const ParmVarDecl *> Sema::getParamVars() { + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->ParamVars; +} + +void Sema::inspectThisDecl() { + ThisDeclInfo->fill(); +} + +unsigned Sema::resolveParmVarReference(StringRef Name, + ArrayRef<const ParmVarDecl *> 
ParamVars) { + for (unsigned i = 0, e = ParamVars.size(); i != e; ++i) { + const IdentifierInfo *II = ParamVars[i]->getIdentifier(); + if (II && II->getName() == Name) + return i; + } + if (Name == "..." && isFunctionOrMethodVariadic()) + return ParamCommandComment::VarArgParamIndex; + return ParamCommandComment::InvalidParamIndex; +} + +namespace { +class SimpleTypoCorrector { + StringRef Typo; + const unsigned MaxEditDistance; + + const NamedDecl *BestDecl; + unsigned BestEditDistance; + unsigned BestIndex; + unsigned NextIndex; + +public: + SimpleTypoCorrector(StringRef Typo) : + Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3), + BestDecl(NULL), BestEditDistance(MaxEditDistance + 1), + BestIndex(0), NextIndex(0) + { } + + void addDecl(const NamedDecl *ND); + + const NamedDecl *getBestDecl() const { + if (BestEditDistance > MaxEditDistance) + return NULL; + + return BestDecl; + } + + unsigned getBestDeclIndex() const { + assert(getBestDecl()); + return BestIndex; + } +}; + +void SimpleTypoCorrector::addDecl(const NamedDecl *ND) { + unsigned CurrIndex = NextIndex++; + + const IdentifierInfo *II = ND->getIdentifier(); + if (!II) + return; + + StringRef Name = II->getName(); + unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size()); + if (MinPossibleEditDistance > 0 && + Typo.size() / MinPossibleEditDistance < 3) + return; + + unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance); + if (EditDistance < BestEditDistance) { + BestEditDistance = EditDistance; + BestDecl = ND; + BestIndex = CurrIndex; + } +} +} // unnamed namespace + +unsigned Sema::correctTypoInParmVarReference( + StringRef Typo, + ArrayRef<const ParmVarDecl *> ParamVars) { + SimpleTypoCorrector Corrector(Typo); + for (unsigned i = 0, e = ParamVars.size(); i != e; ++i) + Corrector.addDecl(ParamVars[i]); + if (Corrector.getBestDecl()) + return Corrector.getBestDeclIndex(); + else + return ParamCommandComment::InvalidParamIndex; +} + +namespace { +bool 
ResolveTParamReferenceHelper( + StringRef Name, + const TemplateParameterList *TemplateParameters, + SmallVectorImpl<unsigned> *Position) { + for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) { + const NamedDecl *Param = TemplateParameters->getParam(i); + const IdentifierInfo *II = Param->getIdentifier(); + if (II && II->getName() == Name) { + Position->push_back(i); + return true; + } + + if (const TemplateTemplateParmDecl *TTP = + dyn_cast<TemplateTemplateParmDecl>(Param)) { + Position->push_back(i); + if (ResolveTParamReferenceHelper(Name, TTP->getTemplateParameters(), + Position)) + return true; + Position->pop_back(); + } + } + return false; +} +} // unnamed namespace + +bool Sema::resolveTParamReference( + StringRef Name, + const TemplateParameterList *TemplateParameters, + SmallVectorImpl<unsigned> *Position) { + Position->clear(); + if (!TemplateParameters) + return false; + + return ResolveTParamReferenceHelper(Name, TemplateParameters, Position); +} + +namespace { +void CorrectTypoInTParamReferenceHelper( + const TemplateParameterList *TemplateParameters, + SimpleTypoCorrector &Corrector) { + for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) { + const NamedDecl *Param = TemplateParameters->getParam(i); + Corrector.addDecl(Param); + + if (const TemplateTemplateParmDecl *TTP = + dyn_cast<TemplateTemplateParmDecl>(Param)) + CorrectTypoInTParamReferenceHelper(TTP->getTemplateParameters(), + Corrector); + } +} +} // unnamed namespace + +StringRef Sema::correctTypoInTParamReference( + StringRef Typo, + const TemplateParameterList *TemplateParameters) { + SimpleTypoCorrector Corrector(Typo); + CorrectTypoInTParamReferenceHelper(TemplateParameters, Corrector); + if (const NamedDecl *ND = Corrector.getBestDecl()) { + const IdentifierInfo *II = ND->getIdentifier(); + assert(II && "SimpleTypoCorrector should not return this decl"); + return II->getName(); + } + return StringRef(); +} + +InlineCommandComment::RenderKind 
+Sema::getInlineCommandRenderKind(StringRef Name) const { + assert(Traits.getCommandInfo(Name)->IsInlineCommand); + + return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name) + .Case("b", InlineCommandComment::RenderBold) + .Cases("c", "p", InlineCommandComment::RenderMonospaced) + .Cases("a", "e", "em", InlineCommandComment::RenderEmphasized) + .Default(InlineCommandComment::RenderNormal); +} + +} // end namespace comments +} // end namespace clang + diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp new file mode 100644 index 000000000000..6bd985851a39 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp @@ -0,0 +1,3775 @@ +//===--- Decl.cpp - Declaration AST Node Implementation -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Decl subclasses. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Decl.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTMutationListener.h" +#include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Stmt.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/IdentifierTable.h" +#include "clang/Basic/Module.h" +#include "clang/Basic/Specifiers.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/type_traits.h" +#include <algorithm> + +using namespace clang; + +Decl *clang::getPrimaryMergedDecl(Decl *D) { + return D->getASTContext().getPrimaryMergedDecl(D); +} + +//===----------------------------------------------------------------------===// +// NamedDecl Implementation +//===----------------------------------------------------------------------===// + +// Visibility rules aren't rigorously externally specified, but here +// are the basic principles behind what we implement: +// +// 1. An explicit visibility attribute is generally a direct expression +// of the user's intent and should be honored. Only the innermost +// visibility attribute applies. If no visibility attribute applies, +// global visibility settings are considered. +// +// 2. There is one caveat to the above: on or in a template pattern, +// an explicit visibility attribute is just a default rule, and +// visibility can be decreased by the visibility of template +// arguments. But this, too, has an exception: an attribute on an +// explicit specialization or instantiation causes all the visibility +// restrictions of the template arguments to be ignored. +// +// 3. 
A variable that does not otherwise have explicit visibility can +// be restricted by the visibility of its type. +// +// 4. A visibility restriction is explicit if it comes from an +// attribute (or something like it), not a global visibility setting. +// When emitting a reference to an external symbol, visibility +// restrictions are ignored unless they are explicit. +// +// 5. When computing the visibility of a non-type, including a +// non-type member of a class, only non-type visibility restrictions +// are considered: the 'visibility' attribute, global value-visibility +// settings, and a few special cases like __private_extern. +// +// 6. When computing the visibility of a type, including a type member +// of a class, only type visibility restrictions are considered: +// the 'type_visibility' attribute and global type-visibility settings. +// However, a 'visibility' attribute counts as a 'type_visibility' +// attribute on any declaration that only has the former. +// +// The visibility of a "secondary" entity, like a template argument, +// is computed using the kind of that entity, not the kind of the +// primary entity for which we are computing visibility. For example, +// the visibility of a specialization of either of these templates: +// template <class T, bool (&compare)(T, X)> bool has_match(list<T>, X); +// template <class T, bool (&compare)(T, X)> class matcher; +// is restricted according to the type visibility of the argument 'T', +// the type visibility of 'bool(&)(T,X)', and the value visibility of +// the argument function 'compare'. That 'has_match' is a value +// and 'matcher' is a type only matters when looking for attributes +// and settings from the immediate context. + +const unsigned IgnoreExplicitVisibilityBit = 2; +const unsigned IgnoreAllVisibilityBit = 4; + +/// Kinds of LV computation. The linkage side of the computation is +/// always the same, but different things can change how visibility is +/// computed. 
+enum LVComputationKind { + /// Do an LV computation for, ultimately, a type. + /// Visibility may be restricted by type visibility settings and + /// the visibility of template arguments. + LVForType = NamedDecl::VisibilityForType, + + /// Do an LV computation for, ultimately, a non-type declaration. + /// Visibility may be restricted by value visibility settings and + /// the visibility of template arguments. + LVForValue = NamedDecl::VisibilityForValue, + + /// Do an LV computation for, ultimately, a type that already has + /// some sort of explicit visibility. Visibility may only be + /// restricted by the visibility of template arguments. + LVForExplicitType = (LVForType | IgnoreExplicitVisibilityBit), + + /// Do an LV computation for, ultimately, a non-type declaration + /// that already has some sort of explicit visibility. Visibility + /// may only be restricted by the visibility of template arguments. + LVForExplicitValue = (LVForValue | IgnoreExplicitVisibilityBit), + + /// Do an LV computation when we only care about the linkage. + LVForLinkageOnly = + LVForValue | IgnoreExplicitVisibilityBit | IgnoreAllVisibilityBit +}; + +/// Does this computation kind permit us to consider additional +/// visibility settings from attributes and the like? +static bool hasExplicitVisibilityAlready(LVComputationKind computation) { + return ((unsigned(computation) & IgnoreExplicitVisibilityBit) != 0); +} + +/// Given an LVComputationKind, return one of the same type/value sort +/// that records that it already has explicit visibility. 
+static LVComputationKind +withExplicitVisibilityAlready(LVComputationKind oldKind) { + LVComputationKind newKind = + static_cast<LVComputationKind>(unsigned(oldKind) | + IgnoreExplicitVisibilityBit); + assert(oldKind != LVForType || newKind == LVForExplicitType); + assert(oldKind != LVForValue || newKind == LVForExplicitValue); + assert(oldKind != LVForExplicitType || newKind == LVForExplicitType); + assert(oldKind != LVForExplicitValue || newKind == LVForExplicitValue); + return newKind; +} + +static Optional<Visibility> getExplicitVisibility(const NamedDecl *D, + LVComputationKind kind) { + assert(!hasExplicitVisibilityAlready(kind) && + "asking for explicit visibility when we shouldn't be"); + return D->getExplicitVisibility((NamedDecl::ExplicitVisibilityKind) kind); +} + +/// Is the given declaration a "type" or a "value" for the purposes of +/// visibility computation? +static bool usesTypeVisibility(const NamedDecl *D) { + return isa<TypeDecl>(D) || + isa<ClassTemplateDecl>(D) || + isa<ObjCInterfaceDecl>(D); +} + +/// Does the given declaration have member specialization information, +/// and if so, is it an explicit specialization? +template <class T> static typename +llvm::enable_if_c<!llvm::is_base_of<RedeclarableTemplateDecl, T>::value, + bool>::type +isExplicitMemberSpecialization(const T *D) { + if (const MemberSpecializationInfo *member = + D->getMemberSpecializationInfo()) { + return member->isExplicitSpecialization(); + } + return false; +} + +/// For templates, this question is easier: a member template can't be +/// explicitly instantiated, so there's a single bit indicating whether +/// or not this is an explicit member specialization. +static bool isExplicitMemberSpecialization(const RedeclarableTemplateDecl *D) { + return D->isMemberSpecialization(); +} + +/// Given a visibility attribute, return the explicit visibility +/// associated with it. 
+template <class T> +static Visibility getVisibilityFromAttr(const T *attr) { + switch (attr->getVisibility()) { + case T::Default: + return DefaultVisibility; + case T::Hidden: + return HiddenVisibility; + case T::Protected: + return ProtectedVisibility; + } + llvm_unreachable("bad visibility kind"); +} + +/// Return the explicit visibility of the given declaration. +static Optional<Visibility> getVisibilityOf(const NamedDecl *D, + NamedDecl::ExplicitVisibilityKind kind) { + // If we're ultimately computing the visibility of a type, look for + // a 'type_visibility' attribute before looking for 'visibility'. + if (kind == NamedDecl::VisibilityForType) { + if (const TypeVisibilityAttr *A = D->getAttr<TypeVisibilityAttr>()) { + return getVisibilityFromAttr(A); + } + } + + // If this declaration has an explicit visibility attribute, use it. + if (const VisibilityAttr *A = D->getAttr<VisibilityAttr>()) { + return getVisibilityFromAttr(A); + } + + // If we're on Mac OS X, an 'availability' for Mac OS X attribute + // implies visibility(default). + if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) { + for (specific_attr_iterator<AvailabilityAttr> + A = D->specific_attr_begin<AvailabilityAttr>(), + AEnd = D->specific_attr_end<AvailabilityAttr>(); + A != AEnd; ++A) + if ((*A)->getPlatform()->getName().equals("macosx")) + return DefaultVisibility; + } + + return None; +} + +static LinkageInfo +getLVForType(const Type &T, LVComputationKind computation) { + if (computation == LVForLinkageOnly) + return LinkageInfo(T.getLinkage(), DefaultVisibility, true); + return T.getLinkageAndVisibility(); +} + +/// \brief Get the most restrictive linkage for the types in the given +/// template parameter list. For visibility purposes, template +/// parameters are part of the signature of a template. 
+static LinkageInfo +getLVForTemplateParameterList(const TemplateParameterList *params, + LVComputationKind computation) { + LinkageInfo LV; + for (TemplateParameterList::const_iterator P = params->begin(), + PEnd = params->end(); + P != PEnd; ++P) { + + // Template type parameters are the most common and never + // contribute to visibility, pack or not. + if (isa<TemplateTypeParmDecl>(*P)) + continue; + + // Non-type template parameters can be restricted by the value type, e.g. + // template <enum X> class A { ... }; + // We have to be careful here, though, because we can be dealing with + // dependent types. + if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { + // Handle the non-pack case first. + if (!NTTP->isExpandedParameterPack()) { + if (!NTTP->getType()->isDependentType()) { + LV.merge(getLVForType(*NTTP->getType(), computation)); + } + continue; + } + + // Look at all the types in an expanded pack. + for (unsigned i = 0, n = NTTP->getNumExpansionTypes(); i != n; ++i) { + QualType type = NTTP->getExpansionType(i); + if (!type->isDependentType()) + LV.merge(type->getLinkageAndVisibility()); + } + continue; + } + + // Template template parameters can be restricted by their + // template parameters, recursively. + TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P); + + // Handle the non-pack case first. + if (!TTP->isExpandedParameterPack()) { + LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters(), + computation)); + continue; + } + + // Look at all expansions in an expanded pack. + for (unsigned i = 0, n = TTP->getNumExpansionTemplateParameters(); + i != n; ++i) { + LV.merge(getLVForTemplateParameterList( + TTP->getExpansionTemplateParameters(i), computation)); + } + } + + return LV; +} + +/// getLVForDecl - Get the linkage and visibility for the given declaration. 
static LinkageInfo getLVForDecl(const NamedDecl *D,
                                LVComputationKind computation);

/// Walk up from D's decl context and return the outermost enclosing
/// FunctionDecl or BlockDecl, or null if D is not lexically nested
/// inside either.
static const Decl *getOutermostFuncOrBlockContext(const Decl *D) {
  const Decl *Ret = NULL;
  const DeclContext *DC = D->getDeclContext();
  while (DC->getDeclKind() != Decl::TranslationUnit) {
    if (isa<FunctionDecl>(DC) || isa<BlockDecl>(DC))
      Ret = cast<Decl>(DC);
    DC = DC->getParent();
  }
  return Ret;
}

/// \brief Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
///
/// The visibility of template arguments is always honored in the same
/// way; the computation kind is threaded through so that a
/// linkage-only query can skip the visibility analysis for types.
static LinkageInfo
getLVForTemplateArgumentList(ArrayRef<TemplateArgument> args,
                             LVComputationKind computation) {
  LinkageInfo LV;

  for (unsigned i = 0, e = args.size(); i != e; ++i) {
    const TemplateArgument &arg = args[i];
    switch (arg.getKind()) {
    // These argument kinds never carry linkage or visibility.
    case TemplateArgument::Null:
    case TemplateArgument::Integral:
    case TemplateArgument::Expression:
      continue;

    case TemplateArgument::Type:
      LV.merge(getLVForType(*arg.getAsType(), computation));
      continue;

    case TemplateArgument::Declaration:
      if (NamedDecl *ND = dyn_cast<NamedDecl>(arg.getAsDecl())) {
        assert(!usesTypeVisibility(ND));
        LV.merge(getLVForDecl(ND, computation));
      }
      continue;

    case TemplateArgument::NullPtr:
      LV.merge(arg.getNullPtrType()->getLinkageAndVisibility());
      continue;

    case TemplateArgument::Template:
    case TemplateArgument::TemplateExpansion:
      if (TemplateDecl *Template
            = arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl())
        LV.merge(getLVForDecl(Template, computation));
      continue;

    // Packs are flattened recursively.
    case TemplateArgument::Pack:
      LV.merge(getLVForTemplateArgumentList(arg.getPackAsArray(), computation));
      continue;
    }
    llvm_unreachable("bad template argument kind");
  }

  return LV;
}

/// Convenience overload taking a TemplateArgumentList.
static LinkageInfo
getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
                             LVComputationKind computation) {
  return getLVForTemplateArgumentList(TArgs.asArray(), computation);
}

static bool shouldConsiderTemplateVisibility(const FunctionDecl *fn,
                        const FunctionTemplateSpecializationInfo *specInfo) {
  // Include visibility from the template parameters and arguments
  // only if this is not an explicit instantiation or specialization
  // with direct explicit visibility.  (Implicit instantiations won't
  // have a direct attribute.)
  if (!specInfo->isExplicitInstantiationOrSpecialization())
    return true;

  return !fn->hasAttr<VisibilityAttr>();
}

/// Merge in template-related linkage and visibility for the given
/// function template specialization.
///
/// \param LV the linkage and visibility computed so far for \p fn;
///        updated in place with the contribution from the template
///        parameters and arguments.
static void
mergeTemplateLV(LinkageInfo &LV, const FunctionDecl *fn,
                const FunctionTemplateSpecializationInfo *specInfo,
                LVComputationKind computation) {
  bool considerVisibility =
    shouldConsiderTemplateVisibility(fn, specInfo);

  // Merge information from the template parameters.
  FunctionTemplateDecl *temp = specInfo->getTemplate();
  LinkageInfo tempLV =
    getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
  LV.mergeMaybeWithVisibility(tempLV, considerVisibility);

  // Merge information from the template arguments.
  const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
  LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
  LV.mergeMaybeWithVisibility(argsLV, considerVisibility);
}

/// Does the given declaration have a direct visibility attribute
/// that would match the given rules?
static bool hasDirectVisibilityAttribute(const NamedDecl *D,
                                         LVComputationKind computation) {
  switch (computation) {
  case LVForType:
  case LVForExplicitType:
    if (D->hasAttr<TypeVisibilityAttr>())
      return true;
    // fallthrough: a plain 'visibility' attribute also satisfies a
    // type-visibility query.
  case LVForValue:
  case LVForExplicitValue:
    if (D->hasAttr<VisibilityAttr>())
      return true;
    return false;
  case LVForLinkageOnly:
    return false;
  }
  llvm_unreachable("bad visibility computation kind");
}

/// Should we consider visibility associated with the template
/// arguments and parameters of the given class template specialization?
static bool shouldConsiderTemplateVisibility(
                                 const ClassTemplateSpecializationDecl *spec,
                                 LVComputationKind computation) {
  // Include visibility from the template parameters and arguments
  // only if this is not an explicit instantiation or specialization
  // with direct explicit visibility (and note that implicit
  // instantiations won't have a direct attribute).
  //
  // Furthermore, we want to ignore template parameters and arguments
  // for an explicit specialization when computing the visibility of a
  // member thereof with explicit visibility.
  //
  // This is a bit complex; let's unpack it.
  //
  // An explicit class specialization is an independent, top-level
  // declaration.  As such, if it or any of its members has an
  // explicit visibility attribute, that must directly express the
  // user's intent, and we should honor it.  The same logic applies to
  // an explicit instantiation of a member of such a thing.

  // Fast path: if this is not an explicit instantiation or
  // specialization, we always want to consider template-related
  // visibility restrictions.
  if (!spec->isExplicitInstantiationOrSpecialization())
    return true;

  // This is the 'member thereof' check.
  if (spec->isExplicitSpecialization() &&
      hasExplicitVisibilityAlready(computation))
    return false;

  return !hasDirectVisibilityAttribute(spec, computation);
}

/// Merge in template-related linkage and visibility for the given
/// class template specialization.
static void mergeTemplateLV(LinkageInfo &LV,
                            const ClassTemplateSpecializationDecl *spec,
                            LVComputationKind computation) {
  bool considerVisibility = shouldConsiderTemplateVisibility(spec, computation);

  // Merge information from the template parameters, but ignore
  // visibility if we're only considering template arguments.

  ClassTemplateDecl *temp = spec->getSpecializedTemplate();
  LinkageInfo tempLV =
    getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
  LV.mergeMaybeWithVisibility(tempLV,
           considerVisibility && !hasExplicitVisibilityAlready(computation));

  // Merge information from the template arguments.  We ignore
  // template-argument visibility if we've got an explicit
  // instantiation with a visibility attribute.  Note that linkage from
  // the arguments is merged unconditionally; only their visibility is
  // gated on considerVisibility.
  const TemplateArgumentList &templateArgs = spec->getTemplateArgs();
  LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
  if (considerVisibility)
    LV.mergeVisibility(argsLV);
  LV.mergeExternalVisibility(argsLV);
}

/// Should -fvisibility-inlines-hidden be applied to this declaration?
static bool useInlineVisibilityHidden(const NamedDecl *D) {
  // FIXME: we should warn if -fvisibility-inlines-hidden is used with c.
  const LangOptions &Opts = D->getASTContext().getLangOpts();
  // The option is C++-only.
  if (!Opts.CPlusPlus || !Opts.InlineVisibilityHidden)
    return false;

  // Only functions are affected.
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return false;

  TemplateSpecializationKind TSK = TSK_Undeclared;
  if (FunctionTemplateSpecializationInfo *spec
        = FD->getTemplateSpecializationInfo()) {
    TSK = spec->getTemplateSpecializationKind();
  } else if (MemberSpecializationInfo *MSI =
               FD->getMemberSpecializationInfo()) {
    TSK = MSI->getTemplateSpecializationKind();
  }

  const FunctionDecl *Def = 0;
  // InlineVisibilityHidden only applies to definitions, and
  // isInlined() only gives meaningful answers on definitions
  // anyway.
  return TSK != TSK_ExplicitInstantiationDeclaration &&
         TSK != TSK_ExplicitInstantiationDefinition &&
         FD->hasBody(Def) && Def->isInlined() && !Def->hasAttr<GNUInlineAttr>();
}

/// Is the first declaration of this entity declared in an
/// extern "C" context?  (Templated over VarDecl/FunctionDecl.)
template <typename T> static bool isFirstInExternCContext(T *D) {
  const T *First = D->getFirstDecl();
  return First->isInExternCContext();
}

/// Is this declaration inside a brace-less single-declaration
/// extern "C" specification, e.g. 'extern "C" int x;'?
static bool isSingleLineExternC(const Decl &D) {
  if (const LinkageSpecDecl *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
    if (SD->getLanguage() == LinkageSpecDecl::lang_c && !SD->hasBraces())
      return true;
  return false;
}

/// Compute linkage and visibility for a declaration with namespace
/// (file) scope, per C++ [basic.link] and C99 6.2.2.
static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
                                              LVComputationKind computation) {
  assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
         "Not a name having namespace scope");
  ASTContext &Context = D->getASTContext();

  // C++ [basic.link]p3:
  //   A name having namespace scope (3.3.6) has internal linkage if it
  //   is the name of
  //     - an object, reference, function or function template that is
  //       explicitly declared static; or,
  // (This bullet corresponds to C99 6.2.2p3.)
  if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
    // Explicitly declared static.
    if (Var->getStorageClass() == SC_Static)
      return LinkageInfo::internal();

    // - a non-volatile object or reference that is explicitly declared const
    //   or constexpr and neither explicitly declared extern nor previously
    //   declared to have external linkage; or (there is no equivalent in C99)
    if (Context.getLangOpts().CPlusPlus &&
        Var->getType().isConstQualified() &&
        !Var->getType().isVolatileQualified()) {
      // A redeclaration inherits the linkage already computed for the
      // previous declaration.
      const VarDecl *PrevVar = Var->getPreviousDecl();
      if (PrevVar)
        return getLVForDecl(PrevVar, computation);

      if (Var->getStorageClass() != SC_Extern &&
          Var->getStorageClass() != SC_PrivateExtern &&
          !isSingleLineExternC(*Var))
        return LinkageInfo::internal();
    }

    for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar;
         PrevVar = PrevVar->getPreviousDecl()) {
      // A storage-class-less redeclaration inherits __private_extern__
      // from an earlier declaration.
      if (PrevVar->getStorageClass() == SC_PrivateExtern &&
          Var->getStorageClass() == SC_None)
        return PrevVar->getLinkageAndVisibility();
      // Explicitly declared static.
      if (PrevVar->getStorageClass() == SC_Static)
        return LinkageInfo::internal();
    }
  } else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
    // C++ [temp]p4:
    //   A non-member function template can have internal linkage; any
    //   other template name shall have external linkage.
    const FunctionDecl *Function = 0;
    if (const FunctionTemplateDecl *FunTmpl
                                      = dyn_cast<FunctionTemplateDecl>(D))
      Function = FunTmpl->getTemplatedDecl();
    else
      Function = cast<FunctionDecl>(D);

    // Explicitly declared static.
    if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
      return LinkageInfo(InternalLinkage, DefaultVisibility, false);
  }
  //     - a data member of an anonymous union.
  assert(!isa<IndirectFieldDecl>(D) && "Didn't expect an IndirectFieldDecl!");
  assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");

  // Entities in an anonymous namespace get unique-external linkage,
  // except when they are (first declared) extern "C".
  if (D->isInAnonymousNamespace()) {
    const VarDecl *Var = dyn_cast<VarDecl>(D);
    const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
    if ((!Var || !isFirstInExternCContext(Var)) &&
        (!Func || !isFirstInExternCContext(Func)))
      return LinkageInfo::uniqueExternal();
  }

  // Set up the defaults.

  // C99 6.2.2p5:
  //   If the declaration of an identifier for an object has file
  //   scope and no storage-class specifier, its linkage is
  //   external.
  LinkageInfo LV;

  if (!hasExplicitVisibilityAlready(computation)) {
    if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) {
      LV.mergeVisibility(*Vis, true);
    } else {
      // If we're declared in a namespace with a visibility attribute,
      // use that namespace's visibility, and it still counts as explicit.
      for (const DeclContext *DC = D->getDeclContext();
           !isa<TranslationUnitDecl>(DC);
           DC = DC->getParent()) {
        const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
        if (!ND) continue;
        if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) {
          LV.mergeVisibility(*Vis, true);
          break;
        }
      }
    }

    // Add in global settings if the above didn't give us direct visibility.
    if (!LV.isVisibilityExplicit()) {
      // Use global type/value visibility as appropriate.
      Visibility globalVisibility;
      if (computation == LVForValue) {
        globalVisibility = Context.getLangOpts().getValueVisibilityMode();
      } else {
        assert(computation == LVForType);
        globalVisibility = Context.getLangOpts().getTypeVisibilityMode();
      }
      LV.mergeVisibility(globalVisibility, /*explicit*/ false);

      // If we're paying attention to global visibility, apply
      // -finline-visibility-hidden if this is an inline method.
      if (useInlineVisibilityHidden(D))
        LV.mergeVisibility(HiddenVisibility, true);
    }
  }

  // C++ [basic.link]p4:

  //   A name having namespace scope has external linkage if it is the
  //   name of
  //
  //     - an object or reference, unless it has internal linkage; or
  if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
    // GCC applies the following optimization to variables and static
    // data members, but not to functions:
    //
    // Modify the variable's LV by the LV of its type unless this is
    // C or extern "C".  This follows from [basic.link]p9:
    //   A type without linkage shall not be used as the type of a
    //   variable or function with external linkage unless
    //    - the entity has C language linkage, or
    //    - the entity is declared within an unnamed namespace, or
    //    - the entity is not used or is defined in the same
    //      translation unit.
    // and [basic.link]p10:
    //   ...the types specified by all declarations referring to a
    //   given variable or function shall be identical...
    // C does not have an equivalent rule.
    //
    // Ignore this if we've got an explicit attribute;  the user
    // probably knows what they're doing.
    //
    // Note that we don't want to make the variable non-external
    // because of this, but unique-external linkage suits us.
    if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var)) {
      LinkageInfo TypeLV = getLVForType(*Var->getType(), computation);
      if (TypeLV.getLinkage() != ExternalLinkage)
        return LinkageInfo::uniqueExternal();
      if (!LV.isVisibilityExplicit())
        LV.mergeVisibility(TypeLV);
    }

    if (Var->getStorageClass() == SC_PrivateExtern)
      LV.mergeVisibility(HiddenVisibility, true);

    // Note that Sema::MergeVarDecl already takes care of implementing
    // C99 6.2.2p4 and propagating the visibility attribute, so we don't have
    // to do it here.

  //     - a function, unless it has internal linkage; or
  } else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
    // In theory, we can modify the function's LV by the LV of its
    // type unless it has C linkage (see comment above about variables
    // for justification).  In practice, GCC doesn't do this, so it's
    // just too painful to make work.

    if (Function->getStorageClass() == SC_PrivateExtern)
      LV.mergeVisibility(HiddenVisibility, true);

    // Note that Sema::MergeCompatibleFunctionDecls already takes care of
    // merging storage classes and visibility attributes, so we don't have to
    // look at previous decls in here.

    // In C++, then if the type of the function uses a type with
    // unique-external linkage, it's not legally usable from outside
    // this translation unit.  However, we should use the C linkage
    // rules instead for extern "C" declarations.
    if (Context.getLangOpts().CPlusPlus &&
        !Function->isInExternCContext()) {
      // Only look at the type-as-written. If this function has an auto-deduced
      // return type, we can't compute the linkage of that type because it could
      // require looking at the linkage of this function, and we don't need this
      // for correctness because the type is not part of the function's
      // signature.
      // FIXME: This is a hack. We should be able to solve this circularity and
      // the one in getLVForClassMember for Functions some other way.
      QualType TypeAsWritten = Function->getType();
      if (TypeSourceInfo *TSI = Function->getTypeSourceInfo())
        TypeAsWritten = TSI->getType();
      if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
        return LinkageInfo::uniqueExternal();
    }

    // Consider LV from the template and the template arguments.
    // We're at file scope, so we do not need to worry about nested
    // specializations.
    if (FunctionTemplateSpecializationInfo *specInfo
                             = Function->getTemplateSpecializationInfo()) {
      mergeTemplateLV(LV, Function, specInfo, computation);
    }

  //     - a named class (Clause 9), or an unnamed class defined in a
  //       typedef declaration in which the class has the typedef name
  //       for linkage purposes (7.1.3); or
  //     - a named enumeration (7.2), or an unnamed enumeration
  //       defined in a typedef declaration in which the enumeration
  //       has the typedef name for linkage purposes (7.1.3); or
  } else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
    // Unnamed tags have no linkage.
    if (!Tag->hasNameForLinkage())
      return LinkageInfo::none();

    // If this is a class template specialization, consider the
    // linkage of the template and template arguments.  We're at file
    // scope, so we do not need to worry about nested specializations.
    if (const ClassTemplateSpecializationDecl *spec
          = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
      mergeTemplateLV(LV, spec, computation);
    }

  //     - an enumerator belonging to an enumeration with external linkage;
  } else if (isa<EnumConstantDecl>(D)) {
    // An enumerator's linkage comes from its enclosing enumeration.
    LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()),
                                      computation);
    if (!isExternalFormalLinkage(EnumLV.getLinkage()))
      return LinkageInfo::none();
    LV.merge(EnumLV);

  //     - a template, unless it is a function template that has
  //       internal linkage (Clause 14);
  } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
    bool considerVisibility = !hasExplicitVisibilityAlready(computation);
    LinkageInfo tempLV =
      getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
    LV.mergeMaybeWithVisibility(tempLV, considerVisibility);

  //     - a namespace (7.3), unless it is declared within an unnamed
  //       namespace.
  } else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
    return LV;

  // By extension, we assign external linkage to Objective-C
  // interfaces.
  } else if (isa<ObjCInterfaceDecl>(D)) {
    // fallout

  // Everything not covered here has no linkage.
  } else {
    return LinkageInfo::none();
  }

  // If we ended up with non-external linkage, visibility should
  // always be default.
  if (LV.getLinkage() != ExternalLinkage)
    return LinkageInfo(LV.getLinkage(), DefaultVisibility, false);

  return LV;
}

/// Compute linkage and visibility for a class member, per
/// C++ [basic.link]p5.
static LinkageInfo getLVForClassMember(const NamedDecl *D,
                                       LVComputationKind computation) {
  // Only certain class members have linkage.  Note that fields don't
  // really have linkage, but it's convenient to say they do for the
  // purposes of calculating linkage of pointer-to-data-member
  // template arguments.
  if (!(isa<CXXMethodDecl>(D) ||
        isa<VarDecl>(D) ||
        isa<FieldDecl>(D) ||
        isa<IndirectFieldDecl>(D) ||
        isa<TagDecl>(D)))
    return LinkageInfo::none();

  LinkageInfo LV;

  // If we have an explicit visibility attribute, merge that in.
  if (!hasExplicitVisibilityAlready(computation)) {
    if (Optional<Visibility> Vis = getExplicitVisibility(D, computation))
      LV.mergeVisibility(*Vis, true);
    // If we're paying attention to global visibility, apply
    // -finline-visibility-hidden if this is an inline method.
    //
    // Note that we do this before merging information about
    // the class visibility.
    if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
      LV.mergeVisibility(HiddenVisibility, true);
  }

  // If this class member has an explicit visibility attribute, the only
  // thing that can change its visibility is the template arguments, so
  // only look for them when processing the class.
  LVComputationKind classComputation = computation;
  if (LV.isVisibilityExplicit())
    classComputation = withExplicitVisibilityAlready(computation);

  LinkageInfo classLV =
    getLVForDecl(cast<RecordDecl>(D->getDeclContext()), classComputation);
  // If the class already has unique-external linkage, we can't improve.
  if (classLV.getLinkage() == UniqueExternalLinkage)
    return LinkageInfo::uniqueExternal();

  if (!isExternallyVisible(classLV.getLinkage()))
    return LinkageInfo::none();


  // Otherwise, don't merge in classLV yet, because in certain cases
  // we need to completely ignore the visibility from it.

  // Specifically, if this decl exists and has an explicit attribute.
  const NamedDecl *explicitSpecSuppressor = 0;

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
    // If the type of the function uses a type with unique-external
    // linkage, it's not legally usable from outside this translation unit.
    // But only look at the type-as-written. If this function has an
    // auto-deduced return type, we can't compute the linkage of that type
    // because it could require looking at the linkage of this function, and
    // we don't need this for correctness because the type is not part of the
    // function's signature.
    // FIXME: This is a hack. We should be able to solve this circularity and
    // the one in getLVForNamespaceScopeDecl for Functions some other way.
    {
      QualType TypeAsWritten = MD->getType();
      if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
        TypeAsWritten = TSI->getType();
      if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
        return LinkageInfo::uniqueExternal();
    }
    // If this is a method template specialization, use the linkage for
    // the template parameters and arguments.
    if (FunctionTemplateSpecializationInfo *spec
          = MD->getTemplateSpecializationInfo()) {
      mergeTemplateLV(LV, MD, spec, computation);
      if (spec->isExplicitSpecialization()) {
        explicitSpecSuppressor = MD;
      } else if (isExplicitMemberSpecialization(spec->getTemplate())) {
        explicitSpecSuppressor = spec->getTemplate()->getTemplatedDecl();
      }
    } else if (isExplicitMemberSpecialization(MD)) {
      explicitSpecSuppressor = MD;
    }

  } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
    // Member classes: merge template LV for specializations, and track
    // the declaration whose explicit attribute may suppress class
    // visibility below.
    if (const ClassTemplateSpecializationDecl *spec
          = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
      mergeTemplateLV(LV, spec, computation);
      if (spec->isExplicitSpecialization()) {
        explicitSpecSuppressor = spec;
      } else {
        const ClassTemplateDecl *temp = spec->getSpecializedTemplate();
        if (isExplicitMemberSpecialization(temp)) {
          explicitSpecSuppressor = temp->getTemplatedDecl();
        }
      }
    } else if (isExplicitMemberSpecialization(RD)) {
      explicitSpecSuppressor = RD;
    }

  // Static data members.
  } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // Modify the variable's linkage by its type, but ignore the
    // type's visibility unless it's a definition.
    LinkageInfo typeLV = getLVForType(*VD->getType(), computation);
    if (!LV.isVisibilityExplicit() && !classLV.isVisibilityExplicit())
      LV.mergeVisibility(typeLV);
    LV.mergeExternalVisibility(typeLV);

    if (isExplicitMemberSpecialization(VD)) {
      explicitSpecSuppressor = VD;
    }

  // Template members.
  } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
    bool considerVisibility =
      (!LV.isVisibilityExplicit() &&
       !classLV.isVisibilityExplicit() &&
       !hasExplicitVisibilityAlready(computation));
    LinkageInfo tempLV =
      getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
    LV.mergeMaybeWithVisibility(tempLV, considerVisibility);

    if (const RedeclarableTemplateDecl *redeclTemp =
          dyn_cast<RedeclarableTemplateDecl>(temp)) {
      if (isExplicitMemberSpecialization(redeclTemp)) {
        explicitSpecSuppressor = temp->getTemplatedDecl();
      }
    }
  }

  // We should never be looking for an attribute directly on a template.
  assert(!explicitSpecSuppressor || !isa<TemplateDecl>(explicitSpecSuppressor));

  // If this member is an explicit member specialization, and it has
  // an explicit attribute, ignore visibility from the parent.
  bool considerClassVisibility = true;
  if (explicitSpecSuppressor &&
      // optimization: hasDVA() is true only with explicit visibility.
      LV.isVisibilityExplicit() &&
      classLV.getVisibility() != DefaultVisibility &&
      hasDirectVisibilityAttribute(explicitSpecSuppressor, computation)) {
    considerClassVisibility = false;
  }

  // Finally, merge in information from the class.
  LV.mergeMaybeWithVisibility(classLV, considerClassVisibility);
  return LV;
}

void NamedDecl::anchor() { }

static LinkageInfo computeLVForDecl(const NamedDecl *D,
                                    LVComputationKind computation);

// Check the cached linkage, if any, against a fresh linkage-only
// computation; used to assert cache consistency.
bool NamedDecl::isLinkageValid() const {
  if (!hasCachedLinkage())
    return true;

  return computeLVForDecl(this, LVForLinkageOnly).getLinkage() ==
    getCachedLinkage();
}

Linkage NamedDecl::getLinkageInternal() const {
  // We don't care about visibility here, so ask for the cheapest
  // possible visibility analysis.
  return getLVForDecl(this, LVForLinkageOnly).getLinkage();
}

LinkageInfo NamedDecl::getLinkageAndVisibility() const {
  // Pick the value- or type-visibility rules depending on the kind of
  // entity this is.
  LVComputationKind computation =
    (usesTypeVisibility(this) ?
       LVForType : LVForValue);
  return getLVForDecl(this, computation);
}

/// Find an explicit visibility for \p ND, looking through the places
/// an attribute may live (the decl itself, instantiation patterns,
/// the most recent redeclaration, templated decls).
///
/// \param IsMostRecent true when ND is already the most recent
///        redeclaration, which stops the recursion below.
static Optional<Visibility>
getExplicitVisibilityAux(const NamedDecl *ND,
                         NamedDecl::ExplicitVisibilityKind kind,
                         bool IsMostRecent) {
  assert(!IsMostRecent || ND == ND->getMostRecentDecl());

  // Check the declaration itself first.
  if (Optional<Visibility> V = getVisibilityOf(ND, kind))
    return V;

  // If this is a member class of a specialization of a class template
  // and the corresponding decl has explicit visibility, use that.
  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND)) {
    CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
    if (InstantiatedFrom)
      return getVisibilityOf(InstantiatedFrom, kind);
  }

  // If there wasn't explicit visibility there, and this is a
  // specialization of a class template, check for visibility
  // on the pattern.
  if (const ClassTemplateSpecializationDecl *spec
        = dyn_cast<ClassTemplateSpecializationDecl>(ND))
    return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl(),
                           kind);

  // Use the most recent declaration.
  if (!IsMostRecent && !isa<NamespaceDecl>(ND)) {
    const NamedDecl *MostRecent = ND->getMostRecentDecl();
    if (MostRecent != ND)
      return getExplicitVisibilityAux(MostRecent, kind, true);
  }

  if (const VarDecl *Var = dyn_cast<VarDecl>(ND)) {
    // A static data member inherits visibility from the member it was
    // instantiated from.
    if (Var->isStaticDataMember()) {
      VarDecl *InstantiatedFrom = Var->getInstantiatedFromStaticDataMember();
      if (InstantiatedFrom)
        return getVisibilityOf(InstantiatedFrom, kind);
    }

    return None;
  }
  // Also handle function template specializations.
  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND)) {
    // If the function is a specialization of a template with an
    // explicit visibility attribute, use that.
    if (FunctionTemplateSpecializationInfo *templateInfo
          = fn->getTemplateSpecializationInfo())
      return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl(),
                             kind);

    // If the function is a member of a specialization of a class template
    // and the corresponding decl has explicit visibility, use that.
    FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
    if (InstantiatedFrom)
      return getVisibilityOf(InstantiatedFrom, kind);

    return None;
  }

  // The visibility of a template is stored in the templated decl.
  if (const TemplateDecl *TD = dyn_cast<TemplateDecl>(ND))
    return getVisibilityOf(TD->getTemplatedDecl(), kind);

  return None;
}

Optional<Visibility>
NamedDecl::getExplicitVisibility(ExplicitVisibilityKind kind) const {
  return getExplicitVisibilityAux(this, kind, false);
}

/// Compute linkage and visibility for a closure type (lambda or
/// block), which is determined by its owning context or declaration.
static LinkageInfo getLVForClosure(const DeclContext *DC, Decl *ContextDecl,
                                   LVComputationKind computation) {
  // This lambda has its linkage/visibility determined by its owner.
  if (ContextDecl) {
    if (isa<ParmVarDecl>(ContextDecl))
      DC = ContextDecl->getDeclContext()->getRedeclContext();
    else
      return getLVForDecl(cast<NamedDecl>(ContextDecl), computation);
  }

  if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
    return getLVForDecl(ND, computation);

  return LinkageInfo::external();
}

/// Compute linkage and visibility for a declaration at block scope,
/// per C++ [basic.link]p6.
static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
                                     LVComputationKind computation) {
  if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
    if (Function->isInAnonymousNamespace() &&
        !Function->isInExternCContext())
      return LinkageInfo::uniqueExternal();

    // This is a "void f();" which got merged with a file static.
    if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
      return LinkageInfo::internal();

    LinkageInfo LV;
    if (!hasExplicitVisibilityAlready(computation)) {
      if (Optional<Visibility> Vis =
              getExplicitVisibility(Function, computation))
        LV.mergeVisibility(*Vis, true);
    }

    // Note that Sema::MergeCompatibleFunctionDecls already takes care of
    // merging storage classes and visibility attributes, so we don't have to
    // look at previous decls in here.

    return LV;
  }

  if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
    // A block-scope extern declaration has linkage.
    if (Var->hasExternalStorage()) {
      if (Var->isInAnonymousNamespace() && !Var->isInExternCContext())
        return LinkageInfo::uniqueExternal();

      LinkageInfo LV;
      if (Var->getStorageClass() == SC_PrivateExtern)
        LV.mergeVisibility(HiddenVisibility, true);
      else if (!hasExplicitVisibilityAlready(computation)) {
        if (Optional<Visibility> Vis = getExplicitVisibility(Var, computation))
          LV.mergeVisibility(*Vis, true);
      }

      // Inherit linkage and merge visibility from a previous
      // declaration of the same variable.
      if (const VarDecl *Prev = Var->getPreviousDecl()) {
        LinkageInfo PrevLV = getLVForDecl(Prev, computation);
        if (PrevLV.getLinkage())
          LV.setLinkage(PrevLV.getLinkage());
        LV.mergeVisibility(PrevLV);
      }

      return LV;
    }

    if (!Var->isStaticLocal())
      return LinkageInfo::none();
  }

  // Below here: C++-only "visible no linkage" handling for entities
  // owned by an inline function or a block.
  ASTContext &Context = D->getASTContext();
  if (!Context.getLangOpts().CPlusPlus)
    return LinkageInfo::none();

  const Decl *OuterD = getOutermostFuncOrBlockContext(D);
  if (!OuterD)
    return LinkageInfo::none();

  LinkageInfo LV;
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(OuterD)) {
    if (!BD->getBlockManglingNumber())
      return LinkageInfo::none();

    LV = getLVForClosure(BD->getDeclContext()->getRedeclContext(),
                         BD->getBlockManglingContextDecl(), computation);
  } else {
    const FunctionDecl *FD = cast<FunctionDecl>(OuterD);
    if (!FD->isInlined() &&
        FD->getTemplateSpecializationKind() == TSK_Undeclared)
      return LinkageInfo::none();

    LV = getLVForDecl(FD, computation);
  }
  if (!isExternallyVisible(LV.getLinkage()))
    return LinkageInfo::none();
  return LinkageInfo(VisibleNoLinkage, LV.getVisibility(),
                     LV.isVisibilityExplicit());
}

/// Walk outward through nested lambda classes and return the
/// outermost enclosing lambda (or \p Record itself if it is not a
/// lambda).
static inline const CXXRecordDecl*
getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
  const CXXRecordDecl *Ret = Record;
  while (Record && Record->isLambda()) {
    Ret = Record;
    if (!Record->getParent()) break;

    // Get the Containing Class of this Lambda Class
    Record = dyn_cast_or_null<CXXRecordDecl>(
        Record->getParent()->getParent());
  }
  return Ret;
}

/// Uncached worker for linkage/visibility computation; dispatches on
/// the declaration's kind and context.
static LinkageInfo computeLVForDecl(const NamedDecl *D,
                                    LVComputationKind computation) {
  // Objective-C: treat all Objective-C declarations as having external
  // linkage.
  switch (D->getKind()) {
  default:
    break;
  case Decl::ParmVar:
    return LinkageInfo::none();
  case Decl::TemplateTemplateParm: // count these as external
  case Decl::NonTypeTemplateParm:
  case Decl::ObjCAtDefsField:
  case Decl::ObjCCategory:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCCompatibleAlias:
  case Decl::ObjCImplementation:
  case Decl::ObjCMethod:
  case Decl::ObjCProperty:
  case Decl::ObjCPropertyImpl:
  case Decl::ObjCProtocol:
    return LinkageInfo::external();

  case Decl::CXXRecord: {
    const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
    if (Record->isLambda()) {
      if (!Record->getLambdaManglingNumber()) {
        // This lambda has no mangling number, so it's internal.
        return LinkageInfo::internal();
      }

      // This lambda has its linkage/visibility determined:
      //  - either by the outermost lambda if that lambda has no mangling
      //    number.
      //  - or by the parent of the outer most lambda
      // This prevents infinite recursion in settings such as nested lambdas
      // used in NSDMI's, for e.g.
+ // struct L { + // int t{}; + // int t2 = ([](int a) { return [](int b) { return b; };})(t)(t); + // }; + const CXXRecordDecl *OuterMostLambda = + getOutermostEnclosingLambda(Record); + if (!OuterMostLambda->getLambdaManglingNumber()) + return LinkageInfo::internal(); + + return getLVForClosure( + OuterMostLambda->getDeclContext()->getRedeclContext(), + OuterMostLambda->getLambdaContextDecl(), computation); + } + + break; + } + } + + // Handle linkage for namespace-scope names. + if (D->getDeclContext()->getRedeclContext()->isFileContext()) + return getLVForNamespaceScopeDecl(D, computation); + + // C++ [basic.link]p5: + // In addition, a member function, static data member, a named + // class or enumeration of class scope, or an unnamed class or + // enumeration defined in a class-scope typedef declaration such + // that the class or enumeration has the typedef name for linkage + // purposes (7.1.3), has external linkage if the name of the class + // has external linkage. + if (D->getDeclContext()->isRecord()) + return getLVForClassMember(D, computation); + + // C++ [basic.link]p6: + // The name of a function declared in block scope and the name of + // an object declared by a block scope extern declaration have + // linkage. If there is a visible declaration of an entity with + // linkage having the same name and type, ignoring entities + // declared outside the innermost enclosing namespace scope, the + // block scope declaration declares that same entity and receives + // the linkage of the previous declaration. If there is more than + // one such matching entity, the program is ill-formed. Otherwise, + // if no matching entity is found, the block scope entity receives + // external linkage. + if (D->getDeclContext()->isFunctionOrMethod()) + return getLVForLocalDecl(D, computation); + + // C++ [basic.link]p6: + // Names not covered by these rules have no linkage. 
+ return LinkageInfo::none(); +} + +namespace clang { +class LinkageComputer { +public: + static LinkageInfo getLVForDecl(const NamedDecl *D, + LVComputationKind computation) { + if (computation == LVForLinkageOnly && D->hasCachedLinkage()) + return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false); + + LinkageInfo LV = computeLVForDecl(D, computation); + if (D->hasCachedLinkage()) + assert(D->getCachedLinkage() == LV.getLinkage()); + + D->setCachedLinkage(LV.getLinkage()); + +#ifndef NDEBUG + // In C (because of gnu inline) and in c++ with microsoft extensions an + // static can follow an extern, so we can have two decls with different + // linkages. + const LangOptions &Opts = D->getASTContext().getLangOpts(); + if (!Opts.CPlusPlus || Opts.MicrosoftExt) + return LV; + + // We have just computed the linkage for this decl. By induction we know + // that all other computed linkages match, check that the one we just + // computed + // also does. + NamedDecl *Old = NULL; + for (NamedDecl::redecl_iterator I = D->redecls_begin(), + E = D->redecls_end(); + I != E; ++I) { + NamedDecl *T = cast<NamedDecl>(*I); + if (T == D) + continue; + if (T->hasCachedLinkage()) { + Old = T; + break; + } + } + assert(!Old || Old->getCachedLinkage() == D->getCachedLinkage()); +#endif + + return LV; + } +}; +} + +static LinkageInfo getLVForDecl(const NamedDecl *D, + LVComputationKind computation) { + return clang::LinkageComputer::getLVForDecl(D, computation); +} + +std::string NamedDecl::getQualifiedNameAsString() const { + return getQualifiedNameAsString(getASTContext().getPrintingPolicy()); +} + +std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const { + std::string QualName; + llvm::raw_string_ostream OS(QualName); + printQualifiedName(OS, P); + return OS.str(); +} + +void NamedDecl::printQualifiedName(raw_ostream &OS) const { + printQualifiedName(OS, getASTContext().getPrintingPolicy()); +} + +void NamedDecl::printQualifiedName(raw_ostream &OS, + 
const PrintingPolicy &P) const { + const DeclContext *Ctx = getDeclContext(); + + if (Ctx->isFunctionOrMethod()) { + printName(OS); + return; + } + + typedef SmallVector<const DeclContext *, 8> ContextsTy; + ContextsTy Contexts; + + // Collect contexts. + while (Ctx && isa<NamedDecl>(Ctx)) { + Contexts.push_back(Ctx); + Ctx = Ctx->getParent(); + } + + for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend(); + I != E; ++I) { + if (const ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(*I)) { + OS << Spec->getName(); + const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); + TemplateSpecializationType::PrintTemplateArgumentList(OS, + TemplateArgs.data(), + TemplateArgs.size(), + P); + } else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) { + if (ND->isAnonymousNamespace()) + OS << "<anonymous namespace>"; + else + OS << *ND; + } else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) { + if (!RD->getIdentifier()) + OS << "<anonymous " << RD->getKindName() << '>'; + else + OS << *RD; + } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { + const FunctionProtoType *FT = 0; + if (FD->hasWrittenPrototype()) + FT = dyn_cast<FunctionProtoType>(FD->getType()->castAs<FunctionType>()); + + OS << *FD << '('; + if (FT) { + unsigned NumParams = FD->getNumParams(); + for (unsigned i = 0; i < NumParams; ++i) { + if (i) + OS << ", "; + OS << FD->getParamDecl(i)->getType().stream(P); + } + + if (FT->isVariadic()) { + if (NumParams > 0) + OS << ", "; + OS << "..."; + } + } + OS << ')'; + } else { + OS << *cast<NamedDecl>(*I); + } + OS << "::"; + } + + if (getDeclName()) + OS << *this; + else + OS << "<anonymous>"; +} + +void NamedDecl::getNameForDiagnostic(raw_ostream &OS, + const PrintingPolicy &Policy, + bool Qualified) const { + if (Qualified) + printQualifiedName(OS, Policy); + else + printName(OS); +} + +bool NamedDecl::declarationReplaces(NamedDecl *OldD) const { + 
assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch"); + + // UsingDirectiveDecl's are not really NamedDecl's, and all have same name. + // We want to keep it, unless it nominates same namespace. + if (getKind() == Decl::UsingDirective) { + return cast<UsingDirectiveDecl>(this)->getNominatedNamespace() + ->getOriginalNamespace() == + cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace() + ->getOriginalNamespace(); + } + + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) + // For function declarations, we keep track of redeclarations. + return FD->getPreviousDecl() == OldD; + + // For function templates, the underlying function declarations are linked. + if (const FunctionTemplateDecl *FunctionTemplate + = dyn_cast<FunctionTemplateDecl>(this)) + if (const FunctionTemplateDecl *OldFunctionTemplate + = dyn_cast<FunctionTemplateDecl>(OldD)) + return FunctionTemplate->getTemplatedDecl() + ->declarationReplaces(OldFunctionTemplate->getTemplatedDecl()); + + // For method declarations, we keep track of redeclarations. 
+ if (isa<ObjCMethodDecl>(this)) + return false; + + if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD)) + return true; + + if (isa<UsingShadowDecl>(this) && isa<UsingShadowDecl>(OldD)) + return cast<UsingShadowDecl>(this)->getTargetDecl() == + cast<UsingShadowDecl>(OldD)->getTargetDecl(); + + if (isa<UsingDecl>(this) && isa<UsingDecl>(OldD)) { + ASTContext &Context = getASTContext(); + return Context.getCanonicalNestedNameSpecifier( + cast<UsingDecl>(this)->getQualifier()) == + Context.getCanonicalNestedNameSpecifier( + cast<UsingDecl>(OldD)->getQualifier()); + } + + if (isa<UnresolvedUsingValueDecl>(this) && + isa<UnresolvedUsingValueDecl>(OldD)) { + ASTContext &Context = getASTContext(); + return Context.getCanonicalNestedNameSpecifier( + cast<UnresolvedUsingValueDecl>(this)->getQualifier()) == + Context.getCanonicalNestedNameSpecifier( + cast<UnresolvedUsingValueDecl>(OldD)->getQualifier()); + } + + // A typedef of an Objective-C class type can replace an Objective-C class + // declaration or definition, and vice versa. + if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) || + (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD))) + return true; + + // For non-function declarations, if the declarations are of the + // same kind then this must be a redeclaration, or semantic analysis + // would not have given us the new declaration. 
+ return this->getKind() == OldD->getKind(); +} + +bool NamedDecl::hasLinkage() const { + return getFormalLinkage() != NoLinkage; +} + +NamedDecl *NamedDecl::getUnderlyingDeclImpl() { + NamedDecl *ND = this; + while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND)) + ND = UD->getTargetDecl(); + + if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND)) + return AD->getClassInterface(); + + return ND; +} + +bool NamedDecl::isCXXInstanceMember() const { + if (!isCXXClassMember()) + return false; + + const NamedDecl *D = this; + if (isa<UsingShadowDecl>(D)) + D = cast<UsingShadowDecl>(D)->getTargetDecl(); + + if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D)) + return true; + if (isa<CXXMethodDecl>(D)) + return cast<CXXMethodDecl>(D)->isInstance(); + if (isa<FunctionTemplateDecl>(D)) + return cast<CXXMethodDecl>(cast<FunctionTemplateDecl>(D) + ->getTemplatedDecl())->isInstance(); + return false; +} + +//===----------------------------------------------------------------------===// +// DeclaratorDecl Implementation +//===----------------------------------------------------------------------===// + +template <typename DeclT> +static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) { + if (decl->getNumTemplateParameterLists() > 0) + return decl->getTemplateParameterList(0)->getTemplateLoc(); + else + return decl->getInnerLocStart(); +} + +SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const { + TypeSourceInfo *TSI = getTypeSourceInfo(); + if (TSI) return TSI->getTypeLoc().getBeginLoc(); + return SourceLocation(); +} + +void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) { + if (QualifierLoc) { + // Make sure the extended decl info is allocated. + if (!hasExtInfo()) { + // Save (non-extended) type source info pointer. + TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>(); + // Allocate external info struct. 
+ DeclInfo = new (getASTContext()) ExtInfo; + // Restore savedTInfo into (extended) decl info. + getExtInfo()->TInfo = savedTInfo; + } + // Set qualifier info. + getExtInfo()->QualifierLoc = QualifierLoc; + } else { + // Here Qualifier == 0, i.e., we are removing the qualifier (if any). + if (hasExtInfo()) { + if (getExtInfo()->NumTemplParamLists == 0) { + // Save type source info pointer. + TypeSourceInfo *savedTInfo = getExtInfo()->TInfo; + // Deallocate the extended decl info. + getASTContext().Deallocate(getExtInfo()); + // Restore savedTInfo into (non-extended) decl info. + DeclInfo = savedTInfo; + } + else + getExtInfo()->QualifierLoc = QualifierLoc; + } + } +} + +void +DeclaratorDecl::setTemplateParameterListsInfo(ASTContext &Context, + unsigned NumTPLists, + TemplateParameterList **TPLists) { + assert(NumTPLists > 0); + // Make sure the extended decl info is allocated. + if (!hasExtInfo()) { + // Save (non-extended) type source info pointer. + TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>(); + // Allocate external info struct. + DeclInfo = new (getASTContext()) ExtInfo; + // Restore savedTInfo into (extended) decl info. + getExtInfo()->TInfo = savedTInfo; + } + // Set the template parameter lists info. + getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists); +} + +SourceLocation DeclaratorDecl::getOuterLocStart() const { + return getTemplateOrInnerLocStart(this); +} + +namespace { + +// Helper function: returns true if QT is or contains a type +// having a postfix component. 
bool typeIsPostfix(clang::QualType QT) {
  // Walk through sugar/indirection one level per iteration; the loop
  // terminates when it reaches a type class that is neither listed as
  // "recurse into pointee/pattern" nor as a postfix type.
  while (true) {
    const Type* T = QT.getTypePtr();
    switch (T->getTypeClass()) {
    default:
      return false;
    case Type::Pointer:
      QT = cast<PointerType>(T)->getPointeeType();
      break;
    case Type::BlockPointer:
      QT = cast<BlockPointerType>(T)->getPointeeType();
      break;
    case Type::MemberPointer:
      QT = cast<MemberPointerType>(T)->getPointeeType();
      break;
    case Type::LValueReference:
    case Type::RValueReference:
      QT = cast<ReferenceType>(T)->getPointeeType();
      break;
    case Type::PackExpansion:
      QT = cast<PackExpansionType>(T)->getPattern();
      break;
    case Type::Paren:
    case Type::ConstantArray:
    case Type::DependentSizedArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
      // Parens, array brackets and function parameter lists appear after
      // the declarator name in source, so the declaration's range must be
      // extended past the name to cover them.
      return true;
    }
  }
}

} // namespace

/// Compute the source range of this declarator. The range normally ends at
/// the declared name (getLocation()); when the written type has a postfix
/// component (array brackets, parameter list, ...), extend it to the end of
/// the type as written.
SourceRange DeclaratorDecl::getSourceRange() const {
  SourceLocation RangeEnd = getLocation();
  if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
    if (typeIsPostfix(TInfo->getType()))
      RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
  }
  return SourceRange(getOuterLocStart(), RangeEnd);
}

/// Store a copy of the given template parameter lists, allocating the array
/// in \p Context. Any previously stored lists are deallocated first.
void
QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
                                             unsigned NumTPLists,
                                             TemplateParameterList **TPLists) {
  assert((NumTPLists == 0 || TPLists != 0) &&
         "Empty array of template parameters with positive size!");

  // Free previous template parameters (if any).
  if (NumTemplParamLists > 0) {
    Context.Deallocate(TemplParamLists);
    TemplParamLists = 0;
    NumTemplParamLists = 0;
  }
  // Set info on matched template parameter lists (if any).
  if (NumTPLists > 0) {
    TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
    NumTemplParamLists = NumTPLists;
    for (unsigned i = NumTPLists; i-- > 0; )
      TemplParamLists[i] = TPLists[i];
  }
}

//===----------------------------------------------------------------------===//
// VarDecl Implementation
//===----------------------------------------------------------------------===//

/// Return the source spelling for a storage class specifier. SC_None has no
/// spelling; passing it (or an out-of-range value) hits llvm_unreachable.
const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
  switch (SC) {
  case SC_None:                 break;
  case SC_Auto:                 return "auto";
  case SC_Extern:               return "extern";
  case SC_OpenCLWorkGroupLocal: return "<<work-group-local>>";
  case SC_PrivateExtern:        return "__private_extern__";
  case SC_Register:             return "register";
  case SC_Static:               return "static";
  }

  llvm_unreachable("Invalid storage class");
}

VarDecl::VarDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
                 SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
                 TypeSourceInfo *TInfo, StorageClass SC)
  : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), Init() {
  // Both bitfield structs share the same storage (AllBits); make sure
  // neither has outgrown the single word they are packed into.
  assert(sizeof(VarDeclBitfields) <= sizeof(unsigned));
  assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned));
  AllBits = 0;
  VarDeclBits.SClass = SC;
  // Everything else is implicitly initialized to false.
}

VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
                         SourceLocation StartL, SourceLocation IdL,
                         IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
                         StorageClass S) {
  return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S);
}

/// Create an empty VarDecl placeholder to be filled in by the AST reader;
/// every field is defaulted and patched during deserialization.
VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
  return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
                           QualType(), 0, SC_None);
}

void VarDecl::setStorageClass(StorageClass SC) {
  assert(isLegalForVariable(SC));
  VarDeclBits.SClass = SC;
}

/// Like DeclaratorDecl::getSourceRange(), but if this variable has an
/// explicit initializer the range extends to the initializer's end.
SourceRange VarDecl::getSourceRange() const {
  if (const Expr *Init = getInit()) {
    SourceLocation InitEnd = Init->getLocEnd();
    // If Init is implicit, ignore its source range and fallback on
    // DeclaratorDecl::getSourceRange() to handle postfix elements.
    if (InitEnd.isValid() && InitEnd != getLocation())
      return SourceRange(getOuterLocStart(), InitEnd);
  }
  return DeclaratorDecl::getSourceRange();
}

/// Shared implementation of getLanguageLinkage() for VarDecl/FunctionDecl
/// (\p T is the declaration class).
template<typename T>
static LanguageLinkage getLanguageLinkageTemplate(const T &D) {
  // C++ [dcl.link]p1: All function types, function names with external linkage,
  // and variable names with external linkage have a language linkage.
  if (!D.hasExternalFormalLinkage())
    return NoLanguageLinkage;

  // Language linkage is a C++ concept, but saying that everything else in C has
  // C language linkage fits the implementation nicely.
  ASTContext &Context = D.getASTContext();
  if (!Context.getLangOpts().CPlusPlus)
    return CLanguageLinkage;

  // C++ [dcl.link]p4: A C language linkage is ignored in determining the
  // language linkage of the names of class members and the function type of
  // class member functions.
  const DeclContext *DC = D.getDeclContext();
  if (DC->isRecord())
    return CXXLanguageLinkage;

  // If the first decl is in an extern "C" context, any other redeclaration
  // will have C language linkage. If the first one is not in an extern "C"
  // context, we would have reported an error for any other decl being in one.
  if (isFirstInExternCContext(&D))
    return CLanguageLinkage;
  return CXXLanguageLinkage;
}

/// Shared implementation of isExternC() for VarDecl/FunctionDecl.
template<typename T>
static bool isExternCTemplate(const T &D) {
  // Since the context is ignored for class members, they can only have C++
  // language linkage or no language linkage.
  const DeclContext *DC = D.getDeclContext();
  if (DC->isRecord()) {
    assert(D.getASTContext().getLangOpts().CPlusPlus);
    return false;
  }

  return D.getLanguageLinkage() == CLanguageLinkage;
}

LanguageLinkage VarDecl::getLanguageLinkage() const {
  return getLanguageLinkageTemplate(*this);
}

bool VarDecl::isExternC() const {
  return isExternCTemplate(*this);
}

bool VarDecl::isInExternCContext() const {
  return getLexicalDeclContext()->isExternCContext();
}

bool VarDecl::isInExternCXXContext() const {
  return getLexicalDeclContext()->isExternCXXContext();
}

VarDecl *VarDecl::getCanonicalDecl() { return getFirstDecl(); }

/// Classify this particular declaration as a definition, a tentative
/// definition (C only), or a mere declaration, following the C and C++
/// rules cited inline below.
VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition(
  ASTContext &C) const
{
  // C++ [basic.def]p2:
  //   A declaration is a definition unless [...] it contains the 'extern'
  //   specifier or a linkage-specification and neither an initializer [...],
  //   it declares a static data member in a class declaration [...].
  // C++1y [temp.expl.spec]p15:
  //   An explicit specialization of a static data member or an explicit
  //   specialization of a static data member template is a definition if the
  //   declaration includes an initializer; otherwise, it is a declaration.
  //
  // FIXME: How do you declare (but not define) a partial specialization of
  // a static data member template outside the containing class?
  if (isStaticDataMember()) {
    if (isOutOfLine() &&
        (hasInit() ||
         // If the first declaration is out-of-line, this may be an
         // instantiation of an out-of-line partial specialization of a variable
         // template for which we have not yet instantiated the initializer.
         (getFirstDecl()->isOutOfLine()
              ? getTemplateSpecializationKind() == TSK_Undeclared
              : getTemplateSpecializationKind() !=
                    TSK_ExplicitSpecialization) ||
         isa<VarTemplatePartialSpecializationDecl>(this)))
      return Definition;
    else
      return DeclarationOnly;
  }
  // C99 6.7p5:
  //   A definition of an identifier is a declaration for that identifier that
  //   [...] causes storage to be reserved for that object.
  // Note: that applies for all non-file-scope objects.
  // C99 6.9.2p1:
  //   If the declaration of an identifier for an object has file scope and an
  //   initializer, the declaration is an external definition for the identifier
  if (hasInit())
    return Definition;

  if (hasAttr<AliasAttr>())
    return Definition;

  // A variable template specialization (other than a static data member
  // template or an explicit specialization) is a declaration until we
  // instantiate its initializer.
  if (isa<VarTemplateSpecializationDecl>(this) &&
      getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
    return DeclarationOnly;

  if (hasExternalStorage())
    return DeclarationOnly;

  // [dcl.link] p7:
  //   A declaration directly contained in a linkage-specification is treated
  //   as if it contains the extern specifier for the purpose of determining
  //   the linkage of the declared name and whether it is a definition.
  if (isSingleLineExternC(*this))
    return DeclarationOnly;

  // C99 6.9.2p2:
  //   A declaration of an object that has file scope without an initializer,
  //   and without a storage class specifier or the scs 'static', constitutes
  //   a tentative definition.
  // No such thing in C++.
  if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
    return TentativeDefinition;

  // What's left is (in C, block-scope) declarations without initializers or
  // external storage. These are definitions.
  return Definition;
}

/// If this chain of redeclarations contains only tentative definitions,
/// return the last tentative definition (which acts as the definition in C);
/// return null if there is a real definition or no tentative one.
VarDecl *VarDecl::getActingDefinition() {
  DefinitionKind Kind = isThisDeclarationADefinition();
  if (Kind != TentativeDefinition)
    return 0;

  VarDecl *LastTentative = 0;
  VarDecl *First = getFirstDecl();
  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
       I != E; ++I) {
    Kind = (*I)->isThisDeclarationADefinition();
    if (Kind == Definition)
      return 0;
    else if (Kind == TentativeDefinition)
      LastTentative = *I;
  }
  return LastTentative;
}

/// Return the redeclaration that is an actual definition, or null if none is.
VarDecl *VarDecl::getDefinition(ASTContext &C) {
  VarDecl *First = getFirstDecl();
  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
       I != E; ++I) {
    if ((*I)->isThisDeclarationADefinition(C) == Definition)
      return *I;
  }
  return 0;
}

/// Return the strongest definition kind found anywhere in the redeclaration
/// chain (Definition > TentativeDefinition > DeclarationOnly).
VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
  DefinitionKind Kind = DeclarationOnly;

  const VarDecl *First = getFirstDecl();
  for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
       I != E; ++I) {
    Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
    if (Kind == Definition)
      break;
  }

  return Kind;
}

/// Return the initializer attached to any redeclaration of this variable,
/// setting \p D to the redeclaration that carries it; null if none has one.
const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
  redecl_iterator I = redecls_begin(), E = redecls_end();
  while (I != E && !I->getInit())
    ++I;

  if (I != E) {
    D = *I;
    return I->getInit();
  }
  return 0;
}

bool VarDecl::isOutOfLine() const {
  if (Decl::isOutOfLine())
    return true;

  if (!isStaticDataMember())
    return false;

  // If this static data member was instantiated from a static data member of
  // a class template, check whether that static data member was defined
  // out-of-line.
  if (VarDecl *VD = getInstantiatedFromStaticDataMember())
    return VD->isOutOfLine();

  return false;
}

/// For a static data member, return the redeclaration that lives at file
/// scope (the out-of-line definition), or null if there isn't one.
VarDecl *VarDecl::getOutOfLineDefinition() {
  if (!isStaticDataMember())
    return 0;

  for (VarDecl::redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
       RD != RDEnd; ++RD) {
    if (RD->getLexicalDeclContext()->isFileContext())
      return *RD;
  }

  return 0;
}

/// Replace the initializer. If a previous call created an EvaluatedStmt
/// wrapper (see ensureEvaluatedStmt), destroy it and return its memory to
/// the ASTContext before storing the raw expression.
void VarDecl::setInit(Expr *I) {
  if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
    Eval->~EvaluatedStmt();
    getASTContext().Deallocate(Eval);
  }

  Init = I;
}

bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
  const LangOptions &Lang = C.getLangOpts();

  if (!Lang.CPlusPlus)
    return false;

  // In C++11, any variable of reference type can be used in a constant
  // expression if it is initialized by a constant expression.
  if (Lang.CPlusPlus11 && getType()->isReferenceType())
    return true;

  // Only const objects can be used in constant expressions in C++. C++98 does
  // not require the variable to be non-volatile, but we consider this to be a
  // defect.
  if (!getType().isConstQualified() || getType().isVolatileQualified())
    return false;

  // In C++, const, non-volatile variables of integral or enumeration types
  // can be used in constant expressions.
  if (getType()->isIntegralOrEnumerationType())
    return true;

  // Additionally, in C++11, non-volatile constexpr variables can be used in
  // constant expressions.
  return Lang.CPlusPlus11 && isConstexpr();
}

/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
/// form, which contains extra information on the evaluated value of the
/// initializer.
EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
  EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
  if (!Eval) {
    Stmt *S = Init.get<Stmt *>();
    // Note: EvaluatedStmt contains an APValue, which usually holds
    // resources not allocated from the ASTContext.  We need to do some
    // work to avoid leaking those, but we do so in VarDecl::evaluateValue
    // where we can detect whether there's anything to clean up or not.
    Eval = new (getASTContext()) EvaluatedStmt;
    Eval->Value = S;
    Init = Eval;
  }
  return Eval;
}

/// Evaluate the initializer, discarding any diagnostic notes produced.
APValue *VarDecl::evaluateValue() const {
  SmallVector<PartialDiagnosticAt, 8> Notes;
  return evaluateValue(Notes);
}

namespace {
// Destroy an APValue that was allocated in an ASTContext.
void DestroyAPValue(void* UntypedValue) {
  static_cast<APValue*>(UntypedValue)->~APValue();
}
} // namespace

/// Evaluate (and cache) the value of this variable's initializer, appending
/// any notes explaining non-constantness to \p Notes. Returns null if the
/// initializer is not a constant expression; results are memoized in the
/// EvaluatedStmt, and the IsEvaluating flag guards against re-entry through
/// self-referential initializers.
APValue *VarDecl::evaluateValue(
    SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
  EvaluatedStmt *Eval = ensureEvaluatedStmt();

  // We only produce notes indicating why an initializer is non-constant the
  // first time it is evaluated. FIXME: The notes won't always be emitted the
  // first time we try evaluation, so might not be produced at all.
  if (Eval->WasEvaluated)
    return Eval->Evaluated.isUninit() ? 0 : &Eval->Evaluated;

  const Expr *Init = cast<Expr>(Eval->Value);
  assert(!Init->isValueDependent());

  if (Eval->IsEvaluating) {
    // FIXME: Produce a diagnostic for self-initialization.
    Eval->CheckedICE = true;
    Eval->IsICE = false;
    return 0;
  }

  Eval->IsEvaluating = true;

  bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
                                            this, Notes);

  // Ensure the computed APValue is cleaned up later if evaluation succeeded,
  // or that it's empty (so that there's nothing to clean up) if evaluation
  // failed.
  if (!Result)
    Eval->Evaluated = APValue();
  else if (Eval->Evaluated.needsCleanup())
    getASTContext().AddDeallocation(DestroyAPValue, &Eval->Evaluated);

  Eval->IsEvaluating = false;
  Eval->WasEvaluated = true;

  // In C++11, we have determined whether the initializer was a constant
  // expression as a side-effect.
  if (getASTContext().getLangOpts().CPlusPlus11 && !Eval->CheckedICE) {
    Eval->CheckedICE = true;
    Eval->IsICE = Result && Notes.empty();
  }

  return Result ? &Eval->Evaluated : 0;
}

/// Determine (and cache) whether the initializer is an integral constant
/// expression. The CheckingICE flag breaks cycles for self-referential
/// initializers, mirroring IsEvaluating in evaluateValue above.
bool VarDecl::checkInitIsICE() const {
  // Initializers of weak variables are never ICEs.
  if (isWeak())
    return false;

  EvaluatedStmt *Eval = ensureEvaluatedStmt();
  if (Eval->CheckedICE)
    // We have already checked whether this subexpression is an
    // integral constant expression.
    return Eval->IsICE;

  const Expr *Init = cast<Expr>(Eval->Value);
  assert(!Init->isValueDependent());

  // In C++11, evaluate the initializer to check whether it's a constant
  // expression.
  if (getASTContext().getLangOpts().CPlusPlus11) {
    SmallVector<PartialDiagnosticAt, 8> Notes;
    evaluateValue(Notes);
    return Eval->IsICE;
  }

  // It's an ICE whether or not the definition we found is
  // out-of-line.  See DR 721 and the discussion in Clang PR
  // 6206 for details.

  if (Eval->CheckingICE)
    return false;
  Eval->CheckingICE = true;

  Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
  Eval->CheckingICE = false;
  Eval->CheckedICE = true;
  return Eval->IsICE;
}

/// If this is an instantiated static data member, return the member of the
/// class template it was instantiated from; otherwise null.
VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
  if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
    return cast<VarDecl>(MSI->getInstantiatedFrom());

  return 0;
}

TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
  if (const VarTemplateSpecializationDecl *Spec =
          dyn_cast<VarTemplateSpecializationDecl>(this))
    return Spec->getSpecializationKind();

  if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
    return MSI->getTemplateSpecializationKind();

  return TSK_Undeclared;
}

SourceLocation VarDecl::getPointOfInstantiation() const {
  if (const VarTemplateSpecializationDecl *Spec =
          dyn_cast<VarTemplateSpecializationDecl>(this))
    return Spec->getPointOfInstantiation();

  if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
    return MSI->getPointOfInstantiation();

  return SourceLocation();
}

VarTemplateDecl *VarDecl::getDescribedVarTemplate() const {
  return getASTContext().getTemplateOrSpecializationInfo(this)
      .dyn_cast<VarTemplateDecl *>();
}

void VarDecl::setDescribedVarTemplate(VarTemplateDecl *Template) {
  getASTContext().setTemplateOrSpecializationInfo(this, Template);
}

MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
  if (isStaticDataMember())
    // FIXME: Remove ?
    // return getASTContext().getInstantiatedFromStaticDataMember(this);
    return getASTContext().getTemplateOrSpecializationInfo(this)
        .dyn_cast<MemberSpecializationInfo *>();
  return 0;
}

/// Record the kind of specialization/instantiation this declaration is,
/// stamping the point of instantiation on the first (implicit) instantiation
/// only — an already-valid point of instantiation is never overwritten.
void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
                                        SourceLocation PointOfInstantiation) {
  assert((isa<VarTemplateSpecializationDecl>(this) ||
          getMemberSpecializationInfo()) &&
         "not a variable or static data member template specialization");

  if (VarTemplateSpecializationDecl *Spec =
          dyn_cast<VarTemplateSpecializationDecl>(this)) {
    Spec->setSpecializationKind(TSK);
    if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
        Spec->getPointOfInstantiation().isInvalid())
      Spec->setPointOfInstantiation(PointOfInstantiation);
  }

  if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo()) {
    MSI->setTemplateSpecializationKind(TSK);
    if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
        MSI->getPointOfInstantiation().isInvalid())
      MSI->setPointOfInstantiation(PointOfInstantiation);
  }
}

void
VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD,
                                            TemplateSpecializationKind TSK) {
  assert(getASTContext().getTemplateOrSpecializationInfo(this).isNull() &&
         "Previous template or instantiation?");
  getASTContext().setInstantiatedFromStaticDataMember(this, VD, TSK);
}

//===----------------------------------------------------------------------===//
+// ParmVarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, Expr *DefArg) {
+ return new (C) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
+ S, DefArg);
+}
+
+// getOriginalType - Return the type as written by the user, undoing
+// array/function-to-pointer decay when the type-source info records a
+// DecayedType for this parameter.
+QualType ParmVarDecl::getOriginalType() const {
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ QualType T = TSI ? TSI->getType() : getType();
+ if (const DecayedType *DT = dyn_cast<DecayedType>(T))
+ return DT->getOriginalType();
+ return T;
+}
+
+// CreateDeserialized - Allocate a blank ParmVarDecl for the AST reader;
+// every field is filled in later during deserialization.
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
+ return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, SC_None, 0);
+}
+
+SourceRange ParmVarDecl::getSourceRange() const {
+ // A locally-written default argument extends the range past the declarator.
+ if (!hasInheritedDefaultArg()) {
+ SourceRange ArgRange = getDefaultArgRange();
+ if (ArgRange.isValid())
+ return SourceRange(getOuterLocStart(), ArgRange.getEnd());
+ }
+
+ // DeclaratorDecl considers the range of postfix types as overlapping with the
+ // declaration name, but this is not the case with parameters in ObjC methods.
+ if (isa<ObjCMethodDecl>(getDeclContext()))
+ return SourceRange(DeclaratorDecl::getLocStart(), getLocation());
+
+ return DeclaratorDecl::getSourceRange();
+}
+
+// getDefaultArg - Return the parsed, instantiated default argument,
+// unwrapping any ExprWithCleanups node that wraps it.
+Expr *ParmVarDecl::getDefaultArg() {
+ assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
+ assert(!hasUninstantiatedDefaultArg() &&
+ "Default argument is not yet instantiated!");
+
+ Expr *Arg = getInit();
+ if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ return E->getSubExpr();
+
+ return Arg;
+}
+
+// getDefaultArgRange - Source range of the default argument, whether already
+// parsed (the initializer) or still awaiting template instantiation.
+SourceRange ParmVarDecl::getDefaultArgRange() const {
+ if (const Expr *E = getInit())
+ return E->getSourceRange();
+
+ if (hasUninstantiatedDefaultArg())
+ return getUninstantiatedDefaultArg()->getSourceRange();
+
+ return SourceRange();
+}
+
+bool ParmVarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
+// setParameterIndexLarge - Indices too big for the bitfield are stored in an
+// ASTContext side table; the bitfield itself is set to the sentinel value.
+void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
+ getASTContext().setParameterIndex(this, parameterIndex);
+ ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
+}
+
+unsigned ParmVarDecl::getParameterIndexLarge() const {
+ return getASTContext().getParameterIndex(this);
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionDecl Implementation
+//===----------------------------------------------------------------------===//
+
+// getNameForDiagnostic - Print the function name, appending any template
+// specialization arguments so diagnostics show e.g. 'f<int>'.
+void FunctionDecl::getNameForDiagnostic(
+ raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
+ const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
+ if (TemplateArgs)
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs->data(), TemplateArgs->size(), Policy);
+}
+
+// isVariadic - A function without a prototype is never considered variadic.
+bool FunctionDecl::isVariadic() const {
+ if (const FunctionProtoType *FT = getType()->getAs<FunctionProtoType>())
+ return FT->isVariadic();
+ return false;
+}
+
+// hasBody - Scan all redeclarations for one that carries a body (or a body
+// still pending late template parsing) and report it through Definition.
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body || I->IsLateTemplateParsed) {
+ Definition = *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// hasTrivialBody - True only for a body that is an empty compound statement.
+bool FunctionDecl::hasTrivialBody() const
+{
+ Stmt *S = getBody();
+ if (!S) {
+ // Since we don't have a body for this function, we don't know if it's
+ // trivial or not.
+ return false;
+ }
+
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
+
+// isDefined - Broader than hasBody: deleted, defaulted, aliased, and
+// late-parsed redeclarations also count as definitions.
+bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed ||
+ I->hasAttr<AliasAttr>()) {
+ Definition = I->IsDeleted ? I->getCanonicalDecl() : *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// getBody - Return the defining redeclaration's body, lazily materializing
+// it through the external AST source if needed.
+Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
+ if (!hasBody(Definition))
+ return 0;
+
+ if (Definition->Body)
+ return Definition->Body.get(getASTContext().getExternalSource());
+
+ return 0;
+}
+
+void FunctionDecl::setBody(Stmt *B) {
+ Body = B;
+ if (B)
+ EndRangeLoc = B->getLocEnd();
+}
+
+// setPure - Marking a method pure also notifies the enclosing class so it
+// can update its abstractness bookkeeping.
+void FunctionDecl::setPure(bool P) {
+ IsPure = P;
+ if (P)
+ if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ Parent->markedVirtualFunctionPure();
+}
+
+// isNamed - Compare a declaration's identifier against a string literal
+// without constructing a std::string.
+template<std::size_t Len>
+static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) {
+ IdentifierInfo *II = ND->getIdentifier();
+ return II && II->isStr(Str);
+}
+
+// isMain - Only hosted (non-freestanding) translation units have a 'main'.
+bool FunctionDecl::isMain() const {
+ const TranslationUnitDecl *tunit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ return tunit &&
+ !tunit->getASTContext().getLangOpts().Freestanding &&
+ isNamed(this, "main");
+}
+
+bool FunctionDecl::isMSVCRTEntryPoint() const {
+ const TranslationUnitDecl *TUnit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ if (!TUnit)
+ return false;
+
+ // Even though we aren't really targeting MSVCRT if we are freestanding,
+ // semantic analysis for these functions remains the same.
+
+ // MSVCRT entry points only exist on MSVCRT targets.
+ if (!TUnit->getASTContext().getTargetInfo().getTriple().isOSMSVCRT())
+ return false;
+
+ // Nameless functions like constructors cannot be entry points.
+ if (!getIdentifier())
+ return false;
+
+ return llvm::StringSwitch<bool>(getName())
+ .Cases("main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain", // a DLL
+ true)
+ .Default(false);
+}
+
+bool FunctionDecl::isReservedGlobalPlacementOperator() const {
+ assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
+ assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Delete ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
+
+ if (isa<CXXRecordDecl>(getDeclContext())) return false;
+ assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
+ if (proto->getNumArgs() != 2 || proto->isVariadic()) return false;
+
+ ASTContext &Context =
+ cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
+ ->getASTContext();
+
+ // The result type and first argument type are constant across all
+ // these operators. The second argument must be exactly void*.
+ return (proto->getArgType(1).getCanonicalType() == Context.VoidPtrTy);
+}
+
+// isNamespaceStd - True when DC is the top-level namespace 'std' (its parent
+// redeclaration context is the translation unit).
+static bool isNamespaceStd(const DeclContext *DC) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC->getRedeclContext());
+ return ND && isNamed(ND, "std") &&
+ ND->getParent()->getRedeclContext()->isTranslationUnit();
+}
+
+bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
+ if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
+ return false;
+ if (getDeclName().getCXXOverloadedOperator() != OO_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ if (isa<CXXRecordDecl>(getDeclContext()))
+ return false;
+ assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
+ if (FPT->getNumArgs() > 2 || FPT->isVariadic())
+ return false;
+
+ // If this is a single-parameter function, it must be a replaceable global
+ // allocation or deallocation function.
+ if (FPT->getNumArgs() == 1)
+ return true;
+
+ // Otherwise, we're looking for a second parameter whose type is
+ // 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
+ QualType Ty = FPT->getArgType(1);
+ ASTContext &Ctx = getASTContext();
+ if (Ctx.getLangOpts().SizedDeallocation &&
+ Ctx.hasSameType(Ty, Ctx.getSizeType()))
+ return true;
+ if (!Ty->isReferenceType())
+ return false;
+ Ty = Ty->getPointeeType();
+ if (Ty.getCVRQualifiers() != Qualifiers::Const)
+ return false;
+ // FIXME: Recognise nothrow_t in an inline namespace inside std?
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ return RD && isNamed(RD, "nothrow_t") && isNamespaceStd(RD->getDeclContext());
+}
+
+// Given a two-parameter sized deallocation function, look up the sibling
+// single-parameter (unsized) operator delete with the same name in the same
+// scope; returns 0 if this function is not a sized deallocation function.
+FunctionDecl *
+FunctionDecl::getCorrespondingUnsizedGlobalDeallocationFunction() const {
+ ASTContext &Ctx = getASTContext();
+ if (!Ctx.getLangOpts().SizedDeallocation)
+ return 0;
+
+ if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
+ return 0;
+ if (getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return 0;
+ if (isa<CXXRecordDecl>(getDeclContext()))
+ return 0;
+ assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ if (getNumParams() != 2 || isVariadic() ||
+ !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getArgType(1),
+ Ctx.getSizeType()))
+ return 0;
+
+ // This is a sized deallocation function. Find the corresponding unsized
+ // deallocation function.
+ lookup_const_result R = getDeclContext()->lookup(getDeclName());
+ for (lookup_const_result::iterator RI = R.begin(), RE = R.end(); RI != RE;
+ ++RI)
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*RI))
+ if (FD->getNumParams() == 1 && !FD->isVariadic())
+ return FD;
+ return 0;
+}
+
+LanguageLinkage FunctionDecl::getLanguageLinkage() const {
+ return getLanguageLinkageTemplate(*this);
+}
+
+bool FunctionDecl::isExternC() const {
+ return isExternCTemplate(*this);
+}
+
+bool FunctionDecl::isInExternCContext() const {
+ return getLexicalDeclContext()->isExternCContext();
+}
+
+bool FunctionDecl::isInExternCXXContext() const {
+ return getLexicalDeclContext()->isExternCXXContext();
+}
+
+// isGlobal - Methods are global only when static; file-static functions and
+// functions whose innermost enclosing namespace is unnamed are not global.
+bool FunctionDecl::isGlobal() const {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
+ return Method->isStatic();
+
+ if (getCanonicalDecl()->getStorageClass() == SC_Static)
+ return false;
+
+ for (const DeclContext *DC = getDeclContext();
+ DC->isNamespace();
+ DC = DC->getParent()) {
+ if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
+ if (!Namespace->getDeclName())
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+// isNoReturn - Any of the three noreturn attributes, or a noreturn function
+// type, marks the function as non-returning.
+bool FunctionDecl::isNoReturn() const {
+ return hasAttr<NoReturnAttr>() || hasAttr<CXX11NoReturnAttr>() ||
+ hasAttr<C11NoReturnAttr>() ||
+ getType()->getAs<FunctionType>()->getNoReturnAttr();
+}
+
+// setPreviousDeclaration - Chain this redeclaration to PrevDecl, keeping any
+// described function template's redeclaration chain in sync and propagating
+// the inline bit forward.
+void
+FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
+ redeclarable_base::setPreviousDecl(PrevDecl);
+
+ if (FunctionTemplateDecl *FunTmpl = getDescribedFunctionTemplate()) {
+ FunctionTemplateDecl *PrevFunTmpl
+ = PrevDecl? PrevDecl->getDescribedFunctionTemplate() : 0;
+ assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
+ FunTmpl->setPreviousDecl(PrevFunTmpl);
+ }
+
+ if (PrevDecl && PrevDecl->IsInline)
+ IsInline = true;
+}
+
+const FunctionDecl *FunctionDecl::getCanonicalDecl() const {
+ return getFirstDecl();
+}
+
+FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
+
+/// \brief Returns a value indicating whether this function
+/// corresponds to a builtin function.
+///
+/// The function corresponds to a built-in function if it is
+/// declared at translation scope or within an extern "C" block and
+/// its name matches with the name of a builtin. The returned value
+/// will be 0 for functions that do not correspond to a builtin, a
+/// value of type \c Builtin::ID if in the target-independent range
+/// \c [1,Builtin::First), or a target-specific builtin value.
+unsigned FunctionDecl::getBuiltinID() const {
+ if (!getIdentifier())
+ return 0;
+
+ unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!BuiltinID)
+ return 0;
+
+ ASTContext &Context = getASTContext();
+ if (Context.getLangOpts().CPlusPlus) {
+ const LinkageSpecDecl *LinkageDecl = dyn_cast<LinkageSpecDecl>(
+ getFirstDecl()->getDeclContext());
+ // In C++, the first declaration of a builtin is always inside an implicit
+ // extern "C".
+ // FIXME: A recognised library function may not be directly in an extern "C"
+ // declaration, for instance "extern "C" { namespace std { decl } }".
+ if (!LinkageDecl || LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
+ return 0;
+ }
+
+ // If the function is marked "overloadable", it has a different mangled name
+ // and is not the C library function.
+ if (getAttr<OverloadableAttr>())
+ return 0;
+
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return BuiltinID;
+
+ // This function has the name of a known C library
+ // function. Determine whether it actually refers to the C library
+ // function or whether it just has the same name.
+
+ // If this is a static function, it's not a builtin.
+ if (getStorageClass() == SC_Static)
+ return 0;
+
+ return BuiltinID;
+}
+
+
+/// getNumParams - Return the number of parameters this function must have
+/// based on its FunctionType. This is the length of the ParamInfo array
+/// after it has been created.
+unsigned FunctionDecl::getNumParams() const {
+ const FunctionType *FT = getType()->castAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT))
+ return 0;
+ return cast<FunctionProtoType>(FT)->getNumArgs();
+
+}
+
+// setParams - Record the parameter declarations; the array lives in the
+// ASTContext arena, so it is never explicitly freed.
+void FunctionDecl::setParams(ASTContext &C,
+ ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(ParamInfo == 0 && "Already has param info!");
+ assert(NewParamInfo.size() == getNumParams() && "Parameter count mismatch!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ ParamInfo = new (C) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+// setDeclsInPrototypeScope - Copy the decls declared inside this function's
+// prototype into ASTContext-owned storage.
+void FunctionDecl::setDeclsInPrototypeScope(ArrayRef<NamedDecl *> NewDecls) {
+ assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
+
+ if (!NewDecls.empty()) {
+ NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
+ std::copy(NewDecls.begin(), NewDecls.end(), A);
+ DeclsInPrototypeScope = ArrayRef<NamedDecl *>(A, NewDecls.size());
+ }
+}
+
+/// getMinRequiredArguments - Returns the minimum number of arguments
+/// needed to call this function. This may be fewer than the number of
+/// function parameters, if some of the parameters have default
+/// arguments (in C++) or the last parameter is a parameter pack.
+unsigned FunctionDecl::getMinRequiredArguments() const {
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return getNumParams();
+
+ unsigned NumRequiredArgs = getNumParams();
+
+ // If the last parameter is a parameter pack, we don't need an argument for
+ // it.
+ if (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs - 1)->isParameterPack())
+ --NumRequiredArgs;
+
+ // If this parameter has a default argument, we don't need an argument for
+ // it.
+ while (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
+ --NumRequiredArgs;
+
+ // We might have parameter packs before the end. These can't be deduced,
+ // but they can still handle multiple arguments.
+ unsigned ArgIdx = NumRequiredArgs;
+ while (ArgIdx > 0) {
+ if (getParamDecl(ArgIdx - 1)->isParameterPack())
+ NumRequiredArgs = ArgIdx;
+
+ --ArgIdx;
+ }
+
+ return NumRequiredArgs;
+}
+
+// RedeclForcesDefC99 - Whether this redeclaration, under C99 rules, forces
+// the definition to be an external (non-inline) definition.
+static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
+ // Only consider file-scope declarations in this test.
+ if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
+ return false;
+
+ // Only consider explicit declarations; the presence of a builtin for a
+ // libcall shouldn't affect whether a definition is externally visible.
+ if (Redecl->isImplicit())
+ return false;
+
+ if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
+ return true; // Not an inline definition
+
+ return false;
+}
+
+/// \brief For a function declaration in C or C++, determine whether this
+/// declaration causes the definition to be externally visible.
+///
+/// Specifically, this determines if adding the current declaration to the set
+/// of redeclarations of the given functions causes
+/// isInlineDefinitionExternallyVisible to change from false to true.
+bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
+ assert(!doesThisDeclarationHaveABody() &&
+ "Must have a declaration without a body.");
+
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // With GNU inlining, a declaration with 'inline' but not 'extern', forces
+ // an externally visible definition.
+ //
+ // FIXME: What happens if gnu_inline gets added on after the first
+ // declaration?
+ if (!isInlineSpecified() || getStorageClass() == SC_Extern)
+ return false;
+
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body.isValid();
+
+ if (Prev->Body) {
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then it is always externally visible.
+ if (!Prev->isInlineSpecified() ||
+ Prev->getStorageClass() != SC_Extern)
+ return false;
+ } else if (Prev->isInlineSpecified() &&
+ Prev->getStorageClass() != SC_Extern) {
+ return false;
+ }
+ }
+ return FoundBody;
+ }
+
+ if (Context.getLangOpts().CPlusPlus)
+ return false;
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ if (isInlineSpecified() && getStorageClass() != SC_Extern)
+ return false;
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body.isValid();
+ if (RedeclForcesDefC99(Prev))
+ return false;
+ }
+ return FoundBody;
+}
+
+/// \brief For an inline function definition in C, or for a gnu_inline function
+/// in C++, determine whether the definition will be externally visible.
+///
+/// Inline function definitions are always available for inlining optimizations.
+/// However, depending on the language dialect, declaration specifiers, and
+/// attributes, the definition of an inline function may or may not be
+/// "externally" visible to other translation units in the program.
+///
+/// In C99, inline definitions are not externally visible by default. However,
+/// if even one of the global-scope declarations is marked "extern inline", the
+/// inline definition becomes externally visible (C99 6.7.4p6).
+///
+/// In GNU89 mode, or if the gnu_inline attribute is attached to the function
+/// definition, we use the GNU semantics for inline, which are nearly the
+/// opposite of C99 semantics. In particular, "inline" by itself will create
+/// an externally visible symbol, but "extern inline" will not create an
+/// externally visible symbol.
+bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
+ assert(doesThisDeclarationHaveABody() && "Must have the function definition");
+ assert(isInlined() && "Function must be inline");
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // Note: If you change the logic here, please change
+ // doesDeclarationForceExternallyVisibleDefinition as well.
+ //
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then this inline definition is
+ // externally visible.
+ if (!(isInlineSpecified() && getStorageClass() == SC_Extern))
+ return true;
+
+ // If any declaration is 'inline' but not 'extern', then this definition
+ // is externally visible.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (Redecl->isInlineSpecified() &&
+ Redecl->getStorageClass() != SC_Extern)
+ return true;
+ }
+
+ return false;
+ }
+
+ // The rest of this function is C-only.
+ assert(!Context.getLangOpts().CPlusPlus &&
+ "should not use C inline rules in C++");
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (RedeclForcesDefC99(*Redecl))
+ return true;
+ }
+
+ // C99 6.7.4p6:
+ // An inline definition does not provide an external definition for the
+ // function, and does not forbid an external definition in another
+ // translation unit.
+ return false;
+}
+
+/// getOverloadedOperator - Which C++ overloaded operator this
+/// function represents, if any.
+OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return getDeclName().getCXXOverloadedOperator();
+ else
+ return OO_None;
+}
+
+/// getLiteralIdentifier - The literal suffix identifier this function
+/// represents, if any.
+const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
+ return getDeclName().getCXXLiteralIdentifier();
+ else
+ return 0;
+}
+
+// getTemplatedKind - Decode which alternative the TemplateOrSpecialization
+// pointer-union currently holds.
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization.is
+ <DependentFunctionTemplateSpecializationInfo*>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ llvm_unreachable("Did we miss a TemplateOrSpecialization type?");
+}
+
+FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
+ if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
+ return cast<FunctionDecl>(Info->getInstantiatedFrom());
+
+ return 0;
+}
+
+// setInstantiationOfMemberFunction - Record that this function was
+// instantiated from the member function FD with the given kind.
+void
+FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
+ FunctionDecl *FD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Member function is already a specialization");
+ MemberSpecializationInfo *Info
+ = new (C) MemberSpecializationInfo(FD, TSK);
+ TemplateOrSpecialization = Info;
+}
+
+bool FunctionDecl::isImplicitlyInstantiable() const {
+ // If the function is invalid, it can't be implicitly instantiated.
+ if (isInvalidDecl())
+ return false;
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitInstantiationDefinition:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ return true;
+
+ // It is possible to instantiate TSK_ExplicitSpecialization kind
+ // if the FunctionDecl has a class scope specialization pattern.
+ case TSK_ExplicitSpecialization:
+ return getClassScopeSpecializationPattern() != 0;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // Handled below.
+ break;
+ }
+
+ // Find the actual template from which we will instantiate.
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (!HasPattern || !PatternDecl)
+ return true;
+
+ return PatternDecl->isInlined();
+}
+
+bool FunctionDecl::isTemplateInstantiation() const {
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ return true;
+ }
+ llvm_unreachable("All TSK values handled.");
+}
+
+FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+ // Handle class scope explicit specialization special case.
+ if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return getClassScopeSpecializationPattern();
+
+ if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
+ while (Primary->getInstantiatedFromMemberTemplate()) {
+ // If we have hit a point where the user provided a specialization of
+ // this template, we're done looking.
+ if (Primary->isMemberSpecialization())
+ break;
+
+ Primary = Primary->getInstantiatedFromMemberTemplate();
+ }
+
+ return Primary->getTemplatedDecl();
+ }
+
+ return getInstantiatedFromMemberFunction();
+}
+
+FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->Template.getPointer();
+ }
+ return 0;
+}
+
+FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
+ return getASTContext().getClassScopeSpecializationPattern(this);
+}
+
+const TemplateArgumentList *
+FunctionDecl::getTemplateSpecializationArgs() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArguments;
+ }
+ return 0;
+}
+
+const ASTTemplateArgumentListInfo *
+FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
+ return 0;
+}
+
+// Record that this function specializes Template with the given arguments,
+// creating the info object on first use and registering the specialization
+// in the template's folding set at InsertPos.
+void
+FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
+ assert(TSK != TSK_Undeclared &&
+ "Must specify the type of function template specialization");
+ FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (!Info)
+ Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ PointOfInstantiation);
+ TemplateOrSpecialization = Info;
+ Template->addSpecialization(Info, InsertPos);
+}
+
+void
+FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs) {
+ assert(TemplateOrSpecialization.isNull());
+ // One ASTContext allocation holds the info object plus its two trailing
+ // arrays: the candidate templates and the as-written template arguments.
+ size_t Size = sizeof(DependentFunctionTemplateSpecializationInfo);
+ Size += Templates.size() * sizeof(FunctionTemplateDecl*);
+ Size += TemplateArgs.size() * sizeof(TemplateArgumentLoc);
+ void *Buffer = Context.Allocate(Size);
+ DependentFunctionTemplateSpecializationInfo *Info =
+ new (Buffer) DependentFunctionTemplateSpecializationInfo(Templates,
+ TemplateArgs);
+ TemplateOrSpecialization = Info;
+}
+
+DependentFunctionTemplateSpecializationInfo::
+DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs)
+ : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
+
+ d.NumTemplates = Ts.size();
+ d.NumArgs = TArgs.size();
+
+ // Fill the trailing template array, looking through using-declarations.
+ FunctionTemplateDecl **TsArray =
+ const_cast<FunctionTemplateDecl**>(getTemplates());
+ for (unsigned I = 0, E = Ts.size(); I != E; ++I)
+ TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
+
+ // Placement-new each TemplateArgumentLoc into the trailing storage.
+ TemplateArgumentLoc *ArgsArray =
+ const_cast<TemplateArgumentLoc*>(getTemplateArgs());
+ for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
+ new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
+}
+
+TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
+ // For a function template specialization, query the specialization
+ // information object.
+ FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (FTSInfo)
+ return FTSInfo->getTemplateSpecializationKind();
+
+ MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+ if (MSInfo)
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+// setTemplateSpecializationKind - Update the kind on whichever specialization
+// info object this function carries, recording the first valid point of
+// instantiation for non-explicit-specialization kinds.
+void
+FunctionDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>()) {
+ FTSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ FTSInfo->getPointOfInstantiation().isInvalid())
+ FTSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else
+ llvm_unreachable("Function cannot have a template specialization kind");
+}
+
+SourceLocation FunctionDecl::getPointOfInstantiation() const {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>())
+ return FTSInfo->getPointOfInstantiation();
+ else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
+ return MSInfo->getPointOfInstantiation();
+
+ return SourceLocation();
+}
+
+bool FunctionDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ // If this function was instantiated from a member function of a
+ // class template, check whether that member function was defined out-of-line.
+ if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) { + const FunctionDecl *Definition; + if (FD->hasBody(Definition)) + return Definition->isOutOfLine(); + } + + // If this function was instantiated from a function template, + // check whether that function template was defined out-of-line. + if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) { + const FunctionDecl *Definition; + if (FunTmpl->getTemplatedDecl()->hasBody(Definition)) + return Definition->isOutOfLine(); + } + + return false; +} + +SourceRange FunctionDecl::getSourceRange() const { + return SourceRange(getOuterLocStart(), EndRangeLoc); +} + +unsigned FunctionDecl::getMemoryFunctionKind() const { + IdentifierInfo *FnInfo = getIdentifier(); + + if (!FnInfo) + return 0; + + // Builtin handling. + switch (getBuiltinID()) { + case Builtin::BI__builtin_memset: + case Builtin::BI__builtin___memset_chk: + case Builtin::BImemset: + return Builtin::BImemset; + + case Builtin::BI__builtin_memcpy: + case Builtin::BI__builtin___memcpy_chk: + case Builtin::BImemcpy: + return Builtin::BImemcpy; + + case Builtin::BI__builtin_memmove: + case Builtin::BI__builtin___memmove_chk: + case Builtin::BImemmove: + return Builtin::BImemmove; + + case Builtin::BIstrlcpy: + return Builtin::BIstrlcpy; + case Builtin::BIstrlcat: + return Builtin::BIstrlcat; + + case Builtin::BI__builtin_memcmp: + case Builtin::BImemcmp: + return Builtin::BImemcmp; + + case Builtin::BI__builtin_strncpy: + case Builtin::BI__builtin___strncpy_chk: + case Builtin::BIstrncpy: + return Builtin::BIstrncpy; + + case Builtin::BI__builtin_strncmp: + case Builtin::BIstrncmp: + return Builtin::BIstrncmp; + + case Builtin::BI__builtin_strncasecmp: + case Builtin::BIstrncasecmp: + return Builtin::BIstrncasecmp; + + case Builtin::BI__builtin_strncat: + case Builtin::BI__builtin___strncat_chk: + case Builtin::BIstrncat: + return Builtin::BIstrncat; + + case Builtin::BI__builtin_strndup: + case Builtin::BIstrndup: + return Builtin::BIstrndup; + + 
case Builtin::BI__builtin_strlen: + case Builtin::BIstrlen: + return Builtin::BIstrlen; + + default: + if (isExternC()) { + if (FnInfo->isStr("memset")) + return Builtin::BImemset; + else if (FnInfo->isStr("memcpy")) + return Builtin::BImemcpy; + else if (FnInfo->isStr("memmove")) + return Builtin::BImemmove; + else if (FnInfo->isStr("memcmp")) + return Builtin::BImemcmp; + else if (FnInfo->isStr("strncpy")) + return Builtin::BIstrncpy; + else if (FnInfo->isStr("strncmp")) + return Builtin::BIstrncmp; + else if (FnInfo->isStr("strncasecmp")) + return Builtin::BIstrncasecmp; + else if (FnInfo->isStr("strncat")) + return Builtin::BIstrncat; + else if (FnInfo->isStr("strndup")) + return Builtin::BIstrndup; + else if (FnInfo->isStr("strlen")) + return Builtin::BIstrlen; + } + break; + } + return 0; +} + +//===----------------------------------------------------------------------===// +// FieldDecl Implementation +//===----------------------------------------------------------------------===// + +FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, Expr *BW, bool Mutable, + InClassInitStyle InitStyle) { + return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo, + BW, Mutable, InitStyle); +} + +FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl)); + return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(), + 0, QualType(), 0, 0, false, ICIS_NoInit); +} + +bool FieldDecl::isAnonymousStructOrUnion() const { + if (!isImplicit() || getDeclName()) + return false; + + if (const RecordType *Record = getType()->getAs<RecordType>()) + return Record->getDecl()->isAnonymousStructOrUnion(); + + return false; +} + +unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const { + assert(isBitField() && "not a bitfield"); + Expr *BitWidth = 
InitializerOrBitWidth.getPointer();
+  return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
+}
+
+/// Compute the zero-based index of this field within its parent record.
+/// The result is cached on the canonical declaration, biased by one so that
+/// zero means "not yet computed"; a single pass over the parent populates
+/// the cache for every field at once.
+unsigned FieldDecl::getFieldIndex() const {
+  const FieldDecl *Canonical = getCanonicalDecl();
+  if (Canonical != this)
+    return Canonical->getFieldIndex();
+
+  if (CachedFieldIndex) return CachedFieldIndex - 1;
+
+  unsigned Index = 0;
+  const RecordDecl *RD = getParent();
+
+  // Walk the parent once, caching Index+1 for each field's canonical decl.
+  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+       I != E; ++I, ++Index)
+    I->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+
+  assert(CachedFieldIndex && "failed to find field in parent");
+  return CachedFieldIndex - 1;
+}
+
+/// Source range of the field, extended through the bit-width expression or
+/// in-class initializer when one is stored in the shared pointer slot.
+SourceRange FieldDecl::getSourceRange() const {
+  if (const Expr *E = InitializerOrBitWidth.getPointer())
+    return SourceRange(getInnerLocStart(), E->getLocEnd());
+  return DeclaratorDecl::getSourceRange();
+}
+
+void FieldDecl::setBitWidth(Expr *Width) {
+  // InitializerOrBitWidth is a single slot shared by the bit width and the
+  // in-class initializer; it must still be empty, and the field must not be
+  // flagged as having an in-class initializer.
+  assert(!InitializerOrBitWidth.getPointer() && !hasInClassInitializer() &&
+         "bit width or initializer already set");
+  InitializerOrBitWidth.setPointer(Width);
+}
+
+void FieldDecl::setInClassInitializer(Expr *Init) {
+  // Same shared slot as setBitWidth, but here the field must already be
+  // flagged as having an in-class initializer.
+  assert(!InitializerOrBitWidth.getPointer() && hasInClassInitializer() &&
+         "bit width or initializer already set");
+  InitializerOrBitWidth.setPointer(Init);
+}
+
+//===----------------------------------------------------------------------===//
+// TagDecl Implementation
+//===----------------------------------------------------------------------===//
+
+SourceLocation TagDecl::getOuterLocStart() const {
+  return getTemplateOrInnerLocStart(this);
+}
+
+SourceRange TagDecl::getSourceRange() const {
+  // Extend to the closing brace when the tag has one; otherwise end at the
+  // name location.
+  SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
+  return SourceRange(getOuterLocStart(), E);
+}
+
+TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
+
+/// Record the typedef-name that names this otherwise-anonymous tag, and
+/// sanity-check that linkage was not computed prematurely.
+void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
+  NamedDeclOrQualifier = TDD;
+  if (TypeForDecl)
+    assert(TypeForDecl->isLinkageValid());
+  assert(isLinkageValid());
+}
+
+/// Mark this tag as currently being defined; for C++ classes, allocate the
+/// shared DefinitionData and point every redeclaration at it.
+void TagDecl::startDefinition() {
+  IsBeingDefined = true;
+
+  if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(this)) {
+    struct CXXRecordDecl::DefinitionData *Data =
+      new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
+    for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+      cast<CXXRecordDecl>(*I)->DefinitionData = Data;
+  }
+}
+
+/// Flip the tag from "being defined" to "complete" and notify any attached
+/// AST mutation listener.
+void TagDecl::completeDefinition() {
+  assert((!isa<CXXRecordDecl>(this) ||
+          cast<CXXRecordDecl>(this)->hasDefinition()) &&
+         "definition completed but not started");
+
+  IsCompleteDefinition = true;
+  IsBeingDefined = false;
+
+  if (ASTMutationListener *L = getASTMutationListener())
+    L->CompletedTagDefinition(this);
+}
+
+/// Find the redeclaration of this tag that is its complete definition, or
+/// null if the tag is only declared.
+TagDecl *TagDecl::getDefinition() const {
+  if (isCompleteDefinition())
+    return const_cast<TagDecl *>(this);
+
+  // If it's possible for us to have an out-of-date definition, check now.
+  if (MayHaveOutOfDateDef) {
+    if (IdentifierInfo *II = getIdentifier()) {
+      if (II->isOutOfDate()) {
+        updateOutOfDate(*II);
+      }
+    }
+  }
+
+  // C++ classes share DefinitionData across redeclarations, so delegate.
+  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
+    return CXXRD->getDefinition();
+
+  for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
+       R != REnd; ++R)
+    if (R->isCompleteDefinition())
+      return *R;
+
+  return 0;
+}
+
+void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+  if (QualifierLoc) {
+    // Make sure the extended qualifier info is allocated.
+    if (!hasExtInfo())
+      NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
+    // Set qualifier info.
+    getExtInfo()->QualifierLoc = QualifierLoc;
+  } else {
+    // Here Qualifier == 0, i.e., we are removing the qualifier (if any).
+    if (hasExtInfo()) {
+      // Drop the ExtInfo entirely when it carries nothing else; otherwise
+      // just clear the stored qualifier location.
+      if (getExtInfo()->NumTemplParamLists == 0) {
+        getASTContext().Deallocate(getExtInfo());
+        NamedDeclOrQualifier = (TypedefNameDecl*) 0;
+      }
+      else
+        getExtInfo()->QualifierLoc = QualifierLoc;
+    }
+  }
+}
+
+void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
+                                            unsigned NumTPLists,
+                                            TemplateParameterList **TPLists) {
+  assert(NumTPLists > 0);
+  // Make sure the extended decl info is allocated.
+  if (!hasExtInfo())
+    // Allocate external info struct.
+    NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
+  // Set the template parameter lists info.
+  getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+}
+
+//===----------------------------------------------------------------------===//
+// EnumDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void EnumDecl::anchor() { }
+
+EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
+                           SourceLocation StartLoc, SourceLocation IdLoc,
+                           IdentifierInfo *Id,
+                           EnumDecl *PrevDecl, bool IsScoped,
+                           bool IsScopedUsingClassTag, bool IsFixed) {
+  EnumDecl *Enum = new (C) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
+                                    IsScoped, IsScopedUsingClassTag, IsFixed);
+  // Definitions in other modules may exist when modules are enabled.
+  Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+  C.getTypeDeclType(Enum, PrevDecl);
+  return Enum;
+}
+
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
+  EnumDecl *Enum = new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(),
+                                      0, 0, false, false, false);
+  Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+  return Enum;
+}
+
+/// Complete the enum definition, recording its underlying integer type, its
+/// promotion type, and the bit-width statistics of its enumerators.
+void EnumDecl::completeDefinition(QualType NewType,
+                                  QualType NewPromotionType,
+                                  unsigned NumPositiveBits,
+                                  unsigned NumNegativeBits) {
+  assert(!isCompleteDefinition() && "Cannot redefine enums!");
+  // A fixed underlying type may already have been set; don't overwrite it.
+  if (!IntegerType)
+    IntegerType = NewType.getTypePtr();
+  PromotionType = NewPromotionType;
+  setNumPositiveBits(NumPositiveBits);
+  setNumNegativeBits(NumNegativeBits);
+  TagDecl::completeDefinition();
+}
+
+TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
+  if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+    return MSI->getTemplateSpecializationKind();
+
+  return TSK_Undeclared;
+}
+
+void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+                                             SourceLocation PointOfInstantiation) {
+  MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+  assert(MSI && "Not an instantiated member enumeration?");
+  MSI->setTemplateSpecializationKind(TSK);
+  // Record the first point of instantiation only; explicit specializations
+  // keep no instantiation point.
+  if (TSK != TSK_ExplicitSpecialization &&
+      PointOfInstantiation.isValid() &&
+      MSI->getPointOfInstantiation().isInvalid())
+    MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
+  if (SpecializationInfo)
+    return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
+
+  return 0;
+}
+
+void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+                                            TemplateSpecializationKind TSK) {
+  assert(!SpecializationInfo && "Member enum is already a specialization");
+  SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
+}
+
+//===----------------------------------------------------------------------===//
+// RecordDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RecordDecl::RecordDecl(Kind DK, TagKind TK, DeclContext *DC,
+                       SourceLocation StartLoc, SourceLocation IdLoc,
+                       IdentifierInfo *Id, RecordDecl *PrevDecl)
+  : TagDecl(DK, TK, DC, IdLoc, Id, PrevDecl, StartLoc) {
+  HasFlexibleArrayMember = false;
+  AnonymousStructOrUnion = false;
+  HasObjectMember = false;
+  HasVolatileMember = false;
+  LoadedFieldsFromExternalStorage = false;
+  assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+}
+
+RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+                               SourceLocation StartLoc, SourceLocation IdLoc,
+                               IdentifierInfo *Id,
RecordDecl* PrevDecl) {
+  RecordDecl* R = new (C) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
+                                     PrevDecl);
+  // Definitions in other modules may exist when modules are enabled.
+  R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+
+  C.getTypeDeclType(R, PrevDecl);
+  return R;
+}
+
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
+  RecordDecl *R = new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+                                       SourceLocation(), 0, 0);
+  R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+  return R;
+}
+
+/// True when this is the implicit declaration of a class's own name that
+/// appears inside the class itself (C++ injected-class-name).
+bool RecordDecl::isInjectedClassName() const {
+  return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
+    cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
+}
+
+RecordDecl::field_iterator RecordDecl::field_begin() const {
+  // Lazily pull fields out of the external AST source the first time the
+  // fields are walked.
+  if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+    LoadFieldsFromExternalStorage();
+
+  return field_iterator(decl_iterator(FirstDecl));
+}
+
+/// completeDefinition - Notes that the definition of this type is now
+/// complete.
+void RecordDecl::completeDefinition() {
+  assert(!isCompleteDefinition() && "Cannot redefine record!");
+  TagDecl::completeDefinition();
+}
+
+/// isMsStruct - Get whether or not this record uses ms_struct layout.
+/// This can be turned on with an attribute, pragma, or the
+/// -mms-bitfields command-line option.
+bool RecordDecl::isMsStruct(const ASTContext &C) const {
+  return hasAttr<MsStructAttr>() || C.getLangOpts().MSBitfields == 1;
+}
+
+// Predicate used below to restrict external lookup to (indirect) fields.
+static bool isFieldOrIndirectField(Decl::Kind K) {
+  return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
+}
+
+/// Pull this record's fields in from the external AST source and splice them
+/// into the lexical decl chain.
+void RecordDecl::LoadFieldsFromExternalStorage() const {
+  ExternalASTSource *Source = getASTContext().getExternalSource();
+  assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+  // Notify that we have a RecordDecl doing some initialization.
+  ExternalASTSource::Deserializing TheFields(Source);
+
+  SmallVector<Decl*, 64> Decls;
+  // Set the flag up front so reentrant calls don't re-trigger the load.
+  LoadedFieldsFromExternalStorage = true;
+  switch (Source->FindExternalLexicalDecls(this, isFieldOrIndirectField,
+                                           Decls)) {
+  case ELR_Success:
+    break;
+
+  case ELR_AlreadyLoaded:
+  case ELR_Failure:
+    return;
+  }
+
+#ifndef NDEBUG
+  // Check that all decls we got were FieldDecls.
+  for (unsigned i=0, e=Decls.size(); i != e; ++i)
+    assert(isa<FieldDecl>(Decls[i]) || isa<IndirectFieldDecl>(Decls[i]));
+#endif
+
+  if (Decls.empty())
+    return;
+
+  llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+                                                  /*FieldsAlreadyLoaded=*/false);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
+  assert(ParamInfo == 0 && "Already has param info!");
+
+  // Zero params -> null pointer.
+  if (!NewParamInfo.empty()) {
+    NumParams = NewParamInfo.size();
+    ParamInfo = new (getASTContext()) ParmVarDecl*[NewParamInfo.size()];
+    std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+  }
+}
+
+/// Record the set of variables captured by this block, copying the Capture
+/// array into ASTContext-owned storage.
+void BlockDecl::setCaptures(ASTContext &Context,
+                            const Capture *begin,
+                            const Capture *end,
+                            bool capturesCXXThis) {
+  CapturesCXXThis = capturesCXXThis;
+
+  if (begin == end) {
+    NumCaptures = 0;
+    Captures = 0;
+    return;
+  }
+
+  NumCaptures = end - begin;
+
+  // Avoid new Capture[] because we don't want to provide a default
+  // constructor.
+  size_t allocationSize = NumCaptures * sizeof(Capture);
+  void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
+  memcpy(buffer, begin, allocationSize);
+  Captures = static_cast<Capture*>(buffer);
+}
+
+bool BlockDecl::capturesVariable(const VarDecl *variable) const {
+  for (capture_const_iterator
+         i = capture_begin(), e = capture_end(); i != e; ++i)
+    // Only auto vars can be captured, so no redeclaration worries.
+    if (i->getVariable() == variable)
+      return true;
+
+  return false;
+}
+
+SourceRange BlockDecl::getSourceRange() const {
+  return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TranslationUnitDecl::anchor() { }
+
+TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
+  return new (C) TranslationUnitDecl(C);
+}
+
+void LabelDecl::anchor() { }
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+                             SourceLocation IdentL, IdentifierInfo *II) {
+  return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
+}
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+                             SourceLocation IdentL, IdentifierInfo *II,
+                             SourceLocation GnuLabelL) {
+  assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
+  return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
+}
+
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
+  return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
+}
+
+void ValueDecl::anchor() { }
+
+/// A value is weak if it carries a weak/weakref attribute or is weak-imported.
+bool ValueDecl::isWeak() const {
+  for (attr_iterator I = attr_begin(), E = attr_end(); I != E; ++I)
+    if (isa<WeakAttr>(*I) || isa<WeakRefAttr>(*I))
+      return true;
+
+  return isWeakImported();
+}
+
+void ImplicitParamDecl::anchor() { }
+
+ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
+                                             SourceLocation IdLoc,
+                                             IdentifierInfo *Id,
+                                             QualType Type) {
+  return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
+}
+
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+                                                         unsigned ID) {
+  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
+  return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+}
+
+FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, + QualType T, TypeSourceInfo *TInfo, + StorageClass SC, + bool isInlineSpecified, + bool hasWrittenPrototype, + bool isConstexprSpecified) { + FunctionDecl *New = new (C) FunctionDecl(Function, DC, StartLoc, NameInfo, + T, TInfo, SC, + isInlineSpecified, + isConstexprSpecified); + New->HasWrittenPrototype = hasWrittenPrototype; + return New; +} + +FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl)); + return new (Mem) FunctionDecl(Function, 0, SourceLocation(), + DeclarationNameInfo(), QualType(), 0, + SC_None, false, false); +} + +BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) { + return new (C) BlockDecl(DC, L); +} + +BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl)); + return new (Mem) BlockDecl(0, SourceLocation()); +} + +MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(MSPropertyDecl)); + return new (Mem) MSPropertyDecl(0, SourceLocation(), DeclarationName(), + QualType(), 0, SourceLocation(), + 0, 0); +} + +CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC, + unsigned NumParams) { + unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*); + return new (C.Allocate(Size)) CapturedDecl(DC, NumParams); +} + +CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID, + unsigned NumParams) { + unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*); + void *Mem = AllocateDeserializedDecl(C, ID, Size); + return new (Mem) CapturedDecl(0, NumParams); +} + +EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD, + SourceLocation L, + IdentifierInfo *Id, 
QualType T, + Expr *E, const llvm::APSInt &V) { + return new (C) EnumConstantDecl(CD, L, Id, T, E, V); +} + +EnumConstantDecl * +EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl)); + return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0, + llvm::APSInt()); +} + +void IndirectFieldDecl::anchor() { } + +IndirectFieldDecl * +IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, + IdentifierInfo *Id, QualType T, NamedDecl **CH, + unsigned CHS) { + return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS); +} + +IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl)); + return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(), + QualType(), 0, 0); +} + +SourceRange EnumConstantDecl::getSourceRange() const { + SourceLocation End = getLocation(); + if (Init) + End = Init->getLocEnd(); + return SourceRange(getLocation(), End); +} + +void TypeDecl::anchor() { } + +TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + IdentifierInfo *Id, TypeSourceInfo *TInfo) { + return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo); +} + +void TypedefNameDecl::anchor() { } + +TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl)); + return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0); +} + +TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, + SourceLocation IdLoc, IdentifierInfo *Id, + TypeSourceInfo *TInfo) { + return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo); +} + +TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl)); + 
return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0); +} + +SourceRange TypedefDecl::getSourceRange() const { + SourceLocation RangeEnd = getLocation(); + if (TypeSourceInfo *TInfo = getTypeSourceInfo()) { + if (typeIsPostfix(TInfo->getType())) + RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd(); + } + return SourceRange(getLocStart(), RangeEnd); +} + +SourceRange TypeAliasDecl::getSourceRange() const { + SourceLocation RangeEnd = getLocStart(); + if (TypeSourceInfo *TInfo = getTypeSourceInfo()) + RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd(); + return SourceRange(getLocStart(), RangeEnd); +} + +void FileScopeAsmDecl::anchor() { } + +FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC, + StringLiteral *Str, + SourceLocation AsmLoc, + SourceLocation RParenLoc) { + return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc); +} + +FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl)); + return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation()); +} + +void EmptyDecl::anchor() {} + +EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) { + return new (C) EmptyDecl(DC, L); +} + +EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EmptyDecl)); + return new (Mem) EmptyDecl(0, SourceLocation()); +} + +//===----------------------------------------------------------------------===// +// ImportDecl Implementation +//===----------------------------------------------------------------------===// + +/// \brief Retrieve the number of module identifiers needed to name the given +/// module. 
+static unsigned getNumModuleIdentifiers(Module *Mod) { + unsigned Result = 1; + while (Mod->Parent) { + Mod = Mod->Parent; + ++Result; + } + return Result; +} + +ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc, + Module *Imported, + ArrayRef<SourceLocation> IdentifierLocs) + : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true), + NextLocalImport() +{ + assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size()); + SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1); + memcpy(StoredLocs, IdentifierLocs.data(), + IdentifierLocs.size() * sizeof(SourceLocation)); +} + +ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc, + Module *Imported, SourceLocation EndLoc) + : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false), + NextLocalImport() +{ + *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc; +} + +ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, Module *Imported, + ArrayRef<SourceLocation> IdentifierLocs) { + void *Mem = C.Allocate(sizeof(ImportDecl) + + IdentifierLocs.size() * sizeof(SourceLocation)); + return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs); +} + +ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, + Module *Imported, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation)); + ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc); + Import->setImplicit(); + return Import; +} + +ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID, + unsigned NumLocations) { + void *Mem = AllocateDeserializedDecl(C, ID, + (sizeof(ImportDecl) + + NumLocations * sizeof(SourceLocation))); + return new (Mem) ImportDecl(EmptyShell()); +} + +ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const { + if (!ImportedAndComplete.getInt()) + return None; + + const SourceLocation *StoredLocs + = 
reinterpret_cast<const SourceLocation *>(this + 1); + return ArrayRef<SourceLocation>(StoredLocs, + getNumModuleIdentifiers(getImportedModule())); +} + +SourceRange ImportDecl::getSourceRange() const { + if (!ImportedAndComplete.getInt()) + return SourceRange(getLocation(), + *reinterpret_cast<const SourceLocation *>(this + 1)); + + return SourceRange(getLocation(), getIdentifierLocs().back()); +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp new file mode 100644 index 000000000000..121c5a671a29 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp @@ -0,0 +1,1582 @@ +//===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Decl and DeclContext classes. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statistics
+//===----------------------------------------------------------------------===//
+
+// Expand one per-kind instance counter (nFunctions, nVars, ...) for every
+// concrete Decl kind.
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+void Decl::updateOutOfDate(IdentifierInfo &II) const {
+  getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
+/// Allocate storage for a deserialized declaration, with an 8-byte prefix
+/// holding the owning module ID and the global declaration ID.
+void *Decl::AllocateDeserializedDecl(const ASTContext &Context,
+                                     unsigned ID,
+                                     unsigned Size) {
+  // Allocate an extra 8 bytes worth of storage, which ensures that the
+  // resulting pointer will still be 8-byte aligned.
+  void *Start = Context.Allocate(Size + 8);
+  void *Result = (char*)Start + 8;
+
+  unsigned *PrefixPtr = (unsigned *)Result - 2;
+
+  // Zero out the first 4 bytes; this is used to store the owning module ID.
+  PrefixPtr[0] = 0;
+
+  // Store the global declaration ID in the second 4 bytes.
+  PrefixPtr[1] = ID;
+
+  return Result;
+}
+
+Module *Decl::getOwningModuleSlow() const {
+  assert(isFromASTFile() && "Not from AST file?");
+  return getASTContext().getExternalSource()->getModule(getOwningModuleID());
+}
+
+const char *Decl::getDeclKindName() const {
+  switch (DeclKind) {
+  default: llvm_unreachable("Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+  }
+}
+
+void Decl::setInvalidDecl(bool Invalid) {
+  InvalidDecl = Invalid;
+  if (Invalid && !isa<ParmVarDecl>(this)) {
+    // Defensive maneuver for ill-formed code: we're likely not to make it to
+    // a point where we set the access specifier, so default it to "public"
+    // to avoid triggering asserts elsewhere in the front end.
+    setAccess(AS_public);
+  }
+}
+
+const char *DeclContext::getDeclKindName() const {
+  switch (DeclKind) {
+  default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+  }
+}
+
+bool Decl::StatisticsEnabled = false;
+void Decl::EnableStatistics() {
+  StatisticsEnabled = true;
+}
+
+/// Dump per-kind allocation counts and byte totals to stderr.
+void Decl::PrintStats() {
+  llvm::errs() << "\n*** Decl Stats:\n";
+
+  int totalDecls = 0;
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+  llvm::errs() << "  " << totalDecls << " decls total.\n";
+
+  int totalBytes = 0;
+#define DECL(DERIVED, BASE)                                             \
+  if (n##DERIVED##s > 0) {                                              \
+    totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl));         \
+    llvm::errs() << "    " << n##DERIVED##s << " " #DERIVED " decls, "  \
+                 << sizeof(DERIVED##Decl) << " each ("                  \
+                 << n##DERIVED##s * sizeof(DERIVED##Decl)               \
+                 << " bytes)\n";                                        \
+  }
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+  llvm::errs() << "Total bytes = " << totalBytes << "\n";
+}
+
+/// Bump the per-kind statistics counter for a newly created declaration.
+void Decl::add(Kind k) {
+  switch (k) {
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+  }
+}
+
+bool Decl::isTemplateParameterPack() const {
+  if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
+    return TTP->isParameterPack();
+  if (const NonTypeTemplateParmDecl *NTTP
+        = dyn_cast<NonTypeTemplateParmDecl>(this))
+    return NTTP->isParameterPack();
+  if (const TemplateTemplateParmDecl *TTP
+        = dyn_cast<TemplateTemplateParmDecl>(this))
+    return TTP->isParameterPack();
+  return false;
+}
+
+bool Decl::isParameterPack() const {
+  if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+    return Parm->isParameterPack();
+
+  return isTemplateParameterPack();
+}
+
+bool Decl::isFunctionOrFunctionTemplate() const {
+  // Using-shadow declarations answer for their target.
+  if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
+    return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
+
+  return isa<FunctionDecl>(this) || isa<FunctionTemplateDecl>(this);
+}
+
+bool Decl::isTemplateDecl() const {
+  return isa<TemplateDecl>(this);
+}
+
+/// Walk outward to the nearest enclosing function or method context, stopping
+/// at (and excluding) namespace and translation-unit scope.
+const DeclContext *Decl::getParentFunctionOrMethod() const {
+  for (const DeclContext *DC = getDeclContext();
+       DC && !DC->isTranslationUnit() && !DC->isNamespace();
+       DC = DC->getParent())
+    if (DC->isFunctionOrMethod())
+      return DC;
+
+  return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceDecl Implementation
+//===----------------------------------------------------------------------===//
+
+/// Print "<loc>: <message> '<qualified-name>'" for crash backtraces.
+void PrettyStackTraceDecl::print(raw_ostream &OS) const {
+  SourceLocation TheLoc = Loc;
+  if (TheLoc.isInvalid() && TheDecl)
+    TheLoc = TheDecl->getLocation();
+
+  if (TheLoc.isValid()) {
+    TheLoc.print(OS, SM);
+    OS << ": ";
+  }
+
+  OS << Message;
+
+  if (const NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl)) {
+    OS << " '";
+    DN->printQualifiedName(OS);
+    OS << '\'';
+  }
+  OS << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// Decl Implementation
+//===----------------------------------------------------------------------===//
+
+// Out-of-line virtual method providing a home for Decl.
+Decl::~Decl() { }
+
+void Decl::setDeclContext(DeclContext *DC) {
+  DeclCtx = DC;
+}
+
+/// Set the lexical context; when it differs from the semantic context, the
+/// two are stored together in a MultipleDC.
+void Decl::setLexicalDeclContext(DeclContext *DC) {
+  if (DC == getLexicalDeclContext())
+    return;
+
+  if (isInSemaDC()) {
+    setDeclContextsImpl(getDeclContext(), DC, getASTContext());
+  } else {
+    getMultipleDC()->LexicalDC = DC;
+  }
+}
+
+void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+                               ASTContext &Ctx) {
+  // Store a single pointer when the contexts agree, otherwise allocate a
+  // MultipleDC pair in the ASTContext.
+  if (SemaDC == LexicalDC) {
+    DeclCtx = SemaDC;
+  } else {
+    Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+    MDC->SemanticDC = SemaDC;
+    MDC->LexicalDC = LexicalDC;
+    DeclCtx = MDC;
+  }
+}
+
+bool Decl::isInAnonymousNamespace() const {
+  const DeclContext *DC = getDeclContext();
+  do {
+    if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+      if (ND->isAnonymousNamespace())
+        return true;
+  } while ((DC = DC->getParent()));
+
+  return false;
+}
+
+TranslationUnitDecl *Decl::getTranslationUnitDecl() {
+  if (TranslationUnitDecl *TUD = dyn_cast<TranslationUnitDecl>(this))
+    return TUD;
+
+  DeclContext *DC = getDeclContext();
+  assert(DC && "This decl is not contained in a translation unit!");
+
+  while (!DC->isTranslationUnit()) {
+    DC = DC->getParent();
+    assert(DC && "This decl is not contained in a translation unit!");
+  }
+
+  return cast<TranslationUnitDecl>(DC);
+}
+
+ASTContext &Decl::getASTContext() const {
+  return getTranslationUnitDecl()->getASTContext();
+}
+
+ASTMutationListener *Decl::getASTMutationListener() const {
+  return getASTContext().getASTMutationListener();
+}
+
+/// Largest alignment requested by any aligned attribute on this declaration,
+/// or 0 when there are no attributes.
+unsigned Decl::getMaxAlignment() const {
+  if (!hasAttrs())
+    return 0;
+
+  unsigned Align = 0;
+  const AttrVec &V = getAttrs();
+  ASTContext &Ctx = getASTContext();
+  specific_attr_iterator<AlignedAttr> I(V.begin()), E(V.end());
+  for (; I != E; ++I)
+    Align = std::max(Align, I->getAlignment(Ctx));
+  return Align;
+}
+
+bool Decl::isUsed(bool CheckUsedAttr) const {
+  if (Used)
+    return true;
+
+  // Check for used attribute.
+  if (CheckUsedAttr && hasAttr<UsedAttr>())
+    return true;
+
+  return false;
+}
+
+/// Set the Used bit, notifying the AST mutation listener on the transition.
+void Decl::markUsed(ASTContext &C) {
+  if (Used)
+    return;
+
+  if (C.getASTMutationListener())
+    C.getASTMutationListener()->DeclarationMarkedUsed(this);
+
+  Used = true;
+}
+
+bool Decl::isReferenced() const {
+  if (Referenced)
+    return true;
+
+  // Check redeclarations.
+  for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+    if (I->Referenced)
+      return true;
+
+  return false;
+}
+
+/// \brief Determine the availability of the given declaration based on
+/// the target platform.
+///
+/// When it returns an availability result other than \c AR_Available,
+/// if the \p Message parameter is non-NULL, it will be set to a
+/// string describing why the entity is unavailable.
+///
+/// FIXME: Make these strings localizable, since they end up in
+/// diagnostics.
+static AvailabilityResult CheckAvailability(ASTContext &Context,
+                                            const AvailabilityAttr *A,
+                                            std::string *Message) {
+  StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+  StringRef PrettyPlatformName
+    = AvailabilityAttr::getPrettyPlatformName(TargetPlatform);
+  if (PrettyPlatformName.empty())
+    PrettyPlatformName = TargetPlatform;
+
+  VersionTuple TargetMinVersion = Context.getTargetInfo().getPlatformMinVersion();
+  if (TargetMinVersion.empty())
+    return AR_Available;
+
+  // Match the platform name.
+  if (A->getPlatform()->getName() != TargetPlatform)
+    return AR_Available;
+
+  std::string HintMessage;
+  if (!A->getMessage().empty()) {
+    HintMessage = " - ";
+    HintMessage += A->getMessage();
+  }
+
+  // Make sure that this declaration has not been marked 'unavailable'.
+  if (A->getUnavailable()) {
+    if (Message) {
+      Message->clear();
+      llvm::raw_string_ostream Out(*Message);
+      Out << "not available on " << PrettyPlatformName
+          << HintMessage;
+    }
+
+    return AR_Unavailable;
+  }
+
+  // Make sure that this declaration has already been introduced.
+  if (!A->getIntroduced().empty() &&
+      TargetMinVersion < A->getIntroduced()) {
+    if (Message) {
+      Message->clear();
+      llvm::raw_string_ostream Out(*Message);
+      Out << "introduced in " << PrettyPlatformName << ' '
+          << A->getIntroduced() << HintMessage;
+    }
+
+    return AR_NotYetIntroduced;
+  }
+
+  // Make sure that this declaration hasn't been obsoleted.
+  if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
+    if (Message) {
+      Message->clear();
+      llvm::raw_string_ostream Out(*Message);
+      Out << "obsoleted in " << PrettyPlatformName << ' '
+          << A->getObsoleted() << HintMessage;
+    }
+
+    return AR_Unavailable;
+  }
+
+  // Make sure that this declaration hasn't been deprecated.
+  if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
+    if (Message) {
+      Message->clear();
+      llvm::raw_string_ostream Out(*Message);
+      Out << "first deprecated in " << PrettyPlatformName << ' '
+          << A->getDeprecated() << HintMessage;
+    }
+
+    return AR_Deprecated;
+  }
+
+  return AR_Available;
+}
+
+/// Fold all deprecation/unavailability attributes on this declaration into a
+/// single result, keeping the most severe one; AR_Unavailable short-circuits.
+AvailabilityResult Decl::getAvailability(std::string *Message) const {
+  AvailabilityResult Result = AR_Available;
+  std::string ResultMessage;
+
+  for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+    if (DeprecatedAttr *Deprecated = dyn_cast<DeprecatedAttr>(*A)) {
+      if (Result >= AR_Deprecated)
+        continue;
+
+      if (Message)
+        ResultMessage = Deprecated->getMessage();
+
+      Result = AR_Deprecated;
+      continue;
+    }
+
+    if (UnavailableAttr *Unavailable = dyn_cast<UnavailableAttr>(*A)) {
+      if (Message)
+        *Message = Unavailable->getMessage();
+      return AR_Unavailable;
+    }
+
+    if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+      AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
+                                                Message);
+
+      if (AR == AR_Unavailable)
+        return AR_Unavailable;
+
+      if (AR > Result) {
+        Result = AR;
+        if (Message)
+          ResultMessage.swap(*Message);
+      }
+      continue;
+    }
+  }
+
+  if (Message)
+    Message->swap(ResultMessage);
+  return Result;
+}
+
+/// Whether this kind of declaration can be weak-imported at all; sets
+/// \p IsDefinition when the reason it cannot is that it is a definition.
+bool Decl::canBeWeakImported(bool &IsDefinition) const {
+  IsDefinition = false;
+
+  // Variables, if they aren't definitions.
+  if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+    if (Var->isThisDeclarationADefinition()) {
+      IsDefinition = true;
+      return false;
+    }
+    return true;
+
+  // Functions, if they aren't definitions.
+  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+    if (FD->hasBody()) {
+      IsDefinition = true;
+      return false;
+    }
+    return true;
+
+  // Objective-C classes, if this is the non-fragile runtime.
+  } else if (isa<ObjCInterfaceDecl>(this) &&
+             getASTContext().getLangOpts().ObjCRuntime.hasWeakClassImport()) {
+    return true;
+
+  // Nothing else.
+  } else {
+    return false;
+  }
+}
+
+/// Weak-imported if explicitly attributed, or if an availability attribute
+/// says it is not yet introduced on the deployment target.
+bool Decl::isWeakImported() const {
+  bool IsDefinition;
+  if (!canBeWeakImported(IsDefinition))
+    return false;
+
+  for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+    if (isa<WeakImportAttr>(*A))
+      return true;
+
+    if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+      if (CheckAvailability(getASTContext(), Availability, 0)
+            == AR_NotYetIntroduced)
+        return true;
+    }
+  }
+
+  return false;
+}
+
+/// Map a declaration kind to the identifier namespace(s) its name occupies
+/// during name lookup.
+unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
+  switch (DeclKind) {
+    case Function:
+    case CXXMethod:
+    case CXXConstructor:
+    case CXXDestructor:
+    case CXXConversion:
+    case EnumConstant:
+    case Var:
+    case ImplicitParam:
+    case ParmVar:
+    case NonTypeTemplateParm:
+    case ObjCMethod:
+    case ObjCProperty:
+    case MSProperty:
+      return IDNS_Ordinary;
+    case Label:
+      return IDNS_Label;
+    case IndirectField:
+      return IDNS_Ordinary | IDNS_Member;
+
+    case ObjCCompatibleAlias:
+    case ObjCInterface:
+      return
IDNS_Ordinary | IDNS_Type; + + case Typedef: + case TypeAlias: + case TypeAliasTemplate: + case UnresolvedUsingTypename: + case TemplateTypeParm: + return IDNS_Ordinary | IDNS_Type; + + case UsingShadow: + return 0; // we'll actually overwrite this later + + case UnresolvedUsingValue: + return IDNS_Ordinary | IDNS_Using; + + case Using: + return IDNS_Using; + + case ObjCProtocol: + return IDNS_ObjCProtocol; + + case Field: + case ObjCAtDefsField: + case ObjCIvar: + return IDNS_Member; + + case Record: + case CXXRecord: + case Enum: + return IDNS_Tag | IDNS_Type; + + case Namespace: + case NamespaceAlias: + return IDNS_Namespace; + + case FunctionTemplate: + case VarTemplate: + return IDNS_Ordinary; + + case ClassTemplate: + case TemplateTemplateParm: + return IDNS_Ordinary | IDNS_Tag | IDNS_Type; + + // Never have names. + case Friend: + case FriendTemplate: + case AccessSpec: + case LinkageSpec: + case FileScopeAsm: + case StaticAssert: + case ObjCPropertyImpl: + case Block: + case Captured: + case TranslationUnit: + + case UsingDirective: + case ClassTemplateSpecialization: + case ClassTemplatePartialSpecialization: + case ClassScopeFunctionSpecialization: + case VarTemplateSpecialization: + case VarTemplatePartialSpecialization: + case ObjCImplementation: + case ObjCCategory: + case ObjCCategoryImpl: + case Import: + case OMPThreadPrivate: + case Empty: + // Never looked up by name. 
+ return 0; + } + + llvm_unreachable("Invalid DeclKind!"); +} + +void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) { + assert(!HasAttrs && "Decl already contains attrs."); + + AttrVec &AttrBlank = Ctx.getDeclAttrs(this); + assert(AttrBlank.empty() && "HasAttrs was wrong?"); + + AttrBlank = attrs; + HasAttrs = true; +} + +void Decl::dropAttrs() { + if (!HasAttrs) return; + + HasAttrs = false; + getASTContext().eraseDeclAttrs(this); +} + +const AttrVec &Decl::getAttrs() const { + assert(HasAttrs && "No attrs to get!"); + return getASTContext().getDeclAttrs(this); +} + +Decl *Decl::castFromDeclContext (const DeclContext *D) { + Decl::Kind DK = D->getDeclKind(); + switch(DK) { +#define DECL(NAME, BASE) +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D)); +#define DECL_CONTEXT_BASE(NAME) +#include "clang/AST/DeclNodes.inc" + default: +#define DECL(NAME, BASE) +#define DECL_CONTEXT_BASE(NAME) \ + if (DK >= first##NAME && DK <= last##NAME) \ + return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D)); +#include "clang/AST/DeclNodes.inc" + llvm_unreachable("a decl that inherits DeclContext isn't handled"); + } +} + +DeclContext *Decl::castToDeclContext(const Decl *D) { + Decl::Kind DK = D->getKind(); + switch(DK) { +#define DECL(NAME, BASE) +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl*>(const_cast<Decl*>(D)); +#define DECL_CONTEXT_BASE(NAME) +#include "clang/AST/DeclNodes.inc" + default: +#define DECL(NAME, BASE) +#define DECL_CONTEXT_BASE(NAME) \ + if (DK >= first##NAME && DK <= last##NAME) \ + return static_cast<NAME##Decl*>(const_cast<Decl*>(D)); +#include "clang/AST/DeclNodes.inc" + llvm_unreachable("a decl that inherits DeclContext isn't handled"); + } +} + +SourceLocation Decl::getBodyRBrace() const { + // Special handling of FunctionDecl to avoid de-serializing the body from PCH. + // FunctionDecl stores EndRangeLoc for this purpose. 
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) { + const FunctionDecl *Definition; + if (FD->hasBody(Definition)) + return Definition->getSourceRange().getEnd(); + return SourceLocation(); + } + + if (Stmt *Body = getBody()) + return Body->getSourceRange().getEnd(); + + return SourceLocation(); +} + +void Decl::CheckAccessDeclContext() const { +#ifndef NDEBUG + // Suppress this check if any of the following hold: + // 1. this is the translation unit (and thus has no parent) + // 2. this is a template parameter (and thus doesn't belong to its context) + // 3. this is a non-type template parameter + // 4. the context is not a record + // 5. it's invalid + // 6. it's a C++0x static_assert. + if (isa<TranslationUnitDecl>(this) || + isa<TemplateTypeParmDecl>(this) || + isa<NonTypeTemplateParmDecl>(this) || + !isa<CXXRecordDecl>(getDeclContext()) || + isInvalidDecl() || + isa<StaticAssertDecl>(this) || + // FIXME: a ParmVarDecl can have ClassTemplateSpecialization + // as DeclContext (?). + isa<ParmVarDecl>(this) || + // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have + // AS_none as access specifier. + isa<CXXRecordDecl>(this) || + isa<ClassScopeFunctionSpecializationDecl>(this)) + return; + + assert(Access != AS_none && + "Access specifier is AS_none inside a record decl"); +#endif +} + +static Decl::Kind getKind(const Decl *D) { return D->getKind(); } +static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); } + +/// Starting at a given context (a Decl or DeclContext), look for a +/// code context that is not a closure (a lambda, block, etc.). 
// NOTE: the CXXMethod kind check must precede the FunctionDecl dyn_cast,
// since a CXXMethodDecl is also a FunctionDecl; only a lambda's operator()
// is treated as a closure and skipped over.
template <class T> static Decl *getNonClosureContext(T *D) {
  if (getKind(D) == Decl::CXXMethod) {
    CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getOverloadedOperator() == OO_Call &&
        MD->getParent()->isLambda())
      return getNonClosureContext(MD->getParent()->getParent());
    return MD;
  } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    return FD;
  } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    return MD;
  } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    // Blocks are closures: keep walking outward.
    return getNonClosureContext(BD->getParent());
  } else if (CapturedDecl *CD = dyn_cast<CapturedDecl>(D)) {
    // Captured regions are closures too.
    return getNonClosureContext(CD->getParent());
  } else {
    return 0;
  }
}

Decl *Decl::getNonClosureContext() {
  return ::getNonClosureContext(this);
}

Decl *DeclContext::getNonClosureAncestor() {
  return ::getNonClosureContext(this);
}

//===----------------------------------------------------------------------===//
// DeclContext Implementation
//===----------------------------------------------------------------------===//

/// LLVM-style RTTI: a Decl is a DeclContext iff its kind is one of the
/// DECL_CONTEXT kinds (or falls in the kind range of an abstract
/// DECL_CONTEXT_BASE), as generated from DeclNodes.inc.
bool DeclContext::classof(const Decl *D) {
  switch (D->getKind()) {
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) case Decl::NAME:
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
      return true;
    default:
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME)                   \
      if (D->getKind() >= Decl::first##NAME &&    \
          D->getKind() <= Decl::last##NAME)       \
        return true;
#include "clang/AST/DeclNodes.inc"
      return false;
  }
}

DeclContext::~DeclContext() { }

/// \brief Find the parent context of this context that will be
/// used for unqualified name lookup.
///
/// Generally, the parent lookup context is the semantic context. However, for
/// a friend function the parent lookup context is the lexical context, which
/// is the class in which the friend is declared.
DeclContext *DeclContext::getLookupParent() {
  // FIXME: Find a better way to identify friends
  if (isa<FunctionDecl>(this))
    if (getParent()->getRedeclContext()->isFileContext() &&
        getLexicalParent()->getRedeclContext()->isRecord())
      return getLexicalParent();

  return getParent();
}

bool DeclContext::isInlineNamespace() const {
  return isNamespace() &&
         cast<NamespaceDecl>(this)->isInline();
}

/// Determine whether this context (or any of its parents) depends on
/// template parameters.
bool DeclContext::isDependentContext() const {
  if (isFileContext())
    return false;

  if (isa<ClassTemplatePartialSpecializationDecl>(this))
    return true;

  if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
    if (Record->getDescribedClassTemplate())
      return true;

    if (Record->isDependentLambda())
      return true;
  }

  if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
    if (Function->getDescribedFunctionTemplate())
      return true;

    // Friend function declarations are dependent if their *lexical*
    // context is dependent.
    if (cast<Decl>(this)->getFriendObjectKind())
      return getLexicalParent()->isDependentContext();
  }

  return getParent() && getParent()->isDependentContext();
}

/// A transparent context is one whose declarations are visible in its
/// enclosing scope: unscoped enums and linkage specifications.
bool DeclContext::isTransparentContext() const {
  if (DeclKind == Decl::Enum)
    return !cast<EnumDecl>(this)->isScoped();
  else if (DeclKind == Decl::LinkageSpec)
    return true;

  return false;
}

// Walk up from DC looking for an enclosing linkage specification with the
// given language ID, stopping at the translation unit.
static bool isLinkageSpecContext(const DeclContext *DC,
                                 LinkageSpecDecl::LanguageIDs ID) {
  while (DC->getDeclKind() != Decl::TranslationUnit) {
    if (DC->getDeclKind() == Decl::LinkageSpec)
      return cast<LinkageSpecDecl>(DC)->getLanguage() == ID;
    DC = DC->getParent();
  }
  return false;
}

bool DeclContext::isExternCContext() const {
  return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_c);
}

bool DeclContext::isExternCXXContext() const {
  return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_cxx);
}

/// Determine whether this context semantically encloses DC, comparing
/// primary contexts on both sides.
bool DeclContext::Encloses(const DeclContext *DC) const {
  if (getPrimaryContext() != this)
    return getPrimaryContext()->Encloses(DC);

  for (; DC; DC = DC->getParent())
    if (DC->getPrimaryContext() == this)
      return true;
  return false;
}

/// Get the context holding the lookup tables for this entity: the original
/// namespace, the definition of a tag/ObjC interface/protocol, or this
/// context itself when there is only one DeclContext for the entity.
DeclContext *DeclContext::getPrimaryContext() {
  switch (DeclKind) {
  case Decl::TranslationUnit:
  case Decl::LinkageSpec:
  case Decl::Block:
  case Decl::Captured:
    // There is only one DeclContext for these entities.
    return this;

  case Decl::Namespace:
    // The original namespace is our primary context.
    return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();

  case Decl::ObjCMethod:
    return this;

  case Decl::ObjCInterface:
    if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
      return Def;

    return this;

  case Decl::ObjCProtocol:
    if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
      return Def;

    return this;

  case Decl::ObjCCategory:
    return this;

  case Decl::ObjCImplementation:
  case Decl::ObjCCategoryImpl:
    return this;

  default:
    if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
      // If this is a tag type that has a definition or is currently
      // being defined, that definition is our primary context.
      TagDecl *Tag = cast<TagDecl>(this);
      assert(isa<TagType>(Tag->TypeForDecl) ||
             isa<InjectedClassNameType>(Tag->TypeForDecl));

      if (TagDecl *Def = Tag->getDefinition())
        return Def;

      if (!isa<InjectedClassNameType>(Tag->TypeForDecl)) {
        const TagType *TagTy = cast<TagType>(Tag->TypeForDecl);
        if (TagTy->isBeingDefined())
          // FIXME: is it necessarily being defined in the decl
          // that owns the type?
          return TagTy->getDecl();
      }

      return Tag;
    }

    assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
          "Unknown DeclContext kind");
    return this;
  }
}

/// Collect every DeclContext that contributes declarations to this entity.
/// For namespaces that is the whole redeclaration chain (oldest first); for
/// everything else it is just this context.
void
DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
  Contexts.clear();

  if (DeclKind != Decl::Namespace) {
    Contexts.push_back(this);
    return;
  }

  NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
  for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
       N = N->getPreviousDecl())
    Contexts.push_back(N);

  // The loop above walks newest-to-oldest; present them oldest first.
  std::reverse(Contexts.begin(), Contexts.end());
}

/// Link the given declarations together via their NextInContextAndBits
/// pointers and return the (first, last) pair of the resulting chain.
/// FieldDecls are skipped when they were already loaded separately.
std::pair<Decl *, Decl *>
DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
                            bool FieldsAlreadyLoaded) {
  // Build up a chain of declarations via the Decl::NextInContextAndBits field.
  Decl *FirstNewDecl = 0;
  Decl *PrevDecl = 0;
  for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
    if (FieldsAlreadyLoaded && isa<FieldDecl>(Decls[I]))
      continue;

    Decl *D = Decls[I];
    if (PrevDecl)
      PrevDecl->NextInContextAndBits.setPointer(D);
    else
      FirstNewDecl = D;

    PrevDecl = D;
  }

  return std::make_pair(FirstNewDecl, PrevDecl);
}

/// \brief We have just acquired external visible storage, and we already have
/// built a lookup map. For every name in the map, pull in the new names from
/// the external storage.
void DeclContext::reconcileExternalVisibleStorage() {
  assert(NeedToReconcileExternalVisibleStorage && LookupPtr.getPointer());
  NeedToReconcileExternalVisibleStorage = false;

  StoredDeclsMap &Map = *LookupPtr.getPointer();
  for (StoredDeclsMap::iterator I = Map.begin(); I != Map.end(); ++I)
    I->second.setHasExternalDecls();
}

/// \brief Load the declarations within this lexical storage from an
/// external source.
void
DeclContext::LoadLexicalDeclsFromExternalStorage() const {
  ExternalASTSource *Source = getParentASTContext().getExternalSource();
  assert(hasExternalLexicalStorage() && Source && "No external storage?");

  // Notify that we have a DeclContext that is initializing.
  ExternalASTSource::Deserializing ADeclContext(Source);

  // Load the external declarations, if any.
  SmallVector<Decl*, 64> Decls;
  // Clear the flag first so a recursive load doesn't re-enter.
  ExternalLexicalStorage = false;
  switch (Source->FindExternalLexicalDecls(this, Decls)) {
  case ELR_Success:
    break;

  case ELR_Failure:
  case ELR_AlreadyLoaded:
    return;
  }

  if (Decls.empty())
    return;

  // We may have already loaded just the fields of this record, in which case
  // we need to ignore them.
  bool FieldsAlreadyLoaded = false;
  if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
    FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;

  // Splice the newly-read declarations into the beginning of the list
  // of declarations.
  Decl *ExternalFirst, *ExternalLast;
  llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
                                                          FieldsAlreadyLoaded);
  ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
  FirstDecl = ExternalFirst;
  if (!LastDecl)
    LastDecl = ExternalLast;
}

/// Record that the external source has no visible declarations for \p Name in
/// \p DC, so future lookups need not ask again.  Returns an empty result.
DeclContext::lookup_result
ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
                                                    DeclarationName Name) {
  ASTContext &Context = DC->getParentASTContext();
  StoredDeclsMap *Map;
  if (!(Map = DC->LookupPtr.getPointer()))
    Map = DC->CreateStoredDeclsMap(Context);

  (*Map)[Name].removeExternalDecls();

  return DeclContext::lookup_result();
}

/// Merge externally-provided declarations for \p Name into \p DC's lookup
/// table, replacing redeclarations of existing entries rather than
/// duplicating them.
DeclContext::lookup_result
ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
                                                  DeclarationName Name,
                                                  ArrayRef<NamedDecl*> Decls) {
  ASTContext &Context = DC->getParentASTContext();
  StoredDeclsMap *Map;
  if (!(Map = DC->LookupPtr.getPointer()))
    Map = DC->CreateStoredDeclsMap(Context);

  StoredDeclsList &List = (*Map)[Name];

  // Clear out any old external visible declarations, to avoid quadratic
  // performance in the redeclaration checks below.
  List.removeExternalDecls();

  if (!List.isNull()) {
    // We have both existing declarations and new declarations for this name.
    // Some of the declarations may simply replace existing ones. Handle those
    // first.
    llvm::SmallVector<unsigned, 8> Skip;
    for (unsigned I = 0, N = Decls.size(); I != N; ++I)
      if (List.HandleRedeclaration(Decls[I]))
        Skip.push_back(I);
    // Sentinel so the SkipPos walk below never runs off the end.
    Skip.push_back(Decls.size());

    // Add in any new declarations.
    unsigned SkipPos = 0;
    for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
      if (I == Skip[SkipPos])
        ++SkipPos;
      else
        List.AddSubsequentDecl(Decls[I]);
    }
  } else {
    // Convert the array to a StoredDeclsList.
    for (ArrayRef<NamedDecl*>::iterator
           I = Decls.begin(), E = Decls.end(); I != E; ++I) {
      if (List.isNull())
        List.setOnlyValue(*I);
      else
        List.AddSubsequentDecl(*I);
    }
  }

  return List.getLookupResult();
}

/// Iterate the lexical declarations without triggering deserialization.
DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
  return decl_iterator(FirstDecl);
}

DeclContext::decl_iterator DeclContext::decls_begin() const {
  if (hasExternalLexicalStorage())
    LoadLexicalDeclsFromExternalStorage();

  return decl_iterator(FirstDecl);
}

bool DeclContext::decls_empty() const {
  if (hasExternalLexicalStorage())
    LoadLexicalDeclsFromExternalStorage();

  return !FirstDecl;
}

/// Determine whether \p D is lexically chained into this context.  A decl is
/// in the chain iff it has a successor or is the chain's last element.
bool DeclContext::containsDecl(Decl *D) const {
  return (D->getLexicalDeclContext() == this &&
          (D->NextInContextAndBits.getPointer() || D == LastDecl));
}

void DeclContext::removeDecl(Decl *D) {
  assert(D->getLexicalDeclContext() == this &&
         "decl being removed from non-lexical context");
  assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
         "decl is not in decls list");

  // Remove D from the decl chain. This is O(n) but hopefully rare.
  if (D == FirstDecl) {
    if (D == LastDecl)
      FirstDecl = LastDecl = 0;
    else
      FirstDecl = D->NextInContextAndBits.getPointer();
  } else {
    // Singly-linked list: walk to D's predecessor and unlink D.
    for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
      assert(I && "decl not found in linked list");
      if (I->NextInContextAndBits.getPointer() == D) {
        I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
        if (D == LastDecl) LastDecl = I;
        break;
      }
    }
  }

  // Mark that D is no longer in the decl chain.
  D->NextInContextAndBits.setPointer(0);

  // Remove D from the lookup table if necessary.
  if (isa<NamedDecl>(D)) {
    NamedDecl *ND = cast<NamedDecl>(D);

    // Remove only decls that have a name
    if (!ND->getDeclName()) return;

    StoredDeclsMap *Map = getPrimaryContext()->LookupPtr.getPointer();
    if (!Map) return;

    StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
    assert(Pos != Map->end() && "no lookup entry for decl");
    if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
      Pos->second.remove(ND);
  }
}

/// Append \p D to this context's lexical decl chain WITHOUT making it
/// visible to name lookup (contrast with addDecl/addDeclInternal).
void DeclContext::addHiddenDecl(Decl *D) {
  assert(D->getLexicalDeclContext() == this &&
         "Decl inserted into wrong lexical context");
  assert(!D->getNextDeclInContext() && D != LastDecl &&
         "Decl already inserted into a DeclContext");

  if (FirstDecl) {
    LastDecl->NextInContextAndBits.setPointer(D);
    LastDecl = D;
  } else {
    FirstDecl = LastDecl = D;
  }

  // Notify a C++ record declaration that we've added a member, so it can
  // update it's class-specific state.
  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
    Record->addedMember(D);

  // If this is a newly-created (not de-serialized) import declaration, wire
  // it in to the list of local import declarations.
  if (!D->isFromASTFile()) {
    if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
      D->getASTContext().addedLocalImportDecl(Import);
  }
}

/// Add \p D to the lexical chain and make it visible for lookup in its
/// *semantic* context's primary context.
void DeclContext::addDecl(Decl *D) {
  addHiddenDecl(D);

  if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
    ND->getDeclContext()->getPrimaryContext()->
        makeDeclVisibleInContextWithFlags(ND, false, true);
}

/// Same as addDecl, but flags the visibility update as internal (used while
/// deserializing from an AST file).
void DeclContext::addDeclInternal(Decl *D) {
  addHiddenDecl(D);

  if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
    ND->getDeclContext()->getPrimaryContext()->
        makeDeclVisibleInContextWithFlags(ND, true, true);
}

/// shouldBeHidden - Determine whether a declaration which was declared
/// within its semantic context should be invisible to qualified name lookup.
static bool shouldBeHidden(NamedDecl *D) {
  // Skip unnamed declarations.
  if (!D->getDeclName())
    return true;

  // Skip entities that can't be found by name lookup into a particular
  // context.
  if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
      D->isTemplateParameter())
    return true;

  // Skip template specializations.
  // FIXME: This feels like a hack. Should DeclarationName support
  // template-ids, or is there a better way to keep specializations
  // from being visible?
  if (isa<ClassTemplateSpecializationDecl>(D))
    return true;
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
    if (FD->isFunctionTemplateSpecialization())
      return true;

  return false;
}

/// buildLookup - Build the lookup data structure with all of the
/// declarations in this DeclContext (and any other contexts linked
/// to it or transparent contexts nested within it) and return it.
StoredDeclsMap *DeclContext::buildLookup() {
  assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");

  // FIXME: Should we keep going if hasExternalVisibleStorage?
  // LookupPtr's int bit means "there are lazy (unindexed) local decls".
  if (!LookupPtr.getInt())
    return LookupPtr.getPointer();

  SmallVector<DeclContext *, 2> Contexts;
  collectAllContexts(Contexts);
  for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
    buildLookupImpl<&DeclContext::decls_begin,
                    &DeclContext::decls_end>(Contexts[I]);

  // We no longer have any lazy decls.
  LookupPtr.setInt(false);
  NeedToReconcileExternalVisibleStorage = false;
  return LookupPtr.getPointer();
}

/// buildLookupImpl - Build part of the lookup data structure for the
/// declarations contained within DCtx, which will either be this
/// DeclContext, a DeclContext linked to it, or a transparent context
/// nested within it.
template<DeclContext::decl_iterator (DeclContext::*Begin)() const,
         DeclContext::decl_iterator (DeclContext::*End)() const>
void DeclContext::buildLookupImpl(DeclContext *DCtx) {
  for (decl_iterator I = (DCtx->*Begin)(), E = (DCtx->*End)();
       I != E; ++I) {
    Decl *D = *I;

    // Insert this declaration into the lookup structure, but only if
    // it's semantically within its decl context. Any other decls which
    // should be found in this context are added eagerly.
    //
    // If it's from an AST file, don't add it now. It'll get handled by
    // FindExternalVisibleDeclsByName if needed. Exception: if we're not
    // in C++, we do not track external visible decls for the TU, so in
    // that case we need to collect them all here.
    if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
      if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND) &&
          (!ND->isFromASTFile() ||
           (isTranslationUnit() &&
            !getParentASTContext().getLangOpts().CPlusPlus)))
        makeDeclVisibleInContextImpl(ND, false);

    // If this declaration is itself a transparent declaration context
    // or inline namespace, add the members of this declaration of that
    // context (recursively).
    if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
      if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
        buildLookupImpl<Begin, End>(InnerCtx);
  }
}

/// Perform qualified name lookup for \p Name in this context, consulting the
/// external AST source when this context has external visible storage.
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) {
  assert(DeclKind != Decl::LinkageSpec &&
         "Should not perform lookups into linkage specs!");

  DeclContext *PrimaryContext = getPrimaryContext();
  if (PrimaryContext != this)
    return PrimaryContext->lookup(Name);

  if (hasExternalVisibleStorage()) {
    StoredDeclsMap *Map = LookupPtr.getPointer();
    if (LookupPtr.getInt())
      Map = buildLookup();
    else if (NeedToReconcileExternalVisibleStorage)
      reconcileExternalVisibleStorage();

    if (!Map)
      Map = CreateStoredDeclsMap(getParentASTContext());

    // If we have a lookup result with no external decls, we are done.
    std::pair<StoredDeclsMap::iterator, bool> R =
        Map->insert(std::make_pair(Name, StoredDeclsList()));
    if (!R.second && !R.first->second.hasExternalDecls())
      return R.first->second.getLookupResult();

    ExternalASTSource *Source = getParentASTContext().getExternalSource();
    if (Source->FindExternalVisibleDeclsByName(this, Name) || R.second) {
      // Re-fetch the map: FindExternalVisibleDeclsByName may have
      // invalidated the earlier iterator.
      if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
        StoredDeclsMap::iterator I = Map->find(Name);
        if (I != Map->end())
          return I->second.getLookupResult();
      }
    }

    return lookup_result(lookup_iterator(0), lookup_iterator(0));
  }

  StoredDeclsMap *Map = LookupPtr.getPointer();
  if (LookupPtr.getInt())
    Map = buildLookup();

  if (!Map)
    return lookup_result(lookup_iterator(0), lookup_iterator(0));

  StoredDeclsMap::iterator I = Map->find(Name);
  if (I == Map->end())
    return lookup_result(lookup_iterator(0), lookup_iterator(0));

  return I->second.getLookupResult();
}

/// Like lookup(), but never deserializes declarations from the external
/// source; it only reports what is already loaded locally.
DeclContext::lookup_result
DeclContext::noload_lookup(DeclarationName Name) {
  assert(DeclKind != Decl::LinkageSpec &&
         "Should not perform lookups into linkage specs!");
  if (!hasExternalVisibleStorage())
    return lookup(Name);

  DeclContext *PrimaryContext = getPrimaryContext();
  if (PrimaryContext != this)
    return PrimaryContext->noload_lookup(Name);

  StoredDeclsMap *Map = LookupPtr.getPointer();
  if (LookupPtr.getInt()) {
    // Carefully build the lookup map, without deserializing anything.
    SmallVector<DeclContext *, 2> Contexts;
    collectAllContexts(Contexts);
    for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
      buildLookupImpl<&DeclContext::noload_decls_begin,
                      &DeclContext::noload_decls_end>(Contexts[I]);

    // We no longer have any lazy decls.
    LookupPtr.setInt(false);

    // There may now be names for which we have local decls but are
    // missing the external decls. FIXME: Just set the hasExternalDecls
    // flag on those names that have external decls.
    NeedToReconcileExternalVisibleStorage = true;

    Map = LookupPtr.getPointer();
  }

  if (!Map)
    return lookup_result(lookup_iterator(0), lookup_iterator(0));

  StoredDeclsMap::iterator I = Map->find(Name);
  return I != Map->end()
             ? I->second.getLookupResult()
             : lookup_result(lookup_iterator(0), lookup_iterator(0));
}

/// Find all declarations with the given name in this context, without
/// loading anything from an external source; results go into \p Results.
void DeclContext::localUncachedLookup(DeclarationName Name,
                                      SmallVectorImpl<NamedDecl *> &Results) {
  Results.clear();

  // If there's no external storage, just perform a normal lookup and copy
  // the results.
  if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) {
    lookup_result LookupResults = lookup(Name);
    Results.insert(Results.end(), LookupResults.begin(), LookupResults.end());
    return;
  }

  // If we have a lookup table, check there first. Maybe we'll get lucky.
  if (Name && !LookupPtr.getInt()) {
    if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
      StoredDeclsMap::iterator Pos = Map->find(Name);
      if (Pos != Map->end()) {
        Results.insert(Results.end(),
                       Pos->second.getLookupResult().begin(),
                       Pos->second.getLookupResult().end());
        return;
      }
    }
  }

  // Slow case: grovel through the declarations in our chain looking for
  // matches.
  for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
    if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
      if (ND->getDeclName() == Name)
        Results.push_back(ND);
  }
}

DeclContext *DeclContext::getRedeclContext() {
  DeclContext *Ctx = this;
  // Skip through transparent contexts.
  while (Ctx->isTransparentContext())
    Ctx = Ctx->getParent();
  return Ctx;
}

DeclContext *DeclContext::getEnclosingNamespaceContext() {
  DeclContext *Ctx = this;
  // Skip through non-namespace, non-translation-unit contexts.
  while (!Ctx->isFileContext())
    Ctx = Ctx->getParent();
  return Ctx->getPrimaryContext();
}

/// Determine whether this context is in the "enclosing namespace set" of
/// \p O: either equal to it, or an ancestor reached through inline
/// namespaces only.
bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
  // For non-file contexts, this is equivalent to Equals.
  if (!isFileContext())
    return O->Equals(this);

  do {
    if (O->Equals(this))
      return true;

    const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
    if (!NS || !NS->isInline())
      break;
    O = NS->getParent();
  } while (O);

  return false;
}

void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
  DeclContext *PrimaryDC = this->getPrimaryContext();
  DeclContext *DeclDC = D->getDeclContext()->getPrimaryContext();
  // If the decl is being added outside of its semantic decl context, we
  // need to ensure that we eagerly build the lookup information for it.
  PrimaryDC->makeDeclVisibleInContextWithFlags(D, false, PrimaryDC == DeclDC);
}

/// Make \p D visible for name lookup in this (primary) context.
/// \p Internal marks updates driven by AST deserialization; \p Recoverable
/// is true when buildLookup would rediscover \p D on its own, allowing the
/// insertion to be deferred by just setting the lazy bit.
void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
                                                    bool Recoverable) {
  assert(this == getPrimaryContext() && "expected a primary DC");

  // Skip declarations within functions.
  if (isFunctionOrMethod())
    return;

  // Skip declarations which should be invisible to name lookup.
  if (shouldBeHidden(D))
    return;

  // If we already have a lookup data structure, perform the insertion into
  // it. If we might have externally-stored decls with this name, look them
  // up and perform the insertion. If this decl was declared outside its
  // semantic context, buildLookup won't add it, so add it now.
  //
  // FIXME: As a performance hack, don't add such decls into the translation
  // unit unless we're in C++, since qualified lookup into the TU is never
  // performed.
  if (LookupPtr.getPointer() || hasExternalVisibleStorage() ||
      ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
       (getParentASTContext().getLangOpts().CPlusPlus ||
        !isTranslationUnit()))) {
    // If we have lazily omitted any decls, they might have the same name as
    // the decl which we are adding, so build a full lookup table before adding
    // this decl.
    buildLookup();
    makeDeclVisibleInContextImpl(D, Internal);
  } else {
    // Defer: mark that there are lazy decls to index later.
    LookupPtr.setInt(true);
  }

  // If we are a transparent context or inline namespace, insert into our
  // parent context, too. This operation is recursive.
  if (isTransparentContext() || isInlineNamespace())
    getParent()->getPrimaryContext()->
        makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);

  Decl *DCAsDecl = cast<Decl>(this);
  // Notify that a decl was made visible unless we are a Tag being defined.
  if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
    if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
      L->AddedVisibleDecl(this, D);
}

void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
  // Find or create the stored declaration map.
  StoredDeclsMap *Map = LookupPtr.getPointer();
  if (!Map) {
    ASTContext *C = &getParentASTContext();
    Map = CreateStoredDeclsMap(*C);
  }

  // If there is an external AST source, load any declarations it knows about
  // with this declaration's name.
  // If the lookup table contains an entry about this name it means that we
  // have already checked the external source.
  if (!Internal)
    if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
      if (hasExternalVisibleStorage() &&
          Map->find(D->getDeclName()) == Map->end())
        Source->FindExternalVisibleDeclsByName(this, D->getDeclName());

  // Insert this declaration into the map.
  StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];

  if (Internal) {
    // If this is being added as part of loading an external declaration,
    // this may not be the only external declaration with this name.
    // In this case, we never try to replace an existing declaration; we'll
    // handle that when we finalize the list of declarations for this name.
    DeclNameEntries.setHasExternalDecls();
    DeclNameEntries.AddSubsequentDecl(D);
    return;
  }

  else if (DeclNameEntries.isNull()) {
    DeclNameEntries.setOnlyValue(D);
    return;
  }

  if (DeclNameEntries.HandleRedeclaration(D)) {
    // This declaration has replaced an existing one for which
    // declarationReplaces returns true.
    return;
  }

  // Put this declaration into the appropriate slot.
  DeclNameEntries.AddSubsequentDecl(D);
}

/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
/// this context.
+DeclContext::udir_iterator_range +DeclContext::getUsingDirectives() const { + // FIXME: Use something more efficient than normal lookup for using + // directives. In C++, using directives are looked up more than anything else. + lookup_const_result Result = lookup(UsingDirectiveDecl::getName()); + return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.begin()), + reinterpret_cast<udir_iterator>(Result.end())); +} + +//===----------------------------------------------------------------------===// +// Creation and Destruction of StoredDeclsMaps. // +//===----------------------------------------------------------------------===// + +StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const { + assert(!LookupPtr.getPointer() && "context already has a decls map"); + assert(getPrimaryContext() == this && + "creating decls map on non-primary context"); + + StoredDeclsMap *M; + bool Dependent = isDependentContext(); + if (Dependent) + M = new DependentStoredDeclsMap(); + else + M = new StoredDeclsMap(); + M->Previous = C.LastSDM; + C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent); + LookupPtr.setPointer(M); + return M; +} + +void ASTContext::ReleaseDeclContextMaps() { + // It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap + // pointer because the subclass doesn't add anything that needs to + // be deleted. + StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt()); +} + +void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) { + while (Map) { + // Advance the iteration before we invalidate memory. 
+ llvm::PointerIntPair<StoredDeclsMap*,1> Next = Map->Previous; + + if (Dependent) + delete static_cast<DependentStoredDeclsMap*>(Map); + else + delete Map; + + Map = Next.getPointer(); + Dependent = Next.getInt(); + } +} + +DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C, + DeclContext *Parent, + const PartialDiagnostic &PDiag) { + assert(Parent->isDependentContext() + && "cannot iterate dependent diagnostics of non-dependent context"); + Parent = Parent->getPrimaryContext(); + if (!Parent->LookupPtr.getPointer()) + Parent->CreateStoredDeclsMap(C); + + DependentStoredDeclsMap *Map + = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr.getPointer()); + + // Allocate the copy of the PartialDiagnostic via the ASTContext's + // BumpPtrAllocator, rather than the ASTContext itself. + PartialDiagnostic::Storage *DiagStorage = 0; + if (PDiag.hasStorage()) + DiagStorage = new (C) PartialDiagnostic::Storage; + + DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage); + + // TODO: Maybe we shouldn't reverse the order during insertion. + DD->NextDiagnostic = Map->FirstDiagnostic; + Map->FirstDiagnostic = DD; + + return DD; +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp new file mode 100644 index 000000000000..a17abdd0ae2a --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp @@ -0,0 +1,2135 @@ +//===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the C++ related Decl classes. 
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;

//===----------------------------------------------------------------------===//
// Decl Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//

// Out-of-line anchor so AccessSpecDecl's vtable is emitted in this TU.
void AccessSpecDecl::anchor() { }

AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
  return new (Mem) AccessSpecDecl(EmptyShell());
}

/// Replace each lazily-stored external decl ID in this set with the real
/// NamedDecl fetched from the external AST source, then mark the set eager.
void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
  ExternalASTSource *Source = C.getExternalSource();
  assert(Impl.Decls.isLazy() && "getFromExternalSource for non-lazy set");
  assert(Source && "getFromExternalSource with no external source");

  for (ASTUnresolvedSet::iterator I = Impl.begin(); I != Impl.end(); ++I)
    // The stored pointer encodes an external decl ID shifted left by 2
    // (the low bits carry the access specifier); recover the ID here.
    I.setDecl(cast<NamedDecl>(Source->GetExternalDecl(
        reinterpret_cast<uintptr_t>(I.getDecl()) >> 2)));
  Impl.Decls.setLazy(false);
}

// Initialize all class-property flags to their "empty class" defaults; they
// are progressively invalidated as bases and members are added.
CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
  : UserDeclaredConstructor(false), UserDeclaredSpecialMembers(0),
    Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
    Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
    HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
    HasMutableFields(false), HasOnlyCMembers(true),
    HasInClassInitializer(false), HasUninitializedReferenceMember(false),
    NeedOverloadResolutionForMoveConstructor(false),
    NeedOverloadResolutionForMoveAssignment(false),
    NeedOverloadResolutionForDestructor(false),
    DefaultedMoveConstructorIsDeleted(false),
    DefaultedMoveAssignmentIsDeleted(false),
    DefaultedDestructorIsDeleted(false),
    HasTrivialSpecialMembers(SMF_All),
    DeclaredNonTrivialSpecialMembers(0),
    HasIrrelevantDestructor(true),
    HasConstexprNonCopyMoveConstructor(false),
    DefaultedDefaultConstructorIsConstexpr(true),
    HasConstexprDefaultConstructor(false),
    HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
    UserProvidedDefaultConstructor(false), DeclaredSpecialMembers(0),
    ImplicitCopyConstructorHasConstParam(true),
    ImplicitCopyAssignmentHasConstParam(true),
    HasDeclaredCopyConstructorWithConstParam(false),
    HasDeclaredCopyAssignmentWithConstParam(false),
    IsLambda(false), NumBases(0), NumVBases(0), Bases(), VBases(),
    Definition(D), FirstFriend() {
}

CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getBasesSlowCase() const {
  return Bases.get(Definition->getASTContext().getExternalSource());
}

CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getVBasesSlowCase() const {
  return VBases.get(Definition->getASTContext().getExternalSource());
}

CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
                             SourceLocation StartLoc, SourceLocation IdLoc,
                             IdentifierInfo *Id, CXXRecordDecl *PrevDecl)
  : RecordDecl(K, TK, DC, StartLoc, IdLoc, Id, PrevDecl),
    // Redeclarations share one DefinitionData with the previous declaration.
    DefinitionData(PrevDecl ? PrevDecl->DefinitionData : 0),
    TemplateOrInstantiation() { }

CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
                                     DeclContext *DC, SourceLocation StartLoc,
                                     SourceLocation IdLoc, IdentifierInfo *Id,
                                     CXXRecordDecl* PrevDecl,
                                     bool DelayTypeCreation) {
  CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, StartLoc, IdLoc,
                                           Id, PrevDecl);
  R->MayHaveOutOfDateDef = C.getLangOpts().Modules;

  // FIXME: DelayTypeCreation seems like such a hack
  if (!DelayTypeCreation)
    C.getTypeDeclType(R, PrevDecl);
  return R;
}

/// Create the closure class for a lambda expression; it is unnamed,
/// implicit, and carries a LambdaDefinitionData instead of the plain one.
CXXRecordDecl *CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
                                           TypeSourceInfo *Info,
                                           SourceLocation Loc,
                                           bool Dependent, bool IsGeneric,
                                           LambdaCaptureDefault CaptureDefault) {
  CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
                                           0, 0);
  R->IsBeingDefined = true;
  R->DefinitionData = new (C) struct LambdaDefinitionData(R, Info,
                                                          Dependent,
                                                          IsGeneric,
                                                          CaptureDefault);
  R->MayHaveOutOfDateDef = false;
  R->setImplicit(true);
  C.getTypeDeclType(R, /*PrevDecl=*/0);
  return R;
}

CXXRecordDecl *
CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
  void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
  CXXRecordDecl *R = new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0,
                                             SourceLocation(), SourceLocation(),
                                             0, 0);
  R->MayHaveOutOfDateDef = false;
  return R;
}

/// Record the direct base classes of this class, updating every derived
/// class-property flag (emptiness, POD-ness, standard-layout, triviality of
/// special members, ...) and collecting the transitive set of virtual bases.
void
CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
                        unsigned NumBases) {
  ASTContext &C = getASTContext();

  // Free any previously-set base array (unless it is still an unloaded
  // external offset).
  if (!data().Bases.isOffset() && data().NumBases > 0)
    C.Deallocate(data().getBases());

  if (NumBases) {
    // C++ [dcl.init.aggr]p1:
    //   An aggregate is [...] a class with [...] no base classes [...].
    data().Aggregate = false;

    // C++ [class]p4:
    //   A POD-struct is an aggregate class...
    data().PlainOldData = false;
  }

  // The set of seen virtual base types.
  llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;

  // The virtual bases of this class.
  SmallVector<const CXXBaseSpecifier *, 8> VBases;

  data().Bases = new(C) CXXBaseSpecifier [NumBases];
  data().NumBases = NumBases;
  for (unsigned i = 0; i < NumBases; ++i) {
    data().getBases()[i] = *Bases[i];
    // Keep track of inherited vbases for this base class.
    const CXXBaseSpecifier *Base = Bases[i];
    QualType BaseType = Base->getType();
    // Skip dependent types; we can't do any checking on them now.
    if (BaseType->isDependentType())
      continue;
    CXXRecordDecl *BaseClassDecl
      = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

    // A class with a non-empty base class is not empty.
    // FIXME: Standard ref?
    if (!BaseClassDecl->isEmpty()) {
      if (!data().Empty) {
        // C++0x [class]p7:
        //   A standard-layout class is a class that:
        //    [...]
        //    -- either has no non-static data members in the most derived
        //       class and at most one base class with non-static data members,
        //       or has no base classes with non-static data members, and
        // If this is the second non-empty base, then neither of these two
        // clauses can be true.
        data().IsStandardLayout = false;
      }

      data().Empty = false;
      data().HasNoNonEmptyBases = false;
    }

    // C++ [class.virtual]p1:
    //   A class that declares or inherits a virtual function is called a
    //   polymorphic class.
    if (BaseClassDecl->isPolymorphic())
      data().Polymorphic = true;

    // C++0x [class]p7:
    //   A standard-layout class is a class that: [...]
    //    -- has no non-standard-layout base classes
    if (!BaseClassDecl->isStandardLayout())
      data().IsStandardLayout = false;

    // Record if this base is the first non-literal field or base.
    if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType(C))
      data().HasNonLiteralTypeFieldsOrBases = true;

    // Now go through all virtual bases of this base and add them.
    for (CXXRecordDecl::base_class_iterator VBase =
          BaseClassDecl->vbases_begin(),
         E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
      // Add this base if it's not already in the list.
      if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType()))) {
        VBases.push_back(VBase);

        // C++11 [class.copy]p8:
        //   The implicitly-declared copy constructor for a class X will have
        //   the form 'X::X(const X&)' if each [...] virtual base class B of X
        //   has a copy constructor whose first parameter is of type
        //   'const B&' or 'const volatile B&' [...]
        if (CXXRecordDecl *VBaseDecl = VBase->getType()->getAsCXXRecordDecl())
          if (!VBaseDecl->hasCopyConstructorWithConstParam())
            data().ImplicitCopyConstructorHasConstParam = false;
      }
    }

    if (Base->isVirtual()) {
      // Add this base if it's not already in the list.
      if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
        VBases.push_back(Base);

      // C++0x [meta.unary.prop] is_empty:
      //   T is a class type, but not a union type, with ... no virtual base
      //   classes
      data().Empty = false;

      // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
      //   A [default constructor, copy/move constructor, or copy/move
      //   assignment operator for a class X] is trivial [...] if:
      //    -- class X has [...] no virtual base classes
      data().HasTrivialSpecialMembers &= SMF_Destructor;

      // C++0x [class]p7:
      //   A standard-layout class is a class that: [...]
      //    -- has [...] no virtual base classes
      data().IsStandardLayout = false;

      // C++11 [dcl.constexpr]p4:
      //   In the definition of a constexpr constructor [...]
      //    -- the class shall not have any virtual base classes
      data().DefaultedDefaultConstructorIsConstexpr = false;
    } else {
      // C++ [class.ctor]p5:
      //   A default constructor is trivial [...] if:
      //    -- all the direct base classes of its class have trivial default
      //       constructors.
      if (!BaseClassDecl->hasTrivialDefaultConstructor())
        data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;

      // C++0x [class.copy]p13:
      //   A copy/move constructor for class X is trivial if [...]
      //    [...]
      //    -- the constructor selected to copy/move each direct base class
      //       subobject is trivial, and
      if (!BaseClassDecl->hasTrivialCopyConstructor())
        data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
      // If the base class doesn't have a simple move constructor, we'll eagerly
      // declare it and perform overload resolution to determine which function
      // it actually calls. If it does have a simple move constructor, this
      // check is correct.
      if (!BaseClassDecl->hasTrivialMoveConstructor())
        data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;

      // C++0x [class.copy]p27:
      //   A copy/move assignment operator for class X is trivial if [...]
      //    [...]
      //    -- the assignment operator selected to copy/move each direct base
      //       class subobject is trivial, and
      if (!BaseClassDecl->hasTrivialCopyAssignment())
        data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
      // If the base class doesn't have a simple move assignment, we'll eagerly
      // declare it and perform overload resolution to determine which function
      // it actually calls. If it does have a simple move assignment, this
      // check is correct.
      if (!BaseClassDecl->hasTrivialMoveAssignment())
        data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;

      // C++11 [class.ctor]p6:
      //   If that user-written default constructor would satisfy the
      //   requirements of a constexpr constructor, the implicitly-defined
      //   default constructor is constexpr.
      if (!BaseClassDecl->hasConstexprDefaultConstructor())
        data().DefaultedDefaultConstructorIsConstexpr = false;
    }

    // C++ [class.ctor]p3:
    //   A destructor is trivial if all the direct base classes of its class
    //   have trivial destructors.
    if (!BaseClassDecl->hasTrivialDestructor())
      data().HasTrivialSpecialMembers &= ~SMF_Destructor;

    if (!BaseClassDecl->hasIrrelevantDestructor())
      data().HasIrrelevantDestructor = false;

    // C++11 [class.copy]p18:
    //   The implicitly-declared copy assignment operator for a class X will
    //   have the form 'X& X::operator=(const X&)' if each direct base class B
    //   of X has a copy assignment operator whose parameter is of type 'const
    //   B&', 'const volatile B&', or 'B' [...]
    if (!BaseClassDecl->hasCopyAssignmentWithConstParam())
      data().ImplicitCopyAssignmentHasConstParam = false;

    // C++11 [class.copy]p8:
    //   The implicitly-declared copy constructor for a class X will have
    //   the form 'X::X(const X&)' if each direct [...] base class B of X
    //   has a copy constructor whose first parameter is of type
    //   'const B&' or 'const volatile B&' [...]
    if (!BaseClassDecl->hasCopyConstructorWithConstParam())
      data().ImplicitCopyConstructorHasConstParam = false;

    // A class has an Objective-C object member if... or any of its bases
    // has an Objective-C object member.
    if (BaseClassDecl->hasObjectMember())
      setHasObjectMember(true);

    if (BaseClassDecl->hasVolatileMember())
      setHasVolatileMember(true);

    // Keep track of the presence of mutable fields.
    if (BaseClassDecl->hasMutableFields())
      data().HasMutableFields = true;

    if (BaseClassDecl->hasUninitializedReferenceMember())
      data().HasUninitializedReferenceMember = true;

    addedClassSubobject(BaseClassDecl);
  }

  if (VBases.empty())
    return;

  // Create base specifier for any direct or indirect virtual bases.
  data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
  data().NumVBases = VBases.size();
  // NOTE(review): I/E are 'int' while VBases.size() is a size_t; harmless for
  // realistic base-class counts, but unsigned would avoid the narrowing.
  for (int I = 0, E = VBases.size(); I != E; ++I) {
    QualType Type = VBases[I]->getType();
    if (!Type->isDependentType())
      addedClassSubobject(Type->getAsCXXRecordDecl());
    data().getVBases()[I] = *VBases[I];
  }
}

/// Update the "need overload resolution" flags for a newly-added base class
/// or field of class type: if the subobject's move/destroy behavior is not
/// simple, the corresponding implicit member must be resolved eagerly.
void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
  // C++11 [class.copy]p11:
  //   A defaulted copy/move constructor for a class X is defined as
  //   deleted if X has:
  //    -- a direct or virtual base class B that cannot be copied/moved [...]
  //    -- a non-static data member of class type M (or array thereof)
  //       that cannot be copied or moved [...]
  if (!Subobj->hasSimpleMoveConstructor())
    data().NeedOverloadResolutionForMoveConstructor = true;

  // C++11 [class.copy]p23:
  //   A defaulted copy/move assignment operator for a class X is defined as
  //   deleted if X has:
  //    -- a direct or virtual base class B that cannot be copied/moved [...]
  //    -- a non-static data member of class type M (or array thereof)
  //       that cannot be copied or moved [...]
  if (!Subobj->hasSimpleMoveAssignment())
    data().NeedOverloadResolutionForMoveAssignment = true;

  // C++11 [class.ctor]p5, C++11 [class.copy]p11, C++11 [class.dtor]p5:
  //   A defaulted [ctor or dtor] for a class X is defined as
  //   deleted if X has:
  //    -- any direct or virtual base class [...] has a type with a destructor
  //       that is deleted or inaccessible from the defaulted [ctor or dtor].
  //    -- any non-static data member has a type with a destructor
  //       that is deleted or inaccessible from the defaulted [ctor or dtor].
  if (!Subobj->hasSimpleDestructor()) {
    data().NeedOverloadResolutionForMoveConstructor = true;
    data().NeedOverloadResolutionForDestructor = true;
  }
}

/// Callback function for CXXRecordDecl::forallBases that acknowledges
/// that it saw a base class.
+static bool SawBase(const CXXRecordDecl *, void *) { + return true; +} + +bool CXXRecordDecl::hasAnyDependentBases() const { + if (!isDependentContext()) + return false; + + return !forallBases(SawBase, 0); +} + +bool CXXRecordDecl::isTriviallyCopyable() const { + // C++0x [class]p5: + // A trivially copyable class is a class that: + // -- has no non-trivial copy constructors, + if (hasNonTrivialCopyConstructor()) return false; + // -- has no non-trivial move constructors, + if (hasNonTrivialMoveConstructor()) return false; + // -- has no non-trivial copy assignment operators, + if (hasNonTrivialCopyAssignment()) return false; + // -- has no non-trivial move assignment operators, and + if (hasNonTrivialMoveAssignment()) return false; + // -- has a trivial destructor. + if (!hasTrivialDestructor()) return false; + + return true; +} + +void CXXRecordDecl::markedVirtualFunctionPure() { + // C++ [class.abstract]p2: + // A class is abstract if it has at least one pure virtual function. + data().Abstract = true; +} + +void CXXRecordDecl::addedMember(Decl *D) { + if (!D->isImplicit() && + !isa<FieldDecl>(D) && + !isa<IndirectFieldDecl>(D) && + (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class || + cast<TagDecl>(D)->getTagKind() == TTK_Interface)) + data().HasOnlyCMembers = false; + + // Ignore friends and invalid declarations. + if (D->getFriendObjectKind() || D->isInvalidDecl()) + return; + + FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D); + if (FunTmpl) + D = FunTmpl->getTemplatedDecl(); + + if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { + if (Method->isVirtual()) { + // C++ [dcl.init.aggr]p1: + // An aggregate is an array or a class with [...] no virtual functions. + data().Aggregate = false; + + // C++ [class]p4: + // A POD-struct is an aggregate class... + data().PlainOldData = false; + + // Virtual functions make the class non-empty. + // FIXME: Standard ref? 
+ data().Empty = false; + + // C++ [class.virtual]p1: + // A class that declares or inherits a virtual function is called a + // polymorphic class. + data().Polymorphic = true; + + // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25: + // A [default constructor, copy/move constructor, or copy/move + // assignment operator for a class X] is trivial [...] if: + // -- class X has no virtual functions [...] + data().HasTrivialSpecialMembers &= SMF_Destructor; + + // C++0x [class]p7: + // A standard-layout class is a class that: [...] + // -- has no virtual functions + data().IsStandardLayout = false; + } + } + + // Notify the listener if an implicit member was added after the definition + // was completed. + if (!isBeingDefined() && D->isImplicit()) + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXImplicitMember(data().Definition, D); + + // The kind of special member this declaration is, if any. + unsigned SMKind = 0; + + // Handle constructors. + if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) { + if (!Constructor->isImplicit()) { + // Note that we have a user-declared constructor. + data().UserDeclaredConstructor = true; + + // C++ [class]p4: + // A POD-struct is an aggregate class [...] + // Since the POD bit is meant to be C++03 POD-ness, clear it even if the + // type is technically an aggregate in C++0x since it wouldn't be in 03. + data().PlainOldData = false; + } + + // Technically, "user-provided" is only defined for special member + // functions, but the intent of the standard is clearly that it should apply + // to all functions. 
+ bool UserProvided = Constructor->isUserProvided(); + + if (Constructor->isDefaultConstructor()) { + SMKind |= SMF_DefaultConstructor; + + if (UserProvided) + data().UserProvidedDefaultConstructor = true; + if (Constructor->isConstexpr()) + data().HasConstexprDefaultConstructor = true; + } + + if (!FunTmpl) { + unsigned Quals; + if (Constructor->isCopyConstructor(Quals)) { + SMKind |= SMF_CopyConstructor; + + if (Quals & Qualifiers::Const) + data().HasDeclaredCopyConstructorWithConstParam = true; + } else if (Constructor->isMoveConstructor()) + SMKind |= SMF_MoveConstructor; + } + + // Record if we see any constexpr constructors which are neither copy + // nor move constructors. + if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor()) + data().HasConstexprNonCopyMoveConstructor = true; + + // C++ [dcl.init.aggr]p1: + // An aggregate is an array or a class with no user-declared + // constructors [...]. + // C++11 [dcl.init.aggr]p1: + // An aggregate is an array or a class with no user-provided + // constructors [...]. + if (getASTContext().getLangOpts().CPlusPlus11 + ? UserProvided : !Constructor->isImplicit()) + data().Aggregate = false; + } + + // Handle destructors. + if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) { + SMKind |= SMF_Destructor; + + if (!DD->isImplicit()) + data().HasIrrelevantDestructor = false; + + // C++11 [class.dtor]p5: + // A destructor is trivial if [...] the destructor is not virtual. + if (DD->isVirtual()) + data().HasTrivialSpecialMembers &= ~SMF_Destructor; + } + + // Handle member functions. 
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { + if (Method->isCopyAssignmentOperator()) { + SMKind |= SMF_CopyAssignment; + + const ReferenceType *ParamTy = + Method->getParamDecl(0)->getType()->getAs<ReferenceType>(); + if (!ParamTy || ParamTy->getPointeeType().isConstQualified()) + data().HasDeclaredCopyAssignmentWithConstParam = true; + } + + if (Method->isMoveAssignmentOperator()) + SMKind |= SMF_MoveAssignment; + + // Keep the list of conversion functions up-to-date. + if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) { + // FIXME: We use the 'unsafe' accessor for the access specifier here, + // because Sema may not have set it yet. That's really just a misdesign + // in Sema. However, LLDB *will* have set the access specifier correctly, + // and adds declarations after the class is technically completed, + // so completeDefinition()'s overriding of the access specifiers doesn't + // work. + AccessSpecifier AS = Conversion->getAccessUnsafe(); + + if (Conversion->getPrimaryTemplate()) { + // We don't record specializations. + } else { + ASTContext &Ctx = getASTContext(); + ASTUnresolvedSet &Conversions = data().Conversions.get(Ctx); + NamedDecl *Primary = + FunTmpl ? cast<NamedDecl>(FunTmpl) : cast<NamedDecl>(Conversion); + if (Primary->getPreviousDecl()) + Conversions.replace(cast<NamedDecl>(Primary->getPreviousDecl()), + Primary, AS); + else + Conversions.addDecl(Ctx, Primary, AS); + } + } + + if (SMKind) { + // If this is the first declaration of a special member, we no longer have + // an implicit trivial special member. + data().HasTrivialSpecialMembers &= + data().DeclaredSpecialMembers | ~SMKind; + + if (!Method->isImplicit() && !Method->isUserProvided()) { + // This method is user-declared but not user-provided. We can't work out + // whether it's trivial yet (not until we get to the end of the class). + // We'll handle this method in finishedDefaultedOrDeletedMember. 
+ } else if (Method->isTrivial()) + data().HasTrivialSpecialMembers |= SMKind; + else + data().DeclaredNonTrivialSpecialMembers |= SMKind; + + // Note when we have declared a declared special member, and suppress the + // implicit declaration of this special member. + data().DeclaredSpecialMembers |= SMKind; + + if (!Method->isImplicit()) { + data().UserDeclaredSpecialMembers |= SMKind; + + // C++03 [class]p4: + // A POD-struct is an aggregate class that has [...] no user-defined + // copy assignment operator and no user-defined destructor. + // + // Since the POD bit is meant to be C++03 POD-ness, and in C++03, + // aggregates could not have any constructors, clear it even for an + // explicitly defaulted or deleted constructor. + // type is technically an aggregate in C++0x since it wouldn't be in 03. + // + // Also, a user-declared move assignment operator makes a class non-POD. + // This is an extension in C++03. + data().PlainOldData = false; + } + } + + return; + } + + // Handle non-static data members. + if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) { + // C++ [class.bit]p2: + // A declaration for a bit-field that omits the identifier declares an + // unnamed bit-field. Unnamed bit-fields are not members and cannot be + // initialized. + if (Field->isUnnamedBitfield()) + return; + + // C++ [dcl.init.aggr]p1: + // An aggregate is an array or a class (clause 9) with [...] no + // private or protected non-static data members (clause 11). + // + // A POD must be an aggregate. + if (D->getAccess() == AS_private || D->getAccess() == AS_protected) { + data().Aggregate = false; + data().PlainOldData = false; + } + + // C++0x [class]p7: + // A standard-layout class is a class that: + // [...] 
+ // -- has the same access control for all non-static data members, + switch (D->getAccess()) { + case AS_private: data().HasPrivateFields = true; break; + case AS_protected: data().HasProtectedFields = true; break; + case AS_public: data().HasPublicFields = true; break; + case AS_none: llvm_unreachable("Invalid access specifier"); + }; + if ((data().HasPrivateFields + data().HasProtectedFields + + data().HasPublicFields) > 1) + data().IsStandardLayout = false; + + // Keep track of the presence of mutable fields. + if (Field->isMutable()) + data().HasMutableFields = true; + + // C++0x [class]p9: + // A POD struct is a class that is both a trivial class and a + // standard-layout class, and has no non-static data members of type + // non-POD struct, non-POD union (or array of such types). + // + // Automatic Reference Counting: the presence of a member of Objective-C pointer type + // that does not explicitly have no lifetime makes the class a non-POD. + // However, we delay setting PlainOldData to false in this case so that + // Sema has a chance to diagnostic causes where the same class will be + // non-POD with Automatic Reference Counting but a POD without ARC. + // In this case, the class will become a non-POD class when we complete + // the definition. + ASTContext &Context = getASTContext(); + QualType T = Context.getBaseElementType(Field->getType()); + if (T->isObjCRetainableType() || T.isObjCGCStrong()) { + if (!Context.getLangOpts().ObjCAutoRefCount || + T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) + setHasObjectMember(true); + } else if (!T.isCXX98PODType(Context)) + data().PlainOldData = false; + + if (T->isReferenceType()) { + if (!Field->hasInClassInitializer()) + data().HasUninitializedReferenceMember = true; + + // C++0x [class]p7: + // A standard-layout class is a class that: + // -- has no non-static data members of type [...] 
reference, + data().IsStandardLayout = false; + } + + // Record if this field is the first non-literal or volatile field or base. + if (!T->isLiteralType(Context) || T.isVolatileQualified()) + data().HasNonLiteralTypeFieldsOrBases = true; + + if (Field->hasInClassInitializer()) { + data().HasInClassInitializer = true; + + // C++11 [class]p5: + // A default constructor is trivial if [...] no non-static data member + // of its class has a brace-or-equal-initializer. + data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor; + + // C++11 [dcl.init.aggr]p1: + // An aggregate is a [...] class with [...] no + // brace-or-equal-initializers for non-static data members. + // + // This rule was removed in C++1y. + if (!getASTContext().getLangOpts().CPlusPlus1y) + data().Aggregate = false; + + // C++11 [class]p10: + // A POD struct is [...] a trivial class. + data().PlainOldData = false; + } + + // C++11 [class.copy]p23: + // A defaulted copy/move assignment operator for a class X is defined + // as deleted if X has: + // -- a non-static data member of reference type + if (T->isReferenceType()) + data().DefaultedMoveAssignmentIsDeleted = true; + + if (const RecordType *RecordTy = T->getAs<RecordType>()) { + CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl()); + if (FieldRec->getDefinition()) { + addedClassSubobject(FieldRec); + + // We may need to perform overload resolution to determine whether a + // field can be moved if it's const or volatile qualified. 
+ if (T.getCVRQualifiers() & (Qualifiers::Const | Qualifiers::Volatile)) { + data().NeedOverloadResolutionForMoveConstructor = true; + data().NeedOverloadResolutionForMoveAssignment = true; + } + + // C++11 [class.ctor]p5, C++11 [class.copy]p11: + // A defaulted [special member] for a class X is defined as + // deleted if: + // -- X is a union-like class that has a variant member with a + // non-trivial [corresponding special member] + if (isUnion()) { + if (FieldRec->hasNonTrivialMoveConstructor()) + data().DefaultedMoveConstructorIsDeleted = true; + if (FieldRec->hasNonTrivialMoveAssignment()) + data().DefaultedMoveAssignmentIsDeleted = true; + if (FieldRec->hasNonTrivialDestructor()) + data().DefaultedDestructorIsDeleted = true; + } + + // C++0x [class.ctor]p5: + // A default constructor is trivial [...] if: + // -- for all the non-static data members of its class that are of + // class type (or array thereof), each such class has a trivial + // default constructor. + if (!FieldRec->hasTrivialDefaultConstructor()) + data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor; + + // C++0x [class.copy]p13: + // A copy/move constructor for class X is trivial if [...] + // [...] + // -- for each non-static data member of X that is of class type (or + // an array thereof), the constructor selected to copy/move that + // member is trivial; + if (!FieldRec->hasTrivialCopyConstructor()) + data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor; + // If the field doesn't have a simple move constructor, we'll eagerly + // declare the move constructor for this class and we'll decide whether + // it's trivial then. + if (!FieldRec->hasTrivialMoveConstructor()) + data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor; + + // C++0x [class.copy]p27: + // A copy/move assignment operator for class X is trivial if [...] + // [...] 
        //    -- for each non-static data member of X that is of class type (or
        //       an array thereof), the assignment operator selected to
        //       copy/move that member is trivial;
        if (!FieldRec->hasTrivialCopyAssignment())
          data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
        // If the field doesn't have a simple move assignment, we'll eagerly
        // declare the move assignment for this class and we'll decide whether
        // it's trivial then.
        if (!FieldRec->hasTrivialMoveAssignment())
          data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;

        if (!FieldRec->hasTrivialDestructor())
          data().HasTrivialSpecialMembers &= ~SMF_Destructor;
        if (!FieldRec->hasIrrelevantDestructor())
          data().HasIrrelevantDestructor = false;
        if (FieldRec->hasObjectMember())
          setHasObjectMember(true);
        if (FieldRec->hasVolatileMember())
          setHasVolatileMember(true);

        // C++0x [class]p7:
        //   A standard-layout class is a class that:
        //    -- has no non-static data members of type non-standard-layout
        //       class (or array of such types) [...]
        if (!FieldRec->isStandardLayout())
          data().IsStandardLayout = false;

        // C++0x [class]p7:
        //   A standard-layout class is a class that:
        //    [...]
        //    -- has no base classes of the same type as the first non-static
        //       data member.
        // We don't want to expend bits in the state of the record decl
        // tracking whether this is the first non-static data member so we
        // cheat a bit and use some of the existing state: the empty bit.
        // Virtual bases and virtual methods make a class non-empty, but they
        // also make it non-standard-layout so we needn't check here.
        // A non-empty base class may leave the class standard-layout, but not
        // if we have arrived here, and have at least one non-static data
        // member. If IsStandardLayout remains true, then the first non-static
        // data member must come through here with Empty still true, and Empty
        // will subsequently be set to false below.
        if (data().IsStandardLayout && data().Empty) {
          for (CXXRecordDecl::base_class_const_iterator BI = bases_begin(),
                                                        BE = bases_end();
               BI != BE; ++BI) {
            if (Context.hasSameUnqualifiedType(BI->getType(), T)) {
              data().IsStandardLayout = false;
              break;
            }
          }
        }

        // Keep track of the presence of mutable fields.
        if (FieldRec->hasMutableFields())
          data().HasMutableFields = true;

        // C++11 [class.copy]p13:
        //   If the implicitly-defined constructor would satisfy the
        //   requirements of a constexpr constructor, the implicitly-defined
        //   constructor is constexpr.
        // C++11 [dcl.constexpr]p4:
        //    -- every constructor involved in initializing non-static data
        //       members [...] shall be a constexpr constructor
        if (!Field->hasInClassInitializer() &&
            !FieldRec->hasConstexprDefaultConstructor() && !isUnion())
          // The standard requires any in-class initializer to be a constant
          // expression. We consider this to be a defect.
          data().DefaultedDefaultConstructorIsConstexpr = false;

        // C++11 [class.copy]p8:
        //   The implicitly-declared copy constructor for a class X will have
        //   the form 'X::X(const X&)' if [...] for all the non-static data
        //   members of X that are of a class type M (or array thereof), each
        //   such class type has a copy constructor whose first parameter is
        //   of type 'const M&' or 'const volatile M&'.
        if (!FieldRec->hasCopyConstructorWithConstParam())
          data().ImplicitCopyConstructorHasConstParam = false;

        // C++11 [class.copy]p18:
        //   The implicitly-declared copy assignment operator for a class X will
        //   have the form 'X& X::operator=(const X&)' if [...] for all the
        //   non-static data members of X that are of a class type M (or array
        //   thereof), each such class type has a copy assignment operator whose
        //   parameter is of type 'const M&', 'const volatile M&' or 'M'.
+ if (!FieldRec->hasCopyAssignmentWithConstParam()) + data().ImplicitCopyAssignmentHasConstParam = false; + + if (FieldRec->hasUninitializedReferenceMember() && + !Field->hasInClassInitializer()) + data().HasUninitializedReferenceMember = true; + } + } else { + // Base element type of field is a non-class type. + if (!T->isLiteralType(Context) || + (!Field->hasInClassInitializer() && !isUnion())) + data().DefaultedDefaultConstructorIsConstexpr = false; + + // C++11 [class.copy]p23: + // A defaulted copy/move assignment operator for a class X is defined + // as deleted if X has: + // -- a non-static data member of const non-class type (or array + // thereof) + if (T.isConstQualified()) + data().DefaultedMoveAssignmentIsDeleted = true; + } + + // C++0x [class]p7: + // A standard-layout class is a class that: + // [...] + // -- either has no non-static data members in the most derived + // class and at most one base class with non-static data members, + // or has no base classes with non-static data members, and + // At this point we know that we have a non-static data member, so the last + // clause holds. + if (!data().HasNoNonEmptyBases) + data().IsStandardLayout = false; + + // If this is not a zero-length bit-field, then the class is not empty. + if (data().Empty) { + if (!Field->isBitField() || + (!Field->getBitWidth()->isTypeDependent() && + !Field->getBitWidth()->isValueDependent() && + Field->getBitWidthValue(Context) != 0)) + data().Empty = false; + } + } + + // Handle using declarations of conversion functions. 
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D)) { + if (Shadow->getDeclName().getNameKind() + == DeclarationName::CXXConversionFunctionName) { + ASTContext &Ctx = getASTContext(); + data().Conversions.get(Ctx).addDecl(Ctx, Shadow, Shadow->getAccess()); + } + } +} + +void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) { + assert(!D->isImplicit() && !D->isUserProvided()); + + // The kind of special member this declaration is, if any. + unsigned SMKind = 0; + + if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) { + if (Constructor->isDefaultConstructor()) { + SMKind |= SMF_DefaultConstructor; + if (Constructor->isConstexpr()) + data().HasConstexprDefaultConstructor = true; + } + if (Constructor->isCopyConstructor()) + SMKind |= SMF_CopyConstructor; + else if (Constructor->isMoveConstructor()) + SMKind |= SMF_MoveConstructor; + else if (Constructor->isConstexpr()) + // We may now know that the constructor is constexpr. + data().HasConstexprNonCopyMoveConstructor = true; + } else if (isa<CXXDestructorDecl>(D)) + SMKind |= SMF_Destructor; + else if (D->isCopyAssignmentOperator()) + SMKind |= SMF_CopyAssignment; + else if (D->isMoveAssignmentOperator()) + SMKind |= SMF_MoveAssignment; + + // Update which trivial / non-trivial special members we have. + // addedMember will have skipped this step for this member. 
+ if (D->isTrivial()) + data().HasTrivialSpecialMembers |= SMKind; + else + data().DeclaredNonTrivialSpecialMembers |= SMKind; +} + +bool CXXRecordDecl::isCLike() const { + if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface || + !TemplateOrInstantiation.isNull()) + return false; + if (!hasDefinition()) + return true; + + return isPOD() && data().HasOnlyCMembers; +} + +bool CXXRecordDecl::isGenericLambda() const { + if (!isLambda()) return false; + return getLambdaData().IsGenericLambda; +} + +CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const { + if (!isLambda()) return 0; + DeclarationName Name = + getASTContext().DeclarationNames.getCXXOperatorName(OO_Call); + DeclContext::lookup_const_result Calls = lookup(Name); + + assert(!Calls.empty() && "Missing lambda call operator!"); + assert(Calls.size() == 1 && "More than one lambda call operator!"); + + NamedDecl *CallOp = Calls.front(); + if (FunctionTemplateDecl *CallOpTmpl = + dyn_cast<FunctionTemplateDecl>(CallOp)) + return cast<CXXMethodDecl>(CallOpTmpl->getTemplatedDecl()); + + return cast<CXXMethodDecl>(CallOp); +} + +CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const { + if (!isLambda()) return 0; + DeclarationName Name = + &getASTContext().Idents.get(getLambdaStaticInvokerName()); + DeclContext::lookup_const_result Invoker = lookup(Name); + if (Invoker.empty()) return 0; + assert(Invoker.size() == 1 && "More than one static invoker operator!"); + NamedDecl *InvokerFun = Invoker.front(); + if (FunctionTemplateDecl *InvokerTemplate = + dyn_cast<FunctionTemplateDecl>(InvokerFun)) + return cast<CXXMethodDecl>(InvokerTemplate->getTemplatedDecl()); + + return cast<CXXMethodDecl>(InvokerFun); +} + +void CXXRecordDecl::getCaptureFields( + llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures, + FieldDecl *&ThisCapture) const { + Captures.clear(); + ThisCapture = 0; + + LambdaDefinitionData &Lambda = getLambdaData(); + RecordDecl::field_iterator Field = field_begin(); + for 
(LambdaExpr::Capture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures; + C != CEnd; ++C, ++Field) { + if (C->capturesThis()) + ThisCapture = *Field; + else if (C->capturesVariable()) + Captures[C->getCapturedVar()] = *Field; + } + assert(Field == field_end()); +} + +TemplateParameterList * +CXXRecordDecl::getGenericLambdaTemplateParameterList() const { + if (!isLambda()) return 0; + CXXMethodDecl *CallOp = getLambdaCallOperator(); + if (FunctionTemplateDecl *Tmpl = CallOp->getDescribedFunctionTemplate()) + return Tmpl->getTemplateParameters(); + return 0; +} + +static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) { + QualType T; + if (isa<UsingShadowDecl>(Conv)) + Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl(); + if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv)) + T = ConvTemp->getTemplatedDecl()->getResultType(); + else + T = cast<CXXConversionDecl>(Conv)->getConversionType(); + return Context.getCanonicalType(T); +} + +/// Collect the visible conversions of a base class. 
+/// +/// \param Record a base class of the class we're considering +/// \param InVirtual whether this base class is a virtual base (or a base +/// of a virtual base) +/// \param Access the access along the inheritance path to this base +/// \param ParentHiddenTypes the conversions provided by the inheritors +/// of this base +/// \param Output the set to which to add conversions from non-virtual bases +/// \param VOutput the set to which to add conversions from virtual bases +/// \param HiddenVBaseCs the set of conversions which were hidden in a +/// virtual base along some inheritance path +static void CollectVisibleConversions(ASTContext &Context, + CXXRecordDecl *Record, + bool InVirtual, + AccessSpecifier Access, + const llvm::SmallPtrSet<CanQualType, 8> &ParentHiddenTypes, + ASTUnresolvedSet &Output, + UnresolvedSetImpl &VOutput, + llvm::SmallPtrSet<NamedDecl*, 8> &HiddenVBaseCs) { + // The set of types which have conversions in this class or its + // subclasses. As an optimization, we don't copy the derived set + // unless it might change. + const llvm::SmallPtrSet<CanQualType, 8> *HiddenTypes = &ParentHiddenTypes; + llvm::SmallPtrSet<CanQualType, 8> HiddenTypesBuffer; + + // Collect the direct conversions and figure out which conversions + // will be hidden in the subclasses. + CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin(); + CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end(); + if (ConvI != ConvE) { + HiddenTypesBuffer = ParentHiddenTypes; + HiddenTypes = &HiddenTypesBuffer; + + for (CXXRecordDecl::conversion_iterator I = ConvI; I != ConvE; ++I) { + CanQualType ConvType(GetConversionType(Context, I.getDecl())); + bool Hidden = ParentHiddenTypes.count(ConvType); + if (!Hidden) + HiddenTypesBuffer.insert(ConvType); + + // If this conversion is hidden and we're in a virtual base, + // remember that it's hidden along some inheritance path. 
+ if (Hidden && InVirtual) + HiddenVBaseCs.insert(cast<NamedDecl>(I.getDecl()->getCanonicalDecl())); + + // If this conversion isn't hidden, add it to the appropriate output. + else if (!Hidden) { + AccessSpecifier IAccess + = CXXRecordDecl::MergeAccess(Access, I.getAccess()); + + if (InVirtual) + VOutput.addDecl(I.getDecl(), IAccess); + else + Output.addDecl(Context, I.getDecl(), IAccess); + } + } + } + + // Collect information recursively from any base classes. + for (CXXRecordDecl::base_class_iterator + I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) { + const RecordType *RT = I->getType()->getAs<RecordType>(); + if (!RT) continue; + + AccessSpecifier BaseAccess + = CXXRecordDecl::MergeAccess(Access, I->getAccessSpecifier()); + bool BaseInVirtual = InVirtual || I->isVirtual(); + + CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl()); + CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess, + *HiddenTypes, Output, VOutput, HiddenVBaseCs); + } +} + +/// Collect the visible conversions of a class. +/// +/// This would be extremely straightforward if it weren't for virtual +/// bases. It might be worth special-casing that, really. +static void CollectVisibleConversions(ASTContext &Context, + CXXRecordDecl *Record, + ASTUnresolvedSet &Output) { + // The collection of all conversions in virtual bases that we've + // found. These will be added to the output as long as they don't + // appear in the hidden-conversions set. + UnresolvedSet<8> VBaseCs; + + // The set of conversions in virtual bases that we've determined to + // be hidden. + llvm::SmallPtrSet<NamedDecl*, 8> HiddenVBaseCs; + + // The set of types hidden by classes derived from this one. + llvm::SmallPtrSet<CanQualType, 8> HiddenTypes; + + // Go ahead and collect the direct conversions and add them to the + // hidden-types set. 
+ CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin(); + CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end(); + Output.append(Context, ConvI, ConvE); + for (; ConvI != ConvE; ++ConvI) + HiddenTypes.insert(GetConversionType(Context, ConvI.getDecl())); + + // Recursively collect conversions from base classes. + for (CXXRecordDecl::base_class_iterator + I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) { + const RecordType *RT = I->getType()->getAs<RecordType>(); + if (!RT) continue; + + CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()), + I->isVirtual(), I->getAccessSpecifier(), + HiddenTypes, Output, VBaseCs, HiddenVBaseCs); + } + + // Add any unhidden conversions provided by virtual bases. + for (UnresolvedSetIterator I = VBaseCs.begin(), E = VBaseCs.end(); + I != E; ++I) { + if (!HiddenVBaseCs.count(cast<NamedDecl>(I.getDecl()->getCanonicalDecl()))) + Output.addDecl(Context, I.getDecl(), I.getAccess()); + } +} + +/// getVisibleConversionFunctions - get all conversion functions visible +/// in current class; including conversion function templates. +std::pair<CXXRecordDecl::conversion_iterator,CXXRecordDecl::conversion_iterator> +CXXRecordDecl::getVisibleConversionFunctions() { + ASTContext &Ctx = getASTContext(); + + ASTUnresolvedSet *Set; + if (bases_begin() == bases_end()) { + // If root class, all conversions are visible. + Set = &data().Conversions.get(Ctx); + } else { + Set = &data().VisibleConversions.get(Ctx); + // If visible conversion list is not evaluated, evaluate it. + if (!data().ComputedVisibleConversions) { + CollectVisibleConversions(Ctx, this, *Set); + data().ComputedVisibleConversions = true; + } + } + return std::make_pair(Set->begin(), Set->end()); +} + +void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) { + // This operation is O(N) but extremely rare. 
Sema only uses it to + // remove UsingShadowDecls in a class that were followed by a direct + // declaration, e.g.: + // class A : B { + // using B::operator int; + // operator int(); + // }; + // This is uncommon by itself and even more uncommon in conjunction + // with sufficiently large numbers of directly-declared conversions + // that asymptotic behavior matters. + + ASTUnresolvedSet &Convs = data().Conversions.get(getASTContext()); + for (unsigned I = 0, E = Convs.size(); I != E; ++I) { + if (Convs[I].getDecl() == ConvDecl) { + Convs.erase(I); + assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end() + && "conversion was found multiple times in unresolved set"); + return; + } + } + + llvm_unreachable("conversion not found in set!"); +} + +CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const { + if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) + return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom()); + + return 0; +} + +void +CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD, + TemplateSpecializationKind TSK) { + assert(TemplateOrInstantiation.isNull() && + "Previous template or instantiation?"); + assert(!isa<ClassTemplatePartialSpecializationDecl>(this)); + TemplateOrInstantiation + = new (getASTContext()) MemberSpecializationInfo(RD, TSK); +} + +TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{ + if (const ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(this)) + return Spec->getSpecializationKind(); + + if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) + return MSInfo->getTemplateSpecializationKind(); + + return TSK_Undeclared; +} + +void +CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) { + if (ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(this)) { + Spec->setSpecializationKind(TSK); + return; + } + + if (MemberSpecializationInfo *MSInfo = 
getMemberSpecializationInfo()) { + MSInfo->setTemplateSpecializationKind(TSK); + return; + } + + llvm_unreachable("Not a class template or member class specialization"); +} + +CXXDestructorDecl *CXXRecordDecl::getDestructor() const { + ASTContext &Context = getASTContext(); + QualType ClassType = Context.getTypeDeclType(this); + + DeclarationName Name + = Context.DeclarationNames.getCXXDestructorName( + Context.getCanonicalType(ClassType)); + + DeclContext::lookup_const_result R = lookup(Name); + if (R.empty()) + return 0; + + CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(R.front()); + return Dtor; +} + +void CXXRecordDecl::completeDefinition() { + completeDefinition(0); +} + +void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) { + RecordDecl::completeDefinition(); + + if (hasObjectMember() && getASTContext().getLangOpts().ObjCAutoRefCount) { + // Objective-C Automatic Reference Counting: + // If a class has a non-static data member of Objective-C pointer + // type (or array thereof), it is a non-POD type and its + // default constructor (if any), copy constructor, move constructor, + // copy assignment operator, move assignment operator, and destructor are + // non-trivial. + struct DefinitionData &Data = data(); + Data.PlainOldData = false; + Data.HasTrivialSpecialMembers = 0; + Data.HasIrrelevantDestructor = false; + } + + // If the class may be abstract (but hasn't been marked as such), check for + // any pure final overriders. 
+ if (mayBeAbstract()) { + CXXFinalOverriderMap MyFinalOverriders; + if (!FinalOverriders) { + getFinalOverriders(MyFinalOverriders); + FinalOverriders = &MyFinalOverriders; + } + + bool Done = false; + for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(), + MEnd = FinalOverriders->end(); + M != MEnd && !Done; ++M) { + for (OverridingMethods::iterator SO = M->second.begin(), + SOEnd = M->second.end(); + SO != SOEnd && !Done; ++SO) { + assert(SO->second.size() > 0 && + "All virtual functions have overridding virtual functions"); + + // C++ [class.abstract]p4: + // A class is abstract if it contains or inherits at least one + // pure virtual function for which the final overrider is pure + // virtual. + if (SO->second.front().Method->isPure()) { + data().Abstract = true; + Done = true; + break; + } + } + } + } + + // Set access bits correctly on the directly-declared conversions. + for (conversion_iterator I = conversion_begin(), E = conversion_end(); + I != E; ++I) + I.setAccess((*I)->getAccess()); +} + +bool CXXRecordDecl::mayBeAbstract() const { + if (data().Abstract || isInvalidDecl() || !data().Polymorphic || + isDependentContext()) + return false; + + for (CXXRecordDecl::base_class_const_iterator B = bases_begin(), + BEnd = bases_end(); + B != BEnd; ++B) { + CXXRecordDecl *BaseDecl + = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl()); + if (BaseDecl->isAbstract()) + return true; + } + + return false; +} + +void CXXMethodDecl::anchor() { } + +bool CXXMethodDecl::isStatic() const { + const CXXMethodDecl *MD = getCanonicalDecl(); + + if (MD->getStorageClass() == SC_Static) + return true; + + OverloadedOperatorKind OOK = getDeclName().getCXXOverloadedOperator(); + return isStaticOverloadedOperator(OOK); +} + +static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD, + const CXXMethodDecl *BaseMD) { + for (CXXMethodDecl::method_iterator I = DerivedMD->begin_overridden_methods(), + E = DerivedMD->end_overridden_methods(); I != E; 
++I) { + const CXXMethodDecl *MD = *I; + if (MD->getCanonicalDecl() == BaseMD->getCanonicalDecl()) + return true; + if (recursivelyOverrides(MD, BaseMD)) + return true; + } + return false; +} + +CXXMethodDecl * +CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD, + bool MayBeBase) { + if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl()) + return this; + + // Lookup doesn't work for destructors, so handle them separately. + if (isa<CXXDestructorDecl>(this)) { + CXXMethodDecl *MD = RD->getDestructor(); + if (MD) { + if (recursivelyOverrides(MD, this)) + return MD; + if (MayBeBase && recursivelyOverrides(this, MD)) + return MD; + } + return NULL; + } + + lookup_const_result Candidates = RD->lookup(getDeclName()); + for (NamedDecl * const * I = Candidates.begin(); I != Candidates.end(); ++I) { + CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*I); + if (!MD) + continue; + if (recursivelyOverrides(MD, this)) + return MD; + if (MayBeBase && recursivelyOverrides(this, MD)) + return MD; + } + + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + const RecordType *RT = I->getType()->getAs<RecordType>(); + if (!RT) + continue; + const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl()); + CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base); + if (T) + return T; + } + + return NULL; +} + +CXXMethodDecl * +CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD, + SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, + QualType T, TypeSourceInfo *TInfo, + StorageClass SC, bool isInline, + bool isConstexpr, SourceLocation EndLocation) { + return new (C) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo, + SC, isInline, isConstexpr, + EndLocation); +} + +CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl)); + return new (Mem) CXXMethodDecl(CXXMethod, 0, 
SourceLocation(), + DeclarationNameInfo(), QualType(), + 0, SC_None, false, false, + SourceLocation()); +} + +bool CXXMethodDecl::isUsualDeallocationFunction() const { + if (getOverloadedOperator() != OO_Delete && + getOverloadedOperator() != OO_Array_Delete) + return false; + + // C++ [basic.stc.dynamic.deallocation]p2: + // A template instance is never a usual deallocation function, + // regardless of its signature. + if (getPrimaryTemplate()) + return false; + + // C++ [basic.stc.dynamic.deallocation]p2: + // If a class T has a member deallocation function named operator delete + // with exactly one parameter, then that function is a usual (non-placement) + // deallocation function. [...] + if (getNumParams() == 1) + return true; + + // C++ [basic.stc.dynamic.deallocation]p2: + // [...] If class T does not declare such an operator delete but does + // declare a member deallocation function named operator delete with + // exactly two parameters, the second of which has type std::size_t (18.1), + // then this function is a usual deallocation function. + ASTContext &Context = getASTContext(); + if (getNumParams() != 2 || + !Context.hasSameUnqualifiedType(getParamDecl(1)->getType(), + Context.getSizeType())) + return false; + + // This function is a usual deallocation function if there are no + // single-parameter deallocation functions of the same kind. + DeclContext::lookup_const_result R = getDeclContext()->lookup(getDeclName()); + for (DeclContext::lookup_const_result::iterator I = R.begin(), E = R.end(); + I != E; ++I) { + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) + if (FD->getNumParams() == 1) + return false; + } + + return true; +} + +bool CXXMethodDecl::isCopyAssignmentOperator() const { + // C++0x [class.copy]p17: + // A user-declared copy assignment operator X::operator= is a non-static + // non-template member function of class X with exactly one parameter of + // type X, X&, const X&, volatile X& or const volatile X&. 
+ if (/*operator=*/getOverloadedOperator() != OO_Equal || + /*non-static*/ isStatic() || + /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate() || + getNumParams() != 1) + return false; + + QualType ParamType = getParamDecl(0)->getType(); + if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>()) + ParamType = Ref->getPointeeType(); + + ASTContext &Context = getASTContext(); + QualType ClassType + = Context.getCanonicalType(Context.getTypeDeclType(getParent())); + return Context.hasSameUnqualifiedType(ClassType, ParamType); +} + +bool CXXMethodDecl::isMoveAssignmentOperator() const { + // C++0x [class.copy]p19: + // A user-declared move assignment operator X::operator= is a non-static + // non-template member function of class X with exactly one parameter of type + // X&&, const X&&, volatile X&&, or const volatile X&&. + if (getOverloadedOperator() != OO_Equal || isStatic() || + getPrimaryTemplate() || getDescribedFunctionTemplate() || + getNumParams() != 1) + return false; + + QualType ParamType = getParamDecl(0)->getType(); + if (!isa<RValueReferenceType>(ParamType)) + return false; + ParamType = ParamType->getPointeeType(); + + ASTContext &Context = getASTContext(); + QualType ClassType + = Context.getCanonicalType(Context.getTypeDeclType(getParent())); + return Context.hasSameUnqualifiedType(ClassType, ParamType); +} + +void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) { + assert(MD->isCanonicalDecl() && "Method is not canonical!"); + assert(!MD->getParent()->isDependentContext() && + "Can't add an overridden method to a class template!"); + assert(MD->isVirtual() && "Method is not virtual!"); + + getASTContext().addOverriddenMethod(this, MD); +} + +CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const { + if (isa<CXXConstructorDecl>(this)) return 0; + return getASTContext().overridden_methods_begin(this); +} + +CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() 
const { + if (isa<CXXConstructorDecl>(this)) return 0; + return getASTContext().overridden_methods_end(this); +} + +unsigned CXXMethodDecl::size_overridden_methods() const { + if (isa<CXXConstructorDecl>(this)) return 0; + return getASTContext().overridden_methods_size(this); +} + +QualType CXXMethodDecl::getThisType(ASTContext &C) const { + // C++ 9.3.2p1: The type of this in a member function of a class X is X*. + // If the member function is declared const, the type of this is const X*, + // if the member function is declared volatile, the type of this is + // volatile X*, and if the member function is declared const volatile, + // the type of this is const volatile X*. + + assert(isInstance() && "No 'this' for static methods!"); + + QualType ClassTy = C.getTypeDeclType(getParent()); + ClassTy = C.getQualifiedType(ClassTy, + Qualifiers::fromCVRMask(getTypeQualifiers())); + return C.getPointerType(ClassTy); +} + +bool CXXMethodDecl::hasInlineBody() const { + // If this function is a template instantiation, look at the template from + // which it was instantiated. 
+ const FunctionDecl *CheckFn = getTemplateInstantiationPattern(); + if (!CheckFn) + CheckFn = this; + + const FunctionDecl *fn; + return CheckFn->hasBody(fn) && !fn->isOutOfLine(); +} + +bool CXXMethodDecl::isLambdaStaticInvoker() const { + const CXXRecordDecl *P = getParent(); + if (P->isLambda()) { + if (const CXXMethodDecl *StaticInvoker = P->getLambdaStaticInvoker()) { + if (StaticInvoker == this) return true; + if (P->isGenericLambda() && this->isFunctionTemplateSpecialization()) + return StaticInvoker == this->getPrimaryTemplate()->getTemplatedDecl(); + } + } + return false; +} + +CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, + TypeSourceInfo *TInfo, bool IsVirtual, + SourceLocation L, Expr *Init, + SourceLocation R, + SourceLocation EllipsisLoc) + : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init), + LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual), + IsWritten(false), SourceOrderOrNumArrayIndices(0) +{ +} + +CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, + FieldDecl *Member, + SourceLocation MemberLoc, + SourceLocation L, Expr *Init, + SourceLocation R) + : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init), + LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false), + IsWritten(false), SourceOrderOrNumArrayIndices(0) +{ +} + +CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, + IndirectFieldDecl *Member, + SourceLocation MemberLoc, + SourceLocation L, Expr *Init, + SourceLocation R) + : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init), + LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false), + IsWritten(false), SourceOrderOrNumArrayIndices(0) +{ +} + +CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, + TypeSourceInfo *TInfo, + SourceLocation L, Expr *Init, + SourceLocation R) + : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init), + LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false), + 
IsWritten(false), SourceOrderOrNumArrayIndices(0) +{ +} + +CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, + FieldDecl *Member, + SourceLocation MemberLoc, + SourceLocation L, Expr *Init, + SourceLocation R, + VarDecl **Indices, + unsigned NumIndices) + : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init), + LParenLoc(L), RParenLoc(R), IsVirtual(false), + IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices) +{ + VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1); + memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *)); +} + +CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context, + FieldDecl *Member, + SourceLocation MemberLoc, + SourceLocation L, Expr *Init, + SourceLocation R, + VarDecl **Indices, + unsigned NumIndices) { + void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) + + sizeof(VarDecl *) * NumIndices, + llvm::alignOf<CXXCtorInitializer>()); + return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R, + Indices, NumIndices); +} + +TypeLoc CXXCtorInitializer::getBaseClassLoc() const { + if (isBaseInitializer()) + return Initializee.get<TypeSourceInfo*>()->getTypeLoc(); + else + return TypeLoc(); +} + +const Type *CXXCtorInitializer::getBaseClass() const { + if (isBaseInitializer()) + return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr(); + else + return 0; +} + +SourceLocation CXXCtorInitializer::getSourceLocation() const { + if (isAnyMemberInitializer()) + return getMemberLocation(); + + if (isInClassMemberInitializer()) + return getAnyMember()->getLocation(); + + if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>()) + return TSInfo->getTypeLoc().getLocalSourceRange().getBegin(); + + return SourceLocation(); +} + +SourceRange CXXCtorInitializer::getSourceRange() const { + if (isInClassMemberInitializer()) { + FieldDecl *D = getAnyMember(); + if (Expr *I = D->getInClassInitializer()) + return I->getSourceRange(); + return SourceRange(); + } + + 
return SourceRange(getSourceLocation(), getRParenLoc()); +} + +void CXXConstructorDecl::anchor() { } + +CXXConstructorDecl * +CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl)); + return new (Mem) CXXConstructorDecl(0, SourceLocation(),DeclarationNameInfo(), + QualType(), 0, false, false, false,false); +} + +CXXConstructorDecl * +CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD, + SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, + QualType T, TypeSourceInfo *TInfo, + bool isExplicit, bool isInline, + bool isImplicitlyDeclared, bool isConstexpr) { + assert(NameInfo.getName().getNameKind() + == DeclarationName::CXXConstructorName && + "Name must refer to a constructor"); + return new (C) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo, + isExplicit, isInline, isImplicitlyDeclared, + isConstexpr); +} + +CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const { + assert(isDelegatingConstructor() && "Not a delegating constructor!"); + Expr *E = (*init_begin())->getInit()->IgnoreImplicit(); + if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E)) + return Construct->getConstructor(); + + return 0; +} + +bool CXXConstructorDecl::isDefaultConstructor() const { + // C++ [class.ctor]p5: + // A default constructor for a class X is a constructor of class + // X that can be called without an argument. + return (getNumParams() == 0) || + (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg()); +} + +bool +CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const { + return isCopyOrMoveConstructor(TypeQuals) && + getParamDecl(0)->getType()->isLValueReferenceType(); +} + +bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const { + return isCopyOrMoveConstructor(TypeQuals) && + getParamDecl(0)->getType()->isRValueReferenceType(); +} + +/// \brief Determine whether this is a copy or move constructor. 
+bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const { + // C++ [class.copy]p2: + // A non-template constructor for class X is a copy constructor + // if its first parameter is of type X&, const X&, volatile X& or + // const volatile X&, and either there are no other parameters + // or else all other parameters have default arguments (8.3.6). + // C++0x [class.copy]p3: + // A non-template constructor for class X is a move constructor if its + // first parameter is of type X&&, const X&&, volatile X&&, or + // const volatile X&&, and either there are no other parameters or else + // all other parameters have default arguments. + if ((getNumParams() < 1) || + (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) || + (getPrimaryTemplate() != 0) || + (getDescribedFunctionTemplate() != 0)) + return false; + + const ParmVarDecl *Param = getParamDecl(0); + + // Do we have a reference type? + const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>(); + if (!ParamRefType) + return false; + + // Is it a reference to our class type? + ASTContext &Context = getASTContext(); + + CanQualType PointeeType + = Context.getCanonicalType(ParamRefType->getPointeeType()); + CanQualType ClassTy + = Context.getCanonicalType(Context.getTagDeclType(getParent())); + if (PointeeType.getUnqualifiedType() != ClassTy) + return false; + + // FIXME: other qualifiers? + + // We have a copy or move constructor. + TypeQuals = PointeeType.getCVRQualifiers(); + return true; +} + +bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const { + // C++ [class.conv.ctor]p1: + // A constructor declared without the function-specifier explicit + // that can be called with a single parameter specifies a + // conversion from the type of its first parameter to the type of + // its class. Such a constructor is called a converting + // constructor. 
+ if (isExplicit() && !AllowExplicit) + return false; + + return (getNumParams() == 0 && + getType()->getAs<FunctionProtoType>()->isVariadic()) || + (getNumParams() == 1) || + (getNumParams() > 1 && + (getParamDecl(1)->hasDefaultArg() || + getParamDecl(1)->isParameterPack())); +} + +bool CXXConstructorDecl::isSpecializationCopyingObject() const { + if ((getNumParams() < 1) || + (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) || + (getPrimaryTemplate() == 0) || + (getDescribedFunctionTemplate() != 0)) + return false; + + const ParmVarDecl *Param = getParamDecl(0); + + ASTContext &Context = getASTContext(); + CanQualType ParamType = Context.getCanonicalType(Param->getType()); + + // Is it the same as our our class type? + CanQualType ClassTy + = Context.getCanonicalType(Context.getTagDeclType(getParent())); + if (ParamType.getUnqualifiedType() != ClassTy) + return false; + + return true; +} + +const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const { + // Hack: we store the inherited constructor in the overridden method table + method_iterator It = getASTContext().overridden_methods_begin(this); + if (It == getASTContext().overridden_methods_end(this)) + return 0; + + return cast<CXXConstructorDecl>(*It); +} + +void +CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){ + // Hack: we store the inherited constructor in the overridden method table + assert(getASTContext().overridden_methods_size(this) == 0 && + "Base ctor already set."); + getASTContext().addOverriddenMethod(this, BaseCtor); +} + +void CXXDestructorDecl::anchor() { } + +CXXDestructorDecl * +CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl)); + return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(), + QualType(), 0, false, false); +} + +CXXDestructorDecl * +CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD, + SourceLocation 
StartLoc, + const DeclarationNameInfo &NameInfo, + QualType T, TypeSourceInfo *TInfo, + bool isInline, bool isImplicitlyDeclared) { + assert(NameInfo.getName().getNameKind() + == DeclarationName::CXXDestructorName && + "Name must refer to a destructor"); + return new (C) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo, isInline, + isImplicitlyDeclared); +} + +void CXXConversionDecl::anchor() { } + +CXXConversionDecl * +CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl)); + return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(), + QualType(), 0, false, false, false, + SourceLocation()); +} + +CXXConversionDecl * +CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD, + SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, + QualType T, TypeSourceInfo *TInfo, + bool isInline, bool isExplicit, + bool isConstexpr, SourceLocation EndLocation) { + assert(NameInfo.getName().getNameKind() + == DeclarationName::CXXConversionFunctionName && + "Name must refer to a conversion function"); + return new (C) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo, + isInline, isExplicit, isConstexpr, + EndLocation); +} + +bool CXXConversionDecl::isLambdaToBlockPointerConversion() const { + return isImplicit() && getParent()->isLambda() && + getConversionType()->isBlockPointerType(); +} + +void LinkageSpecDecl::anchor() { } + +LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation ExternLoc, + SourceLocation LangLoc, + LanguageIDs Lang, + bool HasBraces) { + return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces); +} + +LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl)); + return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(), + lang_c, false); +} + +void UsingDirectiveDecl::anchor() { } 
+ +UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation L, + SourceLocation NamespaceLoc, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation IdentLoc, + NamedDecl *Used, + DeclContext *CommonAncestor) { + if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used)) + Used = NS->getOriginalNamespace(); + return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc, + IdentLoc, Used, CommonAncestor); +} + +UsingDirectiveDecl * +UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl)); + return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(), + NestedNameSpecifierLoc(), + SourceLocation(), 0, 0); +} + +NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() { + if (NamespaceAliasDecl *NA = + dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace)) + return NA->getNamespace(); + return cast_or_null<NamespaceDecl>(NominatedNamespace); +} + +void NamespaceDecl::anchor() { } + +NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline, + SourceLocation StartLoc, + SourceLocation IdLoc, IdentifierInfo *Id, + NamespaceDecl *PrevDecl) + : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace), + LocStart(StartLoc), RBraceLoc(), AnonOrFirstNamespaceAndInline(0, Inline) +{ + setPreviousDecl(PrevDecl); + + if (PrevDecl) + AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace()); +} + +NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC, + bool Inline, SourceLocation StartLoc, + SourceLocation IdLoc, IdentifierInfo *Id, + NamespaceDecl *PrevDecl) { + return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl); +} + +NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl)); + return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(), + 0, 0); +} + +void 
NamespaceAliasDecl::anchor() { } + +NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation UsingLoc, + SourceLocation AliasLoc, + IdentifierInfo *Alias, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation IdentLoc, + NamedDecl *Namespace) { + if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace)) + Namespace = NS->getOriginalNamespace(); + return new (C) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias, + QualifierLoc, IdentLoc, Namespace); +} + +NamespaceAliasDecl * +NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl)); + return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0, + NestedNameSpecifierLoc(), + SourceLocation(), 0); +} + +void UsingShadowDecl::anchor() { } + +UsingShadowDecl * +UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl)); + return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0); +} + +UsingDecl *UsingShadowDecl::getUsingDecl() const { + const UsingShadowDecl *Shadow = this; + while (const UsingShadowDecl *NextShadow = + dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow)) + Shadow = NextShadow; + return cast<UsingDecl>(Shadow->UsingOrNextShadow); +} + +void UsingDecl::anchor() { } + +void UsingDecl::addShadowDecl(UsingShadowDecl *S) { + assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() && + "declaration already in set"); + assert(S->getUsingDecl() == this); + + if (FirstUsingShadow.getPointer()) + S->UsingOrNextShadow = FirstUsingShadow.getPointer(); + FirstUsingShadow.setPointer(S); +} + +void UsingDecl::removeShadowDecl(UsingShadowDecl *S) { + assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() && + "declaration not in set"); + assert(S->getUsingDecl() == this); + + // Remove S from the shadow decl chain. This is O(n) but hopefully rare. 
+ + if (FirstUsingShadow.getPointer() == S) { + FirstUsingShadow.setPointer( + dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow)); + S->UsingOrNextShadow = this; + return; + } + + UsingShadowDecl *Prev = FirstUsingShadow.getPointer(); + while (Prev->UsingOrNextShadow != S) + Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow); + Prev->UsingOrNextShadow = S->UsingOrNextShadow; + S->UsingOrNextShadow = this; +} + +UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL, + NestedNameSpecifierLoc QualifierLoc, + const DeclarationNameInfo &NameInfo, + bool HasTypename) { + return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename); +} + +UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl)); + return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(), + DeclarationNameInfo(), false); +} + +SourceRange UsingDecl::getSourceRange() const { + SourceLocation Begin = isAccessDeclaration() + ? getQualifierLoc().getBeginLoc() : UsingLocation; + return SourceRange(Begin, getNameInfo().getEndLoc()); +} + +void UnresolvedUsingValueDecl::anchor() { } + +UnresolvedUsingValueDecl * +UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation UsingLoc, + NestedNameSpecifierLoc QualifierLoc, + const DeclarationNameInfo &NameInfo) { + return new (C) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc, + QualifierLoc, NameInfo); +} + +UnresolvedUsingValueDecl * +UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl)); + return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(), + NestedNameSpecifierLoc(), + DeclarationNameInfo()); +} + +SourceRange UnresolvedUsingValueDecl::getSourceRange() const { + SourceLocation Begin = isAccessDeclaration() + ? 
getQualifierLoc().getBeginLoc() : UsingLocation; + return SourceRange(Begin, getNameInfo().getEndLoc()); +} + +void UnresolvedUsingTypenameDecl::anchor() { } + +UnresolvedUsingTypenameDecl * +UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation UsingLoc, + SourceLocation TypenameLoc, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TargetNameLoc, + DeclarationName TargetName) { + return new (C) UnresolvedUsingTypenameDecl(DC, UsingLoc, TypenameLoc, + QualifierLoc, TargetNameLoc, + TargetName.getAsIdentifierInfo()); +} + +UnresolvedUsingTypenameDecl * +UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, + sizeof(UnresolvedUsingTypenameDecl)); + return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(), + SourceLocation(), + NestedNameSpecifierLoc(), + SourceLocation(), + 0); +} + +void StaticAssertDecl::anchor() { } + +StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StaticAssertLoc, + Expr *AssertExpr, + StringLiteral *Message, + SourceLocation RParenLoc, + bool Failed) { + return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message, + RParenLoc, Failed); +} + +StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl)); + return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0, + SourceLocation(), false); +} + +static const char *getAccessName(AccessSpecifier AS) { + switch (AS) { + case AS_none: + llvm_unreachable("Invalid access specifier!"); + case AS_public: + return "public"; + case AS_private: + return "private"; + case AS_protected: + return "protected"; + } + llvm_unreachable("Invalid access specifier!"); +} + +const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB, + AccessSpecifier AS) { + return DB << getAccessName(AS); +} + +const PartialDiagnostic 
&clang::operator<<(const PartialDiagnostic &DB, + AccessSpecifier AS) { + return DB << getAccessName(AS); +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp new file mode 100644 index 000000000000..1c639d676dc9 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp @@ -0,0 +1,70 @@ +//===--- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation --===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the AST classes related to C++ friend +// declarations. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclFriend.h" +#include "clang/AST/DeclTemplate.h" +using namespace clang; + +void FriendDecl::anchor() { } + +FriendDecl *FriendDecl::getNextFriendSlowCase() { + return cast_or_null<FriendDecl>( + NextFriend.get(getASTContext().getExternalSource())); +} + +FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation L, + FriendUnion Friend, + SourceLocation FriendL, + ArrayRef<TemplateParameterList*> FriendTypeTPLists) { +#ifndef NDEBUG + if (Friend.is<NamedDecl*>()) { + NamedDecl *D = Friend.get<NamedDecl*>(); + assert(isa<FunctionDecl>(D) || + isa<CXXRecordDecl>(D) || + isa<FunctionTemplateDecl>(D) || + isa<ClassTemplateDecl>(D)); + + // As a temporary hack, we permit template instantiation to point + // to the original declaration when instantiating members. + assert(D->getFriendObjectKind() || + (cast<CXXRecordDecl>(DC)->getTemplateSpecializationKind())); + // These template parameters are for friend types only. 
+ assert(FriendTypeTPLists.size() == 0); + } +#endif + + std::size_t Size = sizeof(FriendDecl) + + FriendTypeTPLists.size() * sizeof(TemplateParameterList*); + void *Mem = C.Allocate(Size); + FriendDecl *FD = new (Mem) FriendDecl(DC, L, Friend, FriendL, + FriendTypeTPLists); + cast<CXXRecordDecl>(DC)->pushFriendDecl(FD); + return FD; +} + +FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID, + unsigned FriendTypeNumTPLists) { + std::size_t Size = sizeof(FriendDecl) + + FriendTypeNumTPLists * sizeof(TemplateParameterList*); + void *Mem = AllocateDeserializedDecl(C, ID, Size); + return new (Mem) FriendDecl(EmptyShell(), FriendTypeNumTPLists); +} + +FriendDecl *CXXRecordDecl::getFirstFriend() const { + ExternalASTSource *Source = getParentASTContext().getExternalSource(); + Decl *First = data().FirstFriend.get(Source); + return First ? cast<FriendDecl>(First) : 0; +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp new file mode 100644 index 000000000000..9861f2278f9a --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp @@ -0,0 +1,32 @@ +//===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the DeclGroup and DeclGroupRef classes. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/DeclGroup.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "llvm/Support/Allocator.h" +using namespace clang; + +DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) { + assert(NumDecls > 1 && "Invalid DeclGroup"); + unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls; + void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment); + new (Mem) DeclGroup(NumDecls, Decls); + return static_cast<DeclGroup*>(Mem); +} + +DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) { + assert(numdecls > 0); + assert(decls); + memcpy(this+1, decls, numdecls * sizeof(*decls)); +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp new file mode 100644 index 000000000000..b2b5b70197b7 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp @@ -0,0 +1,1802 @@ +//===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Objective-C related Decl classes. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/DeclObjC.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTMutationListener.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Stmt.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallString.h" +using namespace clang; + +//===----------------------------------------------------------------------===// +// ObjCListBase +//===----------------------------------------------------------------------===// + +void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) { + List = 0; + if (Elts == 0) return; // Setting to an empty list is a noop. + + + List = new (Ctx) void*[Elts]; + NumElts = Elts; + memcpy(List, InList, sizeof(void*)*Elts); +} + +void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts, + const SourceLocation *Locs, ASTContext &Ctx) { + if (Elts == 0) + return; + + Locations = new (Ctx) SourceLocation[Elts]; + memcpy(Locations, Locs, sizeof(SourceLocation) * Elts); + set(InList, Elts, Ctx); +} + +//===----------------------------------------------------------------------===// +// ObjCInterfaceDecl +//===----------------------------------------------------------------------===// + +void ObjCContainerDecl::anchor() { } + +/// getIvarDecl - This method looks up an ivar in this ContextDecl. +/// +ObjCIvarDecl * +ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const { + lookup_const_result R = lookup(Id); + for (lookup_const_iterator Ivar = R.begin(), IvarEnd = R.end(); + Ivar != IvarEnd; ++Ivar) { + if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar)) + return ivar; + } + return 0; +} + +// Get the local instance/class method declared in this interface. +ObjCMethodDecl * +ObjCContainerDecl::getMethod(Selector Sel, bool isInstance, + bool AllowHidden) const { + // If this context is a hidden protocol definition, don't find any + // methods there. 
+ if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) { + if (const ObjCProtocolDecl *Def = Proto->getDefinition()) + if (Def->isHidden() && !AllowHidden) + return 0; + } + + // Since instance & class methods can have the same name, the loop below + // ensures we get the correct method. + // + // @interface Whatever + // - (int) class_method; + // + (float) class_method; + // @end + // + lookup_const_result R = lookup(Sel); + for (lookup_const_iterator Meth = R.begin(), MethEnd = R.end(); + Meth != MethEnd; ++Meth) { + ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth); + if (MD && MD->isInstanceMethod() == isInstance) + return MD; + } + return 0; +} + +/// HasUserDeclaredSetterMethod - This routine returns 'true' if a user declared setter +/// method was found in the class, its protocols, its super classes or categories. +/// It also returns 'true' if one of its categories has declared a 'readwrite' property. +/// This is because, user must provide a setter method for the category's 'readwrite' +/// property. +bool +ObjCContainerDecl::HasUserDeclaredSetterMethod(const ObjCPropertyDecl *Property) const { + Selector Sel = Property->getSetterName(); + lookup_const_result R = lookup(Sel); + for (lookup_const_iterator Meth = R.begin(), MethEnd = R.end(); + Meth != MethEnd; ++Meth) { + ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth); + if (MD && MD->isInstanceMethod() && !MD->isImplicit()) + return true; + } + + if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) { + // Also look into categories, including class extensions, looking + // for a user declared instance method. 
+ for (ObjCInterfaceDecl::visible_categories_iterator + Cat = ID->visible_categories_begin(), + CatEnd = ID->visible_categories_end(); + Cat != CatEnd; + ++Cat) { + if (ObjCMethodDecl *MD = Cat->getInstanceMethod(Sel)) + if (!MD->isImplicit()) + return true; + if (Cat->IsClassExtension()) + continue; + // Also search through the categories looking for a 'readwrite' declaration + // of this property. If one found, presumably a setter will be provided + // (properties declared in categories will not get auto-synthesized). + for (ObjCContainerDecl::prop_iterator P = Cat->prop_begin(), + E = Cat->prop_end(); P != E; ++P) + if (P->getIdentifier() == Property->getIdentifier()) { + if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) + return true; + break; + } + } + + // Also look into protocols, for a user declared instance method. + for (ObjCInterfaceDecl::all_protocol_iterator P = + ID->all_referenced_protocol_begin(), + PE = ID->all_referenced_protocol_end(); P != PE; ++P) { + ObjCProtocolDecl *Proto = (*P); + if (Proto->HasUserDeclaredSetterMethod(Property)) + return true; + } + // And in its super class. + ObjCInterfaceDecl *OSC = ID->getSuperClass(); + while (OSC) { + if (OSC->HasUserDeclaredSetterMethod(Property)) + return true; + OSC = OSC->getSuperClass(); + } + } + if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this)) + for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(), + E = PD->protocol_end(); PI != E; ++PI) { + if ((*PI)->HasUserDeclaredSetterMethod(Property)) + return true; + } + return false; +} + +ObjCPropertyDecl * +ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC, + IdentifierInfo *propertyID) { + // If this context is a hidden protocol definition, don't find any + // property. 
+ if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(DC)) { + if (const ObjCProtocolDecl *Def = Proto->getDefinition()) + if (Def->isHidden()) + return 0; + } + + DeclContext::lookup_const_result R = DC->lookup(propertyID); + for (DeclContext::lookup_const_iterator I = R.begin(), E = R.end(); I != E; + ++I) + if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I)) + return PD; + + return 0; +} + +IdentifierInfo * +ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const { + SmallString<128> ivarName; + { + llvm::raw_svector_ostream os(ivarName); + os << '_' << getIdentifier()->getName(); + } + return &Ctx.Idents.get(ivarName.str()); +} + +/// FindPropertyDeclaration - Finds declaration of the property given its name +/// in 'PropertyId' and returns it. It returns 0, if not found. +ObjCPropertyDecl * +ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const { + // Don't find properties within hidden protocol definitions. + if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) { + if (const ObjCProtocolDecl *Def = Proto->getDefinition()) + if (Def->isHidden()) + return 0; + } + + if (ObjCPropertyDecl *PD = + ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId)) + return PD; + + switch (getKind()) { + default: + break; + case Decl::ObjCProtocol: { + const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this); + for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(), + E = PID->protocol_end(); I != E; ++I) + if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId)) + return P; + break; + } + case Decl::ObjCInterface: { + const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this); + // Look through categories (but not extensions). 
+ for (ObjCInterfaceDecl::visible_categories_iterator + Cat = OID->visible_categories_begin(), + CatEnd = OID->visible_categories_end(); + Cat != CatEnd; ++Cat) { + if (!Cat->IsClassExtension()) + if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId)) + return P; + } + + // Look through protocols. + for (ObjCInterfaceDecl::all_protocol_iterator + I = OID->all_referenced_protocol_begin(), + E = OID->all_referenced_protocol_end(); I != E; ++I) + if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId)) + return P; + + // Finally, check the super class. + if (const ObjCInterfaceDecl *superClass = OID->getSuperClass()) + return superClass->FindPropertyDeclaration(PropertyId); + break; + } + case Decl::ObjCCategory: { + const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this); + // Look through protocols. + if (!OCD->IsClassExtension()) + for (ObjCCategoryDecl::protocol_iterator + I = OCD->protocol_begin(), E = OCD->protocol_end(); I != E; ++I) + if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId)) + return P; + + break; + } + } + return 0; +} + +void ObjCInterfaceDecl::anchor() { } + +/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property +/// with name 'PropertyId' in the primary class; including those in protocols +/// (direct or indirect) used by the primary class. +/// +ObjCPropertyDecl * +ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( + IdentifierInfo *PropertyId) const { + // FIXME: Should make sure no callers ever do this. + if (!hasDefinition()) + return 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + if (ObjCPropertyDecl *PD = + ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId)) + return PD; + + // Look through protocols. 
+ for (ObjCInterfaceDecl::all_protocol_iterator + I = all_referenced_protocol_begin(), + E = all_referenced_protocol_end(); I != E; ++I) + if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId)) + return P; + + return 0; +} + +void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM, + PropertyDeclOrder &PO) const { + for (ObjCContainerDecl::prop_iterator P = prop_begin(), + E = prop_end(); P != E; ++P) { + ObjCPropertyDecl *Prop = *P; + PM[Prop->getIdentifier()] = Prop; + PO.push_back(Prop); + } + for (ObjCInterfaceDecl::all_protocol_iterator + PI = all_referenced_protocol_begin(), + E = all_referenced_protocol_end(); PI != E; ++PI) + (*PI)->collectPropertiesToImplement(PM, PO); + // Note, the properties declared only in class extensions are still copied + // into the main @interface's property list, and therefore we don't + // explicitly, have to search class extension properties. +} + +bool ObjCInterfaceDecl::isArcWeakrefUnavailable() const { + const ObjCInterfaceDecl *Class = this; + while (Class) { + if (Class->hasAttr<ArcWeakrefUnavailableAttr>()) + return true; + Class = Class->getSuperClass(); + } + return false; +} + +const ObjCInterfaceDecl *ObjCInterfaceDecl::isObjCRequiresPropertyDefs() const { + const ObjCInterfaceDecl *Class = this; + while (Class) { + if (Class->hasAttr<ObjCRequiresPropertyDefsAttr>()) + return Class; + Class = Class->getSuperClass(); + } + return 0; +} + +void ObjCInterfaceDecl::mergeClassExtensionProtocolList( + ObjCProtocolDecl *const* ExtList, unsigned ExtNum, + ASTContext &C) +{ + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + if (data().AllReferencedProtocols.empty() && + data().ReferencedProtocols.empty()) { + data().AllReferencedProtocols.set(ExtList, ExtNum, C); + return; + } + + // Check for duplicate protocol in class's protocol list. + // This is O(n*m). But it is extremely rare and number of protocols in + // class or its extension are very few. 
+ SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs; + for (unsigned i = 0; i < ExtNum; i++) { + bool protocolExists = false; + ObjCProtocolDecl *ProtoInExtension = ExtList[i]; + for (all_protocol_iterator + p = all_referenced_protocol_begin(), + e = all_referenced_protocol_end(); p != e; ++p) { + ObjCProtocolDecl *Proto = (*p); + if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) { + protocolExists = true; + break; + } + } + // Do we want to warn on a protocol in extension class which + // already exist in the class? Probably not. + if (!protocolExists) + ProtocolRefs.push_back(ProtoInExtension); + } + + if (ProtocolRefs.empty()) + return; + + // Merge ProtocolRefs into class's protocol list; + for (all_protocol_iterator p = all_referenced_protocol_begin(), + e = all_referenced_protocol_end(); p != e; ++p) { + ProtocolRefs.push_back(*p); + } + + data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C); +} + +void ObjCInterfaceDecl::allocateDefinitionData() { + assert(!hasDefinition() && "ObjC class already has a definition"); + Data.setPointer(new (getASTContext()) DefinitionData()); + Data.getPointer()->Definition = this; + + // Make the type point at the definition, now that we have one. + if (TypeForDecl) + cast<ObjCInterfaceType>(TypeForDecl)->Decl = this; +} + +void ObjCInterfaceDecl::startDefinition() { + allocateDefinitionData(); + + // Update all of the declarations with a pointer to the definition. + for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end(); + RD != RDEnd; ++RD) { + if (*RD != this) + RD->Data = Data; + } +} + +ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID, + ObjCInterfaceDecl *&clsDeclared) { + // FIXME: Should make sure no callers ever do this. 
+ if (!hasDefinition()) + return 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + ObjCInterfaceDecl* ClassDecl = this; + while (ClassDecl != NULL) { + if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) { + clsDeclared = ClassDecl; + return I; + } + + for (ObjCInterfaceDecl::visible_extensions_iterator + Ext = ClassDecl->visible_extensions_begin(), + ExtEnd = ClassDecl->visible_extensions_end(); + Ext != ExtEnd; ++Ext) { + if (ObjCIvarDecl *I = Ext->getIvarDecl(ID)) { + clsDeclared = ClassDecl; + return I; + } + } + + ClassDecl = ClassDecl->getSuperClass(); + } + return NULL; +} + +/// lookupInheritedClass - This method returns ObjCInterfaceDecl * of the super +/// class whose name is passed as argument. If it is not one of the super classes +/// the it returns NULL. +ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass( + const IdentifierInfo*ICName) { + // FIXME: Should make sure no callers ever do this. + if (!hasDefinition()) + return 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + ObjCInterfaceDecl* ClassDecl = this; + while (ClassDecl != NULL) { + if (ClassDecl->getIdentifier() == ICName) + return ClassDecl; + ClassDecl = ClassDecl->getSuperClass(); + } + return NULL; +} + +ObjCProtocolDecl * +ObjCInterfaceDecl::lookupNestedProtocol(IdentifierInfo *Name) { + for (ObjCInterfaceDecl::all_protocol_iterator P = + all_referenced_protocol_begin(), PE = all_referenced_protocol_end(); + P != PE; ++P) + if ((*P)->lookupProtocolNamed(Name)) + return (*P); + ObjCInterfaceDecl *SuperClass = getSuperClass(); + return SuperClass ? SuperClass->lookupNestedProtocol(Name) : NULL; +} + +/// lookupMethod - This method returns an instance/class method by looking in +/// the class, its categories, and its super classes (using a linear search). +/// When argument category "C" is specified, any implicit method found +/// in this category is ignored. 
+ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel, + bool isInstance, + bool shallowCategoryLookup, + const ObjCCategoryDecl *C) const { + // FIXME: Should make sure no callers ever do this. + if (!hasDefinition()) + return 0; + + const ObjCInterfaceDecl* ClassDecl = this; + ObjCMethodDecl *MethodDecl = 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + while (ClassDecl != NULL) { + if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance))) + return MethodDecl; + + // Didn't find one yet - look through protocols. + for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(), + E = ClassDecl->protocol_end(); + I != E; ++I) + if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance))) + return MethodDecl; + + // Didn't find one yet - now look through categories. + for (ObjCInterfaceDecl::visible_categories_iterator + Cat = ClassDecl->visible_categories_begin(), + CatEnd = ClassDecl->visible_categories_end(); + Cat != CatEnd; ++Cat) { + if ((MethodDecl = Cat->getMethod(Sel, isInstance))) + if (C != (*Cat) || !MethodDecl->isImplicit()) + return MethodDecl; + + if (!shallowCategoryLookup) { + // Didn't find one yet - look through protocols. + const ObjCList<ObjCProtocolDecl> &Protocols = + Cat->getReferencedProtocols(); + for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(), + E = Protocols.end(); I != E; ++I) + if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance))) + if (C != (*Cat) || !MethodDecl->isImplicit()) + return MethodDecl; + } + } + + ClassDecl = ClassDecl->getSuperClass(); + } + return NULL; +} + +// Will search "local" class/category implementations for a method decl. +// If failed, then we search in class's root for an instance method. +// Returns 0 if no method is found. +ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod( + const Selector &Sel, + bool Instance) const { + // FIXME: Should make sure no callers ever do this. 
+ if (!hasDefinition()) + return 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + ObjCMethodDecl *Method = 0; + if (ObjCImplementationDecl *ImpDecl = getImplementation()) + Method = Instance ? ImpDecl->getInstanceMethod(Sel) + : ImpDecl->getClassMethod(Sel); + + // Look through local category implementations associated with the class. + if (!Method) + Method = Instance ? getCategoryInstanceMethod(Sel) + : getCategoryClassMethod(Sel); + + // Before we give up, check if the selector is an instance method. + // But only in the root. This matches gcc's behavior and what the + // runtime expects. + if (!Instance && !Method && !getSuperClass()) { + Method = lookupInstanceMethod(Sel); + // Look through local category implementations associated + // with the root class. + if (!Method) + Method = lookupPrivateMethod(Sel, true); + } + + if (!Method && getSuperClass()) + return getSuperClass()->lookupPrivateMethod(Sel, Instance); + return Method; +} + +//===----------------------------------------------------------------------===// +// ObjCMethodDecl +//===----------------------------------------------------------------------===// + +ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C, + SourceLocation beginLoc, + SourceLocation endLoc, + Selector SelInfo, QualType T, + TypeSourceInfo *ResultTInfo, + DeclContext *contextDecl, + bool isInstance, + bool isVariadic, + bool isPropertyAccessor, + bool isImplicitlyDeclared, + bool isDefined, + ImplementationControl impControl, + bool HasRelatedResultType) { + return new (C) ObjCMethodDecl(beginLoc, endLoc, + SelInfo, T, ResultTInfo, contextDecl, + isInstance, isVariadic, isPropertyAccessor, + isImplicitlyDeclared, isDefined, + impControl, + HasRelatedResultType); +} + +ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl)); + return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(), + Selector(), 
QualType(), 0, 0); +} + +Stmt *ObjCMethodDecl::getBody() const { + return Body.get(getASTContext().getExternalSource()); +} + +void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) { + assert(PrevMethod); + getASTContext().setObjCMethodRedeclaration(PrevMethod, this); + IsRedeclaration = true; + PrevMethod->HasRedeclaration = true; +} + +void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C, + ArrayRef<ParmVarDecl*> Params, + ArrayRef<SourceLocation> SelLocs) { + ParamsAndSelLocs = 0; + NumParams = Params.size(); + if (Params.empty() && SelLocs.empty()) + return; + + unsigned Size = sizeof(ParmVarDecl *) * NumParams + + sizeof(SourceLocation) * SelLocs.size(); + ParamsAndSelLocs = C.Allocate(Size); + std::copy(Params.begin(), Params.end(), getParams()); + std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs()); +} + +void ObjCMethodDecl::getSelectorLocs( + SmallVectorImpl<SourceLocation> &SelLocs) const { + for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i) + SelLocs.push_back(getSelectorLoc(i)); +} + +void ObjCMethodDecl::setMethodParams(ASTContext &C, + ArrayRef<ParmVarDecl*> Params, + ArrayRef<SourceLocation> SelLocs) { + assert((!SelLocs.empty() || isImplicit()) && + "No selector locs for non-implicit method"); + if (isImplicit()) + return setParamsAndSelLocs(C, Params, llvm::None); + + SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params, + DeclEndLoc); + if (SelLocsKind != SelLoc_NonStandard) + return setParamsAndSelLocs(C, Params, llvm::None); + + setParamsAndSelLocs(C, Params, SelLocs); +} + +/// \brief A definition will return its interface declaration. +/// An interface declaration will return its definition. +/// Otherwise it will return itself. 
+ObjCMethodDecl *ObjCMethodDecl::getNextRedeclaration() { + ASTContext &Ctx = getASTContext(); + ObjCMethodDecl *Redecl = 0; + if (HasRedeclaration) + Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this)); + if (Redecl) + return Redecl; + + Decl *CtxD = cast<Decl>(getDeclContext()); + + if (!CtxD->isInvalidDecl()) { + if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) { + if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD)) + if (!ImplD->isInvalidDecl()) + Redecl = ImplD->getMethod(getSelector(), isInstanceMethod()); + + } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) { + if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD)) + if (!ImplD->isInvalidDecl()) + Redecl = ImplD->getMethod(getSelector(), isInstanceMethod()); + + } else if (ObjCImplementationDecl *ImplD = + dyn_cast<ObjCImplementationDecl>(CtxD)) { + if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface()) + if (!IFD->isInvalidDecl()) + Redecl = IFD->getMethod(getSelector(), isInstanceMethod()); + + } else if (ObjCCategoryImplDecl *CImplD = + dyn_cast<ObjCCategoryImplDecl>(CtxD)) { + if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl()) + if (!CatD->isInvalidDecl()) + Redecl = CatD->getMethod(getSelector(), isInstanceMethod()); + } + } + + if (!Redecl && isRedeclaration()) { + // This is the last redeclaration, go back to the first method. + return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(), + isInstanceMethod()); + } + + return Redecl ? 
Redecl : this; +} + +ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() { + Decl *CtxD = cast<Decl>(getDeclContext()); + + if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) { + if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface()) + if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(), + isInstanceMethod())) + return MD; + + } else if (ObjCCategoryImplDecl *CImplD = + dyn_cast<ObjCCategoryImplDecl>(CtxD)) { + if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl()) + if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(), + isInstanceMethod())) + return MD; + } + + if (isRedeclaration()) + return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(), + isInstanceMethod()); + + return this; +} + +SourceLocation ObjCMethodDecl::getLocEnd() const { + if (Stmt *Body = getBody()) + return Body->getLocEnd(); + return DeclEndLoc; +} + +ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const { + ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family); + if (family != static_cast<unsigned>(InvalidObjCMethodFamily)) + return family; + + // Check for an explicit attribute. + if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) { + // The unfortunate necessity of mapping between enums here is due + // to the attributes framework. + switch (attr->getFamily()) { + case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break; + case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break; + case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break; + case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break; + case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break; + case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break; + } + Family = static_cast<unsigned>(family); + return family; + } + + family = getSelector().getMethodFamily(); + switch (family) { + case OMF_None: break; + + // init only has a conventional meaning for an instance method, and + // it has to return an object. 
+ case OMF_init: + if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType()) + family = OMF_None; + break; + + // alloc/copy/new have a conventional meaning for both class and + // instance methods, but they require an object return. + case OMF_alloc: + case OMF_copy: + case OMF_mutableCopy: + case OMF_new: + if (!getResultType()->isObjCObjectPointerType()) + family = OMF_None; + break; + + // These selectors have a conventional meaning only for instance methods. + case OMF_dealloc: + case OMF_finalize: + case OMF_retain: + case OMF_release: + case OMF_autorelease: + case OMF_retainCount: + case OMF_self: + if (!isInstanceMethod()) + family = OMF_None; + break; + + case OMF_performSelector: + if (!isInstanceMethod() || + !getResultType()->isObjCIdType()) + family = OMF_None; + else { + unsigned noParams = param_size(); + if (noParams < 1 || noParams > 3) + family = OMF_None; + else { + ObjCMethodDecl::arg_type_iterator it = arg_type_begin(); + QualType ArgT = (*it); + if (!ArgT->isObjCSelType()) { + family = OMF_None; + break; + } + while (--noParams) { + it++; + ArgT = (*it); + if (!ArgT->isObjCIdType()) { + family = OMF_None; + break; + } + } + } + } + break; + + } + + // Cache the result. + Family = static_cast<unsigned>(family); + return family; +} + +void ObjCMethodDecl::createImplicitParams(ASTContext &Context, + const ObjCInterfaceDecl *OID) { + QualType selfTy; + if (isInstanceMethod()) { + // There may be no interface context due to error in declaration + // of the interface (which has been reported). Recover gracefully. + if (OID) { + selfTy = Context.getObjCInterfaceType(OID); + selfTy = Context.getObjCObjectPointerType(selfTy); + } else { + selfTy = Context.getObjCIdType(); + } + } else // we have a factory method. 
+ selfTy = Context.getObjCClassType(); + + bool selfIsPseudoStrong = false; + bool selfIsConsumed = false; + + if (Context.getLangOpts().ObjCAutoRefCount) { + if (isInstanceMethod()) { + selfIsConsumed = hasAttr<NSConsumesSelfAttr>(); + + // 'self' is always __strong. It's actually pseudo-strong except + // in init methods (or methods labeled ns_consumes_self), though. + Qualifiers qs; + qs.setObjCLifetime(Qualifiers::OCL_Strong); + selfTy = Context.getQualifiedType(selfTy, qs); + + // In addition, 'self' is const unless this is an init method. + if (getMethodFamily() != OMF_init && !selfIsConsumed) { + selfTy = selfTy.withConst(); + selfIsPseudoStrong = true; + } + } + else { + assert(isClassMethod()); + // 'self' is always const in class methods. + selfTy = selfTy.withConst(); + selfIsPseudoStrong = true; + } + } + + ImplicitParamDecl *self + = ImplicitParamDecl::Create(Context, this, SourceLocation(), + &Context.Idents.get("self"), selfTy); + setSelfDecl(self); + + if (selfIsConsumed) + self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context)); + + if (selfIsPseudoStrong) + self->setARCPseudoStrong(true); + + setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(), + &Context.Idents.get("_cmd"), + Context.getObjCSelType())); +} + +ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() { + if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext())) + return ID; + if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext())) + return CD->getClassInterface(); + if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext())) + return IMD->getClassInterface(); + + assert(!isa<ObjCProtocolDecl>(getDeclContext()) && "It's a protocol method"); + llvm_unreachable("unknown method context"); +} + +static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container, + const ObjCMethodDecl *Method, + SmallVectorImpl<const ObjCMethodDecl *> &Methods, + bool MovedToSuper) { + if (!Container) + return; + + // In 
categories look for overriden methods from protocols. A method from + // category is not "overriden" since it is considered as the "same" method + // (same USR) as the one from the interface. + if (const ObjCCategoryDecl * + Category = dyn_cast<ObjCCategoryDecl>(Container)) { + // Check whether we have a matching method at this category but only if we + // are at the super class level. + if (MovedToSuper) + if (ObjCMethodDecl * + Overridden = Container->getMethod(Method->getSelector(), + Method->isInstanceMethod(), + /*AllowHidden=*/true)) + if (Method != Overridden) { + // We found an override at this category; there is no need to look + // into its protocols. + Methods.push_back(Overridden); + return; + } + + for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(), + PEnd = Category->protocol_end(); + P != PEnd; ++P) + CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper); + return; + } + + // Check whether we have a matching method at this level. + if (const ObjCMethodDecl * + Overridden = Container->getMethod(Method->getSelector(), + Method->isInstanceMethod(), + /*AllowHidden=*/true)) + if (Method != Overridden) { + // We found an override at this level; there is no need to look + // into other protocols or categories. 
+ Methods.push_back(Overridden); + return; + } + + if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){ + for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(), + PEnd = Protocol->protocol_end(); + P != PEnd; ++P) + CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper); + } + + if (const ObjCInterfaceDecl * + Interface = dyn_cast<ObjCInterfaceDecl>(Container)) { + for (ObjCInterfaceDecl::protocol_iterator P = Interface->protocol_begin(), + PEnd = Interface->protocol_end(); + P != PEnd; ++P) + CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper); + + for (ObjCInterfaceDecl::known_categories_iterator + Cat = Interface->known_categories_begin(), + CatEnd = Interface->known_categories_end(); + Cat != CatEnd; ++Cat) { + CollectOverriddenMethodsRecurse(*Cat, Method, Methods, + MovedToSuper); + } + + if (const ObjCInterfaceDecl *Super = Interface->getSuperClass()) + return CollectOverriddenMethodsRecurse(Super, Method, Methods, + /*MovedToSuper=*/true); + } +} + +static inline void CollectOverriddenMethods(const ObjCContainerDecl *Container, + const ObjCMethodDecl *Method, + SmallVectorImpl<const ObjCMethodDecl *> &Methods) { + CollectOverriddenMethodsRecurse(Container, Method, Methods, + /*MovedToSuper=*/false); +} + +static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method, + SmallVectorImpl<const ObjCMethodDecl *> &overridden) { + assert(Method->isOverriding()); + + if (const ObjCProtocolDecl * + ProtD = dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) { + CollectOverriddenMethods(ProtD, Method, overridden); + + } else if (const ObjCImplDecl * + IMD = dyn_cast<ObjCImplDecl>(Method->getDeclContext())) { + const ObjCInterfaceDecl *ID = IMD->getClassInterface(); + if (!ID) + return; + // Start searching for overridden methods using the method from the + // interface as starting point. 
+ if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(), + Method->isInstanceMethod(), + /*AllowHidden=*/true)) + Method = IFaceMeth; + CollectOverriddenMethods(ID, Method, overridden); + + } else if (const ObjCCategoryDecl * + CatD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) { + const ObjCInterfaceDecl *ID = CatD->getClassInterface(); + if (!ID) + return; + // Start searching for overridden methods using the method from the + // interface as starting point. + if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(), + Method->isInstanceMethod(), + /*AllowHidden=*/true)) + Method = IFaceMeth; + CollectOverriddenMethods(ID, Method, overridden); + + } else { + CollectOverriddenMethods( + dyn_cast_or_null<ObjCContainerDecl>(Method->getDeclContext()), + Method, overridden); + } +} + +void ObjCMethodDecl::getOverriddenMethods( + SmallVectorImpl<const ObjCMethodDecl *> &Overridden) const { + const ObjCMethodDecl *Method = this; + + if (Method->isRedeclaration()) { + Method = cast<ObjCContainerDecl>(Method->getDeclContext())-> + getMethod(Method->getSelector(), Method->isInstanceMethod()); + } + + if (Method->isOverriding()) { + collectOverriddenMethodsSlow(Method, Overridden); + assert(!Overridden.empty() && + "ObjCMethodDecl's overriding bit is not as expected"); + } +} + +const ObjCPropertyDecl * +ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const { + Selector Sel = getSelector(); + unsigned NumArgs = Sel.getNumArgs(); + if (NumArgs > 1) + return 0; + + if (!isInstanceMethod() || getMethodFamily() != OMF_None) + return 0; + + if (isPropertyAccessor()) { + const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent()); + // If container is class extension, find its primary class. 
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(Container)) + if (CatDecl->IsClassExtension()) + Container = CatDecl->getClassInterface(); + + bool IsGetter = (NumArgs == 0); + + for (ObjCContainerDecl::prop_iterator I = Container->prop_begin(), + E = Container->prop_end(); + I != E; ++I) { + Selector NextSel = IsGetter ? (*I)->getGetterName() + : (*I)->getSetterName(); + if (NextSel == Sel) + return *I; + } + + llvm_unreachable("Marked as a property accessor but no property found!"); + } + + if (!CheckOverrides) + return 0; + + typedef SmallVector<const ObjCMethodDecl *, 8> OverridesTy; + OverridesTy Overrides; + getOverriddenMethods(Overrides); + for (OverridesTy::const_iterator I = Overrides.begin(), E = Overrides.end(); + I != E; ++I) { + if (const ObjCPropertyDecl *Prop = (*I)->findPropertyDecl(false)) + return Prop; + } + + return 0; + +} + +//===----------------------------------------------------------------------===// +// ObjCInterfaceDecl +//===----------------------------------------------------------------------===// + +ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C, + DeclContext *DC, + SourceLocation atLoc, + IdentifierInfo *Id, + ObjCInterfaceDecl *PrevDecl, + SourceLocation ClassLoc, + bool isInternal){ + ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, + PrevDecl, isInternal); + Result->Data.setInt(!C.getLangOpts().Modules); + C.getObjCInterfaceType(Result, PrevDecl); + return Result; +} + +ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl)); + ObjCInterfaceDecl *Result = new (Mem) ObjCInterfaceDecl(0, SourceLocation(), + 0, SourceLocation(), + 0, false); + Result->Data.setInt(!C.getLangOpts().Modules); + return Result; +} + +ObjCInterfaceDecl:: +ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id, + SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl, + 
bool isInternal) + : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, atLoc), + TypeForDecl(0), Data() +{ + setPreviousDecl(PrevDecl); + + // Copy the 'data' pointer over. + if (PrevDecl) + Data = PrevDecl->Data; + + setImplicit(isInternal); +} + +void ObjCInterfaceDecl::LoadExternalDefinition() const { + assert(data().ExternallyCompleted && "Class is not externally completed"); + data().ExternallyCompleted = false; + getASTContext().getExternalSource()->CompleteType( + const_cast<ObjCInterfaceDecl *>(this)); +} + +void ObjCInterfaceDecl::setExternallyCompleted() { + assert(getASTContext().getExternalSource() && + "Class can't be externally completed without an external source"); + assert(hasDefinition() && + "Forward declarations can't be externally completed"); + data().ExternallyCompleted = true; +} + +ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const { + if (const ObjCInterfaceDecl *Def = getDefinition()) { + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + return getASTContext().getObjCImplementation( + const_cast<ObjCInterfaceDecl*>(Def)); + } + + // FIXME: Should make sure no callers ever do this. + return 0; +} + +void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) { + getASTContext().setObjCImplementation(getDefinition(), ImplD); +} + +namespace { + struct SynthesizeIvarChunk { + uint64_t Size; + ObjCIvarDecl *Ivar; + SynthesizeIvarChunk(uint64_t size, ObjCIvarDecl *ivar) + : Size(size), Ivar(ivar) {} + }; + + bool operator<(const SynthesizeIvarChunk & LHS, + const SynthesizeIvarChunk &RHS) { + return LHS.Size < RHS.Size; + } +} + +/// all_declared_ivar_begin - return first ivar declared in this class, +/// its extensions and its implementation. Lazily build the list on first +/// access. +/// +/// Caveat: The list returned by this method reflects the current +/// state of the parser. The cache will be updated for every ivar +/// added by an extension or the implementation when they are +/// encountered. 
+/// See also ObjCIvarDecl::Create(). +ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() { + // FIXME: Should make sure no callers ever do this. + if (!hasDefinition()) + return 0; + + ObjCIvarDecl *curIvar = 0; + if (!data().IvarList) { + if (!ivar_empty()) { + ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end(); + data().IvarList = *I; ++I; + for (curIvar = data().IvarList; I != E; curIvar = *I, ++I) + curIvar->setNextIvar(*I); + } + + for (ObjCInterfaceDecl::known_extensions_iterator + Ext = known_extensions_begin(), + ExtEnd = known_extensions_end(); + Ext != ExtEnd; ++Ext) { + if (!Ext->ivar_empty()) { + ObjCCategoryDecl::ivar_iterator + I = Ext->ivar_begin(), + E = Ext->ivar_end(); + if (!data().IvarList) { + data().IvarList = *I; ++I; + curIvar = data().IvarList; + } + for ( ;I != E; curIvar = *I, ++I) + curIvar->setNextIvar(*I); + } + } + data().IvarListMissingImplementation = true; + } + + // cached and complete! + if (!data().IvarListMissingImplementation) + return data().IvarList; + + if (ObjCImplementationDecl *ImplDecl = getImplementation()) { + data().IvarListMissingImplementation = false; + if (!ImplDecl->ivar_empty()) { + SmallVector<SynthesizeIvarChunk, 16> layout; + for (ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(), + E = ImplDecl->ivar_end(); I != E; ++I) { + ObjCIvarDecl *IV = *I; + if (IV->getSynthesize() && !IV->isInvalidDecl()) { + layout.push_back(SynthesizeIvarChunk( + IV->getASTContext().getTypeSize(IV->getType()), IV)); + continue; + } + if (!data().IvarList) + data().IvarList = *I; + else + curIvar->setNextIvar(*I); + curIvar = *I; + } + + if (!layout.empty()) { + // Order synthesized ivars by their size. 
+ std::stable_sort(layout.begin(), layout.end()); + unsigned Ix = 0, EIx = layout.size(); + if (!data().IvarList) { + data().IvarList = layout[0].Ivar; Ix++; + curIvar = data().IvarList; + } + for ( ; Ix != EIx; curIvar = layout[Ix].Ivar, Ix++) + curIvar->setNextIvar(layout[Ix].Ivar); + } + } + } + return data().IvarList; +} + +/// FindCategoryDeclaration - Finds category declaration in the list of +/// categories for this class and returns it. Name of the category is passed +/// in 'CategoryId'. If category not found, return 0; +/// +ObjCCategoryDecl * +ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const { + // FIXME: Should make sure no callers ever do this. + if (!hasDefinition()) + return 0; + + if (data().ExternallyCompleted) + LoadExternalDefinition(); + + for (visible_categories_iterator Cat = visible_categories_begin(), + CatEnd = visible_categories_end(); + Cat != CatEnd; + ++Cat) { + if (Cat->getIdentifier() == CategoryId) + return *Cat; + } + + return 0; +} + +ObjCMethodDecl * +ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const { + for (visible_categories_iterator Cat = visible_categories_begin(), + CatEnd = visible_categories_end(); + Cat != CatEnd; + ++Cat) { + if (ObjCCategoryImplDecl *Impl = Cat->getImplementation()) + if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel)) + return MD; + } + + return 0; +} + +ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const { + for (visible_categories_iterator Cat = visible_categories_begin(), + CatEnd = visible_categories_end(); + Cat != CatEnd; + ++Cat) { + if (ObjCCategoryImplDecl *Impl = Cat->getImplementation()) + if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel)) + return MD; + } + + return 0; +} + +/// ClassImplementsProtocol - Checks that 'lProto' protocol +/// has been implemented in IDecl class, its super class or categories (if +/// lookupCategory is true). 
+bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto, + bool lookupCategory, + bool RHSIsQualifiedID) { + if (!hasDefinition()) + return false; + + ObjCInterfaceDecl *IDecl = this; + // 1st, look up the class. + for (ObjCInterfaceDecl::protocol_iterator + PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){ + if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI)) + return true; + // This is dubious and is added to be compatible with gcc. In gcc, it is + // also allowed assigning a protocol-qualified 'id' type to a LHS object + // when protocol in qualified LHS is in list of protocols in the rhs 'id' + // object. This IMO, should be a bug. + // FIXME: Treat this as an extension, and flag this as an error when GCC + // extensions are not enabled. + if (RHSIsQualifiedID && + getASTContext().ProtocolCompatibleWithProtocol(*PI, lProto)) + return true; + } + + // 2nd, look up the category. + if (lookupCategory) + for (visible_categories_iterator Cat = visible_categories_begin(), + CatEnd = visible_categories_end(); + Cat != CatEnd; + ++Cat) { + for (ObjCCategoryDecl::protocol_iterator PI = Cat->protocol_begin(), + E = Cat->protocol_end(); + PI != E; ++PI) + if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI)) + return true; + } + + // 3rd, look up the super class(s) + if (IDecl->getSuperClass()) + return + IDecl->getSuperClass()->ClassImplementsProtocol(lProto, lookupCategory, + RHSIsQualifiedID); + + return false; +} + +//===----------------------------------------------------------------------===// +// ObjCIvarDecl +//===----------------------------------------------------------------------===// + +void ObjCIvarDecl::anchor() { } + +ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC, + SourceLocation StartLoc, + SourceLocation IdLoc, IdentifierInfo *Id, + QualType T, TypeSourceInfo *TInfo, + AccessControl ac, Expr *BW, + bool synthesized, + bool backingIvarReferencedInAccessor) { + if 
(DC) { + // Ivar's can only appear in interfaces, implementations (via synthesized + // properties), and class extensions (via direct declaration, or synthesized + // properties). + // + // FIXME: This should really be asserting this: + // (isa<ObjCCategoryDecl>(DC) && + // cast<ObjCCategoryDecl>(DC)->IsClassExtension())) + // but unfortunately we sometimes place ivars into non-class extension + // categories on error. This breaks an AST invariant, and should not be + // fixed. + assert((isa<ObjCInterfaceDecl>(DC) || isa<ObjCImplementationDecl>(DC) || + isa<ObjCCategoryDecl>(DC)) && + "Invalid ivar decl context!"); + // Once a new ivar is created in any of class/class-extension/implementation + // decl contexts, the previously built IvarList must be rebuilt. + ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC); + if (!ID) { + if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC)) + ID = IM->getClassInterface(); + else + ID = cast<ObjCCategoryDecl>(DC)->getClassInterface(); + } + ID->setIvarList(0); + } + + return new (C) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo, + ac, BW, synthesized, backingIvarReferencedInAccessor); +} + +ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl)); + return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0, + QualType(), 0, ObjCIvarDecl::None, 0, false, false); +} + +const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const { + const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext()); + + switch (DC->getKind()) { + default: + case ObjCCategoryImpl: + case ObjCProtocol: + llvm_unreachable("invalid ivar container!"); + + // Ivars can only appear in class extension categories. 
+ case ObjCCategory: { + const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC); + assert(CD->IsClassExtension() && "invalid container for ivar!"); + return CD->getClassInterface(); + } + + case ObjCImplementation: + return cast<ObjCImplementationDecl>(DC)->getClassInterface(); + + case ObjCInterface: + return cast<ObjCInterfaceDecl>(DC); + } +} + +//===----------------------------------------------------------------------===// +// ObjCAtDefsFieldDecl +//===----------------------------------------------------------------------===// + +void ObjCAtDefsFieldDecl::anchor() { } + +ObjCAtDefsFieldDecl +*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + IdentifierInfo *Id, QualType T, Expr *BW) { + return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW); +} + +ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl)); + return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(), + 0, QualType(), 0); +} + +//===----------------------------------------------------------------------===// +// ObjCProtocolDecl +//===----------------------------------------------------------------------===// + +void ObjCProtocolDecl::anchor() { } + +ObjCProtocolDecl::ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id, + SourceLocation nameLoc, + SourceLocation atStartLoc, + ObjCProtocolDecl *PrevDecl) + : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc), Data() +{ + setPreviousDecl(PrevDecl); + if (PrevDecl) + Data = PrevDecl->Data; +} + +ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC, + IdentifierInfo *Id, + SourceLocation nameLoc, + SourceLocation atStartLoc, + ObjCProtocolDecl *PrevDecl) { + ObjCProtocolDecl *Result + = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl); + Result->Data.setInt(!C.getLangOpts().Modules); + return Result; +} + 
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl)); + ObjCProtocolDecl *Result = new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(), + SourceLocation(), 0); + Result->Data.setInt(!C.getLangOpts().Modules); + return Result; +} + +ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) { + ObjCProtocolDecl *PDecl = this; + + if (Name == getIdentifier()) + return PDecl; + + for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I) + if ((PDecl = (*I)->lookupProtocolNamed(Name))) + return PDecl; + + return NULL; +} + +// lookupMethod - Lookup a instance/class method in the protocol and protocols +// it inherited. +ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel, + bool isInstance) const { + ObjCMethodDecl *MethodDecl = NULL; + + // If there is no definition or the definition is hidden, we don't find + // anything. + const ObjCProtocolDecl *Def = getDefinition(); + if (!Def || Def->isHidden()) + return NULL; + + if ((MethodDecl = getMethod(Sel, isInstance))) + return MethodDecl; + + for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I) + if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance))) + return MethodDecl; + return NULL; +} + +void ObjCProtocolDecl::allocateDefinitionData() { + assert(!Data.getPointer() && "Protocol already has a definition!"); + Data.setPointer(new (getASTContext()) DefinitionData); + Data.getPointer()->Definition = this; +} + +void ObjCProtocolDecl::startDefinition() { + allocateDefinitionData(); + + // Update all of the declarations with a pointer to the definition. 
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end(); + RD != RDEnd; ++RD) + RD->Data = this->Data; +} + +void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM, + PropertyDeclOrder &PO) const { + + if (const ObjCProtocolDecl *PDecl = getDefinition()) { + for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(), + E = PDecl->prop_end(); P != E; ++P) { + ObjCPropertyDecl *Prop = *P; + // Insert into PM if not there already. + PM.insert(std::make_pair(Prop->getIdentifier(), Prop)); + PO.push_back(Prop); + } + // Scan through protocol's protocols. + for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), + E = PDecl->protocol_end(); PI != E; ++PI) + (*PI)->collectPropertiesToImplement(PM, PO); + } +} + + +void ObjCProtocolDecl::collectInheritedProtocolProperties( + const ObjCPropertyDecl *Property, + ProtocolPropertyMap &PM) const { + if (const ObjCProtocolDecl *PDecl = getDefinition()) { + bool MatchFound = false; + for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(), + E = PDecl->prop_end(); P != E; ++P) { + ObjCPropertyDecl *Prop = *P; + if (Prop == Property) + continue; + if (Prop->getIdentifier() == Property->getIdentifier()) { + PM[PDecl] = Prop; + MatchFound = true; + break; + } + } + // Scan through protocol's protocols which did not have a matching property. 
+ if (!MatchFound) + for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), + E = PDecl->protocol_end(); PI != E; ++PI) + (*PI)->collectInheritedProtocolProperties(Property, PM); + } +} + +//===----------------------------------------------------------------------===// +// ObjCCategoryDecl +//===----------------------------------------------------------------------===// + +void ObjCCategoryDecl::anchor() { } + +ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation AtLoc, + SourceLocation ClassNameLoc, + SourceLocation CategoryNameLoc, + IdentifierInfo *Id, + ObjCInterfaceDecl *IDecl, + SourceLocation IvarLBraceLoc, + SourceLocation IvarRBraceLoc) { + ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, + CategoryNameLoc, Id, + IDecl, + IvarLBraceLoc, IvarRBraceLoc); + if (IDecl) { + // Link this category into its class's category list. + CatDecl->NextClassCategory = IDecl->getCategoryListRaw(); + if (IDecl->hasDefinition()) { + IDecl->setCategoryListRaw(CatDecl); + if (ASTMutationListener *L = C.getASTMutationListener()) + L->AddedObjCCategoryToInterface(CatDecl, IDecl); + } + } + + return CatDecl; +} + +ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl)); + return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(), + SourceLocation(), 0, 0); +} + +ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const { + return getASTContext().getObjCImplementation( + const_cast<ObjCCategoryDecl*>(this)); +} + +void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) { + getASTContext().setObjCImplementation(this, ImplD); +} + + +//===----------------------------------------------------------------------===// +// ObjCCategoryImplDecl +//===----------------------------------------------------------------------===// + +void ObjCCategoryImplDecl::anchor() { } + 
+ObjCCategoryImplDecl * +ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC, + IdentifierInfo *Id, + ObjCInterfaceDecl *ClassInterface, + SourceLocation nameLoc, + SourceLocation atStartLoc, + SourceLocation CategoryNameLoc) { + if (ClassInterface && ClassInterface->hasDefinition()) + ClassInterface = ClassInterface->getDefinition(); + return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface, + nameLoc, atStartLoc, CategoryNameLoc); +} + +ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl)); + return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(), + SourceLocation(), SourceLocation()); +} + +ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const { + // The class interface might be NULL if we are working with invalid code. + if (const ObjCInterfaceDecl *ID = getClassInterface()) + return ID->FindCategoryDeclaration(getIdentifier()); + return 0; +} + + +void ObjCImplDecl::anchor() { } + +void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) { + // FIXME: The context should be correct before we get here. + property->setLexicalDeclContext(this); + addDecl(property); +} + +void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) { + ASTContext &Ctx = getASTContext(); + + if (ObjCImplementationDecl *ImplD + = dyn_cast_or_null<ObjCImplementationDecl>(this)) { + if (IFace) + Ctx.setObjCImplementation(IFace, ImplD); + + } else if (ObjCCategoryImplDecl *ImplD = + dyn_cast_or_null<ObjCCategoryImplDecl>(this)) { + if (ObjCCategoryDecl *CD = IFace->FindCategoryDeclaration(getIdentifier())) + Ctx.setObjCImplementation(CD, ImplD); + } + + ClassInterface = IFace; +} + +/// FindPropertyImplIvarDecl - This method lookup the ivar in the list of +/// properties implemented in this \@implementation block and returns +/// the implemented property that uses it. 
+/// +ObjCPropertyImplDecl *ObjCImplDecl:: +FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const { + for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){ + ObjCPropertyImplDecl *PID = *i; + if (PID->getPropertyIvarDecl() && + PID->getPropertyIvarDecl()->getIdentifier() == ivarId) + return PID; + } + return 0; +} + +/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl +/// added to the list of those properties \@synthesized/\@dynamic in this +/// category \@implementation block. +/// +ObjCPropertyImplDecl *ObjCImplDecl:: +FindPropertyImplDecl(IdentifierInfo *Id) const { + for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){ + ObjCPropertyImplDecl *PID = *i; + if (PID->getPropertyDecl()->getIdentifier() == Id) + return PID; + } + return 0; +} + +raw_ostream &clang::operator<<(raw_ostream &OS, + const ObjCCategoryImplDecl &CID) { + OS << CID.getName(); + return OS; +} + +//===----------------------------------------------------------------------===// +// ObjCImplementationDecl +//===----------------------------------------------------------------------===// + +void ObjCImplementationDecl::anchor() { } + +ObjCImplementationDecl * +ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC, + ObjCInterfaceDecl *ClassInterface, + ObjCInterfaceDecl *SuperDecl, + SourceLocation nameLoc, + SourceLocation atStartLoc, + SourceLocation superLoc, + SourceLocation IvarLBraceLoc, + SourceLocation IvarRBraceLoc) { + if (ClassInterface && ClassInterface->hasDefinition()) + ClassInterface = ClassInterface->getDefinition(); + return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl, + nameLoc, atStartLoc, superLoc, + IvarLBraceLoc, IvarRBraceLoc); +} + +ObjCImplementationDecl * +ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl)); + return new (Mem) ObjCImplementationDecl(0, 0, 0, 
SourceLocation(), + SourceLocation()); +} + +void ObjCImplementationDecl::setIvarInitializers(ASTContext &C, + CXXCtorInitializer ** initializers, + unsigned numInitializers) { + if (numInitializers > 0) { + NumIvarInitializers = numInitializers; + CXXCtorInitializer **ivarInitializers = + new (C) CXXCtorInitializer*[NumIvarInitializers]; + memcpy(ivarInitializers, initializers, + numInitializers * sizeof(CXXCtorInitializer*)); + IvarInitializers = ivarInitializers; + } +} + +raw_ostream &clang::operator<<(raw_ostream &OS, + const ObjCImplementationDecl &ID) { + OS << ID.getName(); + return OS; +} + +//===----------------------------------------------------------------------===// +// ObjCCompatibleAliasDecl +//===----------------------------------------------------------------------===// + +void ObjCCompatibleAliasDecl::anchor() { } + +ObjCCompatibleAliasDecl * +ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation L, + IdentifierInfo *Id, + ObjCInterfaceDecl* AliasedClass) { + return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass); +} + +ObjCCompatibleAliasDecl * +ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl)); + return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0); +} + +//===----------------------------------------------------------------------===// +// ObjCPropertyDecl +//===----------------------------------------------------------------------===// + +void ObjCPropertyDecl::anchor() { } + +ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation L, + IdentifierInfo *Id, + SourceLocation AtLoc, + SourceLocation LParenLoc, + TypeSourceInfo *T, + PropertyControl propControl) { + return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T); +} + +ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void * Mem = 
AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl)); + return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(), + SourceLocation(), + 0); +} + +//===----------------------------------------------------------------------===// +// ObjCPropertyImplDecl +//===----------------------------------------------------------------------===// + +ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation atLoc, + SourceLocation L, + ObjCPropertyDecl *property, + Kind PK, + ObjCIvarDecl *ivar, + SourceLocation ivarLoc) { + return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar, + ivarLoc); +} + +ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl)); + return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(), + 0, Dynamic, 0, SourceLocation()); +} + +SourceRange ObjCPropertyImplDecl::getSourceRange() const { + SourceLocation EndLoc = getLocation(); + if (IvarLoc.isValid()) + EndLoc = IvarLoc; + + return SourceRange(AtLoc, EndLoc); +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp b/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp new file mode 100644 index 000000000000..0d195f74623d --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclOpenMP.cpp @@ -0,0 +1,61 @@ +//===--- DeclOpenMP.cpp - Declaration OpenMP AST Node Implementation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// \file +/// \brief This file implements OMPThreadPrivateDecl class. 
+/// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclOpenMP.h" +#include "clang/AST/Expr.h" + +using namespace clang; + +//===----------------------------------------------------------------------===// +// OMPThreadPrivateDecl Implementation. +//===----------------------------------------------------------------------===// + +void OMPThreadPrivateDecl::anchor() { } + +OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation L, + ArrayRef<Expr *> VL) { + unsigned Size = sizeof(OMPThreadPrivateDecl) + + (VL.size() * sizeof(Expr *)); + + void *Mem = C.Allocate(Size, llvm::alignOf<OMPThreadPrivateDecl>()); + OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate, + DC, L); + D->NumVars = VL.size(); + D->setVars(VL); + return D; +} + +OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C, + unsigned ID, + unsigned N) { + unsigned Size = sizeof(OMPThreadPrivateDecl) + (N * sizeof(Expr *)); + + void *Mem = AllocateDeserializedDecl(C, ID, Size); + OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate, + 0, SourceLocation()); + D->NumVars = N; + return D; +} + +void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) { + assert(VL.size() == NumVars && + "Number of variables is not the same as the preallocated buffer"); + Expr **Vars = reinterpret_cast<Expr **>(this + 1); + std::copy(VL.begin(), VL.end(), Vars); +} + diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp new file mode 100644 index 000000000000..767f6620a295 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp @@ -0,0 +1,1199 @@ +//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is 
distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Decl::print method, which pretty prints the +// AST back out to C/Objective-C/C++/Objective-C++ code. +// +//===----------------------------------------------------------------------===// +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclVisitor.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/Basic/Module.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; + +namespace { + class DeclPrinter : public DeclVisitor<DeclPrinter> { + raw_ostream &Out; + PrintingPolicy Policy; + unsigned Indentation; + bool PrintInstantiation; + + raw_ostream& Indent() { return Indent(Indentation); } + raw_ostream& Indent(unsigned Indentation); + void ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls); + + void Print(AccessSpecifier AS); + + public: + DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy, + unsigned Indentation = 0, bool PrintInstantiation = false) + : Out(Out), Policy(Policy), Indentation(Indentation), + PrintInstantiation(PrintInstantiation) { } + + void VisitDeclContext(DeclContext *DC, bool Indent = true); + + void VisitTranslationUnitDecl(TranslationUnitDecl *D); + void VisitTypedefDecl(TypedefDecl *D); + void VisitTypeAliasDecl(TypeAliasDecl *D); + void VisitEnumDecl(EnumDecl *D); + void VisitRecordDecl(RecordDecl *D); + void VisitEnumConstantDecl(EnumConstantDecl *D); + void VisitEmptyDecl(EmptyDecl *D); + void VisitFunctionDecl(FunctionDecl *D); + void VisitFriendDecl(FriendDecl *D); + void VisitFieldDecl(FieldDecl *D); + void VisitVarDecl(VarDecl *D); + void VisitLabelDecl(LabelDecl *D); + void VisitParmVarDecl(ParmVarDecl *D); + 
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D); + void VisitImportDecl(ImportDecl *D); + void VisitStaticAssertDecl(StaticAssertDecl *D); + void VisitNamespaceDecl(NamespaceDecl *D); + void VisitUsingDirectiveDecl(UsingDirectiveDecl *D); + void VisitNamespaceAliasDecl(NamespaceAliasDecl *D); + void VisitCXXRecordDecl(CXXRecordDecl *D); + void VisitLinkageSpecDecl(LinkageSpecDecl *D); + void VisitTemplateDecl(const TemplateDecl *D); + void VisitFunctionTemplateDecl(FunctionTemplateDecl *D); + void VisitClassTemplateDecl(ClassTemplateDecl *D); + void VisitObjCMethodDecl(ObjCMethodDecl *D); + void VisitObjCImplementationDecl(ObjCImplementationDecl *D); + void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); + void VisitObjCProtocolDecl(ObjCProtocolDecl *D); + void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D); + void VisitObjCCategoryDecl(ObjCCategoryDecl *D); + void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D); + void VisitObjCPropertyDecl(ObjCPropertyDecl *D); + void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D); + void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D); + void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); + void VisitUsingDecl(UsingDecl *D); + void VisitUsingShadowDecl(UsingShadowDecl *D); + void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D); + + void PrintTemplateParameters(const TemplateParameterList *Params, + const TemplateArgumentList *Args = 0); + void prettyPrintAttributes(Decl *D); + }; +} + +void Decl::print(raw_ostream &Out, unsigned Indentation, + bool PrintInstantiation) const { + print(Out, getASTContext().getPrintingPolicy(), Indentation, PrintInstantiation); +} + +void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy, + unsigned Indentation, bool PrintInstantiation) const { + DeclPrinter Printer(Out, Policy, Indentation, PrintInstantiation); + Printer.Visit(const_cast<Decl*>(this)); +} + +static QualType GetBaseType(QualType T) { + // FIXME: This should be on the Type 
class! + QualType BaseType = T; + while (!BaseType->isSpecifierType()) { + if (isa<TypedefType>(BaseType)) + break; + else if (const PointerType* PTy = BaseType->getAs<PointerType>()) + BaseType = PTy->getPointeeType(); + else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>()) + BaseType = BPy->getPointeeType(); + else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType)) + BaseType = ATy->getElementType(); + else if (const FunctionType* FTy = BaseType->getAs<FunctionType>()) + BaseType = FTy->getResultType(); + else if (const VectorType *VTy = BaseType->getAs<VectorType>()) + BaseType = VTy->getElementType(); + else if (const ReferenceType *RTy = BaseType->getAs<ReferenceType>()) + BaseType = RTy->getPointeeType(); + else + llvm_unreachable("Unknown declarator!"); + } + return BaseType; +} + +static QualType getDeclType(Decl* D) { + if (TypedefNameDecl* TDD = dyn_cast<TypedefNameDecl>(D)) + return TDD->getUnderlyingType(); + if (ValueDecl* VD = dyn_cast<ValueDecl>(D)) + return VD->getType(); + return QualType(); +} + +void Decl::printGroup(Decl** Begin, unsigned NumDecls, + raw_ostream &Out, const PrintingPolicy &Policy, + unsigned Indentation) { + if (NumDecls == 1) { + (*Begin)->print(Out, Policy, Indentation); + return; + } + + Decl** End = Begin + NumDecls; + TagDecl* TD = dyn_cast<TagDecl>(*Begin); + if (TD) + ++Begin; + + PrintingPolicy SubPolicy(Policy); + if (TD && TD->isCompleteDefinition()) { + TD->print(Out, Policy, Indentation); + Out << " "; + SubPolicy.SuppressTag = true; + } + + bool isFirst = true; + for ( ; Begin != End; ++Begin) { + if (isFirst) { + SubPolicy.SuppressSpecifiers = false; + isFirst = false; + } else { + if (!isFirst) Out << ", "; + SubPolicy.SuppressSpecifiers = true; + } + + (*Begin)->print(Out, SubPolicy, Indentation); + } +} + +void DeclContext::dumpDeclContext() const { + // Get the translation unit + const DeclContext *DC = this; + while (!DC->isTranslationUnit()) + DC = DC->getParent(); + + ASTContext 
&Ctx = cast<TranslationUnitDecl>(DC)->getASTContext(); + DeclPrinter Printer(llvm::errs(), Ctx.getPrintingPolicy(), 0); + Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false); +} + +raw_ostream& DeclPrinter::Indent(unsigned Indentation) { + for (unsigned i = 0; i != Indentation; ++i) + Out << " "; + return Out; +} + +void DeclPrinter::prettyPrintAttributes(Decl *D) { + if (Policy.PolishForDeclaration) + return; + + if (D->hasAttrs()) { + AttrVec &Attrs = D->getAttrs(); + for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) { + Attr *A = *i; + A->printPretty(Out, Policy); + } + } +} + +void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) { + this->Indent(); + Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation); + Out << ";\n"; + Decls.clear(); + +} + +void DeclPrinter::Print(AccessSpecifier AS) { + switch(AS) { + case AS_none: llvm_unreachable("No access specifier!"); + case AS_public: Out << "public"; break; + case AS_protected: Out << "protected"; break; + case AS_private: Out << "private"; break; + } +} + +//---------------------------------------------------------------------------- +// Common C declarations +//---------------------------------------------------------------------------- + +void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { + if (Policy.TerseOutput) + return; + + if (Indent) + Indentation += Policy.Indentation; + + SmallVector<Decl*, 2> Decls; + for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end(); + D != DEnd; ++D) { + + // Don't print ObjCIvarDecls, as they are printed when visiting the + // containing ObjCInterfaceDecl. + if (isa<ObjCIvarDecl>(*D)) + continue; + + // Skip over implicit declarations in pretty-printing mode. + if (D->isImplicit()) + continue; + + // FIXME: Ugly hack so we don't pretty-print the builtin declaration + // of __builtin_va_list or __[u]int128_t. There should be some other way + // to check that. 
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) { + if (IdentifierInfo *II = ND->getIdentifier()) { + if (II->isStr("__builtin_va_list") || + II->isStr("__int128_t") || II->isStr("__uint128_t")) + continue; + } + } + + // The next bits of code handles stuff like "struct {int x;} a,b"; we're + // forced to merge the declarations because there's no other way to + // refer to the struct in question. This limited merging is safe without + // a bunch of other checks because it only merges declarations directly + // referring to the tag, not typedefs. + // + // Check whether the current declaration should be grouped with a previous + // unnamed struct. + QualType CurDeclType = getDeclType(*D); + if (!Decls.empty() && !CurDeclType.isNull()) { + QualType BaseType = GetBaseType(CurDeclType); + if (!BaseType.isNull() && isa<ElaboratedType>(BaseType)) + BaseType = cast<ElaboratedType>(BaseType)->getNamedType(); + if (!BaseType.isNull() && isa<TagType>(BaseType) && + cast<TagType>(BaseType)->getDecl() == Decls[0]) { + Decls.push_back(*D); + continue; + } + } + + // If we have a merged group waiting to be handled, handle it now. + if (!Decls.empty()) + ProcessDeclGroup(Decls); + + // If the current declaration is an unnamed tag type, save it + // so we can merge it with the subsequent declaration(s) using it. 
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) { + Decls.push_back(*D); + continue; + } + + if (isa<AccessSpecDecl>(*D)) { + Indentation -= Policy.Indentation; + this->Indent(); + Print(D->getAccess()); + Out << ":\n"; + Indentation += Policy.Indentation; + continue; + } + + this->Indent(); + Visit(*D); + + // FIXME: Need to be able to tell the DeclPrinter when + const char *Terminator = 0; + if (isa<OMPThreadPrivateDecl>(*D)) + Terminator = 0; + else if (isa<FunctionDecl>(*D) && + cast<FunctionDecl>(*D)->isThisDeclarationADefinition()) + Terminator = 0; + else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody()) + Terminator = 0; + else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) || + isa<ObjCImplementationDecl>(*D) || + isa<ObjCInterfaceDecl>(*D) || + isa<ObjCProtocolDecl>(*D) || + isa<ObjCCategoryImplDecl>(*D) || + isa<ObjCCategoryDecl>(*D)) + Terminator = 0; + else if (isa<EnumConstantDecl>(*D)) { + DeclContext::decl_iterator Next = D; + ++Next; + if (Next != DEnd) + Terminator = ","; + } else + Terminator = ";"; + + if (Terminator) + Out << Terminator; + Out << "\n"; + } + + if (!Decls.empty()) + ProcessDeclGroup(Decls); + + if (Indent) + Indentation -= Policy.Indentation; +} + +void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) { + VisitDeclContext(D, false); +} + +void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) { + if (!Policy.SuppressSpecifiers) { + Out << "typedef "; + + if (D->isModulePrivate()) + Out << "__module_private__ "; + } + D->getTypeSourceInfo()->getType().print(Out, Policy, D->getName()); + prettyPrintAttributes(D); +} + +void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) { + Out << "using " << *D; + prettyPrintAttributes(D); + Out << " = " << D->getTypeSourceInfo()->getType().getAsString(Policy); +} + +void DeclPrinter::VisitEnumDecl(EnumDecl *D) { + if (!Policy.SuppressSpecifiers && D->isModulePrivate()) + Out << "__module_private__ "; + Out << "enum "; + if (D->isScoped()) { + 
if (D->isScopedUsingClassTag()) + Out << "class "; + else + Out << "struct "; + } + Out << *D; + + if (D->isFixed()) + Out << " : " << D->getIntegerType().stream(Policy); + + if (D->isCompleteDefinition()) { + Out << " {\n"; + VisitDeclContext(D); + Indent() << "}"; + } + prettyPrintAttributes(D); +} + +void DeclPrinter::VisitRecordDecl(RecordDecl *D) { + if (!Policy.SuppressSpecifiers && D->isModulePrivate()) + Out << "__module_private__ "; + Out << D->getKindName(); + if (D->getIdentifier()) + Out << ' ' << *D; + + if (D->isCompleteDefinition()) { + Out << " {\n"; + VisitDeclContext(D); + Indent() << "}"; + } +} + +void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) { + Out << *D; + if (Expr *Init = D->getInitExpr()) { + Out << " = "; + Init->printPretty(Out, 0, Policy, Indentation); + } +} + +void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { + CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D); + if (!Policy.SuppressSpecifiers) { + switch (D->getStorageClass()) { + case SC_None: break; + case SC_Extern: Out << "extern "; break; + case SC_Static: Out << "static "; break; + case SC_PrivateExtern: Out << "__private_extern__ "; break; + case SC_Auto: case SC_Register: case SC_OpenCLWorkGroupLocal: + llvm_unreachable("invalid for functions"); + } + + if (D->isInlineSpecified()) Out << "inline "; + if (D->isVirtualAsWritten()) Out << "virtual "; + if (D->isModulePrivate()) Out << "__module_private__ "; + if (CDecl && CDecl->isExplicitSpecified()) + Out << "explicit "; + } + + PrintingPolicy SubPolicy(Policy); + SubPolicy.SuppressSpecifiers = false; + std::string Proto = D->getNameInfo().getAsString(); + + QualType Ty = D->getType(); + while (const ParenType *PT = dyn_cast<ParenType>(Ty)) { + Proto = '(' + Proto + ')'; + Ty = PT->getInnerType(); + } + + if (isa<FunctionType>(Ty)) { + const FunctionType *AFT = Ty->getAs<FunctionType>(); + const FunctionProtoType *FT = 0; + if (D->hasWrittenPrototype()) + FT = dyn_cast<FunctionProtoType>(AFT); + + 
Proto += "("; + if (FT) { + llvm::raw_string_ostream POut(Proto); + DeclPrinter ParamPrinter(POut, SubPolicy, Indentation); + for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { + if (i) POut << ", "; + ParamPrinter.VisitParmVarDecl(D->getParamDecl(i)); + } + + if (FT->isVariadic()) { + if (D->getNumParams()) POut << ", "; + POut << "..."; + } + } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) { + for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { + if (i) + Proto += ", "; + Proto += D->getParamDecl(i)->getNameAsString(); + } + } + + Proto += ")"; + + if (FT) { + if (FT->isConst()) + Proto += " const"; + if (FT->isVolatile()) + Proto += " volatile"; + if (FT->isRestrict()) + Proto += " restrict"; + } + + if (FT && FT->hasDynamicExceptionSpec()) { + Proto += " throw("; + if (FT->getExceptionSpecType() == EST_MSAny) + Proto += "..."; + else + for (unsigned I = 0, N = FT->getNumExceptions(); I != N; ++I) { + if (I) + Proto += ", "; + + Proto += FT->getExceptionType(I).getAsString(SubPolicy); + } + Proto += ")"; + } else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) { + Proto += " noexcept"; + if (FT->getExceptionSpecType() == EST_ComputedNoexcept) { + Proto += "("; + llvm::raw_string_ostream EOut(Proto); + FT->getNoexceptExpr()->printPretty(EOut, 0, SubPolicy, + Indentation); + EOut.flush(); + Proto += EOut.str(); + Proto += ")"; + } + } + + if (CDecl) { + bool HasInitializerList = false; + for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(), + E = CDecl->init_end(); + B != E; ++B) { + CXXCtorInitializer *BMInitializer = (*B); + if (BMInitializer->isInClassMemberInitializer()) + continue; + + if (!HasInitializerList) { + Proto += " : "; + Out << Proto; + Proto.clear(); + HasInitializerList = true; + } else + Out << ", "; + + if (BMInitializer->isAnyMemberInitializer()) { + FieldDecl *FD = BMInitializer->getAnyMember(); + Out << *FD; + } else { + Out << QualType(BMInitializer->getBaseClass(), 
0).getAsString(Policy); + } + + Out << "("; + if (!BMInitializer->getInit()) { + // Nothing to print + } else { + Expr *Init = BMInitializer->getInit(); + if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init)) + Init = Tmp->getSubExpr(); + + Init = Init->IgnoreParens(); + + Expr *SimpleInit = 0; + Expr **Args = 0; + unsigned NumArgs = 0; + if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) { + Args = ParenList->getExprs(); + NumArgs = ParenList->getNumExprs(); + } else if (CXXConstructExpr *Construct + = dyn_cast<CXXConstructExpr>(Init)) { + Args = Construct->getArgs(); + NumArgs = Construct->getNumArgs(); + } else + SimpleInit = Init; + + if (SimpleInit) + SimpleInit->printPretty(Out, 0, Policy, Indentation); + else { + for (unsigned I = 0; I != NumArgs; ++I) { + if (isa<CXXDefaultArgExpr>(Args[I])) + break; + + if (I) + Out << ", "; + Args[I]->printPretty(Out, 0, Policy, Indentation); + } + } + } + Out << ")"; + } + if (!Proto.empty()) + Out << Proto; + } else { + if (FT && FT->hasTrailingReturn()) { + Out << "auto " << Proto << " -> "; + Proto.clear(); + } + AFT->getResultType().print(Out, Policy, Proto); + } + } else { + Ty.print(Out, Policy, Proto); + } + + prettyPrintAttributes(D); + + if (D->isPure()) + Out << " = 0"; + else if (D->isDeletedAsWritten()) + Out << " = delete"; + else if (D->isExplicitlyDefaulted()) + Out << " = default"; + else if (D->doesThisDeclarationHaveABody() && !Policy.TerseOutput) { + if (!D->hasPrototype() && D->getNumParams()) { + // This is a K&R function definition, so we need to print the + // parameters. 
+ Out << '\n'; + DeclPrinter ParamPrinter(Out, SubPolicy, Indentation); + Indentation += Policy.Indentation; + for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { + Indent(); + ParamPrinter.VisitParmVarDecl(D->getParamDecl(i)); + Out << ";\n"; + } + Indentation -= Policy.Indentation; + } else + Out << ' '; + + D->getBody()->printPretty(Out, 0, SubPolicy, Indentation); + Out << '\n'; + } +} + +void DeclPrinter::VisitFriendDecl(FriendDecl *D) { + if (TypeSourceInfo *TSI = D->getFriendType()) { + unsigned NumTPLists = D->getFriendTypeNumTemplateParameterLists(); + for (unsigned i = 0; i < NumTPLists; ++i) + PrintTemplateParameters(D->getFriendTypeTemplateParameterList(i)); + Out << "friend "; + Out << " " << TSI->getType().getAsString(Policy); + } + else if (FunctionDecl *FD = + dyn_cast<FunctionDecl>(D->getFriendDecl())) { + Out << "friend "; + VisitFunctionDecl(FD); + } + else if (FunctionTemplateDecl *FTD = + dyn_cast<FunctionTemplateDecl>(D->getFriendDecl())) { + Out << "friend "; + VisitFunctionTemplateDecl(FTD); + } + else if (ClassTemplateDecl *CTD = + dyn_cast<ClassTemplateDecl>(D->getFriendDecl())) { + Out << "friend "; + VisitRedeclarableTemplateDecl(CTD); + } +} + +void DeclPrinter::VisitFieldDecl(FieldDecl *D) { + if (!Policy.SuppressSpecifiers && D->isMutable()) + Out << "mutable "; + if (!Policy.SuppressSpecifiers && D->isModulePrivate()) + Out << "__module_private__ "; + + Out << D->getASTContext().getUnqualifiedObjCPointerType(D->getType()). 
+ stream(Policy, D->getName()); + + if (D->isBitField()) { + Out << " : "; + D->getBitWidth()->printPretty(Out, 0, Policy, Indentation); + } + + Expr *Init = D->getInClassInitializer(); + if (!Policy.SuppressInitializers && Init) { + if (D->getInClassInitStyle() == ICIS_ListInit) + Out << " "; + else + Out << " = "; + Init->printPretty(Out, 0, Policy, Indentation); + } + prettyPrintAttributes(D); +} + +void DeclPrinter::VisitLabelDecl(LabelDecl *D) { + Out << *D << ":"; +} + + +void DeclPrinter::VisitVarDecl(VarDecl *D) { + if (!Policy.SuppressSpecifiers) { + StorageClass SC = D->getStorageClass(); + if (SC != SC_None) + Out << VarDecl::getStorageClassSpecifierString(SC) << " "; + + switch (D->getTSCSpec()) { + case TSCS_unspecified: + break; + case TSCS___thread: + Out << "__thread "; + break; + case TSCS__Thread_local: + Out << "_Thread_local "; + break; + case TSCS_thread_local: + Out << "thread_local "; + break; + } + + if (D->isModulePrivate()) + Out << "__module_private__ "; + } + + QualType T = D->getTypeSourceInfo() + ? 
D->getTypeSourceInfo()->getType() + : D->getASTContext().getUnqualifiedObjCPointerType(D->getType()); + T.print(Out, Policy, D->getName()); + Expr *Init = D->getInit(); + if (!Policy.SuppressInitializers && Init) { + bool ImplicitInit = false; + if (CXXConstructExpr *Construct = + dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) { + if (D->getInitStyle() == VarDecl::CallInit && + !Construct->isListInitialization()) { + ImplicitInit = Construct->getNumArgs() == 0 || + Construct->getArg(0)->isDefaultArgument(); + } + } + if (!ImplicitInit) { + if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init)) + Out << "("; + else if (D->getInitStyle() == VarDecl::CInit) { + Out << " = "; + } + Init->printPretty(Out, 0, Policy, Indentation); + if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init)) + Out << ")"; + } + } + prettyPrintAttributes(D); +} + +void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) { + VisitVarDecl(D); +} + +void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) { + Out << "__asm ("; + D->getAsmString()->printPretty(Out, 0, Policy, Indentation); + Out << ")"; +} + +void DeclPrinter::VisitImportDecl(ImportDecl *D) { + Out << "@import " << D->getImportedModule()->getFullModuleName() + << ";\n"; +} + +void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) { + Out << "static_assert("; + D->getAssertExpr()->printPretty(Out, 0, Policy, Indentation); + Out << ", "; + D->getMessage()->printPretty(Out, 0, Policy, Indentation); + Out << ")"; +} + +//---------------------------------------------------------------------------- +// C++ declarations +//---------------------------------------------------------------------------- +void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) { + if (D->isInline()) + Out << "inline "; + Out << "namespace " << *D << " {\n"; + VisitDeclContext(D); + Indent() << "}"; +} + +void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { + Out << "using namespace "; + if 
(D->getQualifier()) + D->getQualifier()->print(Out, Policy); + Out << *D->getNominatedNamespaceAsWritten(); +} + +void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { + Out << "namespace " << *D << " = "; + if (D->getQualifier()) + D->getQualifier()->print(Out, Policy); + Out << *D->getAliasedNamespace(); +} + +void DeclPrinter::VisitEmptyDecl(EmptyDecl *D) { + prettyPrintAttributes(D); +} + +void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { + if (!Policy.SuppressSpecifiers && D->isModulePrivate()) + Out << "__module_private__ "; + Out << D->getKindName(); + if (D->getIdentifier()) + Out << ' ' << *D; + + if (D->isCompleteDefinition()) { + // Print the base classes + if (D->getNumBases()) { + Out << " : "; + for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(), + BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) { + if (Base != D->bases_begin()) + Out << ", "; + + if (Base->isVirtual()) + Out << "virtual "; + + AccessSpecifier AS = Base->getAccessSpecifierAsWritten(); + if (AS != AS_none) + Print(AS); + Out << " " << Base->getType().getAsString(Policy); + + if (Base->isPackExpansion()) + Out << "..."; + } + } + + // Print the class definition + // FIXME: Doesn't print access specifiers, e.g., "public:" + Out << " {\n"; + VisitDeclContext(D); + Indent() << "}"; + } +} + +void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) { + const char *l; + if (D->getLanguage() == LinkageSpecDecl::lang_c) + l = "C"; + else { + assert(D->getLanguage() == LinkageSpecDecl::lang_cxx && + "unknown language in linkage specification"); + l = "C++"; + } + + Out << "extern \"" << l << "\" "; + if (D->hasBraces()) { + Out << "{\n"; + VisitDeclContext(D); + Indent() << "}"; + } else + Visit(*D->decls_begin()); +} + +void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params, + const TemplateArgumentList *Args) { + assert(Params); + assert(!Args || Params->size() == Args->size()); + + Out << "template <"; + + for (unsigned i = 0, e = 
Params->size(); i != e; ++i) { + if (i != 0) + Out << ", "; + + const Decl *Param = Params->getParam(i); + if (const TemplateTypeParmDecl *TTP = + dyn_cast<TemplateTypeParmDecl>(Param)) { + + if (TTP->wasDeclaredWithTypename()) + Out << "typename "; + else + Out << "class "; + + if (TTP->isParameterPack()) + Out << "... "; + + Out << *TTP; + + if (Args) { + Out << " = "; + Args->get(i).print(Policy, Out); + } else if (TTP->hasDefaultArgument()) { + Out << " = "; + Out << TTP->getDefaultArgument().getAsString(Policy); + }; + } else if (const NonTypeTemplateParmDecl *NTTP = + dyn_cast<NonTypeTemplateParmDecl>(Param)) { + Out << NTTP->getType().getAsString(Policy); + + if (NTTP->isParameterPack() && !isa<PackExpansionType>(NTTP->getType())) + Out << "..."; + + if (IdentifierInfo *Name = NTTP->getIdentifier()) { + Out << ' '; + Out << Name->getName(); + } + + if (Args) { + Out << " = "; + Args->get(i).print(Policy, Out); + } else if (NTTP->hasDefaultArgument()) { + Out << " = "; + NTTP->getDefaultArgument()->printPretty(Out, 0, Policy, Indentation); + } + } else if (const TemplateTemplateParmDecl *TTPD = + dyn_cast<TemplateTemplateParmDecl>(Param)) { + VisitTemplateDecl(TTPD); + // FIXME: print the default argument, if present. 
+ } + } + + Out << "> "; +} + +void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) { + PrintTemplateParameters(D->getTemplateParameters()); + + if (const TemplateTemplateParmDecl *TTP = + dyn_cast<TemplateTemplateParmDecl>(D)) { + Out << "class "; + if (TTP->isParameterPack()) + Out << "..."; + Out << D->getName(); + } else { + Visit(D->getTemplatedDecl()); + } +} + +void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { + if (PrintInstantiation) { + TemplateParameterList *Params = D->getTemplateParameters(); + for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end(); + I != E; ++I) { + PrintTemplateParameters(Params, (*I)->getTemplateSpecializationArgs()); + Visit(*I); + } + } + + return VisitRedeclarableTemplateDecl(D); +} + +void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) { + if (PrintInstantiation) { + TemplateParameterList *Params = D->getTemplateParameters(); + for (ClassTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end(); + I != E; ++I) { + PrintTemplateParameters(Params, &(*I)->getTemplateArgs()); + Visit(*I); + Out << '\n'; + } + } + + return VisitRedeclarableTemplateDecl(D); +} + +//---------------------------------------------------------------------------- +// Objective-C declarations +//---------------------------------------------------------------------------- + +void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) { + if (OMD->isInstanceMethod()) + Out << "- "; + else + Out << "+ "; + if (!OMD->getResultType().isNull()) + Out << '(' << OMD->getASTContext().getUnqualifiedObjCPointerType(OMD->getResultType()). + getAsString(Policy) << ")"; + + std::string name = OMD->getSelector().getAsString(); + std::string::size_type pos, lastPos = 0; + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) { + // FIXME: selector is missing here! 
+ pos = name.find_first_of(':', lastPos); + Out << " " << name.substr(lastPos, pos - lastPos); + Out << ":(" << (*PI)->getASTContext().getUnqualifiedObjCPointerType((*PI)->getType()). + getAsString(Policy) << ')' << **PI; + lastPos = pos + 1; + } + + if (OMD->param_begin() == OMD->param_end()) + Out << " " << name; + + if (OMD->isVariadic()) + Out << ", ..."; + + if (OMD->getBody() && !Policy.TerseOutput) { + Out << ' '; + OMD->getBody()->printPretty(Out, 0, Policy); + Out << '\n'; + } + else if (Policy.PolishForDeclaration) + Out << ';'; +} + +void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) { + std::string I = OID->getNameAsString(); + ObjCInterfaceDecl *SID = OID->getSuperClass(); + + if (SID) + Out << "@implementation " << I << " : " << *SID; + else + Out << "@implementation " << I; + + if (OID->ivar_size() > 0) { + Out << "{\n"; + Indentation += Policy.Indentation; + for (ObjCImplementationDecl::ivar_iterator I = OID->ivar_begin(), + E = OID->ivar_end(); I != E; ++I) { + Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()). + getAsString(Policy) << ' ' << **I << ";\n"; + } + Indentation -= Policy.Indentation; + Out << "}\n"; + } + VisitDeclContext(OID, false); + Out << "@end"; +} + +void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) { + std::string I = OID->getNameAsString(); + ObjCInterfaceDecl *SID = OID->getSuperClass(); + + if (!OID->isThisDeclarationADefinition()) { + Out << "@class " << I << ";"; + return; + } + bool eolnOut = false; + if (SID) + Out << "@interface " << I << " : " << *SID; + else + Out << "@interface " << I; + + // Protocols? + const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols(); + if (!Protocols.empty()) { + for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(), + E = Protocols.end(); I != E; ++I) + Out << (I == Protocols.begin() ? 
'<' : ',') << **I; + Out << "> "; + } + + if (OID->ivar_size() > 0) { + Out << "{\n"; + eolnOut = true; + Indentation += Policy.Indentation; + for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(), + E = OID->ivar_end(); I != E; ++I) { + Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()). + getAsString(Policy) << ' ' << **I << ";\n"; + } + Indentation -= Policy.Indentation; + Out << "}\n"; + } + else if (SID) { + Out << "\n"; + eolnOut = true; + } + + VisitDeclContext(OID, false); + if (!eolnOut) + Out << ' '; + Out << "@end"; + // FIXME: implement the rest... +} + +void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) { + if (!PID->isThisDeclarationADefinition()) { + Out << "@protocol " << *PID << ";\n"; + return; + } + // Protocols? + const ObjCList<ObjCProtocolDecl> &Protocols = PID->getReferencedProtocols(); + if (!Protocols.empty()) { + Out << "@protocol " << *PID; + for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(), + E = Protocols.end(); I != E; ++I) + Out << (I == Protocols.begin() ? '<' : ',') << **I; + Out << ">\n"; + } else + Out << "@protocol " << *PID << '\n'; + VisitDeclContext(PID, false); + Out << "@end"; +} + +void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) { + Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n"; + + VisitDeclContext(PID, false); + Out << "@end"; + // FIXME: implement the rest... +} + +void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) { + Out << "@interface " << *PID->getClassInterface() << '(' << *PID << ")\n"; + if (PID->ivar_size() > 0) { + Out << "{\n"; + Indentation += Policy.Indentation; + for (ObjCCategoryDecl::ivar_iterator I = PID->ivar_begin(), + E = PID->ivar_end(); I != E; ++I) { + Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()). 
+ getAsString(Policy) << ' ' << **I << ";\n"; + } + Indentation -= Policy.Indentation; + Out << "}\n"; + } + + VisitDeclContext(PID, false); + Out << "@end"; + + // FIXME: implement the rest... +} + +void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) { + Out << "@compatibility_alias " << *AID + << ' ' << *AID->getClassInterface() << ";\n"; +} + +/// PrintObjCPropertyDecl - print a property declaration. +/// +void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) { + if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required) + Out << "@required\n"; + else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional) + Out << "@optional\n"; + + Out << "@property"; + if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) { + bool first = true; + Out << " ("; + if (PDecl->getPropertyAttributes() & + ObjCPropertyDecl::OBJC_PR_readonly) { + Out << (first ? ' ' : ',') << "readonly"; + first = false; + } + + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) { + Out << (first ? ' ' : ',') << "getter = " + << PDecl->getGetterName().getAsString(); + first = false; + } + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) { + Out << (first ? ' ' : ',') << "setter = " + << PDecl->getSetterName().getAsString(); + first = false; + } + + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) { + Out << (first ? ' ' : ',') << "assign"; + first = false; + } + + if (PDecl->getPropertyAttributes() & + ObjCPropertyDecl::OBJC_PR_readwrite) { + Out << (first ? ' ' : ',') << "readwrite"; + first = false; + } + + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) { + Out << (first ? ' ' : ',') << "retain"; + first = false; + } + + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) { + Out << (first ? 
' ' : ',') << "strong"; + first = false; + } + + if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) { + Out << (first ? ' ' : ',') << "copy"; + first = false; + } + + if (PDecl->getPropertyAttributes() & + ObjCPropertyDecl::OBJC_PR_nonatomic) { + Out << (first ? ' ' : ',') << "nonatomic"; + first = false; + } + if (PDecl->getPropertyAttributes() & + ObjCPropertyDecl::OBJC_PR_atomic) { + Out << (first ? ' ' : ',') << "atomic"; + first = false; + } + + (void) first; // Silence dead store warning due to idiomatic code. + Out << " )"; + } + Out << ' ' << PDecl->getASTContext().getUnqualifiedObjCPointerType(PDecl->getType()). + getAsString(Policy) << ' ' << *PDecl; + if (Policy.PolishForDeclaration) + Out << ';'; +} + +void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) { + if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) + Out << "@synthesize "; + else + Out << "@dynamic "; + Out << *PID->getPropertyDecl(); + if (PID->getPropertyIvarDecl()) + Out << '=' << *PID->getPropertyIvarDecl(); +} + +void DeclPrinter::VisitUsingDecl(UsingDecl *D) { + if (!D->isAccessDeclaration()) + Out << "using "; + if (D->hasTypename()) + Out << "typename "; + D->getQualifier()->print(Out, Policy); + Out << *D; +} + +void +DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) { + Out << "using typename "; + D->getQualifier()->print(Out, Policy); + Out << D->getDeclName(); +} + +void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) { + if (!D->isAccessDeclaration()) + Out << "using "; + D->getQualifier()->print(Out, Policy); + Out << D->getName(); +} + +void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) { + // ignore +} + +void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) { + Out << "#pragma omp threadprivate"; + if (!D->varlist_empty()) { + for (OMPThreadPrivateDecl::varlist_iterator I = D->varlist_begin(), + E = D->varlist_end(); + I != E; ++I) { + Out << (I 
== D->varlist_begin() ? '(' : ','); + NamedDecl *ND = cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl()); + ND->printQualifiedName(Out); + } + Out << ")"; + } +} + diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp new file mode 100644 index 000000000000..7172fb7b487f --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp @@ -0,0 +1,1201 @@ +//===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the C++ related Decl classes for templates. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTMutationListener.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/IdentifierTable.h" +#include "llvm/ADT/STLExtras.h" +#include <memory> +using namespace clang; + +//===----------------------------------------------------------------------===// +// TemplateParameterList Implementation +//===----------------------------------------------------------------------===// + +TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc, + SourceLocation LAngleLoc, + NamedDecl **Params, unsigned NumParams, + SourceLocation RAngleLoc) + : TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc), + NumParams(NumParams), ContainsUnexpandedParameterPack(false) { + assert(this->NumParams == NumParams && "Too many template parameters"); + for (unsigned Idx = 0; Idx < NumParams; ++Idx) { + NamedDecl *P = Params[Idx]; + begin()[Idx] = P; + + if 
(!P->isTemplateParameterPack()) { + if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) + if (NTTP->getType()->containsUnexpandedParameterPack()) + ContainsUnexpandedParameterPack = true; + + if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P)) + if (TTP->getTemplateParameters()->containsUnexpandedParameterPack()) + ContainsUnexpandedParameterPack = true; + + // FIXME: If a default argument contains an unexpanded parameter pack, the + // template parameter list does too. + } + } +} + +TemplateParameterList * +TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc, + SourceLocation LAngleLoc, NamedDecl **Params, + unsigned NumParams, SourceLocation RAngleLoc) { + unsigned Size = sizeof(TemplateParameterList) + + sizeof(NamedDecl *) * NumParams; + unsigned Align = std::max(llvm::alignOf<TemplateParameterList>(), + llvm::alignOf<NamedDecl*>()); + void *Mem = C.Allocate(Size, Align); + return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params, + NumParams, RAngleLoc); +} + +unsigned TemplateParameterList::getMinRequiredArguments() const { + unsigned NumRequiredArgs = 0; + for (iterator P = const_cast<TemplateParameterList *>(this)->begin(), + PEnd = const_cast<TemplateParameterList *>(this)->end(); + P != PEnd; ++P) { + if ((*P)->isTemplateParameterPack()) { + if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) + if (NTTP->isExpandedParameterPack()) { + NumRequiredArgs += NTTP->getNumExpansionTypes(); + continue; + } + + break; + } + + if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { + if (TTP->hasDefaultArgument()) + break; + } else if (NonTypeTemplateParmDecl *NTTP + = dyn_cast<NonTypeTemplateParmDecl>(*P)) { + if (NTTP->hasDefaultArgument()) + break; + } else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument()) + break; + + ++NumRequiredArgs; + } + + return NumRequiredArgs; +} + +unsigned TemplateParameterList::getDepth() const { + if 
(size() == 0) + return 0; + + const NamedDecl *FirstParm = getParam(0); + if (const TemplateTypeParmDecl *TTP + = dyn_cast<TemplateTypeParmDecl>(FirstParm)) + return TTP->getDepth(); + else if (const NonTypeTemplateParmDecl *NTTP + = dyn_cast<NonTypeTemplateParmDecl>(FirstParm)) + return NTTP->getDepth(); + else + return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth(); +} + +static void AdoptTemplateParameterList(TemplateParameterList *Params, + DeclContext *Owner) { + for (TemplateParameterList::iterator P = Params->begin(), + PEnd = Params->end(); + P != PEnd; ++P) { + (*P)->setDeclContext(Owner); + + if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*P)) + AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner); + } +} + +//===----------------------------------------------------------------------===// +// RedeclarableTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() const { + if (Common) + return Common; + + // Walk the previous-declaration chain until we either find a declaration + // with a common pointer or we run out of previous declarations. + SmallVector<const RedeclarableTemplateDecl *, 2> PrevDecls; + for (const RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev; + Prev = Prev->getPreviousDecl()) { + if (Prev->Common) { + Common = Prev->Common; + break; + } + + PrevDecls.push_back(Prev); + } + + // If we never found a common pointer, allocate one now. + if (!Common) { + // FIXME: If any of the declarations is from an AST file, we probably + // need an update record to add the common data. + + Common = newCommon(getASTContext()); + } + + // Update any previous declarations we saw with the common pointer. 
+ for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I) + PrevDecls[I]->Common = Common; + + return Common; +} + +template <class EntryType> +typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType* +RedeclarableTemplateDecl::findSpecializationImpl( + llvm::FoldingSetVector<EntryType> &Specs, + const TemplateArgument *Args, unsigned NumArgs, + void *&InsertPos) { + typedef SpecEntryTraits<EntryType> SETraits; + llvm::FoldingSetNodeID ID; + EntryType::Profile(ID,Args,NumArgs, getASTContext()); + EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos); + return Entry ? SETraits::getMostRecentDecl(Entry) : 0; +} + +/// \brief Generate the injected template arguments for the given template +/// parameter list, e.g., for the injected-class-name of a class template. +static void GenerateInjectedTemplateArgs(ASTContext &Context, + TemplateParameterList *Params, + TemplateArgument *Args) { + for (TemplateParameterList::iterator Param = Params->begin(), + ParamEnd = Params->end(); + Param != ParamEnd; ++Param) { + TemplateArgument Arg; + if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) { + QualType ArgType = Context.getTypeDeclType(TTP); + if (TTP->isParameterPack()) + ArgType = Context.getPackExpansionType(ArgType, None); + + Arg = TemplateArgument(ArgType); + } else if (NonTypeTemplateParmDecl *NTTP = + dyn_cast<NonTypeTemplateParmDecl>(*Param)) { + Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false, + NTTP->getType().getNonLValueExprType(Context), + Expr::getValueKindForType(NTTP->getType()), + NTTP->getLocation()); + + if (NTTP->isParameterPack()) + E = new (Context) PackExpansionExpr(Context.DependentTy, E, + NTTP->getLocation(), None); + Arg = TemplateArgument(E); + } else { + TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param); + if (TTP->isParameterPack()) + Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); + else + Arg = TemplateArgument(TemplateName(TTP)); + } + + if 
((*Param)->isTemplateParameterPack()) + Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1); + + *Args++ = Arg; + } +} + +//===----------------------------------------------------------------------===// +// FunctionTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +void FunctionTemplateDecl::DeallocateCommon(void *Ptr) { + static_cast<Common *>(Ptr)->~Common(); +} + +FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation L, + DeclarationName Name, + TemplateParameterList *Params, + NamedDecl *Decl) { + AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); + return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl); +} + +FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl)); + return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(), + 0, 0); +} + +RedeclarableTemplateDecl::CommonBase * +FunctionTemplateDecl::newCommon(ASTContext &C) const { + Common *CommonPtr = new (C) Common; + C.AddDeallocation(DeallocateCommon, CommonPtr); + return CommonPtr; +} + +void FunctionTemplateDecl::LoadLazySpecializations() const { + Common *CommonPtr = getCommonPtr(); + if (CommonPtr->LazySpecializations) { + ASTContext &Context = getASTContext(); + uint32_t *Specs = CommonPtr->LazySpecializations; + CommonPtr->LazySpecializations = 0; + for (uint32_t I = 0, N = *Specs++; I != N; ++I) + (void)Context.getExternalSource()->GetExternalDecl(Specs[I]); + } +} + +llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> & +FunctionTemplateDecl::getSpecializations() const { + LoadLazySpecializations(); + return getCommonPtr()->Specializations; +} + +FunctionDecl * +FunctionTemplateDecl::findSpecialization(const TemplateArgument *Args, + unsigned NumArgs, void *&InsertPos) { + return findSpecializationImpl(getSpecializations(), 
Args, NumArgs, InsertPos); +} + +void FunctionTemplateDecl::addSpecialization( + FunctionTemplateSpecializationInfo *Info, void *InsertPos) { + if (InsertPos) + getSpecializations().InsertNode(Info, InsertPos); + else + getSpecializations().GetOrInsertNode(Info); + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(this, Info->Function); +} + +ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() { + TemplateParameterList *Params = getTemplateParameters(); + Common *CommonPtr = getCommonPtr(); + if (!CommonPtr->InjectedArgs) { + CommonPtr->InjectedArgs + = new (getASTContext()) TemplateArgument[Params->size()]; + GenerateInjectedTemplateArgs(getASTContext(), Params, + CommonPtr->InjectedArgs); + } + + return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size()); +} + +//===----------------------------------------------------------------------===// +// ClassTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +void ClassTemplateDecl::DeallocateCommon(void *Ptr) { + static_cast<Common *>(Ptr)->~Common(); +} + +ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation L, + DeclarationName Name, + TemplateParameterList *Params, + NamedDecl *Decl, + ClassTemplateDecl *PrevDecl) { + AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); + ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl); + New->setPreviousDecl(PrevDecl); + return New; +} + +ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl)); + return new (Mem) ClassTemplateDecl(EmptyShell()); +} + +void ClassTemplateDecl::LoadLazySpecializations() const { + Common *CommonPtr = getCommonPtr(); + if (CommonPtr->LazySpecializations) { + ASTContext &Context = getASTContext(); + uint32_t *Specs = 
CommonPtr->LazySpecializations; + CommonPtr->LazySpecializations = 0; + for (uint32_t I = 0, N = *Specs++; I != N; ++I) + (void)Context.getExternalSource()->GetExternalDecl(Specs[I]); + } +} + +llvm::FoldingSetVector<ClassTemplateSpecializationDecl> & +ClassTemplateDecl::getSpecializations() const { + LoadLazySpecializations(); + return getCommonPtr()->Specializations; +} + +llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> & +ClassTemplateDecl::getPartialSpecializations() { + LoadLazySpecializations(); + return getCommonPtr()->PartialSpecializations; +} + +RedeclarableTemplateDecl::CommonBase * +ClassTemplateDecl::newCommon(ASTContext &C) const { + Common *CommonPtr = new (C) Common; + C.AddDeallocation(DeallocateCommon, CommonPtr); + return CommonPtr; +} + +ClassTemplateSpecializationDecl * +ClassTemplateDecl::findSpecialization(const TemplateArgument *Args, + unsigned NumArgs, void *&InsertPos) { + return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos); +} + +void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D, + void *InsertPos) { + if (InsertPos) + getSpecializations().InsertNode(D, InsertPos); + else { + ClassTemplateSpecializationDecl *Existing + = getSpecializations().GetOrInsertNode(D); + (void)Existing; + assert(Existing->isCanonicalDecl() && "Non-canonical specialization?"); + } + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(this, D); +} + +ClassTemplatePartialSpecializationDecl * +ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args, + unsigned NumArgs, + void *&InsertPos) { + return findSpecializationImpl(getPartialSpecializations(), Args, NumArgs, + InsertPos); +} + +void ClassTemplateDecl::AddPartialSpecialization( + ClassTemplatePartialSpecializationDecl *D, + void *InsertPos) { + if (InsertPos) + getPartialSpecializations().InsertNode(D, InsertPos); + else { + ClassTemplatePartialSpecializationDecl *Existing + = 
getPartialSpecializations().GetOrInsertNode(D); + (void)Existing; + assert(Existing->isCanonicalDecl() && "Non-canonical specialization?"); + } + + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(this, D); +} + +void ClassTemplateDecl::getPartialSpecializations( + SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) { + llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &PartialSpecs + = getPartialSpecializations(); + PS.clear(); + PS.reserve(PartialSpecs.size()); + for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator + P = PartialSpecs.begin(), PEnd = PartialSpecs.end(); + P != PEnd; ++P) + PS.push_back(P->getMostRecentDecl()); +} + +ClassTemplatePartialSpecializationDecl * +ClassTemplateDecl::findPartialSpecialization(QualType T) { + ASTContext &Context = getASTContext(); + using llvm::FoldingSetVector; + typedef FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator + partial_spec_iterator; + for (partial_spec_iterator P = getPartialSpecializations().begin(), + PEnd = getPartialSpecializations().end(); + P != PEnd; ++P) { + if (Context.hasSameType(P->getInjectedSpecializationType(), T)) + return P->getMostRecentDecl(); + } + + return 0; +} + +ClassTemplatePartialSpecializationDecl * +ClassTemplateDecl::findPartialSpecInstantiatedFromMember( + ClassTemplatePartialSpecializationDecl *D) { + Decl *DCanon = D->getCanonicalDecl(); + for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator + P = getPartialSpecializations().begin(), + PEnd = getPartialSpecializations().end(); + P != PEnd; ++P) { + if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon) + return P->getMostRecentDecl(); + } + + return 0; +} + +QualType +ClassTemplateDecl::getInjectedClassNameSpecialization() { + Common *CommonPtr = getCommonPtr(); + if (!CommonPtr->InjectedClassNameType.isNull()) + return CommonPtr->InjectedClassNameType; + + // C++0x [temp.dep.type]p2: + 
// The template argument list of a primary template is a template argument + // list in which the nth template argument has the value of the nth template + // parameter of the class template. If the nth template parameter is a + // template parameter pack (14.5.3), the nth template argument is a pack + // expansion (14.5.3) whose pattern is the name of the template parameter + // pack. + ASTContext &Context = getASTContext(); + TemplateParameterList *Params = getTemplateParameters(); + SmallVector<TemplateArgument, 16> TemplateArgs; + TemplateArgs.resize(Params->size()); + GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data()); + CommonPtr->InjectedClassNameType + = Context.getTemplateSpecializationType(TemplateName(this), + &TemplateArgs[0], + TemplateArgs.size()); + return CommonPtr->InjectedClassNameType; +} + +//===----------------------------------------------------------------------===// +// TemplateTypeParm Allocation/Deallocation Method Implementations +//===----------------------------------------------------------------------===// + +TemplateTypeParmDecl * +TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation KeyLoc, SourceLocation NameLoc, + unsigned D, unsigned P, IdentifierInfo *Id, + bool Typename, bool ParameterPack) { + TemplateTypeParmDecl *TTPDecl = + new (C) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename); + QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl); + TTPDecl->TypeForDecl = TTPType.getTypePtr(); + return TTPDecl; +} + +TemplateTypeParmDecl * +TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl)); + return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(), + 0, false); +} + +SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const { + return hasDefaultArgument() + ? 
DefaultArgument->getTypeLoc().getBeginLoc() + : SourceLocation(); +} + +SourceRange TemplateTypeParmDecl::getSourceRange() const { + if (hasDefaultArgument() && !defaultArgumentWasInherited()) + return SourceRange(getLocStart(), + DefaultArgument->getTypeLoc().getEndLoc()); + else + return TypeDecl::getSourceRange(); +} + +unsigned TemplateTypeParmDecl::getDepth() const { + return TypeForDecl->getAs<TemplateTypeParmType>()->getDepth(); +} + +unsigned TemplateTypeParmDecl::getIndex() const { + return TypeForDecl->getAs<TemplateTypeParmType>()->getIndex(); +} + +bool TemplateTypeParmDecl::isParameterPack() const { + return TypeForDecl->getAs<TemplateTypeParmType>()->isParameterPack(); +} + +//===----------------------------------------------------------------------===// +// NonTypeTemplateParmDecl Method Implementations +//===----------------------------------------------------------------------===// + +NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC, + SourceLocation StartLoc, + SourceLocation IdLoc, + unsigned D, unsigned P, + IdentifierInfo *Id, + QualType T, + TypeSourceInfo *TInfo, + const QualType *ExpandedTypes, + unsigned NumExpandedTypes, + TypeSourceInfo **ExpandedTInfos) + : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc), + TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false), + ParameterPack(true), ExpandedParameterPack(true), + NumExpandedTypes(NumExpandedTypes) +{ + if (ExpandedTypes && ExpandedTInfos) { + void **TypesAndInfos = reinterpret_cast<void **>(this + 1); + for (unsigned I = 0; I != NumExpandedTypes; ++I) { + TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr(); + TypesAndInfos[2*I + 1] = ExpandedTInfos[I]; + } + } +} + +NonTypeTemplateParmDecl * +NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + unsigned D, unsigned P, IdentifierInfo *Id, + QualType T, bool ParameterPack, + TypeSourceInfo *TInfo) { + return new (C) 
NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id, + T, ParameterPack, TInfo); +} + +NonTypeTemplateParmDecl * +NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + unsigned D, unsigned P, + IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, + const QualType *ExpandedTypes, + unsigned NumExpandedTypes, + TypeSourceInfo **ExpandedTInfos) { + unsigned Size = sizeof(NonTypeTemplateParmDecl) + + NumExpandedTypes * 2 * sizeof(void*); + void *Mem = C.Allocate(Size); + return new (Mem) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, + D, P, Id, T, TInfo, + ExpandedTypes, NumExpandedTypes, + ExpandedTInfos); +} + +NonTypeTemplateParmDecl * +NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl)); + return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(), + SourceLocation(), 0, 0, 0, + QualType(), false, 0); +} + +NonTypeTemplateParmDecl * +NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, + unsigned NumExpandedTypes) { + unsigned Size = sizeof(NonTypeTemplateParmDecl) + + NumExpandedTypes * 2 * sizeof(void*); + + void *Mem = AllocateDeserializedDecl(C, ID, Size); + return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(), + SourceLocation(), 0, 0, 0, + QualType(), 0, 0, NumExpandedTypes, + 0); +} + +SourceRange NonTypeTemplateParmDecl::getSourceRange() const { + if (hasDefaultArgument() && !defaultArgumentWasInherited()) + return SourceRange(getOuterLocStart(), + getDefaultArgument()->getSourceRange().getEnd()); + return DeclaratorDecl::getSourceRange(); +} + +SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const { + return hasDefaultArgument() + ? 
getDefaultArgument()->getSourceRange().getBegin() + : SourceLocation(); +} + +//===----------------------------------------------------------------------===// +// TemplateTemplateParmDecl Method Implementations +//===----------------------------------------------------------------------===// + +void TemplateTemplateParmDecl::anchor() { } + +TemplateTemplateParmDecl::TemplateTemplateParmDecl( + DeclContext *DC, SourceLocation L, unsigned D, unsigned P, + IdentifierInfo *Id, TemplateParameterList *Params, + unsigned NumExpansions, TemplateParameterList * const *Expansions) + : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params), + TemplateParmPosition(D, P), DefaultArgument(), + DefaultArgumentWasInherited(false), ParameterPack(true), + ExpandedParameterPack(true), NumExpandedParams(NumExpansions) { + if (Expansions) + std::memcpy(reinterpret_cast<void*>(this + 1), Expansions, + sizeof(TemplateParameterList*) * NumExpandedParams); +} + +TemplateTemplateParmDecl * +TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation L, unsigned D, unsigned P, + bool ParameterPack, IdentifierInfo *Id, + TemplateParameterList *Params) { + return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id, + Params); +} + +TemplateTemplateParmDecl * +TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, + SourceLocation L, unsigned D, unsigned P, + IdentifierInfo *Id, + TemplateParameterList *Params, + ArrayRef<TemplateParameterList *> Expansions) { + void *Mem = C.Allocate(sizeof(TemplateTemplateParmDecl) + + sizeof(TemplateParameterList*) * Expansions.size()); + return new (Mem) TemplateTemplateParmDecl(DC, L, D, P, Id, Params, + Expansions.size(), + Expansions.data()); +} + +TemplateTemplateParmDecl * +TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl)); + return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 
false, + 0, 0); +} + +TemplateTemplateParmDecl * +TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, + unsigned NumExpansions) { + unsigned Size = sizeof(TemplateTemplateParmDecl) + + sizeof(TemplateParameterList*) * NumExpansions; + void *Mem = AllocateDeserializedDecl(C, ID, Size); + return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0, + NumExpansions, 0); +} + +//===----------------------------------------------------------------------===// +// TemplateArgumentList Implementation +//===----------------------------------------------------------------------===// +TemplateArgumentList * +TemplateArgumentList::CreateCopy(ASTContext &Context, + const TemplateArgument *Args, + unsigned NumArgs) { + std::size_t Size = sizeof(TemplateArgumentList) + + NumArgs * sizeof(TemplateArgument); + void *Mem = Context.Allocate(Size); + TemplateArgument *StoredArgs + = reinterpret_cast<TemplateArgument *>( + static_cast<TemplateArgumentList *>(Mem) + 1); + std::uninitialized_copy(Args, Args + NumArgs, StoredArgs); + return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true); +} + +FunctionTemplateSpecializationInfo * +FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD, + FunctionTemplateDecl *Template, + TemplateSpecializationKind TSK, + const TemplateArgumentList *TemplateArgs, + const TemplateArgumentListInfo *TemplateArgsAsWritten, + SourceLocation POI) { + const ASTTemplateArgumentListInfo *ArgsAsWritten = 0; + if (TemplateArgsAsWritten) + ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C, + *TemplateArgsAsWritten); + + return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK, + TemplateArgs, + ArgsAsWritten, + POI); +} + +//===----------------------------------------------------------------------===// +// TemplateDecl Implementation +//===----------------------------------------------------------------------===// + +void TemplateDecl::anchor() { } + 
+//===----------------------------------------------------------------------===// +// ClassTemplateSpecializationDecl Implementation +//===----------------------------------------------------------------------===// +ClassTemplateSpecializationDecl:: +ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK, + DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, + ClassTemplateDecl *SpecializedTemplate, + const TemplateArgument *Args, + unsigned NumArgs, + ClassTemplateSpecializationDecl *PrevDecl) + : CXXRecordDecl(DK, TK, DC, StartLoc, IdLoc, + SpecializedTemplate->getIdentifier(), + PrevDecl), + SpecializedTemplate(SpecializedTemplate), + ExplicitInfo(0), + TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)), + SpecializationKind(TSK_Undeclared) { +} + +ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK) + : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), SourceLocation(), 0, 0), + ExplicitInfo(0), + SpecializationKind(TSK_Undeclared) { +} + +ClassTemplateSpecializationDecl * +ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK, + DeclContext *DC, + SourceLocation StartLoc, + SourceLocation IdLoc, + ClassTemplateDecl *SpecializedTemplate, + const TemplateArgument *Args, + unsigned NumArgs, + ClassTemplateSpecializationDecl *PrevDecl) { + ClassTemplateSpecializationDecl *Result + = new (Context)ClassTemplateSpecializationDecl(Context, + ClassTemplateSpecialization, + TK, DC, StartLoc, IdLoc, + SpecializedTemplate, + Args, NumArgs, + PrevDecl); + Result->MayHaveOutOfDateDef = false; + + Context.getTypeDeclType(Result, PrevDecl); + return Result; +} + +ClassTemplateSpecializationDecl * +ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, + sizeof(ClassTemplateSpecializationDecl)); + ClassTemplateSpecializationDecl *Result = + new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization); + 
Result->MayHaveOutOfDateDef = false; + return Result; +} + +void ClassTemplateSpecializationDecl::getNameForDiagnostic( + raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const { + NamedDecl::getNameForDiagnostic(OS, Policy, Qualified); + + const TemplateArgumentList &TemplateArgs = getTemplateArgs(); + TemplateSpecializationType::PrintTemplateArgumentList( + OS, TemplateArgs.data(), TemplateArgs.size(), Policy); +} + +ClassTemplateDecl * +ClassTemplateSpecializationDecl::getSpecializedTemplate() const { + if (SpecializedPartialSpecialization *PartialSpec + = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>()) + return PartialSpec->PartialSpecialization->getSpecializedTemplate(); + return SpecializedTemplate.get<ClassTemplateDecl*>(); +} + +SourceRange +ClassTemplateSpecializationDecl::getSourceRange() const { + if (ExplicitInfo) { + SourceLocation Begin = getTemplateKeywordLoc(); + if (Begin.isValid()) { + // Here we have an explicit (partial) specialization or instantiation. + assert(getSpecializationKind() == TSK_ExplicitSpecialization || + getSpecializationKind() == TSK_ExplicitInstantiationDeclaration || + getSpecializationKind() == TSK_ExplicitInstantiationDefinition); + if (getExternLoc().isValid()) + Begin = getExternLoc(); + SourceLocation End = getRBraceLoc(); + if (End.isInvalid()) + End = getTypeAsWritten()->getTypeLoc().getEndLoc(); + return SourceRange(Begin, End); + } + // An implicit instantiation of a class template partial specialization + // uses ExplicitInfo to record the TypeAsWritten, but the source + // locations should be retrieved from the instantiation pattern. + typedef ClassTemplatePartialSpecializationDecl CTPSDecl; + CTPSDecl *ctpsd = const_cast<CTPSDecl*>(cast<CTPSDecl>(this)); + CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember(); + assert(inst_from != 0); + return inst_from->getSourceRange(); + } + else { + // No explicit info available. 
+ llvm::PointerUnion<ClassTemplateDecl *, + ClassTemplatePartialSpecializationDecl *> + inst_from = getInstantiatedFrom(); + if (inst_from.isNull()) + return getSpecializedTemplate()->getSourceRange(); + if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>()) + return ctd->getSourceRange(); + return inst_from.get<ClassTemplatePartialSpecializationDecl*>() + ->getSourceRange(); + } +} + +//===----------------------------------------------------------------------===// +// ClassTemplatePartialSpecializationDecl Implementation +//===----------------------------------------------------------------------===// +void ClassTemplatePartialSpecializationDecl::anchor() { } + +ClassTemplatePartialSpecializationDecl:: +ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK, + DeclContext *DC, + SourceLocation StartLoc, + SourceLocation IdLoc, + TemplateParameterList *Params, + ClassTemplateDecl *SpecializedTemplate, + const TemplateArgument *Args, + unsigned NumArgs, + const ASTTemplateArgumentListInfo *ArgInfos, + ClassTemplatePartialSpecializationDecl *PrevDecl) + : ClassTemplateSpecializationDecl(Context, + ClassTemplatePartialSpecialization, + TK, DC, StartLoc, IdLoc, + SpecializedTemplate, + Args, NumArgs, PrevDecl), + TemplateParams(Params), ArgsAsWritten(ArgInfos), + InstantiatedFromMember(0, false) +{ + AdoptTemplateParameterList(Params, this); +} + +ClassTemplatePartialSpecializationDecl * +ClassTemplatePartialSpecializationDecl:: +Create(ASTContext &Context, TagKind TK,DeclContext *DC, + SourceLocation StartLoc, SourceLocation IdLoc, + TemplateParameterList *Params, + ClassTemplateDecl *SpecializedTemplate, + const TemplateArgument *Args, + unsigned NumArgs, + const TemplateArgumentListInfo &ArgInfos, + QualType CanonInjectedType, + ClassTemplatePartialSpecializationDecl *PrevDecl) { + const ASTTemplateArgumentListInfo *ASTArgInfos = + ASTTemplateArgumentListInfo::Create(Context, ArgInfos); + + ClassTemplatePartialSpecializationDecl 
*Result + = new (Context)ClassTemplatePartialSpecializationDecl(Context, TK, DC, + StartLoc, IdLoc, + Params, + SpecializedTemplate, + Args, NumArgs, + ASTArgInfos, + PrevDecl); + Result->setSpecializationKind(TSK_ExplicitSpecialization); + Result->MayHaveOutOfDateDef = false; + + Context.getInjectedClassNameType(Result, CanonInjectedType); + return Result; +} + +ClassTemplatePartialSpecializationDecl * +ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, + sizeof(ClassTemplatePartialSpecializationDecl)); + ClassTemplatePartialSpecializationDecl *Result + = new (Mem) ClassTemplatePartialSpecializationDecl(); + Result->MayHaveOutOfDateDef = false; + return Result; +} + +//===----------------------------------------------------------------------===// +// FriendTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +void FriendTemplateDecl::anchor() { } + +FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context, + DeclContext *DC, + SourceLocation L, + unsigned NParams, + TemplateParameterList **Params, + FriendUnion Friend, + SourceLocation FLoc) { + FriendTemplateDecl *Result + = new (Context) FriendTemplateDecl(DC, L, NParams, Params, Friend, FLoc); + return Result; +} + +FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl)); + return new (Mem) FriendTemplateDecl(EmptyShell()); +} + +//===----------------------------------------------------------------------===// +// TypeAliasTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C, + DeclContext *DC, + SourceLocation L, + DeclarationName Name, + TemplateParameterList *Params, + NamedDecl *Decl) { + AdoptTemplateParameterList(Params, 
DC); + return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl); +} + +TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl)); + return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(), + 0, 0); +} + +void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) { + static_cast<Common *>(Ptr)->~Common(); +} +RedeclarableTemplateDecl::CommonBase * +TypeAliasTemplateDecl::newCommon(ASTContext &C) const { + Common *CommonPtr = new (C) Common; + C.AddDeallocation(DeallocateCommon, CommonPtr); + return CommonPtr; +} + +//===----------------------------------------------------------------------===// +// ClassScopeFunctionSpecializationDecl Implementation +//===----------------------------------------------------------------------===// + +void ClassScopeFunctionSpecializationDecl::anchor() { } + +ClassScopeFunctionSpecializationDecl * +ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, + sizeof(ClassScopeFunctionSpecializationDecl)); + return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0, + false, TemplateArgumentListInfo()); +} + +//===----------------------------------------------------------------------===// +// VarTemplateDecl Implementation +//===----------------------------------------------------------------------===// + +void VarTemplateDecl::DeallocateCommon(void *Ptr) { + static_cast<Common *>(Ptr)->~Common(); +} + +VarTemplateDecl *VarTemplateDecl::getDefinition() { + VarTemplateDecl *CurD = this; + while (CurD) { + if (CurD->isThisDeclarationADefinition()) + return CurD; + CurD = CurD->getPreviousDecl(); + } + return 0; +} + +VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC, + SourceLocation L, DeclarationName Name, + TemplateParameterList *Params, + NamedDecl *Decl, + VarTemplateDecl 
*PrevDecl) { + VarTemplateDecl *New = new (C) VarTemplateDecl(DC, L, Name, Params, Decl); + New->setPreviousDecl(PrevDecl); + return New; +} + +VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarTemplateDecl)); + return new (Mem) VarTemplateDecl(EmptyShell()); +} + +// TODO: Unify accross class, function and variable templates? +// May require moving this and Common to RedeclarableTemplateDecl. +void VarTemplateDecl::LoadLazySpecializations() const { + Common *CommonPtr = getCommonPtr(); + if (CommonPtr->LazySpecializations) { + ASTContext &Context = getASTContext(); + uint32_t *Specs = CommonPtr->LazySpecializations; + CommonPtr->LazySpecializations = 0; + for (uint32_t I = 0, N = *Specs++; I != N; ++I) + (void)Context.getExternalSource()->GetExternalDecl(Specs[I]); + } +} + +llvm::FoldingSetVector<VarTemplateSpecializationDecl> & +VarTemplateDecl::getSpecializations() const { + LoadLazySpecializations(); + return getCommonPtr()->Specializations; +} + +llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> & +VarTemplateDecl::getPartialSpecializations() { + LoadLazySpecializations(); + return getCommonPtr()->PartialSpecializations; +} + +RedeclarableTemplateDecl::CommonBase * +VarTemplateDecl::newCommon(ASTContext &C) const { + Common *CommonPtr = new (C) Common; + C.AddDeallocation(DeallocateCommon, CommonPtr); + return CommonPtr; +} + +VarTemplateSpecializationDecl * +VarTemplateDecl::findSpecialization(const TemplateArgument *Args, + unsigned NumArgs, void *&InsertPos) { + return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos); +} + +void VarTemplateDecl::AddSpecialization(VarTemplateSpecializationDecl *D, + void *InsertPos) { + if (InsertPos) + getSpecializations().InsertNode(D, InsertPos); + else { + VarTemplateSpecializationDecl *Existing = + getSpecializations().GetOrInsertNode(D); + (void)Existing; + assert(Existing->isCanonicalDecl() && 
"Non-canonical specialization?"); + } + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(this, D); +} + +VarTemplatePartialSpecializationDecl * +VarTemplateDecl::findPartialSpecialization(const TemplateArgument *Args, + unsigned NumArgs, void *&InsertPos) { + return findSpecializationImpl(getPartialSpecializations(), Args, NumArgs, + InsertPos); +} + +void VarTemplateDecl::AddPartialSpecialization( + VarTemplatePartialSpecializationDecl *D, void *InsertPos) { + if (InsertPos) + getPartialSpecializations().InsertNode(D, InsertPos); + else { + VarTemplatePartialSpecializationDecl *Existing = + getPartialSpecializations().GetOrInsertNode(D); + (void)Existing; + assert(Existing->isCanonicalDecl() && "Non-canonical specialization?"); + } + + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(this, D); +} + +void VarTemplateDecl::getPartialSpecializations( + SmallVectorImpl<VarTemplatePartialSpecializationDecl *> &PS) { + llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &PartialSpecs = + getPartialSpecializations(); + PS.clear(); + PS.reserve(PartialSpecs.size()); + for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator + P = PartialSpecs.begin(), + PEnd = PartialSpecs.end(); + P != PEnd; ++P) + PS.push_back(P->getMostRecentDecl()); +} + +VarTemplatePartialSpecializationDecl * +VarTemplateDecl::findPartialSpecInstantiatedFromMember( + VarTemplatePartialSpecializationDecl *D) { + Decl *DCanon = D->getCanonicalDecl(); + for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator + P = getPartialSpecializations().begin(), + PEnd = getPartialSpecializations().end(); + P != PEnd; ++P) { + if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon) + return P->getMostRecentDecl(); + } + + return 0; +} + +//===----------------------------------------------------------------------===// +// VarTemplateSpecializationDecl Implementation 
+//===----------------------------------------------------------------------===// +VarTemplateSpecializationDecl::VarTemplateSpecializationDecl( + ASTContext &Context, Kind DK, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T, + TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args, + unsigned NumArgs) + : VarDecl(DK, DC, StartLoc, IdLoc, SpecializedTemplate->getIdentifier(), T, + TInfo, S), + SpecializedTemplate(SpecializedTemplate), ExplicitInfo(0), + TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)), + SpecializationKind(TSK_Undeclared) {} + +VarTemplateSpecializationDecl::VarTemplateSpecializationDecl(Kind DK) + : VarDecl(DK, 0, SourceLocation(), SourceLocation(), 0, QualType(), 0, + SC_None), + ExplicitInfo(0), SpecializationKind(TSK_Undeclared) {} + +VarTemplateSpecializationDecl *VarTemplateSpecializationDecl::Create( + ASTContext &Context, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T, + TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args, + unsigned NumArgs) { + VarTemplateSpecializationDecl *Result = new (Context) + VarTemplateSpecializationDecl(Context, VarTemplateSpecialization, DC, + StartLoc, IdLoc, SpecializedTemplate, T, + TInfo, S, Args, NumArgs); + return Result; +} + +VarTemplateSpecializationDecl * +VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) { + void *Mem = + AllocateDeserializedDecl(C, ID, sizeof(VarTemplateSpecializationDecl)); + VarTemplateSpecializationDecl *Result = + new (Mem) VarTemplateSpecializationDecl(VarTemplateSpecialization); + return Result; +} + +void VarTemplateSpecializationDecl::getNameForDiagnostic( + raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const { + NamedDecl::getNameForDiagnostic(OS, Policy, Qualified); + + const TemplateArgumentList &TemplateArgs = getTemplateArgs(); + 
TemplateSpecializationType::PrintTemplateArgumentList( + OS, TemplateArgs.data(), TemplateArgs.size(), Policy); +} + +VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const { + if (SpecializedPartialSpecialization *PartialSpec = + SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization *>()) + return PartialSpec->PartialSpecialization->getSpecializedTemplate(); + return SpecializedTemplate.get<VarTemplateDecl *>(); +} + +void VarTemplateSpecializationDecl::setTemplateArgsInfo( + const TemplateArgumentListInfo &ArgsInfo) { + unsigned N = ArgsInfo.size(); + TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc()); + TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc()); + for (unsigned I = 0; I != N; ++I) + TemplateArgsInfo.addArgument(ArgsInfo[I]); +} + +//===----------------------------------------------------------------------===// +// VarTemplatePartialSpecializationDecl Implementation +//===----------------------------------------------------------------------===// +void VarTemplatePartialSpecializationDecl::anchor() {} + +VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl( + ASTContext &Context, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, TemplateParameterList *Params, + VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo, + StorageClass S, const TemplateArgument *Args, unsigned NumArgs, + const ASTTemplateArgumentListInfo *ArgInfos) + : VarTemplateSpecializationDecl(Context, VarTemplatePartialSpecialization, + DC, StartLoc, IdLoc, SpecializedTemplate, T, + TInfo, S, Args, NumArgs), + TemplateParams(Params), ArgsAsWritten(ArgInfos), + InstantiatedFromMember(0, false) { + // TODO: The template parameters should be in DC by now. Verify. 
+ // AdoptTemplateParameterList(Params, DC); +} + +VarTemplatePartialSpecializationDecl * +VarTemplatePartialSpecializationDecl::Create( + ASTContext &Context, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, TemplateParameterList *Params, + VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo, + StorageClass S, const TemplateArgument *Args, unsigned NumArgs, + const TemplateArgumentListInfo &ArgInfos) { + const ASTTemplateArgumentListInfo *ASTArgInfos + = ASTTemplateArgumentListInfo::Create(Context, ArgInfos); + + VarTemplatePartialSpecializationDecl *Result = + new (Context) VarTemplatePartialSpecializationDecl( + Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo, + S, Args, NumArgs, ASTArgInfos); + Result->setSpecializationKind(TSK_ExplicitSpecialization); + return Result; +} + +VarTemplatePartialSpecializationDecl * +VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C, + unsigned ID) { + void *Mem = AllocateDeserializedDecl( + C, ID, sizeof(VarTemplatePartialSpecializationDecl)); + VarTemplatePartialSpecializationDecl *Result = + new (Mem) VarTemplatePartialSpecializationDecl(); + return Result; +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp new file mode 100644 index 000000000000..e064e23a0ae9 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp @@ -0,0 +1,579 @@ +//===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the DeclarationName and DeclarationNameTable +// classes. 
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclarationName.h" +#include "clang/AST/Type.h" +#include "clang/AST/TypeLoc.h" +#include "clang/AST/TypeOrdering.h" +#include "clang/Basic/IdentifierTable.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; + +namespace clang { +/// CXXSpecialName - Records the type associated with one of the +/// "special" kinds of declaration names in C++, e.g., constructors, +/// destructors, and conversion functions. +class CXXSpecialName + : public DeclarationNameExtra, public llvm::FoldingSetNode { +public: + /// Type - The type associated with this declaration name. + QualType Type; + + /// FETokenInfo - Extra information associated with this declaration + /// name that can be used by the front end. + void *FETokenInfo; + + void Profile(llvm::FoldingSetNodeID &ID) { + ID.AddInteger(ExtraKindOrNumArgs); + ID.AddPointer(Type.getAsOpaquePtr()); + } +}; + +/// CXXOperatorIdName - Contains extra information for the name of an +/// overloaded operator in C++, such as "operator+. +class CXXOperatorIdName : public DeclarationNameExtra { +public: + /// FETokenInfo - Extra information associated with this operator + /// name that can be used by the front end. + void *FETokenInfo; +}; + +/// CXXLiteralOperatorName - Contains the actual identifier that makes up the +/// name. +/// +/// This identifier is stored here rather than directly in DeclarationName so as +/// to allow Objective-C selectors, which are about a million times more common, +/// to consume minimal memory. 
+class CXXLiteralOperatorIdName + : public DeclarationNameExtra, public llvm::FoldingSetNode { +public: + IdentifierInfo *ID; + + /// FETokenInfo - Extra information associated with this operator + /// name that can be used by the front end. + void *FETokenInfo; + + void Profile(llvm::FoldingSetNodeID &FSID) { + FSID.AddPointer(ID); + } +}; + +static int compareInt(unsigned A, unsigned B) { + return (A < B ? -1 : (A > B ? 1 : 0)); +} + +int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) { + if (LHS.getNameKind() != RHS.getNameKind()) + return (LHS.getNameKind() < RHS.getNameKind() ? -1 : 1); + + switch (LHS.getNameKind()) { + case DeclarationName::Identifier: { + IdentifierInfo *LII = LHS.getAsIdentifierInfo(); + IdentifierInfo *RII = RHS.getAsIdentifierInfo(); + if (!LII) return RII ? -1 : 0; + if (!RII) return 1; + + return LII->getName().compare(RII->getName()); + } + + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: { + Selector LHSSelector = LHS.getObjCSelector(); + Selector RHSSelector = RHS.getObjCSelector(); + unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs(); + for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) { + switch (LHSSelector.getNameForSlot(I).compare( + RHSSelector.getNameForSlot(I))) { + case -1: return true; + case 1: return false; + default: break; + } + } + + return compareInt(LN, RN); + } + + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + if (QualTypeOrdering()(LHS.getCXXNameType(), RHS.getCXXNameType())) + return -1; + if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType())) + return 1; + return 0; + + case DeclarationName::CXXOperatorName: + return compareInt(LHS.getCXXOverloadedOperator(), + RHS.getCXXOverloadedOperator()); + + case DeclarationName::CXXLiteralOperatorName: + return 
LHS.getCXXLiteralIdentifier()->getName().compare( + RHS.getCXXLiteralIdentifier()->getName()); + + case DeclarationName::CXXUsingDirective: + return 0; + } + + llvm_unreachable("Invalid DeclarationName Kind!"); +} + +raw_ostream &operator<<(raw_ostream &OS, DeclarationName N) { + switch (N.getNameKind()) { + case DeclarationName::Identifier: + if (const IdentifierInfo *II = N.getAsIdentifierInfo()) + OS << II->getName(); + return OS; + + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + return OS << N.getObjCSelector().getAsString(); + + case DeclarationName::CXXConstructorName: { + QualType ClassType = N.getCXXNameType(); + if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) + return OS << *ClassRec->getDecl(); + return OS << ClassType.getAsString(); + } + + case DeclarationName::CXXDestructorName: { + OS << '~'; + QualType Type = N.getCXXNameType(); + if (const RecordType *Rec = Type->getAs<RecordType>()) + return OS << *Rec->getDecl(); + return OS << Type.getAsString(); + } + + case DeclarationName::CXXOperatorName: { + static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = { + 0, +#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \ + Spelling, +#include "clang/Basic/OperatorKinds.def" + }; + const char *OpName = OperatorNames[N.getCXXOverloadedOperator()]; + assert(OpName && "not an overloaded operator"); + + OS << "operator"; + if (OpName[0] >= 'a' && OpName[0] <= 'z') + OS << ' '; + return OS << OpName; + } + + case DeclarationName::CXXLiteralOperatorName: + return OS << "operator \"\" " << N.getCXXLiteralIdentifier()->getName(); + + case DeclarationName::CXXConversionFunctionName: { + OS << "operator "; + QualType Type = N.getCXXNameType(); + if (const RecordType *Rec = Type->getAs<RecordType>()) + return OS << *Rec->getDecl(); + return OS << Type.getAsString(); + } + case DeclarationName::CXXUsingDirective: + return OS << 
"<using-directive>"; + } + + llvm_unreachable("Unexpected declaration name kind"); +} + +} // end namespace clang + +DeclarationName::NameKind DeclarationName::getNameKind() const { + switch (getStoredNameKind()) { + case StoredIdentifier: return Identifier; + case StoredObjCZeroArgSelector: return ObjCZeroArgSelector; + case StoredObjCOneArgSelector: return ObjCOneArgSelector; + + case StoredDeclarationNameExtra: + switch (getExtra()->ExtraKindOrNumArgs) { + case DeclarationNameExtra::CXXConstructor: + return CXXConstructorName; + + case DeclarationNameExtra::CXXDestructor: + return CXXDestructorName; + + case DeclarationNameExtra::CXXConversionFunction: + return CXXConversionFunctionName; + + case DeclarationNameExtra::CXXLiteralOperator: + return CXXLiteralOperatorName; + + case DeclarationNameExtra::CXXUsingDirective: + return CXXUsingDirective; + + default: + // Check if we have one of the CXXOperator* enumeration values. + if (getExtra()->ExtraKindOrNumArgs < + DeclarationNameExtra::CXXUsingDirective) + return CXXOperatorName; + + return ObjCMultiArgSelector; + } + } + + // Can't actually get here. 
+ llvm_unreachable("This should be unreachable!"); +} + +bool DeclarationName::isDependentName() const { + QualType T = getCXXNameType(); + return !T.isNull() && T->isDependentType(); +} + +std::string DeclarationName::getAsString() const { + std::string Result; + llvm::raw_string_ostream OS(Result); + OS << *this; + return OS.str(); +} + +QualType DeclarationName::getCXXNameType() const { + if (CXXSpecialName *CXXName = getAsCXXSpecialName()) + return CXXName->Type; + else + return QualType(); +} + +OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const { + if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) { + unsigned value + = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction; + return static_cast<OverloadedOperatorKind>(value); + } else { + return OO_None; + } +} + +IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const { + if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName()) + return CXXLit->ID; + else + return 0; +} + +void *DeclarationName::getFETokenInfoAsVoidSlow() const { + switch (getNameKind()) { + case Identifier: + llvm_unreachable("Handled by getFETokenInfo()"); + + case CXXConstructorName: + case CXXDestructorName: + case CXXConversionFunctionName: + return getAsCXXSpecialName()->FETokenInfo; + + case CXXOperatorName: + return getAsCXXOperatorIdName()->FETokenInfo; + + case CXXLiteralOperatorName: + return getAsCXXLiteralOperatorIdName()->FETokenInfo; + + default: + llvm_unreachable("Declaration name has no FETokenInfo"); + } +} + +void DeclarationName::setFETokenInfo(void *T) { + switch (getNameKind()) { + case Identifier: + getAsIdentifierInfo()->setFETokenInfo(T); + break; + + case CXXConstructorName: + case CXXDestructorName: + case CXXConversionFunctionName: + getAsCXXSpecialName()->FETokenInfo = T; + break; + + case CXXOperatorName: + getAsCXXOperatorIdName()->FETokenInfo = T; + break; + + case CXXLiteralOperatorName: + getAsCXXLiteralOperatorIdName()->FETokenInfo = T; 
+ break; + + default: + llvm_unreachable("Declaration name has no FETokenInfo"); + } +} + +DeclarationName DeclarationName::getUsingDirectiveName() { + // Single instance of DeclarationNameExtra for using-directive + static const DeclarationNameExtra UDirExtra = + { DeclarationNameExtra::CXXUsingDirective }; + + uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra); + Ptr |= StoredDeclarationNameExtra; + + return DeclarationName(Ptr); +} + +void DeclarationName::dump() const { + llvm::errs() << *this << '\n'; +} + +DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) { + CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>; + CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>; + + // Initialize the overloaded operator names. + CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS]; + for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) { + CXXOperatorNames[Op].ExtraKindOrNumArgs + = Op + DeclarationNameExtra::CXXConversionFunction; + CXXOperatorNames[Op].FETokenInfo = 0; + } +} + +DeclarationNameTable::~DeclarationNameTable() { + llvm::FoldingSet<CXXSpecialName> *SpecialNames = + static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl); + llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames + = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*> + (CXXLiteralOperatorNames); + + delete SpecialNames; + delete LiteralNames; +} + +DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) { + return getCXXSpecialName(DeclarationName::CXXConstructorName, + Ty.getUnqualifiedType()); +} + +DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) { + return getCXXSpecialName(DeclarationName::CXXDestructorName, + Ty.getUnqualifiedType()); +} + +DeclarationName +DeclarationNameTable::getCXXConversionFunctionName(CanQualType Ty) { + return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty); +} + +DeclarationName 
+DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
+                                        CanQualType Ty) {
+  assert(Kind >= DeclarationName::CXXConstructorName &&
+         Kind <= DeclarationName::CXXConversionFunctionName &&
+         "Kind must be a C++ special name kind");
+  llvm::FoldingSet<CXXSpecialName> *SpecialNames
+    = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+
+  // Map the public NameKind onto the internal ExtraKind tag used as part of
+  // the FoldingSet uniquing key. Constructor/destructor types must already
+  // be unqualified (the getCXX*Name entry points strip qualifiers).
+  DeclarationNameExtra::ExtraKind EKind;
+  switch (Kind) {
+  case DeclarationName::CXXConstructorName:
+    EKind = DeclarationNameExtra::CXXConstructor;
+    assert(!Ty.hasQualifiers() &&"Constructor type must be unqualified");
+    break;
+  case DeclarationName::CXXDestructorName:
+    EKind = DeclarationNameExtra::CXXDestructor;
+    assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
+    break;
+  case DeclarationName::CXXConversionFunctionName:
+    EKind = DeclarationNameExtra::CXXConversionFunction;
+    break;
+  default:
+    // Not reachable given the assert above; yields an empty DeclarationName
+    // in NDEBUG builds.
+    return DeclarationName();
+  }
+
+  // Unique selector, to guarantee there is one per name.
+  llvm::FoldingSetNodeID ID;
+  ID.AddInteger(EKind);
+  ID.AddPointer(Ty.getAsOpaquePtr());
+
+  void *InsertPos = 0;
+  if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
+    return DeclarationName(Name);
+
+  // First request for this (kind, type) pair: allocate the node in the
+  // ASTContext and remember it for later lookups.
+  CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
+  SpecialName->ExtraKindOrNumArgs = EKind;
+  SpecialName->Type = Ty;
+  SpecialName->FETokenInfo = 0;
+
+  SpecialNames->InsertNode(SpecialName, InsertPos);
+  return DeclarationName(SpecialName);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
+  // Operator names are preallocated in the table's CXXOperatorNames array
+  // (see the DeclarationNameTable constructor), indexed by operator kind.
+  return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+  // Literal operator names are uniqued by identifier in a FoldingSet.
+  llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+    = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+                                                      (CXXLiteralOperatorNames);
+
+  llvm::FoldingSetNodeID ID;
+  ID.AddPointer(II);
+
+  void *InsertPos = 0;
+  if (CXXLiteralOperatorIdName *Name =
+        
LiteralNames->FindNodeOrInsertPos(ID, InsertPos)) + return DeclarationName (Name); + + CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName; + LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator; + LiteralName->ID = II; + LiteralName->FETokenInfo = 0; + + LiteralNames->InsertNode(LiteralName, InsertPos); + return DeclarationName(LiteralName); +} + +DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) { + switch (Name.getNameKind()) { + case DeclarationName::Identifier: + break; + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + NamedType.TInfo = 0; + break; + case DeclarationName::CXXOperatorName: + CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding(); + CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding(); + break; + case DeclarationName::CXXLiteralOperatorName: + CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding(); + break; + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + // FIXME: ? 
+ break; + case DeclarationName::CXXUsingDirective: + break; + } +} + +bool DeclarationNameInfo::containsUnexpandedParameterPack() const { + switch (Name.getNameKind()) { + case DeclarationName::Identifier: + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::CXXOperatorName: + case DeclarationName::CXXLiteralOperatorName: + case DeclarationName::CXXUsingDirective: + return false; + + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) + return TInfo->getType()->containsUnexpandedParameterPack(); + + return Name.getCXXNameType()->containsUnexpandedParameterPack(); + } + llvm_unreachable("All name kinds handled."); +} + +bool DeclarationNameInfo::isInstantiationDependent() const { + switch (Name.getNameKind()) { + case DeclarationName::Identifier: + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::CXXOperatorName: + case DeclarationName::CXXLiteralOperatorName: + case DeclarationName::CXXUsingDirective: + return false; + + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) + return TInfo->getType()->isInstantiationDependentType(); + + return Name.getCXXNameType()->isInstantiationDependentType(); + } + llvm_unreachable("All name kinds handled."); +} + +std::string DeclarationNameInfo::getAsString() const { + std::string Result; + llvm::raw_string_ostream OS(Result); + printName(OS); + return OS.str(); +} + +void DeclarationNameInfo::printName(raw_ostream &OS) const { + switch (Name.getNameKind()) { + case DeclarationName::Identifier: + case DeclarationName::ObjCZeroArgSelector: + 
case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::CXXOperatorName: + case DeclarationName::CXXLiteralOperatorName: + case DeclarationName::CXXUsingDirective: + OS << Name; + return; + + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) { + if (Name.getNameKind() == DeclarationName::CXXDestructorName) + OS << '~'; + else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) + OS << "operator "; + OS << TInfo->getType().getAsString(); + } else + OS << Name; + return; + } + llvm_unreachable("Unexpected declaration name kind"); +} + +SourceLocation DeclarationNameInfo::getEndLoc() const { + switch (Name.getNameKind()) { + case DeclarationName::Identifier: + return NameLoc; + + case DeclarationName::CXXOperatorName: { + unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc; + return SourceLocation::getFromRawEncoding(raw); + } + + case DeclarationName::CXXLiteralOperatorName: { + unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc; + return SourceLocation::getFromRawEncoding(raw); + } + + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXConversionFunctionName: + if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) + return TInfo->getTypeLoc().getEndLoc(); + else + return NameLoc; + + // DNInfo work in progress: FIXME. 
+ case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::CXXUsingDirective: + return NameLoc; + } + llvm_unreachable("Unexpected declaration name kind"); +} diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp new file mode 100644 index 000000000000..9055ddac35e3 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp @@ -0,0 +1,4181 @@ +//===--- Expr.cpp - Expression AST Node Implementation --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Expr class and subclasses. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/EvaluatedExprVisitor.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/CharInfo.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/Lex/Lexer.h" +#include "clang/Lex/LiteralSupport.h" +#include "clang/Sema/SemaDiagnostic.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +#include <cstring> +using namespace clang; + +const CXXRecordDecl *Expr::getBestDynamicClassType() const { + const Expr *E = ignoreParenBaseCasts(); + + QualType DerivedType = E->getType(); + if (const PointerType *PTy = DerivedType->getAs<PointerType>()) + 
DerivedType = PTy->getPointeeType(); + + if (DerivedType->isDependentType()) + return NULL; + + const RecordType *Ty = DerivedType->castAs<RecordType>(); + Decl *D = Ty->getDecl(); + return cast<CXXRecordDecl>(D); +} + +const Expr *Expr::skipRValueSubobjectAdjustments( + SmallVectorImpl<const Expr *> &CommaLHSs, + SmallVectorImpl<SubobjectAdjustment> &Adjustments) const { + const Expr *E = this; + while (true) { + E = E->IgnoreParens(); + + if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { + if ((CE->getCastKind() == CK_DerivedToBase || + CE->getCastKind() == CK_UncheckedDerivedToBase) && + E->getType()->isRecordType()) { + E = CE->getSubExpr(); + CXXRecordDecl *Derived + = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl()); + Adjustments.push_back(SubobjectAdjustment(CE, Derived)); + continue; + } + + if (CE->getCastKind() == CK_NoOp) { + E = CE->getSubExpr(); + continue; + } + } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { + if (!ME->isArrow()) { + assert(ME->getBase()->getType()->isRecordType()); + if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) { + if (!Field->isBitField() && !Field->getType()->isReferenceType()) { + E = ME->getBase(); + Adjustments.push_back(SubobjectAdjustment(Field)); + continue; + } + } + } + } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { + if (BO->isPtrMemOp()) { + assert(BO->getRHS()->isRValue()); + E = BO->getLHS(); + const MemberPointerType *MPT = + BO->getRHS()->getType()->getAs<MemberPointerType>(); + Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS())); + continue; + } else if (BO->getOpcode() == BO_Comma) { + CommaLHSs.push_back(BO->getLHS()); + E = BO->getRHS(); + continue; + } + } + + // Nothing changed. + break; + } + return E; +} + +const Expr * +Expr::findMaterializedTemporary(const MaterializeTemporaryExpr *&MTE) const { + const Expr *E = this; + + // This might be a default initializer for a reference member. Walk over the + // wrapper node for that. 
+ if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E)) + E = DAE->getExpr(); + + // Look through single-element init lists that claim to be lvalues. They're + // just syntactic wrappers in this case. + if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) { + if (ILE->getNumInits() == 1 && ILE->isGLValue()) { + E = ILE->getInit(0); + if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E)) + E = DAE->getExpr(); + } + } + + // Look through expressions for materialized temporaries (for now). + if (const MaterializeTemporaryExpr *M + = dyn_cast<MaterializeTemporaryExpr>(E)) { + MTE = M; + E = M->GetTemporaryExpr(); + } + + if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E)) + E = DAE->getExpr(); + return E; +} + +/// isKnownToHaveBooleanValue - Return true if this is an integer expression +/// that is known to return 0 or 1. This happens for _Bool/bool expressions +/// but also int expressions which are produced by things like comparisons in +/// C. +bool Expr::isKnownToHaveBooleanValue() const { + const Expr *E = IgnoreParens(); + + // If this value has _Bool type, it is obvious 0/1. + if (E->getType()->isBooleanType()) return true; + // If this is a non-scalar-integer type, we don't care enough to try. + if (!E->getType()->isIntegralOrEnumerationType()) return false; + + if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { + switch (UO->getOpcode()) { + case UO_Plus: + return UO->getSubExpr()->isKnownToHaveBooleanValue(); + default: + return false; + } + } + + // Only look through implicit casts. If the user writes + // '(int) (a && b)' treat it as an arbitrary int. + if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) + return CE->getSubExpr()->isKnownToHaveBooleanValue(); + + if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { + switch (BO->getOpcode()) { + default: return false; + case BO_LT: // Relational operators. 
+ case BO_GT: + case BO_LE: + case BO_GE: + case BO_EQ: // Equality operators. + case BO_NE: + case BO_LAnd: // AND operator. + case BO_LOr: // Logical OR operator. + return true; + + case BO_And: // Bitwise AND operator. + case BO_Xor: // Bitwise XOR operator. + case BO_Or: // Bitwise OR operator. + // Handle things like (x==2)|(y==12). + return BO->getLHS()->isKnownToHaveBooleanValue() && + BO->getRHS()->isKnownToHaveBooleanValue(); + + case BO_Comma: + case BO_Assign: + return BO->getRHS()->isKnownToHaveBooleanValue(); + } + } + + if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) + return CO->getTrueExpr()->isKnownToHaveBooleanValue() && + CO->getFalseExpr()->isKnownToHaveBooleanValue(); + + return false; +} + +// Amusing macro metaprogramming hack: check whether a class provides +// a more specific implementation of getExprLoc(). +// +// See also Stmt.cpp:{getLocStart(),getLocEnd()}. +namespace { + /// This implementation is used when a class provides a custom + /// implementation of getExprLoc. + template <class E, class T> + SourceLocation getExprLocImpl(const Expr *expr, + SourceLocation (T::*v)() const) { + return static_cast<const E*>(expr)->getExprLoc(); + } + + /// This implementation is used when a class doesn't provide + /// a custom implementation of getExprLoc. Overload resolution + /// should pick it over the implementation above because it's + /// more specialized according to function template partial ordering. 
+ template <class E> + SourceLocation getExprLocImpl(const Expr *expr, + SourceLocation (Expr::*v)() const) { + return static_cast<const E*>(expr)->getLocStart(); + } +} + +SourceLocation Expr::getExprLoc() const { + switch (getStmtClass()) { + case Stmt::NoStmtClass: llvm_unreachable("statement without class"); +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + case Stmt::type##Class: llvm_unreachable(#type " is not an Expr"); break; +#define EXPR(type, base) \ + case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc); +#include "clang/AST/StmtNodes.inc" + } + llvm_unreachable("unknown statement kind"); +} + +//===----------------------------------------------------------------------===// +// Primary Expressions. +//===----------------------------------------------------------------------===// + +/// \brief Compute the type-, value-, and instantiation-dependence of a +/// declaration reference +/// based on the declaration being referenced. +static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D, + QualType T, bool &TypeDependent, + bool &ValueDependent, + bool &InstantiationDependent) { + TypeDependent = false; + ValueDependent = false; + InstantiationDependent = false; + + // (TD) C++ [temp.dep.expr]p3: + // An id-expression is type-dependent if it contains: + // + // and + // + // (VD) C++ [temp.dep.constexpr]p2: + // An identifier is value-dependent if it is: + + // (TD) - an identifier that was declared with dependent type + // (VD) - a name declared with a dependent type, + if (T->isDependentType()) { + TypeDependent = true; + ValueDependent = true; + InstantiationDependent = true; + return; + } else if (T->isInstantiationDependentType()) { + InstantiationDependent = true; + } + + // (TD) - a conversion-function-id that specifies a dependent type + if (D->getDeclName().getNameKind() + == DeclarationName::CXXConversionFunctionName) { + QualType T = D->getDeclName().getCXXNameType(); + if (T->isDependentType()) { 
+ TypeDependent = true; + ValueDependent = true; + InstantiationDependent = true; + return; + } + + if (T->isInstantiationDependentType()) + InstantiationDependent = true; + } + + // (VD) - the name of a non-type template parameter, + if (isa<NonTypeTemplateParmDecl>(D)) { + ValueDependent = true; + InstantiationDependent = true; + return; + } + + // (VD) - a constant with integral or enumeration type and is + // initialized with an expression that is value-dependent. + // (VD) - a constant with literal type and is initialized with an + // expression that is value-dependent [C++11]. + // (VD) - FIXME: Missing from the standard: + // - an entity with reference type and is initialized with an + // expression that is value-dependent [C++11] + if (VarDecl *Var = dyn_cast<VarDecl>(D)) { + if ((Ctx.getLangOpts().CPlusPlus11 ? + Var->getType()->isLiteralType(Ctx) : + Var->getType()->isIntegralOrEnumerationType()) && + (Var->getType().isConstQualified() || + Var->getType()->isReferenceType())) { + if (const Expr *Init = Var->getAnyInitializer()) + if (Init->isValueDependent()) { + ValueDependent = true; + InstantiationDependent = true; + } + } + + // (VD) - FIXME: Missing from the standard: + // - a member function or a static data member of the current + // instantiation + if (Var->isStaticDataMember() && + Var->getDeclContext()->isDependentContext()) { + ValueDependent = true; + InstantiationDependent = true; + TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo(); + if (TInfo->getType()->isIncompleteArrayType()) + TypeDependent = true; + } + + return; + } + + // (VD) - FIXME: Missing from the standard: + // - a member function or a static data member of the current + // instantiation + if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) { + ValueDependent = true; + InstantiationDependent = true; + } +} + +void DeclRefExpr::computeDependence(const ASTContext &Ctx) { + bool TypeDependent = false; + bool ValueDependent = false; + bool 
InstantiationDependent = false; + computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent, + ValueDependent, InstantiationDependent); + + // (TD) C++ [temp.dep.expr]p3: + // An id-expression is type-dependent if it contains: + // + // and + // + // (VD) C++ [temp.dep.constexpr]p2: + // An identifier is value-dependent if it is: + if (!TypeDependent && !ValueDependent && + hasExplicitTemplateArgs() && + TemplateSpecializationType::anyDependentTemplateArguments( + getTemplateArgs(), + getNumTemplateArgs(), + InstantiationDependent)) { + TypeDependent = true; + ValueDependent = true; + InstantiationDependent = true; + } + + ExprBits.TypeDependent = TypeDependent; + ExprBits.ValueDependent = ValueDependent; + ExprBits.InstantiationDependent = InstantiationDependent; + + // Is the declaration a parameter pack? + if (getDecl()->isParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; +} + +DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + ValueDecl *D, bool RefersToEnclosingLocal, + const DeclarationNameInfo &NameInfo, + NamedDecl *FoundD, + const TemplateArgumentListInfo *TemplateArgs, + QualType T, ExprValueKind VK) + : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false), + D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) { + DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0; + if (QualifierLoc) + getInternalQualifierLoc() = QualifierLoc; + DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0; + if (FoundD) + getInternalFoundDecl() = FoundD; + DeclRefExprBits.HasTemplateKWAndArgsInfo + = (TemplateArgs || TemplateKWLoc.isValid()) ? 
1 : 0; + DeclRefExprBits.RefersToEnclosingLocal = RefersToEnclosingLocal; + if (TemplateArgs) { + bool Dependent = false; + bool InstantiationDependent = false; + bool ContainsUnexpandedParameterPack = false; + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs, + Dependent, + InstantiationDependent, + ContainsUnexpandedParameterPack); + if (InstantiationDependent) + setInstantiationDependent(true); + } else if (TemplateKWLoc.isValid()) { + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc); + } + DeclRefExprBits.HadMultipleCandidates = 0; + + computeDependence(Ctx); +} + +DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + ValueDecl *D, + bool RefersToEnclosingLocal, + SourceLocation NameLoc, + QualType T, + ExprValueKind VK, + NamedDecl *FoundD, + const TemplateArgumentListInfo *TemplateArgs) { + return Create(Context, QualifierLoc, TemplateKWLoc, D, + RefersToEnclosingLocal, + DeclarationNameInfo(D->getDeclName(), NameLoc), + T, VK, FoundD, TemplateArgs); +} + +DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + ValueDecl *D, + bool RefersToEnclosingLocal, + const DeclarationNameInfo &NameInfo, + QualType T, + ExprValueKind VK, + NamedDecl *FoundD, + const TemplateArgumentListInfo *TemplateArgs) { + // Filter out cases where the found Decl is the same as the value refenenced. 
+ if (D == FoundD) + FoundD = 0; + + std::size_t Size = sizeof(DeclRefExpr); + if (QualifierLoc) + Size += sizeof(NestedNameSpecifierLoc); + if (FoundD) + Size += sizeof(NamedDecl *); + if (TemplateArgs) + Size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size()); + else if (TemplateKWLoc.isValid()) + Size += ASTTemplateKWAndArgsInfo::sizeFor(0); + + void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>()); + return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D, + RefersToEnclosingLocal, + NameInfo, FoundD, TemplateArgs, T, VK); +} + +DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context, + bool HasQualifier, + bool HasFoundDecl, + bool HasTemplateKWAndArgsInfo, + unsigned NumTemplateArgs) { + std::size_t Size = sizeof(DeclRefExpr); + if (HasQualifier) + Size += sizeof(NestedNameSpecifierLoc); + if (HasFoundDecl) + Size += sizeof(NamedDecl *); + if (HasTemplateKWAndArgsInfo) + Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + + void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>()); + return new (Mem) DeclRefExpr(EmptyShell()); +} + +SourceLocation DeclRefExpr::getLocStart() const { + if (hasQualifier()) + return getQualifierLoc().getBeginLoc(); + return getNameInfo().getLocStart(); +} +SourceLocation DeclRefExpr::getLocEnd() const { + if (hasExplicitTemplateArgs()) + return getRAngleLoc(); + return getNameInfo().getLocEnd(); +} + +// FIXME: Maybe this should use DeclPrinter with a special "print predefined +// expr" policy instead. 
+std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) { + ASTContext &Context = CurrentDecl->getASTContext(); + + if (IT == PredefinedExpr::FuncDName) { + if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) { + OwningPtr<MangleContext> MC; + MC.reset(Context.createMangleContext()); + + if (MC->shouldMangleDeclName(ND)) { + SmallString<256> Buffer; + llvm::raw_svector_ostream Out(Buffer); + if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(ND)) + MC->mangleCXXCtor(CD, Ctor_Base, Out); + else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(ND)) + MC->mangleCXXDtor(DD, Dtor_Base, Out); + else + MC->mangleName(ND, Out); + + Out.flush(); + if (!Buffer.empty() && Buffer.front() == '\01') + return Buffer.substr(1); + return Buffer.str(); + } else + return ND->getIdentifier()->getName(); + } + return ""; + } + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) { + if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual) + return FD->getNameAsString(); + + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { + if (MD->isVirtual() && IT != PrettyFunctionNoVirtual) + Out << "virtual "; + if (MD->isStatic()) + Out << "static "; + } + + PrintingPolicy Policy(Context.getLangOpts()); + std::string Proto; + llvm::raw_string_ostream POut(Proto); + FD->printQualifiedName(POut, Policy); + + const FunctionDecl *Decl = FD; + if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern()) + Decl = Pattern; + const FunctionType *AFT = Decl->getType()->getAs<FunctionType>(); + const FunctionProtoType *FT = 0; + if (FD->hasWrittenPrototype()) + FT = dyn_cast<FunctionProtoType>(AFT); + + POut << "("; + if (FT) { + for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) { + if (i) POut << ", "; + POut << Decl->getParamDecl(i)->getType().stream(Policy); + } + + if (FT->isVariadic()) { + if (FD->getNumParams()) POut << ", "; + 
POut << "..."; + } + } + POut << ")"; + + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { + const FunctionType *FT = MD->getType()->castAs<FunctionType>(); + if (FT->isConst()) + POut << " const"; + if (FT->isVolatile()) + POut << " volatile"; + RefQualifierKind Ref = MD->getRefQualifier(); + if (Ref == RQ_LValue) + POut << " &"; + else if (Ref == RQ_RValue) + POut << " &&"; + } + + typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy; + SpecsTy Specs; + const DeclContext *Ctx = FD->getDeclContext(); + while (Ctx && isa<NamedDecl>(Ctx)) { + const ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(Ctx); + if (Spec && !Spec->isExplicitSpecialization()) + Specs.push_back(Spec); + Ctx = Ctx->getParent(); + } + + std::string TemplateParams; + llvm::raw_string_ostream TOut(TemplateParams); + for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend(); + I != E; ++I) { + const TemplateParameterList *Params + = (*I)->getSpecializedTemplate()->getTemplateParameters(); + const TemplateArgumentList &Args = (*I)->getTemplateArgs(); + assert(Params->size() == Args.size()); + for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) { + StringRef Param = Params->getParam(i)->getName(); + if (Param.empty()) continue; + TOut << Param << " = "; + Args.get(i).print(Policy, TOut); + TOut << ", "; + } + } + + FunctionTemplateSpecializationInfo *FSI + = FD->getTemplateSpecializationInfo(); + if (FSI && !FSI->isExplicitSpecialization()) { + const TemplateParameterList* Params + = FSI->getTemplate()->getTemplateParameters(); + const TemplateArgumentList* Args = FSI->TemplateArguments; + assert(Params->size() == Args->size()); + for (unsigned i = 0, e = Params->size(); i != e; ++i) { + StringRef Param = Params->getParam(i)->getName(); + if (Param.empty()) continue; + TOut << Param << " = "; + Args->get(i).print(Policy, TOut); + TOut << ", "; + } + } + + TOut.flush(); + if (!TemplateParams.empty()) { + // 
remove the trailing comma and space + TemplateParams.resize(TemplateParams.size() - 2); + POut << " [" << TemplateParams << "]"; + } + + POut.flush(); + + // Print "auto" for all deduced return types. This includes C++1y return + // type deduction and lambdas. For trailing return types resolve the + // decltype expression. Otherwise print the real type when this is + // not a constructor or destructor. + if ((isa<CXXMethodDecl>(FD) && + cast<CXXMethodDecl>(FD)->getParent()->isLambda()) || + (FT && FT->getResultType()->getAs<AutoType>())) + Proto = "auto " + Proto; + else if (FT && FT->getResultType()->getAs<DecltypeType>()) + FT->getResultType()->getAs<DecltypeType>()->getUnderlyingType() + .getAsStringInternal(Proto, Policy); + else if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD)) + AFT->getResultType().getAsStringInternal(Proto, Policy); + + Out << Proto; + + Out.flush(); + return Name.str().str(); + } + if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) { + for (const DeclContext *DC = CD->getParent(); DC; DC = DC->getParent()) + // Skip to its enclosing function or method, but not its enclosing + // CapturedDecl. + if (DC->isFunctionOrMethod() && (DC->getDeclKind() != Decl::Captured)) { + const Decl *D = Decl::castFromDeclContext(DC); + return ComputeName(IT, D); + } + llvm_unreachable("CapturedDecl not inside a function or method"); + } + if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) { + SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + Out << (MD->isInstanceMethod() ? '-' : '+'); + Out << '['; + + // For incorrect code, there might not be an ObjCInterfaceDecl. Do + // a null check to avoid a crash. 
    // Print the class (or category) the method belongs to.
    if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
      Out << *ID;

    if (const ObjCCategoryImplDecl *CID =
        dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
      Out << '(' << *CID << ')';

    Out << ' ';
    Out << MD->getSelector().getAsString();
    Out << ']';

    Out.flush();
    return Name.str().str();
  }
  if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
    // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
    return "top level";
  }
  return "";
}

/// Store an APInt's value in this storage, allocating out-of-line words from
/// the ASTContext's arena when the value does not fit in the inline word.
void APNumericStorage::setIntValue(const ASTContext &C,
                                   const llvm::APInt &Val) {
  // Release any previous multi-word allocation before overwriting.
  if (hasAllocation())
    C.Deallocate(pVal);

  BitWidth = Val.getBitWidth();
  unsigned NumWords = Val.getNumWords();
  const uint64_t* Words = Val.getRawData();
  if (NumWords > 1) {
    // Wide value: copy all words into ASTContext-owned storage.
    pVal = new (C) uint64_t[NumWords];
    std::copy(Words, Words + NumWords, pVal);
  } else if (NumWords == 1)
    // Single word fits inline.
    VAL = Words[0];
  else
    VAL = 0;
}

/// Construct an integer literal; the APInt's bit width must match the width
/// of the literal's type for the given ASTContext.
IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
                               QualType type, SourceLocation l)
  : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
         false, false),
    Loc(l) {
  assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
  assert(V.getBitWidth() == C.getIntWidth(type) &&
         "Integer type is not the correct size for constant.");
  setValue(C, V);
}

/// Allocate an IntegerLiteral in the ASTContext's arena.
IntegerLiteral *
IntegerLiteral::Create(const ASTContext &C, const llvm::APInt &V,
                       QualType type, SourceLocation l) {
  return new (C) IntegerLiteral(C, V, type, l);
}

/// Allocate an uninitialized IntegerLiteral for deserialization.
IntegerLiteral *
IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
  return new (C) IntegerLiteral(Empty);
}

/// Construct a floating-point literal; the semantics are recorded from the
/// APFloat so getValue() can reconstruct it later.
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
                                 bool isexact, QualType Type, SourceLocation L)
  : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
         false, false), Loc(L) {
  setSemantics(V.getSemantics());
  FloatingLiteralBits.IsExact = isexact;
  setValue(C, V);
}
+FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty) + : Expr(FloatingLiteralClass, Empty) { + setRawSemantics(IEEEhalf); + FloatingLiteralBits.IsExact = false; +} + +FloatingLiteral * +FloatingLiteral::Create(const ASTContext &C, const llvm::APFloat &V, + bool isexact, QualType Type, SourceLocation L) { + return new (C) FloatingLiteral(C, V, isexact, Type, L); +} + +FloatingLiteral * +FloatingLiteral::Create(const ASTContext &C, EmptyShell Empty) { + return new (C) FloatingLiteral(C, Empty); +} + +const llvm::fltSemantics &FloatingLiteral::getSemantics() const { + switch(FloatingLiteralBits.Semantics) { + case IEEEhalf: + return llvm::APFloat::IEEEhalf; + case IEEEsingle: + return llvm::APFloat::IEEEsingle; + case IEEEdouble: + return llvm::APFloat::IEEEdouble; + case x87DoubleExtended: + return llvm::APFloat::x87DoubleExtended; + case IEEEquad: + return llvm::APFloat::IEEEquad; + case PPCDoubleDouble: + return llvm::APFloat::PPCDoubleDouble; + } + llvm_unreachable("Unrecognised floating semantics"); +} + +void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) { + if (&Sem == &llvm::APFloat::IEEEhalf) + FloatingLiteralBits.Semantics = IEEEhalf; + else if (&Sem == &llvm::APFloat::IEEEsingle) + FloatingLiteralBits.Semantics = IEEEsingle; + else if (&Sem == &llvm::APFloat::IEEEdouble) + FloatingLiteralBits.Semantics = IEEEdouble; + else if (&Sem == &llvm::APFloat::x87DoubleExtended) + FloatingLiteralBits.Semantics = x87DoubleExtended; + else if (&Sem == &llvm::APFloat::IEEEquad) + FloatingLiteralBits.Semantics = IEEEquad; + else if (&Sem == &llvm::APFloat::PPCDoubleDouble) + FloatingLiteralBits.Semantics = PPCDoubleDouble; + else + llvm_unreachable("Unknown floating semantics"); +} + +/// getValueAsApproximateDouble - This returns the value as an inaccurate +/// double. Note that this may cause loss of precision, but is useful for +/// debugging dumps, etc. 
double FloatingLiteral::getValueAsApproximateDouble() const {
  llvm::APFloat V = getValue();
  bool ignored;
  // Convert to IEEE double with round-to-nearest-even; precision loss is
  // acceptable here (debugging/diagnostic use only).
  V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
            &ignored);
  return V.convertToDouble();
}

/// Map a string literal kind to the width in bytes of one of its code units
/// for the given target.
int StringLiteral::mapCharByteWidth(TargetInfo const &target,StringKind k) {
  int CharByteWidth = 0;
  switch(k) {
  case Ascii:
  case UTF8:
    CharByteWidth = target.getCharWidth();
    break;
  case Wide:
    CharByteWidth = target.getWCharWidth();
    break;
  case UTF16:
    CharByteWidth = target.getChar16Width();
    break;
  case UTF32:
    CharByteWidth = target.getChar32Width();
    break;
  }
  // Widths above are in bits; convert to bytes after sanity-checking.
  assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
  CharByteWidth /= 8;
  assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
         && "character byte widths supported are 1, 2, and 4 only");
  return CharByteWidth;
}

/// Create a StringLiteral from (possibly concatenated) token data; one source
/// location per original token is stored in a trailing array.
StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
                                     StringKind Kind, bool Pascal, QualType Ty,
                                     const SourceLocation *Loc,
                                     unsigned NumStrs) {
  // Allocate enough space for the StringLiteral plus an array of locations for
  // any concatenated string tokens.
  void *Mem = C.Allocate(sizeof(StringLiteral)+
                         sizeof(SourceLocation)*(NumStrs-1),
                         llvm::alignOf<StringLiteral>());
  StringLiteral *SL = new (Mem) StringLiteral(Ty);

  // OPTIMIZE: could allocate this appended to the StringLiteral.
  SL->setString(C,Str,Kind,Pascal);

  SL->TokLocs[0] = Loc[0];
  SL->NumConcatenated = NumStrs;

  if (NumStrs != 1)
    memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
  return SL;
}

/// Create an uninitialized StringLiteral (for deserialization) with room for
/// NumStrs token locations.
StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
                                          unsigned NumStrs) {
  void *Mem = C.Allocate(sizeof(StringLiteral)+
                         sizeof(SourceLocation)*(NumStrs-1),
                         llvm::alignOf<StringLiteral>());
  StringLiteral *SL = new (Mem) StringLiteral(QualType());
  SL->CharByteWidth = 0;
  SL->Length = 0;
  SL->NumConcatenated = NumStrs;
  return SL;
}

/// Print the literal as source-like text, escaping non-printable characters
/// and reconstructing \u/\U/\x escapes for wide/UTF strings.
void StringLiteral::outputString(raw_ostream &OS) const {
  switch (getKind()) {
  case Ascii: break; // no prefix.
  case Wide:  OS << 'L'; break;
  case UTF8:  OS << "u8"; break;
  case UTF16: OS << 'u'; break;
  case UTF32: OS << 'U'; break;
  }
  OS << '"';
  static const char Hex[] = "0123456789ABCDEF";

  // Index of the last code unit emitted as a \x escape; used to insert "" so
  // a following hex digit is not slurped into the escape.
  unsigned LastSlashX = getLength();
  for (unsigned I = 0, N = getLength(); I != N; ++I) {
    switch (uint32_t Char = getCodeUnit(I)) {
    default:
      // FIXME: Convert UTF-8 back to codepoints before rendering.

      // Convert UTF-16 surrogate pairs back to codepoints before rendering.
      // Leave invalid surrogates alone; we'll use \x for those.
      if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 &&
          Char <= 0xdbff) {
        uint32_t Trail = getCodeUnit(I + 1);
        if (Trail >= 0xdc00 && Trail <= 0xdfff) {
          Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
          ++I;
        }
      }

      if (Char > 0xff) {
        // If this is a wide string, output characters over 0xff using \x
        // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
        // codepoint: use \x escapes for invalid codepoints.
        if (getKind() == Wide ||
            (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
          // FIXME: Is this the best way to print wchar_t?
          OS << "\\x";
          int Shift = 28;
          while ((Char >> Shift) == 0)
            Shift -= 4;
          for (/**/; Shift >= 0; Shift -= 4)
            OS << Hex[(Char >> Shift) & 15];
          LastSlashX = I;
          break;
        }

        // Valid codepoint above 0xff: emit \uXXXX or \UXXXXXXXX.
        if (Char > 0xffff)
          OS << "\\U00"
             << Hex[(Char >> 20) & 15]
             << Hex[(Char >> 16) & 15];
        else
          OS << "\\u";
        OS << Hex[(Char >> 12) & 15]
           << Hex[(Char >>  8) & 15]
           << Hex[(Char >>  4) & 15]
           << Hex[(Char >>  0) & 15];
        break;
      }

      // If we used \x... for the previous character, and this character is a
      // hexadecimal digit, prevent it being slurped as part of the \x.
      if (LastSlashX + 1 == I) {
        switch (Char) {
          case '0': case '1': case '2': case '3': case '4':
          case '5': case '6': case '7': case '8': case '9':
          case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
          case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
            OS << "\"\"";
        }
      }

      assert(Char <= 0xff &&
             "Characters above 0xff should already have been handled.");

      if (isPrintable(Char))
        OS << (char)Char;
      else  // Output anything hard as an octal escape.
        OS << '\\'
           << (char)('0' + ((Char >> 6) & 7))
           << (char)('0' + ((Char >> 3) & 7))
           << (char)('0' + ((Char >> 0) & 7));
      break;
    // Handle some common non-printable cases to make dumps prettier.
    case '\\': OS << "\\\\"; break;
    case '"': OS << "\\\""; break;
    case '\n': OS << "\\n"; break;
    case '\t': OS << "\\t"; break;
    case '\a': OS << "\\a"; break;
    case '\b': OS << "\\b"; break;
    }
  }
  OS << '"';
}

/// Copy Str into ASTContext-owned storage, recording kind, pascal-ness, code
/// unit width and length (in code units, not bytes).
void StringLiteral::setString(const ASTContext &C, StringRef Str,
                              StringKind Kind, bool IsPascal) {
  //FIXME: we assume that the string data comes from a target that uses the same
  // code unit size and endianess for the type of string.
  this->Kind = Kind;
  this->IsPascal = IsPascal;

  CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
  assert((Str.size()%CharByteWidth == 0)
         && "size of data must be multiple of CharByteWidth");
  Length = Str.size()/CharByteWidth;

  // Store the data with the element type matching the code unit width.
  switch(CharByteWidth) {
  case 1: {
    char *AStrData = new (C) char[Length];
    std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
    StrData.asChar = AStrData;
    break;
  }
  case 2: {
    uint16_t *AStrData = new (C) uint16_t[Length];
    std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
    StrData.asUInt16 = AStrData;
    break;
  }
  case 4: {
    uint32_t *AStrData = new (C) uint32_t[Length];
    std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
    StrData.asUInt32 = AStrData;
    break;
  }
  default:
    assert(false && "unsupported CharByteWidth");
  }
}

/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
/// Strings are amazingly complex.  They can be formed from multiple tokens and
/// can have escape sequences in them in addition to the usual trigraph and
/// escaped newline business.  This routine handles this complexity.
///
SourceLocation StringLiteral::
getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
                  const LangOptions &Features, const TargetInfo &Target) const {
  assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
         "Only narrow string literals are currently supported");

  // Loop over all of the tokens in this string until we find the one that
  // contains the byte we're looking for.
  unsigned TokNo = 0;
  while (1) {
    assert(TokNo < getNumConcatenated() && "Invalid byte number!");
    SourceLocation StrTokLoc = getStrTokenLoc(TokNo);

    // Get the spelling of the string so that we can get the data that makes up
    // the string literal, not the identifier for the macro it is potentially
    // expanded through.
    SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);

    // Re-lex the token to get its length and original spelling.
    std::pair<FileID, unsigned> LocInfo =SM.getDecomposedLoc(StrTokSpellingLoc);
    bool Invalid = false;
    StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
    if (Invalid)
      return StrTokSpellingLoc;

    const char *StrData = Buffer.data()+LocInfo.second;

    // Create a lexer starting at the beginning of this token.
    Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), Features,
                   Buffer.begin(), StrData, Buffer.end());
    Token TheTok;
    TheLexer.LexFromRawLexer(TheTok);

    // Use the StringLiteralParser to compute the length of the string in bytes.
    StringLiteralParser SLP(&TheTok, 1, SM, Features, Target);
    unsigned TokNumBytes = SLP.GetStringLength();

    // If the byte is in this token, return the location of the byte.
    if (ByteNo < TokNumBytes ||
        (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
      unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);

      // Now that we know the offset of the token in the spelling, use the
      // preprocessor to get the offset in the original source.
      return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
    }

    // Move to the next string token.
    ++TokNo;
    ByteNo -= TokNumBytes;
  }
}



/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
+StringRef UnaryOperator::getOpcodeStr(Opcode Op) { + switch (Op) { + case UO_PostInc: return "++"; + case UO_PostDec: return "--"; + case UO_PreInc: return "++"; + case UO_PreDec: return "--"; + case UO_AddrOf: return "&"; + case UO_Deref: return "*"; + case UO_Plus: return "+"; + case UO_Minus: return "-"; + case UO_Not: return "~"; + case UO_LNot: return "!"; + case UO_Real: return "__real"; + case UO_Imag: return "__imag"; + case UO_Extension: return "__extension__"; + } + llvm_unreachable("Unknown unary operator"); +} + +UnaryOperatorKind +UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) { + switch (OO) { + default: llvm_unreachable("No unary operator for overloaded function"); + case OO_PlusPlus: return Postfix ? UO_PostInc : UO_PreInc; + case OO_MinusMinus: return Postfix ? UO_PostDec : UO_PreDec; + case OO_Amp: return UO_AddrOf; + case OO_Star: return UO_Deref; + case OO_Plus: return UO_Plus; + case OO_Minus: return UO_Minus; + case OO_Tilde: return UO_Not; + case OO_Exclaim: return UO_LNot; + } +} + +OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) { + switch (Opc) { + case UO_PostInc: case UO_PreInc: return OO_PlusPlus; + case UO_PostDec: case UO_PreDec: return OO_MinusMinus; + case UO_AddrOf: return OO_Amp; + case UO_Deref: return OO_Star; + case UO_Plus: return OO_Plus; + case UO_Minus: return OO_Minus; + case UO_Not: return OO_Tilde; + case UO_LNot: return OO_Exclaim; + default: return OO_None; + } +} + + +//===----------------------------------------------------------------------===// +// Postfix Operators. 
+//===----------------------------------------------------------------------===// + +CallExpr::CallExpr(const ASTContext& C, StmtClass SC, Expr *fn, + unsigned NumPreArgs, ArrayRef<Expr*> args, QualType t, + ExprValueKind VK, SourceLocation rparenloc) + : Expr(SC, t, VK, OK_Ordinary, + fn->isTypeDependent(), + fn->isValueDependent(), + fn->isInstantiationDependent(), + fn->containsUnexpandedParameterPack()), + NumArgs(args.size()) { + + SubExprs = new (C) Stmt*[args.size()+PREARGS_START+NumPreArgs]; + SubExprs[FN] = fn; + for (unsigned i = 0; i != args.size(); ++i) { + if (args[i]->isTypeDependent()) + ExprBits.TypeDependent = true; + if (args[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (args[i]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (args[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i+PREARGS_START+NumPreArgs] = args[i]; + } + + CallExprBits.NumPreArgs = NumPreArgs; + RParenLoc = rparenloc; +} + +CallExpr::CallExpr(const ASTContext& C, Expr *fn, ArrayRef<Expr*> args, + QualType t, ExprValueKind VK, SourceLocation rparenloc) + : Expr(CallExprClass, t, VK, OK_Ordinary, + fn->isTypeDependent(), + fn->isValueDependent(), + fn->isInstantiationDependent(), + fn->containsUnexpandedParameterPack()), + NumArgs(args.size()) { + + SubExprs = new (C) Stmt*[args.size()+PREARGS_START]; + SubExprs[FN] = fn; + for (unsigned i = 0; i != args.size(); ++i) { + if (args[i]->isTypeDependent()) + ExprBits.TypeDependent = true; + if (args[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (args[i]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (args[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i+PREARGS_START] = args[i]; + } + + CallExprBits.NumPreArgs = 0; + RParenLoc = rparenloc; +} + +CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty) + : Expr(SC, 
Empty), SubExprs(0), NumArgs(0) { + // FIXME: Why do we allocate this? + SubExprs = new (C) Stmt*[PREARGS_START]; + CallExprBits.NumPreArgs = 0; +} + +CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs, + EmptyShell Empty) + : Expr(SC, Empty), SubExprs(0), NumArgs(0) { + // FIXME: Why do we allocate this? + SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs]; + CallExprBits.NumPreArgs = NumPreArgs; +} + +Decl *CallExpr::getCalleeDecl() { + Expr *CEE = getCallee()->IgnoreParenImpCasts(); + + while (SubstNonTypeTemplateParmExpr *NTTP + = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) { + CEE = NTTP->getReplacement()->IgnoreParenCasts(); + } + + // If we're calling a dereference, look at the pointer instead. + if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) { + if (BO->isPtrMemOp()) + CEE = BO->getRHS()->IgnoreParenCasts(); + } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) { + if (UO->getOpcode() == UO_Deref) + CEE = UO->getSubExpr()->IgnoreParenCasts(); + } + if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) + return DRE->getDecl(); + if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE)) + return ME->getMemberDecl(); + + return 0; +} + +FunctionDecl *CallExpr::getDirectCallee() { + return dyn_cast_or_null<FunctionDecl>(getCalleeDecl()); +} + +/// setNumArgs - This changes the number of arguments present in this call. +/// Any orphaned expressions are deleted by this, and any new operands are set +/// to null. +void CallExpr::setNumArgs(const ASTContext& C, unsigned NumArgs) { + // No change, just return. + if (NumArgs == getNumArgs()) return; + + // If shrinking # arguments, just delete the extras and forgot them. + if (NumArgs < getNumArgs()) { + this->NumArgs = NumArgs; + return; + } + + // Otherwise, we are growing the # arguments. New an bigger argument array. + unsigned NumPreArgs = getNumPreArgs(); + Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs]; + // Copy over args. 
+ for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i) + NewSubExprs[i] = SubExprs[i]; + // Null out new args. + for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs; + i != NumArgs+PREARGS_START+NumPreArgs; ++i) + NewSubExprs[i] = 0; + + if (SubExprs) C.Deallocate(SubExprs); + SubExprs = NewSubExprs; + this->NumArgs = NumArgs; +} + +/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If +/// not, return 0. +unsigned CallExpr::isBuiltinCall() const { + // All simple function calls (e.g. func()) are implicitly cast to pointer to + // function. As a result, we try and obtain the DeclRefExpr from the + // ImplicitCastExpr. + const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee()); + if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()). + return 0; + + const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr()); + if (!DRE) + return 0; + + const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl()); + if (!FDecl) + return 0; + + if (!FDecl->getIdentifier()) + return 0; + + return FDecl->getBuiltinID(); +} + +bool CallExpr::isUnevaluatedBuiltinCall(ASTContext &Ctx) const { + if (unsigned BI = isBuiltinCall()) + return Ctx.BuiltinInfo.isUnevaluated(BI); + return false; +} + +QualType CallExpr::getCallReturnType() const { + QualType CalleeType = getCallee()->getType(); + if (const PointerType *FnTypePtr = CalleeType->getAs<PointerType>()) + CalleeType = FnTypePtr->getPointeeType(); + else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>()) + CalleeType = BPT->getPointeeType(); + else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember)) + // This should never be overloaded and so should never return null. 
+ CalleeType = Expr::findBoundMemberType(getCallee()); + + const FunctionType *FnType = CalleeType->castAs<FunctionType>(); + return FnType->getResultType(); +} + +SourceLocation CallExpr::getLocStart() const { + if (isa<CXXOperatorCallExpr>(this)) + return cast<CXXOperatorCallExpr>(this)->getLocStart(); + + SourceLocation begin = getCallee()->getLocStart(); + if (begin.isInvalid() && getNumArgs() > 0) + begin = getArg(0)->getLocStart(); + return begin; +} +SourceLocation CallExpr::getLocEnd() const { + if (isa<CXXOperatorCallExpr>(this)) + return cast<CXXOperatorCallExpr>(this)->getLocEnd(); + + SourceLocation end = getRParenLoc(); + if (end.isInvalid() && getNumArgs() > 0) + end = getArg(getNumArgs() - 1)->getLocEnd(); + return end; +} + +OffsetOfExpr *OffsetOfExpr::Create(const ASTContext &C, QualType type, + SourceLocation OperatorLoc, + TypeSourceInfo *tsi, + ArrayRef<OffsetOfNode> comps, + ArrayRef<Expr*> exprs, + SourceLocation RParenLoc) { + void *Mem = C.Allocate(sizeof(OffsetOfExpr) + + sizeof(OffsetOfNode) * comps.size() + + sizeof(Expr*) * exprs.size()); + + return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, comps, exprs, + RParenLoc); +} + +OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C, + unsigned numComps, unsigned numExprs) { + void *Mem = C.Allocate(sizeof(OffsetOfExpr) + + sizeof(OffsetOfNode) * numComps + + sizeof(Expr*) * numExprs); + return new (Mem) OffsetOfExpr(numComps, numExprs); +} + +OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type, + SourceLocation OperatorLoc, TypeSourceInfo *tsi, + ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs, + SourceLocation RParenLoc) + : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary, + /*TypeDependent=*/false, + /*ValueDependent=*/tsi->getType()->isDependentType(), + tsi->getType()->isInstantiationDependentType(), + tsi->getType()->containsUnexpandedParameterPack()), + OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi), + NumComps(comps.size()), 
NumExprs(exprs.size()) +{ + for (unsigned i = 0; i != comps.size(); ++i) { + setComponent(i, comps[i]); + } + + for (unsigned i = 0; i != exprs.size(); ++i) { + if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (exprs[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + setIndexExpr(i, exprs[i]); + } +} + +IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const { + assert(getKind() == Field || getKind() == Identifier); + if (getKind() == Field) + return getField()->getIdentifier(); + + return reinterpret_cast<IdentifierInfo *> (Data & ~(uintptr_t)Mask); +} + +MemberExpr *MemberExpr::Create(const ASTContext &C, Expr *base, bool isarrow, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + ValueDecl *memberdecl, + DeclAccessPair founddecl, + DeclarationNameInfo nameinfo, + const TemplateArgumentListInfo *targs, + QualType ty, + ExprValueKind vk, + ExprObjectKind ok) { + std::size_t Size = sizeof(MemberExpr); + + bool hasQualOrFound = (QualifierLoc || + founddecl.getDecl() != memberdecl || + founddecl.getAccess() != memberdecl->getAccess()); + if (hasQualOrFound) + Size += sizeof(MemberNameQualifier); + + if (targs) + Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size()); + else if (TemplateKWLoc.isValid()) + Size += ASTTemplateKWAndArgsInfo::sizeFor(0); + + void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>()); + MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo, + ty, vk, ok); + + if (hasQualOrFound) { + // FIXME: Wrong. We should be looking at the member declaration we found. 
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) { + E->setValueDependent(true); + E->setTypeDependent(true); + E->setInstantiationDependent(true); + } + else if (QualifierLoc && + QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) + E->setInstantiationDependent(true); + + E->HasQualifierOrFoundDecl = true; + + MemberNameQualifier *NQ = E->getMemberQualifier(); + NQ->QualifierLoc = QualifierLoc; + NQ->FoundDecl = founddecl; + } + + E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid()); + + if (targs) { + bool Dependent = false; + bool InstantiationDependent = false; + bool ContainsUnexpandedParameterPack = false; + E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs, + Dependent, + InstantiationDependent, + ContainsUnexpandedParameterPack); + if (InstantiationDependent) + E->setInstantiationDependent(true); + } else if (TemplateKWLoc.isValid()) { + E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc); + } + + return E; +} + +SourceLocation MemberExpr::getLocStart() const { + if (isImplicitAccess()) { + if (hasQualifier()) + return getQualifierLoc().getBeginLoc(); + return MemberLoc; + } + + // FIXME: We don't want this to happen. Rather, we should be able to + // detect all kinds of implicit accesses more cleanly. 
+ SourceLocation BaseStartLoc = getBase()->getLocStart(); + if (BaseStartLoc.isValid()) + return BaseStartLoc; + return MemberLoc; +} +SourceLocation MemberExpr::getLocEnd() const { + SourceLocation EndLoc = getMemberNameInfo().getEndLoc(); + if (hasExplicitTemplateArgs()) + EndLoc = getRAngleLoc(); + else if (EndLoc.isInvalid()) + EndLoc = getBase()->getLocEnd(); + return EndLoc; +} + +void CastExpr::CheckCastConsistency() const { + switch (getCastKind()) { + case CK_DerivedToBase: + case CK_UncheckedDerivedToBase: + case CK_DerivedToBaseMemberPointer: + case CK_BaseToDerived: + case CK_BaseToDerivedMemberPointer: + assert(!path_empty() && "Cast kind should have a base path!"); + break; + + case CK_CPointerToObjCPointerCast: + assert(getType()->isObjCObjectPointerType()); + assert(getSubExpr()->getType()->isPointerType()); + goto CheckNoBasePath; + + case CK_BlockPointerToObjCPointerCast: + assert(getType()->isObjCObjectPointerType()); + assert(getSubExpr()->getType()->isBlockPointerType()); + goto CheckNoBasePath; + + case CK_ReinterpretMemberPointer: + assert(getType()->isMemberPointerType()); + assert(getSubExpr()->getType()->isMemberPointerType()); + goto CheckNoBasePath; + + case CK_BitCast: + // Arbitrary casts to C pointer types count as bitcasts. + // Otherwise, we should only have block and ObjC pointer casts + // here if they stay within the type kind. 
+ if (!getType()->isPointerType()) { + assert(getType()->isObjCObjectPointerType() == + getSubExpr()->getType()->isObjCObjectPointerType()); + assert(getType()->isBlockPointerType() == + getSubExpr()->getType()->isBlockPointerType()); + } + goto CheckNoBasePath; + + case CK_AnyPointerToBlockPointerCast: + assert(getType()->isBlockPointerType()); + assert(getSubExpr()->getType()->isAnyPointerType() && + !getSubExpr()->getType()->isBlockPointerType()); + goto CheckNoBasePath; + + case CK_CopyAndAutoreleaseBlockObject: + assert(getType()->isBlockPointerType()); + assert(getSubExpr()->getType()->isBlockPointerType()); + goto CheckNoBasePath; + + case CK_FunctionToPointerDecay: + assert(getType()->isPointerType()); + assert(getSubExpr()->getType()->isFunctionType()); + goto CheckNoBasePath; + + // These should not have an inheritance path. + case CK_Dynamic: + case CK_ToUnion: + case CK_ArrayToPointerDecay: + case CK_NullToMemberPointer: + case CK_NullToPointer: + case CK_ConstructorConversion: + case CK_IntegralToPointer: + case CK_PointerToIntegral: + case CK_ToVoid: + case CK_VectorSplat: + case CK_IntegralCast: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingCast: + case CK_ObjCObjectLValueCast: + case CK_FloatingRealToComplex: + case CK_FloatingComplexToReal: + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralRealToComplex: + case CK_IntegralComplexToReal: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_ZeroToOCLEvent: + assert(!getType()->isBooleanType() && "unheralded conversion to bool"); + goto CheckNoBasePath; + + case CK_Dependent: + case CK_LValueToRValue: + case CK_NoOp: + case CK_AtomicToNonAtomic: + case CK_NonAtomicToAtomic: + case CK_PointerToBoolean: + case CK_IntegralToBoolean: + case CK_FloatingToBoolean: + case 
CK_MemberPointerToBoolean: + case CK_FloatingComplexToBoolean: + case CK_IntegralComplexToBoolean: + case CK_LValueBitCast: // -> bool& + case CK_UserDefinedConversion: // operator bool() + case CK_BuiltinFnToFnPtr: + CheckNoBasePath: + assert(path_empty() && "Cast kind should not have a base path!"); + break; + } +} + +const char *CastExpr::getCastKindName() const { + switch (getCastKind()) { + case CK_Dependent: + return "Dependent"; + case CK_BitCast: + return "BitCast"; + case CK_LValueBitCast: + return "LValueBitCast"; + case CK_LValueToRValue: + return "LValueToRValue"; + case CK_NoOp: + return "NoOp"; + case CK_BaseToDerived: + return "BaseToDerived"; + case CK_DerivedToBase: + return "DerivedToBase"; + case CK_UncheckedDerivedToBase: + return "UncheckedDerivedToBase"; + case CK_Dynamic: + return "Dynamic"; + case CK_ToUnion: + return "ToUnion"; + case CK_ArrayToPointerDecay: + return "ArrayToPointerDecay"; + case CK_FunctionToPointerDecay: + return "FunctionToPointerDecay"; + case CK_NullToMemberPointer: + return "NullToMemberPointer"; + case CK_NullToPointer: + return "NullToPointer"; + case CK_BaseToDerivedMemberPointer: + return "BaseToDerivedMemberPointer"; + case CK_DerivedToBaseMemberPointer: + return "DerivedToBaseMemberPointer"; + case CK_ReinterpretMemberPointer: + return "ReinterpretMemberPointer"; + case CK_UserDefinedConversion: + return "UserDefinedConversion"; + case CK_ConstructorConversion: + return "ConstructorConversion"; + case CK_IntegralToPointer: + return "IntegralToPointer"; + case CK_PointerToIntegral: + return "PointerToIntegral"; + case CK_PointerToBoolean: + return "PointerToBoolean"; + case CK_ToVoid: + return "ToVoid"; + case CK_VectorSplat: + return "VectorSplat"; + case CK_IntegralCast: + return "IntegralCast"; + case CK_IntegralToBoolean: + return "IntegralToBoolean"; + case CK_IntegralToFloating: + return "IntegralToFloating"; + case CK_FloatingToIntegral: + return "FloatingToIntegral"; + case CK_FloatingCast: + return 
"FloatingCast"; + case CK_FloatingToBoolean: + return "FloatingToBoolean"; + case CK_MemberPointerToBoolean: + return "MemberPointerToBoolean"; + case CK_CPointerToObjCPointerCast: + return "CPointerToObjCPointerCast"; + case CK_BlockPointerToObjCPointerCast: + return "BlockPointerToObjCPointerCast"; + case CK_AnyPointerToBlockPointerCast: + return "AnyPointerToBlockPointerCast"; + case CK_ObjCObjectLValueCast: + return "ObjCObjectLValueCast"; + case CK_FloatingRealToComplex: + return "FloatingRealToComplex"; + case CK_FloatingComplexToReal: + return "FloatingComplexToReal"; + case CK_FloatingComplexToBoolean: + return "FloatingComplexToBoolean"; + case CK_FloatingComplexCast: + return "FloatingComplexCast"; + case CK_FloatingComplexToIntegralComplex: + return "FloatingComplexToIntegralComplex"; + case CK_IntegralRealToComplex: + return "IntegralRealToComplex"; + case CK_IntegralComplexToReal: + return "IntegralComplexToReal"; + case CK_IntegralComplexToBoolean: + return "IntegralComplexToBoolean"; + case CK_IntegralComplexCast: + return "IntegralComplexCast"; + case CK_IntegralComplexToFloatingComplex: + return "IntegralComplexToFloatingComplex"; + case CK_ARCConsumeObject: + return "ARCConsumeObject"; + case CK_ARCProduceObject: + return "ARCProduceObject"; + case CK_ARCReclaimReturnedObject: + return "ARCReclaimReturnedObject"; + case CK_ARCExtendBlockObject: + return "ARCCExtendBlockObject"; + case CK_AtomicToNonAtomic: + return "AtomicToNonAtomic"; + case CK_NonAtomicToAtomic: + return "NonAtomicToAtomic"; + case CK_CopyAndAutoreleaseBlockObject: + return "CopyAndAutoreleaseBlockObject"; + case CK_BuiltinFnToFnPtr: + return "BuiltinFnToFnPtr"; + case CK_ZeroToOCLEvent: + return "ZeroToOCLEvent"; + } + + llvm_unreachable("Unhandled cast kind!"); +} + +Expr *CastExpr::getSubExprAsWritten() { + Expr *SubExpr = 0; + CastExpr *E = this; + do { + SubExpr = E->getSubExpr(); + + // Skip through reference binding to temporary. 
    if (MaterializeTemporaryExpr *Materialize
                                  = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
      SubExpr = Materialize->GetTemporaryExpr();

    // Skip any temporary bindings; they're implicit.
    if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
      SubExpr = Binder->getSubExpr();

    // Conversions by constructor and conversion functions have a
    // subexpression describing the call; strip it off.
    if (E->getCastKind() == CK_ConstructorConversion)
      SubExpr = cast<CXXConstructExpr>(SubExpr)->getArg(0);
    else if (E->getCastKind() == CK_UserDefinedConversion)
      SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();

    // If the subexpression we're left with is an implicit cast, look
    // through that, too.
  } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));

  return SubExpr;
}

/// path_buffer - Return the start of the CXXBaseSpecifier* array that is
/// allocated immediately after the concrete cast node (trailing storage).
/// The switch dispatches on the dynamic Stmt class so the pointer arithmetic
/// uses the correct concrete object size.
CXXBaseSpecifier **CastExpr::path_buffer() {
  switch (getStmtClass()) {
#define ABSTRACT_STMT(x)
#define CASTEXPR(Type, Base) \
  case Stmt::Type##Class: \
    return reinterpret_cast<CXXBaseSpecifier**>(static_cast<Type*>(this)+1);
#define STMT(Type, Base)
#include "clang/AST/StmtNodes.inc"
  default:
    llvm_unreachable("non-cast expressions not possible here");
  }
}

/// setCastPath - Copy the given inheritance path into this cast's trailing
/// storage. The node must already have been allocated with room for exactly
/// Path.size() entries.
void CastExpr::setCastPath(const CXXCastPath &Path) {
  assert(Path.size() == path_size());
  memcpy(path_buffer(), Path.data(), Path.size() * sizeof(CXXBaseSpecifier*));
}

/// Create - Build an ImplicitCastExpr, allocating trailing storage for the
/// base-class path (if any) in a single allocation.
ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
                                           CastKind Kind, Expr *Operand,
                                           const CXXCastPath *BasePath,
                                           ExprValueKind VK) {
  unsigned PathSize = (BasePath ? BasePath->size() : 0);
  void *Buffer =
    C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  ImplicitCastExpr *E =
    new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
  if (PathSize) E->setCastPath(*BasePath);
  return E;
}

/// CreateEmpty - Build an uninitialized ImplicitCastExpr (for AST
/// deserialization) with room for PathSize path entries.
ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
                                                unsigned PathSize) {
  void *Buffer =
    C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
}


/// Create - Build a CStyleCastExpr, allocating trailing storage for the
/// base-class path (if any) in a single allocation.
CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
                                       ExprValueKind VK, CastKind K, Expr *Op,
                                       const CXXCastPath *BasePath,
                                       TypeSourceInfo *WrittenTy,
                                       SourceLocation L, SourceLocation R) {
  unsigned PathSize = (BasePath ? BasePath->size() : 0);
  void *Buffer =
    C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  CStyleCastExpr *E =
    new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
  if (PathSize) E->setCastPath(*BasePath);
  return E;
}

/// CreateEmpty - Build an uninitialized CStyleCastExpr (for AST
/// deserialization) with room for PathSize path entries.
CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
                                            unsigned PathSize) {
  void *Buffer =
    C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
}

/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "<<=".
StringRef BinaryOperator::getOpcodeStr(Opcode Op) {
  switch (Op) {
  case BO_PtrMemD:   return ".*";
  case BO_PtrMemI:   return "->*";
  case BO_Mul:       return "*";
  case BO_Div:       return "/";
  case BO_Rem:       return "%";
  case BO_Add:       return "+";
  case BO_Sub:       return "-";
  case BO_Shl:       return "<<";
  case BO_Shr:       return ">>";
  case BO_LT:        return "<";
  case BO_GT:        return ">";
  case BO_LE:        return "<=";
  case BO_GE:        return ">=";
  case BO_EQ:        return "==";
  case BO_NE:        return "!=";
  case BO_And:       return "&";
  case BO_Xor:       return "^";
  case BO_Or:        return "|";
  case BO_LAnd:      return "&&";
  case BO_LOr:       return "||";
  case BO_Assign:    return "=";
  case BO_MulAssign: return "*=";
  case BO_DivAssign: return "/=";
  case BO_RemAssign: return "%=";
  case BO_AddAssign: return "+=";
  case BO_SubAssign: return "-=";
  case BO_ShlAssign: return "<<=";
  case BO_ShrAssign: return ">>=";
  case BO_AndAssign: return "&=";
  case BO_XorAssign: return "^=";
  case BO_OrAssign:  return "|=";
  case BO_Comma:     return ",";
  }

  llvm_unreachable("Invalid OpCode!");
}

/// getOverloadedOpcode - Map an overloadable operator (OO_*) to the binary
/// opcode (BO_*) it corresponds to; asserts on operators that are not
/// overloadable binary operators.
BinaryOperatorKind
BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
  switch (OO) {
  default: llvm_unreachable("Not an overloadable binary operator");
  case OO_Plus: return BO_Add;
  case OO_Minus: return BO_Sub;
  case OO_Star: return BO_Mul;
  case OO_Slash: return BO_Div;
  case OO_Percent: return BO_Rem;
  case OO_Caret: return BO_Xor;
  case OO_Amp: return BO_And;
  case OO_Pipe: return BO_Or;
  case OO_Equal: return BO_Assign;
  case OO_Less: return BO_LT;
  case OO_Greater: return BO_GT;
  case OO_PlusEqual: return BO_AddAssign;
  case OO_MinusEqual: return BO_SubAssign;
  case OO_StarEqual: return BO_MulAssign;
  case OO_SlashEqual: return BO_DivAssign;
  case OO_PercentEqual: return BO_RemAssign;
  case OO_CaretEqual: return BO_XorAssign;
  case OO_AmpEqual: return BO_AndAssign;
  case OO_PipeEqual: return BO_OrAssign;
  case OO_LessLess: return BO_Shl;
  case OO_GreaterGreater: return BO_Shr;
  case OO_LessLessEqual: return BO_ShlAssign;
  case OO_GreaterGreaterEqual: return BO_ShrAssign;
  case OO_EqualEqual: return BO_EQ;
  case OO_ExclaimEqual: return BO_NE;
  case OO_LessEqual: return BO_LE;
  case OO_GreaterEqual: return BO_GE;
  case OO_AmpAmp: return BO_LAnd;
  case OO_PipePipe: return BO_LOr;
  case OO_Comma: return BO_Comma;
  case OO_ArrowStar: return BO_PtrMemI;
  }
}

/// getOverloadedOperator - Inverse of getOverloadedOpcode: map a binary
/// opcode to its overloaded-operator kind. The table is indexed by Opcode,
/// so its entries must stay in BO_* declaration order.
OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
  static const OverloadedOperatorKind OverOps[] = {
    /* .* Cannot be overloaded */OO_None, OO_ArrowStar,
    OO_Star, OO_Slash, OO_Percent,
    OO_Plus, OO_Minus,
    OO_LessLess, OO_GreaterGreater,
    OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
    OO_EqualEqual, OO_ExclaimEqual,
    OO_Amp,
    OO_Caret,
    OO_Pipe,
    OO_AmpAmp,
    OO_PipePipe,
    OO_Equal, OO_StarEqual,
    OO_SlashEqual, OO_PercentEqual,
    OO_PlusEqual, OO_MinusEqual,
    OO_LessLessEqual, OO_GreaterGreaterEqual,
    OO_AmpEqual, OO_CaretEqual,
    OO_PipeEqual,
    OO_Comma
  };
  return OverOps[Opc];
}

/// InitListExpr - Construct an initializer-list expression, propagating the
/// dependence and unexpanded-pack bits from each initializer into this node.
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
                           ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
  : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
         false, false),
    InitExprs(C, initExprs.size()),
    LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(0, true)
{
  sawArrayRangeDesignator(false);
  // Any dependent or pack-containing initializer makes the whole list so.
  for (unsigned I = 0; I != initExprs.size(); ++I) {
    if (initExprs[I]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (initExprs[I]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (initExprs[I]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (initExprs[I]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;
  }

  InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
}

/// reserveInits - Ensure capacity for at least NumInits initializers;
/// never shrinks.
void InitListExpr::reserveInits(const ASTContext &C, unsigned NumInits) {
  if (NumInits > InitExprs.size())
    InitExprs.reserve(C, NumInits);
}

/// resizeInits - Resize the initializer list to exactly NumInits entries,
/// null-filling any new slots.
void InitListExpr::resizeInits(const ASTContext &C, unsigned NumInits) {
  InitExprs.resize(C, NumInits, 0);
}

/// updateInit - Set the initializer at index Init, growing the list (with
/// null holes) if needed. Returns the previous expression at that index, or
/// null if there was none.
Expr *InitListExpr::updateInit(const ASTContext &C, unsigned Init, Expr *expr) {
  if (Init >= InitExprs.size()) {
    InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, 0);
    InitExprs.back() = expr;
    return 0;
  }

  Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
  InitExprs[Init] = expr;
  return Result;
}

/// setArrayFiller - Record the filler expression and substitute it into any
/// null slots left by designated initializers.
void InitListExpr::setArrayFiller(Expr *filler) {
  assert(!hasArrayFiller() && "Filler already set!");
  ArrayFillerOrUnionFieldInit = filler;
  // Fill out any "holes" in the array due to designated initializers.
  Expr **inits = getInits();
  for (unsigned i = 0, e = getNumInits(); i != e; ++i)
    if (inits[i] == 0)
      inits[i] = filler;
}

/// isStringLiteralInit - True if this list is a single string-literal (or
/// @encode) initializer for an array of integer type, e.g. char a[] = "x".
bool InitListExpr::isStringLiteralInit() const {
  if (getNumInits() != 1)
    return false;
  const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
  if (!AT || !AT->getElementType()->isIntegerType())
    return false;
  const Expr *Init = getInit(0)->IgnoreParens();
  return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}

/// getLocStart - Prefer the syntactic form's location; fall back to the left
/// brace, then to the first non-null initializer.
SourceLocation InitListExpr::getLocStart() const {
  if (InitListExpr *SyntacticForm = getSyntacticForm())
    return SyntacticForm->getLocStart();
  SourceLocation Beg = LBraceLoc;
  if (Beg.isInvalid()) {
    // Find the first non-null initializer.
    for (InitExprsTy::const_iterator I = InitExprs.begin(),
                                     E = InitExprs.end();
         I != E; ++I) {
      if (Stmt *S = *I) {
        Beg = S->getLocStart();
        break;
      }
    }
  }
  return Beg;
}

/// getLocEnd - Mirror of getLocStart: syntactic form, then right brace, then
/// the last non-null initializer.
SourceLocation InitListExpr::getLocEnd() const {
  if (InitListExpr *SyntacticForm = getSyntacticForm())
    return SyntacticForm->getLocEnd();
  SourceLocation End = RBraceLoc;
  if (End.isInvalid()) {
    // Find the first non-null initializer from the end.
    for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
                                             E = InitExprs.rend();
         I != E; ++I) {
      if (Stmt *S = *I) {
        End = S->getLocEnd();
        break;
      }
    }
  }
  return End;
}

/// getFunctionType - Return the underlying function type for this block.
///
const FunctionProtoType *BlockExpr::getFunctionType() const {
  // The block pointer is never sugared, but the function type might be.
  return cast<BlockPointerType>(getType())
           ->getPointeeType()->castAs<FunctionProtoType>();
}

SourceLocation BlockExpr::getCaretLocation() const {
  return TheBlock->getCaretLocation();
}
const Stmt *BlockExpr::getBody() const {
  return TheBlock->getBody();
}
Stmt *BlockExpr::getBody() {
  return TheBlock->getBody();
}


//===----------------------------------------------------------------------===//
// Generic Expression Routines
//===----------------------------------------------------------------------===//

/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
/// with location to warn on and the source range[s] to report with the
/// warning.
bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
                                  SourceRange &R1, SourceRange &R2,
                                  ASTContext &Ctx) const {
  // Don't warn if the expr is type dependent. The type could end up
  // instantiating to void.
  if (isTypeDependent())
    return false;

  switch (getStmtClass()) {
  default:
    // By default, any non-void expression with an unused value is warnable.
    if (getType()->isVoidType())
      return false;
    WarnE = this;
    Loc = getExprLoc();
    R1 = getSourceRange();
    return true;
  case ParenExprClass:
    return cast<ParenExpr>(this)->getSubExpr()->
      isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
  case GenericSelectionExprClass:
    return cast<GenericSelectionExpr>(this)->getResultExpr()->
      isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
  case ChooseExprClass:
    return cast<ChooseExpr>(this)->getChosenSubExpr()->
      isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
  case UnaryOperatorClass: {
    const UnaryOperator *UO = cast<UnaryOperator>(this);

    switch (UO->getOpcode()) {
    case UO_Plus:
    case UO_Minus:
    case UO_AddrOf:
    case UO_Not:
    case UO_LNot:
    case UO_Deref:
      break;
    case UO_PostInc:
    case UO_PostDec:
    case UO_PreInc:
    case UO_PreDec:                 // ++/--
      return false;  // Not a warning.
    case UO_Real:
    case UO_Imag:
      // accessing a piece of a volatile complex is a side-effect.
      if (Ctx.getCanonicalType(UO->getSubExpr()->getType())
          .isVolatileQualified())
        return false;
      break;
    case UO_Extension:
      return UO->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
    }
    WarnE = this;
    Loc = UO->getOperatorLoc();
    R1 = UO->getSubExpr()->getSourceRange();
    return true;
  }
  case BinaryOperatorClass: {
    const BinaryOperator *BO = cast<BinaryOperator>(this);
    switch (BO->getOpcode()) {
      default:
        break;
      // Consider the RHS of comma for side effects. LHS was checked by
      // Sema::CheckCommaOperands.
      case BO_Comma:
        // ((foo = <blah>), 0) is an idiom for hiding the result (and
        // lvalue-ness) of an assignment written in a macro.
        if (IntegerLiteral *IE =
              dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
          if (IE->getValue() == 0)
            return false;
        return BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
      // Consider '||', '&&' to have side effects if the LHS or RHS does.
      case BO_LAnd:
      case BO_LOr:
        if (!BO->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx) ||
            !BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
          return false;
        break;
    }
    if (BO->isAssignmentOp())
      return false;
    WarnE = this;
    Loc = BO->getOperatorLoc();
    R1 = BO->getLHS()->getSourceRange();
    R2 = BO->getRHS()->getSourceRange();
    return true;
  }
  case CompoundAssignOperatorClass:
  case VAArgExprClass:
  case AtomicExprClass:
    return false;

  case ConditionalOperatorClass: {
    // If only one of the LHS or RHS is a warning, the operator might
    // be being used for control flow. Only warn if both the LHS and
    // RHS are warnings.
    const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
    if (!Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
      return false;
    if (!Exp->getLHS())
      return true;
    return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
  }

  case MemberExprClass:
    WarnE = this;
    Loc = cast<MemberExpr>(this)->getMemberLoc();
    R1 = SourceRange(Loc, Loc);
    R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
    return true;

  case ArraySubscriptExprClass:
    WarnE = this;
    Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
    R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
    R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
    return true;

  case CXXOperatorCallExprClass: {
    // We warn about operator== and operator!= even when user-defined operator
    // overloads as there is no reasonable way to define these such that they
    // have non-trivial, desirable side-effects. See the -Wunused-comparison
    // warning: these operators are commonly typo'ed, and so warning on them
    // provides additional value as well. If this list is updated,
    // DiagnoseUnusedComparison should be as well.
    const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
    if (Op->getOperator() == OO_EqualEqual ||
        Op->getOperator() == OO_ExclaimEqual) {
      WarnE = this;
      Loc = Op->getOperatorLoc();
      R1 = Op->getSourceRange();
      return true;
    }

    // Fallthrough for generic call handling.
  }
  case CallExprClass:
  case CXXMemberCallExprClass:
  case UserDefinedLiteralClass: {
    // If this is a direct call, get the callee.
    const CallExpr *CE = cast<CallExpr>(this);
    if (const Decl *FD = CE->getCalleeDecl()) {
      // If the callee has attribute pure, const, or warn_unused_result, warn
      // about it. void foo() { strlen("bar"); } should warn.
      //
      // Note: If new cases are added here, DiagnoseUnusedExprResult should be
      // updated to match for QoI.
      if (FD->getAttr<WarnUnusedResultAttr>() ||
          FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
        WarnE = this;
        Loc = CE->getCallee()->getLocStart();
        R1 = CE->getCallee()->getSourceRange();

        if (unsigned NumArgs = CE->getNumArgs())
          R2 = SourceRange(CE->getArg(0)->getLocStart(),
                           CE->getArg(NumArgs-1)->getLocEnd());
        return true;
      }
    }
    return false;
  }

  // If we don't know precisely what we're looking at, let's not warn.
  case UnresolvedLookupExprClass:
  case CXXUnresolvedConstructExprClass:
    return false;

  case CXXTemporaryObjectExprClass:
  case CXXConstructExprClass: {
    // Constructing a type marked warn_unused is warnable.
    if (const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl()) {
      if (Type->hasAttr<WarnUnusedAttr>()) {
        WarnE = this;
        Loc = getLocStart();
        R1 = getSourceRange();
        return true;
      }
    }
    return false;
  }

  case ObjCMessageExprClass: {
    const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
    // Under ARC, an unused result of an init-family instance message is
    // suspicious (the object is released immediately).
    if (Ctx.getLangOpts().ObjCAutoRefCount &&
        ME->isInstanceMessage() &&
        !ME->getType()->isVoidType() &&
        ME->getMethodFamily() == OMF_init) {
      WarnE = this;
      Loc = getExprLoc();
      R1 = ME->getSourceRange();
      return true;
    }

    const ObjCMethodDecl *MD = ME->getMethodDecl();
    if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
      WarnE = this;
      Loc = getExprLoc();
      return true;
    }
    return false;
  }

  case ObjCPropertyRefExprClass:
    WarnE = this;
    Loc = getExprLoc();
    R1 = getSourceRange();
    return true;

  case PseudoObjectExprClass: {
    const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);

    // Only complain about things that have the form of a getter.
    if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
        isa<BinaryOperator>(PO->getSyntacticForm()))
      return false;

    WarnE = this;
    Loc = getExprLoc();
    R1 = getSourceRange();
    return true;
  }

  case StmtExprClass: {
    // Statement exprs don't logically have side effects themselves, but are
    // sometimes used in macros in ways that give them a type that is unused.
    // For example ({ blah; foo(); }) will end up with a type if foo has a type.
    // however, if the result of the stmt expr is dead, we don't want to emit a
    // warning.
    const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
    if (!CS->body_empty()) {
      if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
        return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
      if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
        if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
          return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
    }

    if (getType()->isVoidType())
      return false;
    WarnE = this;
    Loc = cast<StmtExpr>(this)->getLParenLoc();
    R1 = getSourceRange();
    return true;
  }
  case CXXFunctionalCastExprClass:
  case CStyleCastExprClass: {
    // Ignore an explicit cast to void unless the operand is a non-trivial
    // volatile lvalue.
    const CastExpr *CE = cast<CastExpr>(this);
    if (CE->getCastKind() == CK_ToVoid) {
      if (CE->getSubExpr()->isGLValue() &&
          CE->getSubExpr()->getType().isVolatileQualified()) {
        const DeclRefExpr *DRE =
          dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
        if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
              cast<VarDecl>(DRE->getDecl())->hasLocalStorage())) {
          return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
                                                          R1, R2, Ctx);
        }
      }
      return false;
    }

    // If this is a cast to a constructor conversion, check the operand.
    // Otherwise, the result of the cast is unused.
    if (CE->getCastKind() == CK_ConstructorConversion)
      return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);

    WarnE = this;
    if (const CXXFunctionalCastExpr *CXXCE =
          dyn_cast<CXXFunctionalCastExpr>(this)) {
      Loc = CXXCE->getLocStart();
      R1 = CXXCE->getSubExpr()->getSourceRange();
    } else {
      const CStyleCastExpr *CStyleCE = cast<CStyleCastExpr>(this);
      Loc = CStyleCE->getLParenLoc();
      R1 = CStyleCE->getSubExpr()->getSourceRange();
    }
    return true;
  }
  case ImplicitCastExprClass: {
    const CastExpr *ICE = cast<ImplicitCastExpr>(this);

    // lvalue-to-rvalue conversion on a volatile lvalue is a side-effect.
    if (ICE->getCastKind() == CK_LValueToRValue &&
        ICE->getSubExpr()->getType().isVolatileQualified())
      return false;

    return ICE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
  }
  case CXXDefaultArgExprClass:
    return (cast<CXXDefaultArgExpr>(this)
            ->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
  case CXXDefaultInitExprClass:
    return (cast<CXXDefaultInitExpr>(this)
            ->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));

  case CXXNewExprClass:
    // FIXME: In theory, there might be new expressions that don't have side
    // effects (e.g. a placement new with an uninitialized POD).
  case CXXDeleteExprClass:
    return false;
  case CXXBindTemporaryExprClass:
    return (cast<CXXBindTemporaryExpr>(this)
            ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
  case ExprWithCleanupsClass:
    return (cast<ExprWithCleanups>(this)
            ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
  }
}

/// isOBJCGCCandidate - Check if an expression is objc gc'able.
/// returns true, if it is; false otherwise.
bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
  const Expr *E = IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    return false;
  case ObjCIvarRefExprClass:
    return true;
  case Expr::UnaryOperatorClass:
    return cast<UnaryOperator>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
  case ImplicitCastExprClass:
    return cast<ImplicitCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
  case MaterializeTemporaryExprClass:
    return cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()
                                            ->isOBJCGCCandidate(Ctx);
  case CStyleCastExprClass:
    return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
  case DeclRefExprClass: {
    const Decl *D = cast<DeclRefExpr>(E)->getDecl();

    if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
      if (VD->hasGlobalStorage())
        return true;
      QualType T = VD->getType();
      // dereferencing to a pointer is always a gc'able candidate,
      // unless it is __weak.
      return T->isPointerType() &&
             (Ctx.getObjCGCAttrKind(T) != Qualifiers::Weak);
    }
    return false;
  }
  case MemberExprClass: {
    const MemberExpr *M = cast<MemberExpr>(E);
    return M->getBase()->isOBJCGCCandidate(Ctx);
  }
  case ArraySubscriptExprClass:
    return cast<ArraySubscriptExpr>(E)->getBase()->isOBJCGCCandidate(Ctx);
  }
}

/// isBoundMemberFunction - True if this lvalue-classifies as a bound member
/// function (e.g. the "x.f" in "x.f()"); always false when type-dependent.
bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
  if (isTypeDependent())
    return false;
  return ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
}

/// findBoundMemberType - Given an expression with the BoundMember placeholder
/// type, return the function type of the bound member, or a null QualType for
/// an unresolved member expression.
QualType Expr::findBoundMemberType(const Expr *expr) {
  assert(expr->hasPlaceholderType(BuiltinType::BoundMember));

  // Bound member expressions are always one of these possibilities:
  //   x->m      x.m      x->*y      x.*y
  // (possibly parenthesized)

  expr = expr->IgnoreParens();
  if (const MemberExpr *mem = dyn_cast<MemberExpr>(expr)) {
    assert(isa<CXXMethodDecl>(mem->getMemberDecl()));
    return mem->getMemberDecl()->getType();
  }

  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(expr)) {
    // A .*/->* operator: the bound type is the member pointer's pointee.
    QualType type = op->getRHS()->getType()->castAs<MemberPointerType>()
                      ->getPointeeType();
    assert(type->isFunctionType());
    return type;
  }

  assert(isa<UnresolvedMemberExpr>(expr));
  return QualType();
}

/// IgnoreParens - Strip ParenExprs, __extension__ unary operators, and
/// non-dependent generic-selection / __builtin_choose_expr wrappers.
Expr* Expr::IgnoreParens() {
  Expr* E = this;
  while (true) {
    if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
      E = P->getSubExpr();
      continue;
    }
    if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
      if (P->getOpcode() == UO_Extension) {
        E = P->getSubExpr();
        continue;
      }
    }
    if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
      if (!P->isResultDependent()) {
        E = P->getResultExpr();
        continue;
      }
    }
    if (ChooseExpr* P = dyn_cast<ChooseExpr>(E)) {
      if (!P->isConditionDependent()) {
        E = P->getChosenSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
/// or CastExprs or ImplicitCastExprs, returning their operand.
Expr *Expr::IgnoreParenCasts() {
  Expr *E = this;
  while (true) {
    E = E->IgnoreParens();
    if (CastExpr *P = dyn_cast<CastExpr>(E)) {
      E = P->getSubExpr();
      continue;
    }
    if (MaterializeTemporaryExpr *Materialize
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
      E = Materialize->GetTemporaryExpr();
      continue;
    }
    if (SubstNonTypeTemplateParmExpr *NTTP
                                  = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
      E = NTTP->getReplacement();
      continue;
    }
    return E;
  }
}

/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
/// casts. This is intended purely as a temporary workaround for code
/// that hasn't yet been rewritten to do the right thing about those
/// casts, and may disappear along with the last internal use.
Expr *Expr::IgnoreParenLValueCasts() {
  Expr *E = this;
  while (true) {
    E = E->IgnoreParens();
    if (CastExpr *P = dyn_cast<CastExpr>(E)) {
      // Only lvalue-to-rvalue casts are stripped; other casts stop the walk.
      if (P->getCastKind() == CK_LValueToRValue) {
        E = P->getSubExpr();
        continue;
      }
    } else if (MaterializeTemporaryExpr *Materialize
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
      E = Materialize->GetTemporaryExpr();
      continue;
    } else if (SubstNonTypeTemplateParmExpr *NTTP
                                  = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
      E = NTTP->getReplacement();
      continue;
    }
    break;
  }
  return E;
}

/// ignoreParenBaseCasts - Strip parentheses and derived-to-base (and no-op)
/// casts, returning the underlying derived-class expression.
Expr *Expr::ignoreParenBaseCasts() {
  Expr *E = this;
  while (true) {
    E = E->IgnoreParens();
    if (CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    return E;
  }
}

/// IgnoreParenImpCasts - Strip parentheses and implicit casts only (explicit
/// casts are preserved), plus temporary materializations and substituted
/// non-type template parameters.
Expr *Expr::IgnoreParenImpCasts() {
  Expr *E = this;
  while (true) {
    E = E->IgnoreParens();
    if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
      E = P->getSubExpr();
      continue;
    }
    if (MaterializeTemporaryExpr *Materialize
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
      E = Materialize->GetTemporaryExpr();
      continue;
    }
    if (SubstNonTypeTemplateParmExpr *NTTP
                                  = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
      E = NTTP->getReplacement();
      continue;
    }
    return E;
  }
}

/// IgnoreConversionOperator - If this is a call to a conversion operator,
/// return the object being converted; otherwise return this expression.
Expr *Expr::IgnoreConversionOperator() {
  if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
    if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
      return MCE->getImplicitObjectArgument();
  }
  return this;
}

/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
/// value (including ptr->int casts of the same size). Strip off any
/// ParenExpr or CastExprs, returning their operand.
Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
  Expr *E = this;
  while (true) {
    E = E->IgnoreParens();

    if (CastExpr *P = dyn_cast<CastExpr>(E)) {
      // We ignore integer <-> casts that are of the same width, ptr<->ptr and
      // ptr<->int casts of the same width. We also ignore all identity casts.
      Expr *SE = P->getSubExpr();

      if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
        E = SE;
        continue;
      }

      if ((E->getType()->isPointerType() ||
           E->getType()->isIntegralType(Ctx)) &&
          (SE->getType()->isPointerType() ||
           SE->getType()->isIntegralType(Ctx)) &&
          Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
        E = SE;
        continue;
      }
    }

    if (SubstNonTypeTemplateParmExpr *NTTP
                                  = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
      E = NTTP->getReplacement();
      continue;
    }

    return E;
  }
}

/// isDefaultArgument - True if, after stripping temporary materialization and
/// implicit casts (as written), this expression is a CXXDefaultArgExpr.
bool Expr::isDefaultArgument() const {
  const Expr *E = this;
  if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
    E = M->GetTemporaryExpr();

  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
    E = ICE->getSubExprAsWritten();

  return isa<CXXDefaultArgExpr>(E);
}

/// \brief Skip over any no-op casts and any temporary-binding
/// expressions.
static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
  if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
    E = M->GetTemporaryExpr();

  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_NoOp)
      E = ICE->getSubExpr();
    else
      break;
  }

  while (const CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(E))
    E = BE->getSubExpr();

  // A second pass of no-op cast stripping, since temporary bindings may have
  // exposed more of them.
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_NoOp)
      E = ICE->getSubExpr();
    else
      break;
  }

  return E->IgnoreParens();
}

/// isTemporaryObject - Determines if this expression produces a
/// temporary of the given class type.
bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
  if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
    return false;

  const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);

  // Temporaries are by definition pr-values of class type.
  if (!E->Classify(C).isPRValue()) {
    // In this context, property reference is a message call and is pr-value.
    if (!isa<ObjCPropertyRefExpr>(E))
      return false;
  }

  // Black-list a few cases which yield pr-values of class type that don't
  // refer to temporaries of that type:

  // - implicit derived-to-base conversions
  if (isa<ImplicitCastExpr>(E)) {
    switch (cast<ImplicitCastExpr>(E)->getCastKind()) {
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
      return false;
    default:
      break;
    }
  }

  // - member expressions (all)
  if (isa<MemberExpr>(E))
    return false;

  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
    if (BO->isPtrMemOp())
      return false;

  // - opaque values (all)
  if (isa<OpaqueValueExpr>(E))
    return false;

  return true;
}

/// isImplicitCXXThis - True if this expression is (after stripping wrappers)
/// an implicitly generated 'this' expression.
bool Expr::isImplicitCXXThis() const {
  const Expr *E = this;

  // Strip away parentheses and casts we don't care about.
  while (true) {
    if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E)) {
      E = Paren->getSubExpr();
      continue;
    }

    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
      if (ICE->getCastKind() == CK_NoOp ||
          ICE->getCastKind() == CK_LValueToRValue ||
          ICE->getCastKind() == CK_DerivedToBase ||
          ICE->getCastKind() == CK_UncheckedDerivedToBase) {
        E = ICE->getSubExpr();
        continue;
      }
    }

    if (const UnaryOperator* UnOp = dyn_cast<UnaryOperator>(E)) {
      if (UnOp->getOpcode() == UO_Extension) {
        E = UnOp->getSubExpr();
        continue;
      }
    }

    if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
      E = M->GetTemporaryExpr();
      continue;
    }

    break;
  }

  if (const CXXThisExpr *This = dyn_cast<CXXThisExpr>(E))
    return This->isImplicit();

  return false;
}

/// hasAnyTypeDependentArguments - Determines if any of the expressions
/// in Exprs is type-dependent.
bool Expr::hasAnyTypeDependentArguments(ArrayRef<Expr *> Exprs) {
  for (unsigned I = 0; I < Exprs.size(); ++I)
    if (Exprs[I]->isTypeDependent())
      return true;

  return false;
}

bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
  // This function is attempting whether an expression is an initializer
  // which can be evaluated at compile-time. It very closely parallels
  // ConstExprEmitter in CGExprConstant.cpp; if they don't match, it
  // will lead to unexpected results. Like ConstExprEmitter, it falls back
  // to isEvaluatable most of the time.
  //
  // If we ever capture reference-binding directly in the AST, we can
  // kill the second parameter.

  if (IsForRef) {
    EvalResult Result;
    return EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects;
  }

  switch (getStmtClass()) {
  default: break;
  case StringLiteralClass:
  case ObjCEncodeExprClass:
    return true;
  case CXXTemporaryObjectExprClass:
  case CXXConstructExprClass: {
    const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);

    if (CE->getConstructor()->isTrivial() &&
        CE->getConstructor()->getParent()->hasTrivialDestructor()) {
      // Trivial default constructor
      if (!CE->getNumArgs()) return true;

      // Trivial copy constructor
      assert(CE->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      return CE->getArg(0)->isConstantInitializer(Ctx, false);
    }

    break;
  }
  case CompoundLiteralExprClass: {
    // This handles gcc's extension that allows global initializers like
    // "struct x {int x;} x = (struct x) {};".
    // FIXME: This accepts other cases it shouldn't!
    const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
    return Exp->isConstantInitializer(Ctx, false);
  }
  case InitListExprClass: {
    const InitListExpr *ILE = cast<InitListExpr>(this);
    // Arrays: every element initializer must itself be constant.
    if (ILE->getType()->isArrayType()) {
      unsigned numInits = ILE->getNumInits();
      for (unsigned i = 0; i < numInits; i++) {
        if (!ILE->getInit(i)->isConstantInitializer(Ctx, false))
          return false;
      }
      return true;
    }

    // Records: walk fields and initializers in parallel.
    if (ILE->getType()->isRecordType()) {
      unsigned ElementNo = 0;
      RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
      for (RecordDecl::field_iterator Field = RD->field_begin(),
           FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
        // If this is a union, skip all the fields that aren't being initialized.
        if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
          continue;

        // Don't emit anonymous bitfields, they just affect layout.
        if (Field->isUnnamedBitfield())
          continue;

        if (ElementNo < ILE->getNumInits()) {
          const Expr *Elt = ILE->getInit(ElementNo++);
          if (Field->isBitField()) {
            // Bitfields have to evaluate to an integer.
            llvm::APSInt ResultTmp;
            if (!Elt->EvaluateAsInt(ResultTmp, Ctx))
              return false;
          } else {
            bool RefType = Field->getType()->isReferenceType();
            if (!Elt->isConstantInitializer(Ctx, RefType))
              return false;
          }
        }
      }
      return true;
    }

    break;
  }
  case ImplicitValueInitExprClass:
    return true;
  case ParenExprClass:
    return cast<ParenExpr>(this)->getSubExpr()
      ->isConstantInitializer(Ctx, IsForRef);
  case GenericSelectionExprClass:
    return cast<GenericSelectionExpr>(this)->getResultExpr()
      ->isConstantInitializer(Ctx, IsForRef);
  case ChooseExprClass:
    if (cast<ChooseExpr>(this)->isConditionDependent())
      return false;
    return cast<ChooseExpr>(this)->getChosenSubExpr()
      ->isConstantInitializer(Ctx, IsForRef);
  case UnaryOperatorClass: {
    const UnaryOperator* Exp = cast<UnaryOperator>(this);
    if (Exp->getOpcode() == UO_Extension)
      return Exp->getSubExpr()->isConstantInitializer(Ctx, false);
    break;
  }
  case CXXFunctionalCastExprClass:
  case CXXStaticCastExprClass:
  case ImplicitCastExprClass:
  case CStyleCastExprClass:
  case ObjCBridgedCastExprClass:
  case CXXDynamicCastExprClass:
  case CXXReinterpretCastExprClass:
  case CXXConstCastExprClass: {
    const CastExpr *CE = cast<CastExpr>(this);

    // Handle misc casts we want to ignore.
    if (CE->getCastKind() == CK_NoOp ||
        CE->getCastKind() == CK_LValueToRValue ||
        CE->getCastKind() == CK_ToUnion ||
        CE->getCastKind() == CK_ConstructorConversion ||
        CE->getCastKind() == CK_NonAtomicToAtomic ||
        CE->getCastKind() == CK_AtomicToNonAtomic)
      return CE->getSubExpr()->isConstantInitializer(Ctx, false);

    break;
  }
  case MaterializeTemporaryExprClass:
    return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
      ->isConstantInitializer(Ctx, false);

  case SubstNonTypeTemplateParmExprClass:
    return cast<SubstNonTypeTemplateParmExpr>(this)->getReplacement()
      ->isConstantInitializer(Ctx, false);
  case CXXDefaultArgExprClass:
    return cast<CXXDefaultArgExpr>(this)->getExpr()
      ->isConstantInitializer(Ctx, false);
  case CXXDefaultInitExprClass:
    return cast<CXXDefaultInitExpr>(this)->getExpr()
      ->isConstantInitializer(Ctx, false);
  }
  // Anything not handled above: defer to full constant evaluation.
  return isEvaluatable(Ctx);
}

bool Expr::HasSideEffects(const ASTContext &Ctx) const {
  if (isInstantiationDependent())
    return true;

  switch (getStmtClass()) {
  case NoStmtClass:
  #define ABSTRACT_STMT(Type)
  #define STMT(Type, Base) case Type##Class:
  #define EXPR(Type, Base)
  #include "clang/AST/StmtNodes.inc"
    llvm_unreachable("unexpected Expr kind");

  case DependentScopeDeclRefExprClass:
  case CXXUnresolvedConstructExprClass:
  case CXXDependentScopeMemberExprClass:
  case UnresolvedLookupExprClass:
  case UnresolvedMemberExprClass:
  case PackExpansionExprClass:
  case SubstNonTypeTemplateParmPackExprClass:
  case FunctionParmPackExprClass:
    llvm_unreachable("shouldn't see dependent / unresolved nodes here");

  case DeclRefExprClass:
  case ObjCIvarRefExprClass:
  case PredefinedExprClass:
  case IntegerLiteralClass:
  case FloatingLiteralClass:
  case ImaginaryLiteralClass:
  case StringLiteralClass:
  case CharacterLiteralClass:
  case OffsetOfExprClass:
  case ImplicitValueInitExprClass:
  case UnaryExprOrTypeTraitExprClass:
  case AddrLabelExprClass:
  case 
GNUNullExprClass: + case CXXBoolLiteralExprClass: + case CXXNullPtrLiteralExprClass: + case CXXThisExprClass: + case CXXScalarValueInitExprClass: + case TypeTraitExprClass: + case UnaryTypeTraitExprClass: + case BinaryTypeTraitExprClass: + case ArrayTypeTraitExprClass: + case ExpressionTraitExprClass: + case CXXNoexceptExprClass: + case SizeOfPackExprClass: + case ObjCStringLiteralClass: + case ObjCEncodeExprClass: + case ObjCBoolLiteralExprClass: + case CXXUuidofExprClass: + case OpaqueValueExprClass: + // These never have a side-effect. + return false; + + case CallExprClass: + case MSPropertyRefExprClass: + case CompoundAssignOperatorClass: + case VAArgExprClass: + case AtomicExprClass: + case StmtExprClass: + case CXXOperatorCallExprClass: + case CXXMemberCallExprClass: + case UserDefinedLiteralClass: + case CXXThrowExprClass: + case CXXNewExprClass: + case CXXDeleteExprClass: + case ExprWithCleanupsClass: + case CXXBindTemporaryExprClass: + case BlockExprClass: + case CUDAKernelCallExprClass: + // These always have a side-effect. + return true; + + case ParenExprClass: + case ArraySubscriptExprClass: + case MemberExprClass: + case ConditionalOperatorClass: + case BinaryConditionalOperatorClass: + case CompoundLiteralExprClass: + case ExtVectorElementExprClass: + case DesignatedInitExprClass: + case ParenListExprClass: + case CXXPseudoDestructorExprClass: + case CXXStdInitializerListExprClass: + case SubstNonTypeTemplateParmExprClass: + case MaterializeTemporaryExprClass: + case ShuffleVectorExprClass: + case ConvertVectorExprClass: + case AsTypeExprClass: + // These have a side-effect if any subexpression does. + break; + + case UnaryOperatorClass: + if (cast<UnaryOperator>(this)->isIncrementDecrementOp()) + return true; + break; + + case BinaryOperatorClass: + if (cast<BinaryOperator>(this)->isAssignmentOp()) + return true; + break; + + case InitListExprClass: + // FIXME: The children for an InitListExpr doesn't include the array filler. 
+ if (const Expr *E = cast<InitListExpr>(this)->getArrayFiller()) + if (E->HasSideEffects(Ctx)) + return true; + break; + + case GenericSelectionExprClass: + return cast<GenericSelectionExpr>(this)->getResultExpr()-> + HasSideEffects(Ctx); + + case ChooseExprClass: + return cast<ChooseExpr>(this)->getChosenSubExpr()->HasSideEffects(Ctx); + + case CXXDefaultArgExprClass: + return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(Ctx); + + case CXXDefaultInitExprClass: + if (const Expr *E = cast<CXXDefaultInitExpr>(this)->getExpr()) + return E->HasSideEffects(Ctx); + // If we've not yet parsed the initializer, assume it has side-effects. + return true; + + case CXXDynamicCastExprClass: { + // A dynamic_cast expression has side-effects if it can throw. + const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(this); + if (DCE->getTypeAsWritten()->isReferenceType() && + DCE->getCastKind() == CK_Dynamic) + return true; + } // Fall through. + case ImplicitCastExprClass: + case CStyleCastExprClass: + case CXXStaticCastExprClass: + case CXXReinterpretCastExprClass: + case CXXConstCastExprClass: + case CXXFunctionalCastExprClass: { + const CastExpr *CE = cast<CastExpr>(this); + if (CE->getCastKind() == CK_LValueToRValue && + CE->getSubExpr()->getType().isVolatileQualified()) + return true; + break; + } + + case CXXTypeidExprClass: + // typeid might throw if its subexpression is potentially-evaluated, so has + // side-effects in that case whether or not its subexpression does. + return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated(); + + case CXXConstructExprClass: + case CXXTemporaryObjectExprClass: { + const CXXConstructExpr *CE = cast<CXXConstructExpr>(this); + if (!CE->getConstructor()->isTrivial()) + return true; + // A trivial constructor does not add any side-effects of its own. Just look + // at its arguments. 
+ break; + } + + case LambdaExprClass: { + const LambdaExpr *LE = cast<LambdaExpr>(this); + for (LambdaExpr::capture_iterator I = LE->capture_begin(), + E = LE->capture_end(); I != E; ++I) + if (I->getCaptureKind() == LCK_ByCopy) + // FIXME: Only has a side-effect if the variable is volatile or if + // the copy would invoke a non-trivial copy constructor. + return true; + return false; + } + + case PseudoObjectExprClass: { + // Only look for side-effects in the semantic form, and look past + // OpaqueValueExpr bindings in that form. + const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this); + for (PseudoObjectExpr::const_semantics_iterator I = PO->semantics_begin(), + E = PO->semantics_end(); + I != E; ++I) { + const Expr *Subexpr = *I; + if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Subexpr)) + Subexpr = OVE->getSourceExpr(); + if (Subexpr->HasSideEffects(Ctx)) + return true; + } + return false; + } + + case ObjCBoxedExprClass: + case ObjCArrayLiteralClass: + case ObjCDictionaryLiteralClass: + case ObjCMessageExprClass: + case ObjCSelectorExprClass: + case ObjCProtocolExprClass: + case ObjCPropertyRefExprClass: + case ObjCIsaExprClass: + case ObjCIndirectCopyRestoreExprClass: + case ObjCSubscriptRefExprClass: + case ObjCBridgedCastExprClass: + // FIXME: Classify these cases better. + return true; + } + + // Recurse to children. + for (const_child_range SubStmts = children(); SubStmts; ++SubStmts) + if (const Stmt *S = *SubStmts) + if (cast<Expr>(S)->HasSideEffects(Ctx)) + return true; + + return false; +} + +namespace { + /// \brief Look for a call to a non-trivial function within an expression. 
+ class NonTrivialCallFinder : public EvaluatedExprVisitor<NonTrivialCallFinder> + { + typedef EvaluatedExprVisitor<NonTrivialCallFinder> Inherited; + + bool NonTrivial; + + public: + explicit NonTrivialCallFinder(ASTContext &Context) + : Inherited(Context), NonTrivial(false) { } + + bool hasNonTrivialCall() const { return NonTrivial; } + + void VisitCallExpr(CallExpr *E) { + if (CXXMethodDecl *Method + = dyn_cast_or_null<CXXMethodDecl>(E->getCalleeDecl())) { + if (Method->isTrivial()) { + // Recurse to children of the call. + Inherited::VisitStmt(E); + return; + } + } + + NonTrivial = true; + } + + void VisitCXXConstructExpr(CXXConstructExpr *E) { + if (E->getConstructor()->isTrivial()) { + // Recurse to children of the call. + Inherited::VisitStmt(E); + return; + } + + NonTrivial = true; + } + + void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { + if (E->getTemporary()->getDestructor()->isTrivial()) { + Inherited::VisitStmt(E); + return; + } + + NonTrivial = true; + } + }; +} + +bool Expr::hasNonTrivialCall(ASTContext &Ctx) { + NonTrivialCallFinder Finder(Ctx); + Finder.Visit(this); + return Finder.hasNonTrivialCall(); +} + +/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null +/// pointer constant or not, as well as the specific kind of constant detected. +/// Null pointer constants can be integer constant expressions with the +/// value zero, casts of zero to void*, nullptr (C++0X), or __null +/// (a GNU extension). 
Expr::NullPointerConstantKind
Expr::isNullPointerConstant(ASTContext &Ctx,
                            NullPointerConstantValueDependence NPC) const {
  // Value-dependent expressions can't be classified directly; the NPC
  // parameter tells us which answer the caller wants in that case.
  // (In C++11, except under Microsoft mode, a value-dependent expression
  // falls through to the normal classification below.)
  if (isValueDependent() &&
      (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MicrosoftMode)) {
    switch (NPC) {
    case NPC_NeverValueDependent:
      llvm_unreachable("Unexpected value dependent expression!");
    case NPC_ValueDependentIsNull:
      if (isTypeDependent() || getType()->isIntegralType(Ctx))
        return NPCK_ZeroExpression;
      else
        return NPCK_NotNull;

    case NPC_ValueDependentIsNotNull:
      return NPCK_NotNull;
    }
  }

  // Strip off a cast to void*, if it exists. Except in C++.
  if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
    if (!Ctx.getLangOpts().CPlusPlus) {
      // Check that it is a cast to void*.
      if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
        QualType Pointee = PT->getPointeeType();
        if (!Pointee.hasQualifiers() &&
            Pointee->isVoidType() &&                              // to void*
            CE->getSubExpr()->getType()->isIntegerType())         // from int.
          return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
      }
    }
  } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
    // Ignore the ImplicitCastExpr type entirely.
    return ICE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
    // Accept ((void*)0) as a null pointer constant, as many other
    // implementations do.
    return PE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const GenericSelectionExpr *GE =
               dyn_cast<GenericSelectionExpr>(this)) {
    if (GE->isResultDependent())
      return NPCK_NotNull;
    return GE->getResultExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(this)) {
    if (CE->isConditionDependent())
      return NPCK_NotNull;
    return CE->getChosenSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const CXXDefaultArgExpr *DefaultArg
               = dyn_cast<CXXDefaultArgExpr>(this)) {
    // See through default argument expressions.
    return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const CXXDefaultInitExpr *DefaultInit
               = dyn_cast<CXXDefaultInitExpr>(this)) {
    // See through default initializer expressions.
    return DefaultInit->getExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (isa<GNUNullExpr>(this)) {
    // The GNU __null extension is always a null pointer constant.
    return NPCK_GNUNull;
  } else if (const MaterializeTemporaryExpr *M
               = dyn_cast<MaterializeTemporaryExpr>(this)) {
    return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
    if (const Expr *Source = OVE->getSourceExpr())
      return Source->isNullPointerConstant(Ctx, NPC);
  }

  // C++11 nullptr_t is always a null pointer constant.
  if (getType()->isNullPtrType())
    return NPCK_CXX11_nullptr;

  // Pre-C++11: look through a transparent-union compound literal to its
  // first initializer.
  if (const RecordType *UT = getType()->getAsUnionType())
    if (!Ctx.getLangOpts().CPlusPlus11 &&
        UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
        const Expr *InitExpr = CLE->getInitializer();
        if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
          return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
      }
  // This expression must be an integer type.
  if (!getType()->isIntegerType() ||
      (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
    return NPCK_NotNull;

  if (Ctx.getLangOpts().CPlusPlus11) {
    // C++11 [conv.ptr]p1: A null pointer constant is an integer literal with
    // value zero or a prvalue of type std::nullptr_t.
    // Microsoft mode permits C++98 rules reflecting MSVC behavior.
    const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
    if (Lit && !Lit->getValue())
      return NPCK_ZeroLiteral;
    else if (!Ctx.getLangOpts().MicrosoftMode ||
             !isCXX98IntegralConstantExpr(Ctx))
      return NPCK_NotNull;
  } else {
    // If we have an integer constant expression, we need to *evaluate* it and
    // test for the value 0.
    if (!isIntegerConstantExpr(Ctx))
      return NPCK_NotNull;
  }

  if (EvaluateKnownConstInt(Ctx) != 0)
    return NPCK_NotNull;

  // Distinguish a literal 0 from some other zero-valued constant expression.
  if (isa<IntegerLiteral>(this))
    return NPCK_ZeroLiteral;
  return NPCK_ZeroExpression;
}

/// \brief If this expression is an l-value for an Objective C
/// property, find the underlying property reference expression.
const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
  const Expr *E = this;
  while (true) {
    // Invariant: every expression we walk through must still be an lvalue
    // ObjC property access.
    assert((E->getValueKind() == VK_LValue &&
            E->getObjectKind() == OK_ObjCProperty) &&
           "expression is not a property reference");
    E = E->IgnoreParenCasts();
    // A comma's value is its right-hand side; keep looking there.
    if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->getOpcode() == BO_Comma) {
        E = BO->getRHS();
        continue;
      }
    }

    break;
  }

  return cast<ObjCPropertyRefExpr>(E);
}

/// isObjCSelfExpr - Returns true if this expression (ignoring parens and
/// implicit casts) is a DeclRefExpr naming the implicit 'self' parameter of
/// the enclosing Objective-C method.
bool Expr::isObjCSelfExpr() const {
  const Expr *E = IgnoreParenImpCasts();

  const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
  if (!DRE)
    return false;

  const ImplicitParamDecl *Param = dyn_cast<ImplicitParamDecl>(DRE->getDecl());
  if (!Param)
    return false;

  const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(Param->getDeclContext());
  if (!M)
    return false;

  return M->getSelfDecl() == Param;
}

/// getSourceBitField - If this expression refers to a bit-field (directly,
/// through an lvalue cast, via assignment, or as the RHS of a comma), return
/// the FieldDecl for that bit-field; otherwise return null.
FieldDecl *Expr::getSourceBitField() {
  Expr *E = this->IgnoreParens();

  // Look through lvalue-to-rvalue loads and lvalue-preserving no-op casts.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_LValueToRValue ||
        (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
      E = ICE->getSubExpr()->IgnoreParens();
    else
      break;
  }

  if (MemberExpr *MemRef = dyn_cast<MemberExpr>(E))
    if (FieldDecl *Field = dyn_cast<FieldDecl>(MemRef->getMemberDecl()))
      if (Field->isBitField())
        return Field;

  if (ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E))
    if (FieldDecl *Ivar = dyn_cast<FieldDecl>(IvarRef->getDecl()))
      if (Ivar->isBitField())
        return Ivar;

  if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
    if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
      if (Field->isBitField())
        return Field;

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
    // An assignment's result refers to its LHS; a comma's to its RHS.
    if (BinOp->isAssignmentOp() && BinOp->getLHS())
      return BinOp->getLHS()->getSourceBitField();

    if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
      return BinOp->getRHS()->getSourceBitField();
  }

  return 0;
}

/// refersToVectorElement - Returns true if this expression refers to an
/// element of a vector (via subscript of a vector base, or an ext-vector
/// component access).
bool Expr::refersToVectorElement() const {
  const Expr *E = this->IgnoreParens();

  // Look through lvalue-preserving no-op casts only.
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getValueKind() != VK_RValue &&
        ICE->getCastKind() == CK_NoOp)
      E = ICE->getSubExpr()->IgnoreParens();
    else
      break;
  }

  if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E))
    return ASE->getBase()->getType()->isVectorType();

  if (isa<ExtVectorElementExpr>(E))
    return true;

  return false;
}

/// isArrow - Return true if the base expression is a pointer to vector,
/// return false if the base expression is a vector.
bool ExtVectorElementExpr::isArrow() const {
  return getBase()->getType()->isPointerType();
}

/// getNumElements - Number of elements produced by this component access;
/// 1 when the result type is scalar rather than a vector.
unsigned ExtVectorElementExpr::getNumElements() const {
  if (const VectorType *VT = getType()->getAs<VectorType>())
    return VT->getNumElements();
  return 1;
}

/// containsDuplicateElements - Return true if any element access is repeated.
bool ExtVectorElementExpr::containsDuplicateElements() const {
  // FIXME: Refactor this code to an accessor on the AST node which returns the
  // "type" of component access, and share with code below and in Sema.
  StringRef Comp = Accessor->getName();

  // Halving swizzles do not contain duplicate elements.
  if (Comp == "hi" || Comp == "lo" || Comp == "even" || Comp == "odd")
    return false;

  // Advance past s-char prefix on hex swizzles.
  if (Comp[0] == 's' || Comp[0] == 'S')
    Comp = Comp.substr(1);

  // Quadratic scan is fine: accessor strings are at most a handful of chars.
  for (unsigned i = 0, e = Comp.size(); i != e; ++i)
    if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos)
      return true;

  return false;
}

/// getEncodedElementAccess - We encode the fields as a llvm ConstantArray.
void ExtVectorElementExpr::getEncodedElementAccess(
                                  SmallVectorImpl<unsigned> &Elts) const {
  StringRef Comp = Accessor->getName();
  // Skip the s/S prefix of hex swizzles (e.g. "s0123").
  if (Comp[0] == 's' || Comp[0] == 'S')
    Comp = Comp.substr(1);

  bool isHi =   Comp == "hi";
  bool isLo =   Comp == "lo";
  bool isEven = Comp == "even";
  bool isOdd  = Comp == "odd";

  for (unsigned i = 0, e = getNumElements(); i != e; ++i) {
    uint64_t Index;

    if (isHi)
      Index = e + i;
    else if (isLo)
      Index = i;
    else if (isEven)
      Index = 2 * i;
    else if (isOdd)
      Index = 2 * i + 1;
    else
      Index = ExtVectorType::getAccessorIdx(Comp[i]);

    Elts.push_back(Index);
  }
}

/// Constructor for messages sent to 'super' (instance or class variant,
/// per IsInstanceSuper).  The receiver slot stores the type of 'super'.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 SourceLocation SuperLoc,
                                 bool IsInstanceSuper,
                                 QualType SuperType,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
         /*TypeDependent=*/false, /*ValueDependent=*/false,
         /*InstantiationDependent=*/false,
         /*ContainsUnexpandedParameterPack=*/false),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(IsInstanceSuper? SuperInstance : SuperClass),
    HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
    SuperLoc(SuperLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(SuperType.getAsOpaquePtr());
}

/// Constructor for class messages; the receiver is the class type's
/// TypeSourceInfo, and dependence bits come from that type.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 TypeSourceInfo *Receiver,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
         T->isDependentType(), T->isInstantiationDependentType(),
         T->containsUnexpandedParameterPack()),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(Class),
    HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
    LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(Receiver);
}

/// Constructor for instance messages; dependence bits come from the
/// receiver expression.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 Expr *Receiver,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
         Receiver->isTypeDependent(),
         Receiver->isInstantiationDependent(),
         Receiver->containsUnexpandedParameterPack()),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(Instance),
    HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
    LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(Receiver);
}

/// initArgsAndSelLocs - Copy the arguments into the trailing storage,
/// OR-ing each argument's dependence bits into this expression, and record
/// the selector locations when they are stored in non-standard form.
void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
                                         ArrayRef<SourceLocation> SelLocs,
                                         SelectorLocationsKind SelLocsK) {
  setNumArgs(Args.size());
  Expr **MyArgs = getArgs();
  for (unsigned I = 0; I != Args.size(); ++I) {
    if (Args[I]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (Args[I]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (Args[I]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (Args[I]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    MyArgs[I] = Args[I];
  }

  SelLocsKind = SelLocsK;
  if (!isImplicit()) {
    // Standard-form locations can be recomputed; only non-standard ones
    // need to be stored explicitly.
    if (SelLocsK == SelLoc_NonStandard)
      std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
  }
}

/// Create - Build a super-receiver message.  Implicit messages store no
/// selector locations at all.
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         SourceLocation SuperLoc,
                                         bool IsInstanceSuper,
                                         QualType SuperType,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
                                   SuperType, Sel, SelLocs, SelLocsK,
                                   Method, Args, RBracLoc, isImplicit);
}

/// Create - Build a class-receiver message.
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         TypeSourceInfo *Receiver,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
                                   SelLocs, SelLocsK, Method, Args, RBracLoc,
                                   isImplicit);
}

/// Create - Build an instance-receiver message.
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         Expr *Receiver,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
                                   SelLocs, SelLocsK, Method, Args, RBracLoc,
                                   isImplicit);
}

/// CreateEmpty - Allocate an uninitialized message expression for
/// deserialization.
ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(const ASTContext &Context,
                                              unsigned NumArgs,
                                              unsigned NumStoredSelLocs) {
  ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
  return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
}

/// alloc - Compute how many selector locations must be stored (none when
/// they follow the standard layout) and allocate accordingly.
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
                                        ArrayRef<Expr *> Args,
                                        SourceLocation RBraceLoc,
                                        ArrayRef<SourceLocation> SelLocs,
                                        Selector Sel,
                                        SelectorLocationsKind &SelLocsK) {
  SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
  unsigned NumStoredSelLocs = (SelLocsK == SelLoc_NonStandard) ? SelLocs.size()
                                                               : 0;
  return alloc(C, Args.size(), NumStoredSelLocs);
}

/// alloc - Allocate raw storage for the node plus its trailing objects:
/// one receiver pointer slot, the argument pointers, and any explicitly
/// stored selector locations (in that order, per the size computation).
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
                                        unsigned NumArgs,
                                        unsigned NumStoredSelLocs) {
  unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
    NumArgs * sizeof(Expr *) + NumStoredSelLocs * sizeof(SourceLocation);
  return (ObjCMessageExpr *)C.Allocate(Size,
                                     llvm::AlignOf<ObjCMessageExpr>::Alignment);
}

/// getSelectorLocs - Append all selector-piece locations to \p SelLocs.
void ObjCMessageExpr::getSelectorLocs(
                               SmallVectorImpl<SourceLocation> &SelLocs) const {
  for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
    SelLocs.push_back(getSelectorLoc(i));
}

/// getReceiverRange - Source range covering the receiver of this message,
/// whatever its kind.
SourceRange ObjCMessageExpr::getReceiverRange() const {
  switch (getReceiverKind()) {
  case Instance:
    return getInstanceReceiver()->getSourceRange();

  case Class:
    return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();

  case SuperInstance:
  case SuperClass:
    return getSuperLoc();
  }

  llvm_unreachable("Invalid ReceiverKind!");
}

/// getSelector - Recover the selector, either from the resolved method or
/// from the opaque selector pointer stashed in SelectorOrMethod.
Selector ObjCMessageExpr::getSelector() const {
  if (HasMethod)
    return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
                                                               ->getSelector();
  return Selector(SelectorOrMethod);
}

/// getReceiverType - The static type of whatever receives this message.
QualType ObjCMessageExpr::getReceiverType() const {
  switch (getReceiverKind()) {
  case Instance:
    return getInstanceReceiver()->getType();
  case Class:
    return getClassReceiver();
  case SuperInstance:
  case SuperClass:
    return getSuperType();
  }

  llvm_unreachable("unexpected receiver kind");
}

/// getReceiverInterface - The ObjC interface the receiver's type names,
/// or null if the type doesn't resolve to one.
ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
  QualType T = getReceiverType();

  if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
    return Ptr->getInterfaceDecl();

  if (const ObjCObjectType *Ty = T->getAs<ObjCObjectType>())
    return Ty->getInterface();

  return 0;
}

/// getBridgeKindName - Spelling of this bridged cast's bridge keyword.
StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
  switch (getBridgeKind()) {
  case OBC_Bridge:
    return "__bridge";
  case OBC_BridgeTransfer:
    return "__bridge_transfer";
  case OBC_BridgeRetained:
    return "__bridge_retained";
  }

  llvm_unreachable("Invalid BridgeKind!");
}

/// ShuffleVectorExpr - Builds a __builtin_shufflevector call, copying the
/// operand pointers and OR-ing each operand's dependence bits into the node.
ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
                                     QualType Type, SourceLocation BLoc,
                                     SourceLocation RP)
   : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
          Type->isDependentType(), Type->isDependentType(),
          Type->isInstantiationDependentType(),
          Type->containsUnexpandedParameterPack()),
     BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
{
  SubExprs = new (C) Stmt*[args.size()];
  for (unsigned i = 0; i != args.size(); i++) {
    if (args[i]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (args[i]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (args[i]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (args[i]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    SubExprs[i] = args[i];
  }
}

/// setExprs - Replace the operand list, releasing any previous storage back
/// to the ASTContext allocator.
void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
  if (SubExprs) C.Deallocate(SubExprs);

  this->NumExprs = Exprs.size();
  SubExprs = new (C) Stmt*[NumExprs];
  memcpy(SubExprs, Exprs.data(), sizeof(Expr *) * Exprs.size());
}

/// GenericSelectionExpr - Non-dependent form: the result association is
/// known, so the node borrows its type/value-kind/dependence from it.
/// SubExprs stores the controlling expression at CONTROLLING, followed by
/// the association expressions starting at END_EXPR.
GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
                               SourceLocation GenericLoc, Expr *ControllingExpr,
                               ArrayRef<TypeSourceInfo*> AssocTypes,
                               ArrayRef<Expr*> AssocExprs,
                               SourceLocation DefaultLoc,
                               SourceLocation RParenLoc,
                               bool ContainsUnexpandedParameterPack,
                               unsigned ResultIndex)
  : Expr(GenericSelectionExprClass,
         AssocExprs[ResultIndex]->getType(),
         AssocExprs[ResultIndex]->getValueKind(),
         AssocExprs[ResultIndex]->getObjectKind(),
         AssocExprs[ResultIndex]->isTypeDependent(),
         AssocExprs[ResultIndex]->isValueDependent(),
         AssocExprs[ResultIndex]->isInstantiationDependent(),
         ContainsUnexpandedParameterPack),
    AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
    SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
    NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
    GenericLoc(GenericLoc), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
  SubExprs[CONTROLLING] = ControllingExpr;
  assert(AssocTypes.size() == AssocExprs.size());
  std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
  std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}

/// GenericSelectionExpr - Dependent form: no result can be selected yet, so
/// the node is DependentTy and fully dependent, with ResultIndex of -1U.
GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
                               SourceLocation GenericLoc, Expr *ControllingExpr,
                               ArrayRef<TypeSourceInfo*> AssocTypes,
                               ArrayRef<Expr*> AssocExprs,
                               SourceLocation DefaultLoc,
                               SourceLocation RParenLoc,
                               bool ContainsUnexpandedParameterPack)
  : Expr(GenericSelectionExprClass,
         Context.DependentTy,
         VK_RValue,
         OK_Ordinary,
         /*isTypeDependent=*/true,
         /*isValueDependent=*/true,
         /*isInstantiationDependent=*/true,
         ContainsUnexpandedParameterPack),
    AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
    SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
    NumAssocs(AssocExprs.size()), ResultIndex(-1U), GenericLoc(GenericLoc),
    DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
  SubExprs[CONTROLLING] = ControllingExpr;
  assert(AssocTypes.size() == AssocExprs.size());
  std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
  std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}

//===----------------------------------------------------------------------===//
//  DesignatedInitExpr
//===----------------------------------------------------------------------===//

/// getFieldName - Name of a field designator; low bit of NameOrField tags
/// whether it still holds an IdentifierInfo* or a resolved FieldDecl*.
IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
  assert(Kind == FieldDesignator && "Only valid on a field designator");
  if (Field.NameOrField & 0x01)
    return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
  else
    return getField()->getIdentifier();
}

DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
                                       unsigned NumDesignators,
                                       const Designator *Designators,
                                       SourceLocation EqualOrColonLoc,
                                       bool GNUSyntax,
                                       ArrayRef<Expr*> IndexExprs,
                                       Expr *Init)
  : Expr(DesignatedInitExprClass, Ty,
         Init->getValueKind(), Init->getObjectKind(),
         Init->isTypeDependent(), Init->isValueDependent(),
         Init->isInstantiationDependent(),
         Init->containsUnexpandedParameterPack()),
    EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
    NumDesignators(NumDesignators), NumSubExprs(IndexExprs.size() + 1) {
  this->Designators = new (C) Designator[NumDesignators];

  // Record the initializer itself.
  child_range Child = children();
  *Child++ = Init;

  // Copy the designators and their subexpressions, computing
  // value-dependence along the way.
  unsigned IndexIdx = 0;
  for (unsigned I = 0; I != NumDesignators; ++I) {
    this->Designators[I] = Designators[I];

    if (this->Designators[I].isArrayDesignator()) {
      // Compute type- and value-dependence.
      Expr *Index = IndexExprs[IndexIdx];
      if (Index->isTypeDependent() || Index->isValueDependent())
        ExprBits.ValueDependent = true;
      if (Index->isInstantiationDependent())
        ExprBits.InstantiationDependent = true;
      // NOTE(review): unlike the array-range case below, a type-/value-
      // dependent single index does not also set InstantiationDependent here
      // unless the index itself is instantiation-dependent -- looks
      // asymmetric; confirm against upstream.
      // Propagate unexpanded parameter packs.
      if (Index->containsUnexpandedParameterPack())
        ExprBits.ContainsUnexpandedParameterPack = true;

      // Copy the index expressions into permanent storage.
      *Child++ = IndexExprs[IndexIdx++];
    } else if (this->Designators[I].isArrayRangeDesignator()) {
      // Compute type- and value-dependence.
      Expr *Start = IndexExprs[IndexIdx];
      Expr *End = IndexExprs[IndexIdx + 1];
      if (Start->isTypeDependent() || Start->isValueDependent() ||
          End->isTypeDependent() || End->isValueDependent()) {
        ExprBits.ValueDependent = true;
        ExprBits.InstantiationDependent = true;
      } else if (Start->isInstantiationDependent() ||
                 End->isInstantiationDependent()) {
        ExprBits.InstantiationDependent = true;
      }

      // Propagate unexpanded parameter packs.
      if (Start->containsUnexpandedParameterPack() ||
          End->containsUnexpandedParameterPack())
        ExprBits.ContainsUnexpandedParameterPack = true;

      // Copy the start/end expressions into permanent storage.
      *Child++ = IndexExprs[IndexIdx++];
      *Child++ = IndexExprs[IndexIdx++];
    }
  }

  assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
}

/// Create - Allocate the node with trailing storage for the initializer and
/// all index expressions (IndexExprs.size() + 1 Stmt* slots).
DesignatedInitExpr *
DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
                           unsigned NumDesignators,
                           ArrayRef<Expr*> IndexExprs,
                           SourceLocation ColonOrEqualLoc,
                           bool UsesColonSyntax, Expr *Init) {
  void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
                         sizeof(Stmt *) * (IndexExprs.size() + 1), 8);
  return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
                                      ColonOrEqualLoc, UsesColonSyntax,
                                      IndexExprs, Init);
}

/// CreateEmpty - Allocate an uninitialized node for deserialization.
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
                                                    unsigned NumIndexExprs) {
  void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
                         sizeof(Stmt *) * (NumIndexExprs + 1), 8);
  return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
}

/// setDesignators - Replace the designator array with a fresh copy allocated
/// from the ASTContext.
void DesignatedInitExpr::setDesignators(const ASTContext &C,
                                        const Designator *Desigs,
                                        unsigned NumDesigs) {
  Designators = new (C) Designator[NumDesigs];
  NumDesignators = NumDesigs;
  for (unsigned I = 0; I != NumDesigs; ++I)
    Designators[I] = Desigs[I];
}

/// getDesignatorsSourceRange - Range covering all designators (not the
/// initializer itself).
SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
  DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr*>(this);
  if (size() == 1)
    return DIE->getDesignator(0)->getSourceRange();
  return SourceRange(DIE->getDesignator(0)->getLocStart(),
                     DIE->getDesignator(size()-1)->getLocEnd());
}

SourceLocation DesignatedInitExpr::getLocStart() const {
  SourceLocation StartLoc;
  Designator &First =
    *const_cast<DesignatedInitExpr*>(this)->designators_begin();
  if (First.isFieldDesignator()) {
    // GNU syntax ("field:") has no dot; start at the field name instead.
    if (GNUSyntax)
      StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
    else
      StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
  } else
    StartLoc =
      SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
  return StartLoc;
}

SourceLocation DesignatedInitExpr::getLocEnd() const {
  return getInit()->getLocEnd();
}

/// getArrayIndex - Fetch the index expression of an array designator from
/// the Stmt* array that trails this object in memory (slot 0 holds the
/// initializer, hence the +1).
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
  assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
  char *Ptr = static_cast<char *>(
                  const_cast<void *>(static_cast<const void *>(this)));
  Ptr += sizeof(DesignatedInitExpr);
  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}

/// getArrayRangeStart - First index expression of an array-range designator
/// (same trailing-storage layout as getArrayIndex).
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
  assert(D.Kind == Designator::ArrayRangeDesignator &&
         "Requires array range designator");
  char *Ptr = static_cast<char *>(
                  const_cast<void *>(static_cast<const void *>(this)));
  Ptr += sizeof(DesignatedInitExpr);
  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}

/// getArrayRangeEnd - Second index expression of an array-range designator;
/// it sits one slot past the range start.
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
  assert(D.Kind == Designator::ArrayRangeDesignator &&
         "Requires array range designator");
  char *Ptr = static_cast<char *>(
                  const_cast<void *>(static_cast<const void *>(this)));
  Ptr += sizeof(DesignatedInitExpr);
  Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
}

/// \brief Replaces the designator at index @p Idx with the series
/// of designators in [First, Last).
+void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx, + const Designator *First, + const Designator *Last) { + unsigned NumNewDesignators = Last - First; + if (NumNewDesignators == 0) { + std::copy_backward(Designators + Idx + 1, + Designators + NumDesignators, + Designators + Idx); + --NumNewDesignators; + return; + } else if (NumNewDesignators == 1) { + Designators[Idx] = *First; + return; + } + + Designator *NewDesignators + = new (C) Designator[NumDesignators - 1 + NumNewDesignators]; + std::copy(Designators, Designators + Idx, NewDesignators); + std::copy(First, Last, NewDesignators + Idx); + std::copy(Designators + Idx + 1, Designators + NumDesignators, + NewDesignators + Idx + NumNewDesignators); + Designators = NewDesignators; + NumDesignators = NumDesignators - 1 + NumNewDesignators; +} + +ParenListExpr::ParenListExpr(const ASTContext& C, SourceLocation lparenloc, + ArrayRef<Expr*> exprs, + SourceLocation rparenloc) + : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary, + false, false, false, false), + NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) { + Exprs = new (C) Stmt*[exprs.size()]; + for (unsigned i = 0; i != exprs.size(); ++i) { + if (exprs[i]->isTypeDependent()) + ExprBits.TypeDependent = true; + if (exprs[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (exprs[i]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (exprs[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + Exprs[i] = exprs[i]; + } +} + +const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) { + if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e)) + e = ewc->getSubExpr(); + if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e)) + e = m->GetTemporaryExpr(); + e = cast<CXXConstructExpr>(e)->getArg(0); + while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e)) + e = ice->getSubExpr(); + 
return cast<OpaqueValueExpr>(e); +} + +PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context, + EmptyShell sh, + unsigned numSemanticExprs) { + void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) + + (1 + numSemanticExprs) * sizeof(Expr*), + llvm::alignOf<PseudoObjectExpr>()); + return new(buffer) PseudoObjectExpr(sh, numSemanticExprs); +} + +PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs) + : Expr(PseudoObjectExprClass, shell) { + PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1; +} + +PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax, + ArrayRef<Expr*> semantics, + unsigned resultIndex) { + assert(syntax && "no syntactic expression!"); + assert(semantics.size() && "no semantic expressions!"); + + QualType type; + ExprValueKind VK; + if (resultIndex == NoResult) { + type = C.VoidTy; + VK = VK_RValue; + } else { + assert(resultIndex < semantics.size()); + type = semantics[resultIndex]->getType(); + VK = semantics[resultIndex]->getValueKind(); + assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary); + } + + void *buffer = C.Allocate(sizeof(PseudoObjectExpr) + + (1 + semantics.size()) * sizeof(Expr*), + llvm::alignOf<PseudoObjectExpr>()); + return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics, + resultIndex); +} + +PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK, + Expr *syntax, ArrayRef<Expr*> semantics, + unsigned resultIndex) + : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary, + /*filled in at end of ctor*/ false, false, false, false) { + PseudoObjectExprBits.NumSubExprs = semantics.size() + 1; + PseudoObjectExprBits.ResultIndex = resultIndex + 1; + + for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) { + Expr *E = (i == 0 ? 
syntax : semantics[i-1]); + getSubExprsBuffer()[i] = E; + + if (E->isTypeDependent()) + ExprBits.TypeDependent = true; + if (E->isValueDependent()) + ExprBits.ValueDependent = true; + if (E->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (E->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + if (isa<OpaqueValueExpr>(E)) + assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != 0 && + "opaque-value semantic expressions for pseudo-object " + "operations must have sources"); + } +} + +//===----------------------------------------------------------------------===// +// ExprIterator. +//===----------------------------------------------------------------------===// + +Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); } +Expr* ExprIterator::operator*() const { return cast<Expr>(*I); } +Expr* ExprIterator::operator->() const { return cast<Expr>(*I); } +const Expr* ConstExprIterator::operator[](size_t idx) const { + return cast<Expr>(I[idx]); +} +const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); } +const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); } + +//===----------------------------------------------------------------------===// +// Child Iterators for iterating over subexpressions/substatements +//===----------------------------------------------------------------------===// + +// UnaryExprOrTypeTraitExpr +Stmt::child_range UnaryExprOrTypeTraitExpr::children() { + // If this is of a type and the type is a VLA type (and not a typedef), the + // size expression of the VLA needs to be treated as an executable expression. + // Why isn't this weirdness documented better in StmtIterator? 
+ if (isArgumentType()) { + if (const VariableArrayType* T = dyn_cast<VariableArrayType>( + getArgumentType().getTypePtr())) + return child_range(child_iterator(T), child_iterator()); + return child_range(); + } + return child_range(&Argument.Ex, &Argument.Ex + 1); +} + +// ObjCMessageExpr +Stmt::child_range ObjCMessageExpr::children() { + Stmt **begin; + if (getReceiverKind() == Instance) + begin = reinterpret_cast<Stmt **>(this + 1); + else + begin = reinterpret_cast<Stmt **>(getArgs()); + return child_range(begin, + reinterpret_cast<Stmt **>(getArgs() + getNumArgs())); +} + +ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, + QualType T, ObjCMethodDecl *Method, + SourceRange SR) + : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary, + false, false, false, false), + NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) +{ + Expr **SaveElements = getElements(); + for (unsigned I = 0, N = Elements.size(); I != N; ++I) { + if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent()) + ExprBits.ValueDependent = true; + if (Elements[I]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (Elements[I]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SaveElements[I] = Elements[I]; + } +} + +ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C, + ArrayRef<Expr *> Elements, + QualType T, ObjCMethodDecl * Method, + SourceRange SR) { + void *Mem = C.Allocate(sizeof(ObjCArrayLiteral) + + Elements.size() * sizeof(Expr *)); + return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR); +} + +ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C, + unsigned NumElements) { + + void *Mem = C.Allocate(sizeof(ObjCArrayLiteral) + + NumElements * sizeof(Expr *)); + return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements); +} + +ObjCDictionaryLiteral::ObjCDictionaryLiteral( + ArrayRef<ObjCDictionaryElement> VK, + bool HasPackExpansions, + QualType T, 
ObjCMethodDecl *method, + SourceRange SR) + : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false, + false, false), + NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR), + DictWithObjectsMethod(method) +{ + KeyValuePair *KeyValues = getKeyValues(); + ExpansionData *Expansions = getExpansionData(); + for (unsigned I = 0; I < NumElements; I++) { + if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() || + VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent()) + ExprBits.ValueDependent = true; + if (VK[I].Key->isInstantiationDependent() || + VK[I].Value->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (VK[I].EllipsisLoc.isInvalid() && + (VK[I].Key->containsUnexpandedParameterPack() || + VK[I].Value->containsUnexpandedParameterPack())) + ExprBits.ContainsUnexpandedParameterPack = true; + + KeyValues[I].Key = VK[I].Key; + KeyValues[I].Value = VK[I].Value; + if (Expansions) { + Expansions[I].EllipsisLoc = VK[I].EllipsisLoc; + if (VK[I].NumExpansions) + Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1; + else + Expansions[I].NumExpansionsPlusOne = 0; + } + } +} + +ObjCDictionaryLiteral * +ObjCDictionaryLiteral::Create(const ASTContext &C, + ArrayRef<ObjCDictionaryElement> VK, + bool HasPackExpansions, + QualType T, ObjCMethodDecl *method, + SourceRange SR) { + unsigned ExpansionsSize = 0; + if (HasPackExpansions) + ExpansionsSize = sizeof(ExpansionData) * VK.size(); + + void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) + + sizeof(KeyValuePair) * VK.size() + ExpansionsSize); + return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR); +} + +ObjCDictionaryLiteral * +ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements, + bool HasPackExpansions) { + unsigned ExpansionsSize = 0; + if (HasPackExpansions) + ExpansionsSize = sizeof(ExpansionData) * NumElements; + void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) + + 
sizeof(KeyValuePair) * NumElements + ExpansionsSize); + return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements, + HasPackExpansions); +} + +ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(const ASTContext &C, + Expr *base, + Expr *key, QualType T, + ObjCMethodDecl *getMethod, + ObjCMethodDecl *setMethod, + SourceLocation RB) { + void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr)); + return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue, + OK_ObjCSubscript, + getMethod, setMethod, RB); +} + +AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args, + QualType t, AtomicOp op, SourceLocation RP) + : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary, + false, false, false, false), + NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op) +{ + assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions"); + for (unsigned i = 0; i != args.size(); i++) { + if (args[i]->isTypeDependent()) + ExprBits.TypeDependent = true; + if (args[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (args[i]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (args[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i] = args[i]; + } +} + +unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { + switch (Op) { + case AO__c11_atomic_init: + case AO__c11_atomic_load: + case AO__atomic_load_n: + return 2; + + case AO__c11_atomic_store: + case AO__c11_atomic_exchange: + case AO__atomic_load: + case AO__atomic_store: + case AO__atomic_store_n: + case AO__atomic_exchange_n: + case AO__c11_atomic_fetch_add: + case AO__c11_atomic_fetch_sub: + case AO__c11_atomic_fetch_and: + case AO__c11_atomic_fetch_or: + case AO__c11_atomic_fetch_xor: + case AO__atomic_fetch_add: + case AO__atomic_fetch_sub: + case AO__atomic_fetch_and: + case AO__atomic_fetch_or: + case AO__atomic_fetch_xor: + case AO__atomic_fetch_nand: + case AO__atomic_add_fetch: + case AO__atomic_sub_fetch: 
+ case AO__atomic_and_fetch: + case AO__atomic_or_fetch: + case AO__atomic_xor_fetch: + case AO__atomic_nand_fetch: + return 3; + + case AO__atomic_exchange: + return 4; + + case AO__c11_atomic_compare_exchange_strong: + case AO__c11_atomic_compare_exchange_weak: + return 5; + + case AO__atomic_compare_exchange: + case AO__atomic_compare_exchange_n: + return 6; + } + llvm_unreachable("unknown atomic op"); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp new file mode 100644 index 000000000000..3738c0e4f2c9 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp @@ -0,0 +1,1503 @@ +//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the subclesses of Expr class declared in ExprCXX.h +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/IdentifierTable.h" +using namespace clang; + + +//===----------------------------------------------------------------------===// +// Child Iterators for iterating over subexpressions/substatements +//===----------------------------------------------------------------------===// + +bool CXXTypeidExpr::isPotentiallyEvaluated() const { + if (isTypeOperand()) + return false; + + // C++11 [expr.typeid]p3: + // When typeid is applied to an expression other than a glvalue of + // polymorphic class type, [...] the expression is an unevaluated operand. 
+ const Expr *E = getExprOperand(); + if (const CXXRecordDecl *RD = E->getType()->getAsCXXRecordDecl()) + if (RD->isPolymorphic() && E->isGLValue()) + return true; + + return false; +} + +QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const { + assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)"); + Qualifiers Quals; + return Context.getUnqualifiedArrayType( + Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals); +} + +QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const { + assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)"); + Qualifiers Quals; + return Context.getUnqualifiedArrayType( + Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals); +} + +// static +UuidAttr *CXXUuidofExpr::GetUuidAttrOfType(QualType QT, + bool *RDHasMultipleGUIDsPtr) { + // Optionally remove one level of pointer, reference or array indirection. + const Type *Ty = QT.getTypePtr(); + if (QT->isPointerType() || QT->isReferenceType()) + Ty = QT->getPointeeType().getTypePtr(); + else if (QT->isArrayType()) + Ty = Ty->getBaseElementTypeUnsafe(); + + // Loop all record redeclaration looking for an uuid attribute. + CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); + if (!RD) + return 0; + + // __uuidof can grab UUIDs from template arguments. 
+ if (ClassTemplateSpecializationDecl *CTSD = + dyn_cast<ClassTemplateSpecializationDecl>(RD)) { + const TemplateArgumentList &TAL = CTSD->getTemplateArgs(); + UuidAttr *UuidForRD = 0; + + for (unsigned I = 0, N = TAL.size(); I != N; ++I) { + const TemplateArgument &TA = TAL[I]; + bool SeenMultipleGUIDs = false; + + UuidAttr *UuidForTA = 0; + if (TA.getKind() == TemplateArgument::Type) + UuidForTA = GetUuidAttrOfType(TA.getAsType(), &SeenMultipleGUIDs); + else if (TA.getKind() == TemplateArgument::Declaration) + UuidForTA = + GetUuidAttrOfType(TA.getAsDecl()->getType(), &SeenMultipleGUIDs); + + // If the template argument has a UUID, there are three cases: + // - This is the first UUID seen for this RecordDecl. + // - This is a different UUID than previously seen for this RecordDecl. + // - This is the same UUID than previously seen for this RecordDecl. + if (UuidForTA) { + if (!UuidForRD) + UuidForRD = UuidForTA; + else if (UuidForRD != UuidForTA) + SeenMultipleGUIDs = true; + } + + // Seeing multiple UUIDs means that we couldn't find a UUID + if (SeenMultipleGUIDs) { + if (RDHasMultipleGUIDsPtr) + *RDHasMultipleGUIDsPtr = true; + return 0; + } + } + + return UuidForRD; + } + + for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(), + E = RD->redecls_end(); + I != E; ++I) + if (UuidAttr *Uuid = I->getAttr<UuidAttr>()) + return Uuid; + + return 0; +} + +StringRef CXXUuidofExpr::getUuidAsStringRef(ASTContext &Context) const { + StringRef Uuid; + if (isTypeOperand()) + Uuid = CXXUuidofExpr::GetUuidAttrOfType(getTypeOperand(Context))->getGuid(); + else { + // Special case: __uuidof(0) means an all-zero GUID. 
+ Expr *Op = getExprOperand(); + if (!Op->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) + Uuid = CXXUuidofExpr::GetUuidAttrOfType(Op->getType())->getGuid(); + else + Uuid = "00000000-0000-0000-0000-000000000000"; + } + return Uuid; +} + +// CXXScalarValueInitExpr +SourceLocation CXXScalarValueInitExpr::getLocStart() const { + return TypeInfo ? TypeInfo->getTypeLoc().getBeginLoc() : RParenLoc; +} + +// CXXNewExpr +CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew, + FunctionDecl *operatorNew, FunctionDecl *operatorDelete, + bool usualArrayDeleteWantsSize, + ArrayRef<Expr*> placementArgs, + SourceRange typeIdParens, Expr *arraySize, + InitializationStyle initializationStyle, + Expr *initializer, QualType ty, + TypeSourceInfo *allocatedTypeInfo, + SourceRange Range, SourceRange directInitRange) + : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary, + ty->isDependentType(), ty->isDependentType(), + ty->isInstantiationDependentType(), + ty->containsUnexpandedParameterPack()), + SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete), + AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens), + Range(Range), DirectInitRange(directInitRange), + GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) { + assert((initializer != 0 || initializationStyle == NoInit) && + "Only NoInit can have no initializer."); + StoredInitializationStyle = initializer ? 
initializationStyle + 1 : 0; + AllocateArgsArray(C, arraySize != 0, placementArgs.size(), initializer != 0); + unsigned i = 0; + if (Array) { + if (arraySize->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + + if (arraySize->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i++] = arraySize; + } + + if (initializer) { + if (initializer->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + + if (initializer->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i++] = initializer; + } + + for (unsigned j = 0; j != placementArgs.size(); ++j) { + if (placementArgs[j]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (placementArgs[j]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + SubExprs[i++] = placementArgs[j]; + } + + switch (getInitializationStyle()) { + case CallInit: + this->Range.setEnd(DirectInitRange.getEnd()); break; + case ListInit: + this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break; + default: + if (TypeIdParens.isValid()) + this->Range.setEnd(TypeIdParens.getEnd()); + break; + } +} + +void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray, + unsigned numPlaceArgs, bool hasInitializer){ + assert(SubExprs == 0 && "SubExprs already allocated"); + Array = isArray; + NumPlacementArgs = numPlaceArgs; + + unsigned TotalSize = Array + hasInitializer + NumPlacementArgs; + SubExprs = new (C) Stmt*[TotalSize]; +} + +bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const { + return getOperatorNew()->getType()-> + castAs<FunctionProtoType>()->isNothrow(Ctx); +} + +// CXXDeleteExpr +QualType CXXDeleteExpr::getDestroyedType() const { + const Expr *Arg = getArgument(); + // The type-to-delete may not be a pointer if it's a dependent type. 
+ const QualType ArgType = Arg->getType(); + + if (ArgType->isDependentType() && !ArgType->isPointerType()) + return QualType(); + + return ArgType->getAs<PointerType>()->getPointeeType(); +} + +// CXXPseudoDestructorExpr +PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info) + : Type(Info) +{ + Location = Info->getTypeLoc().getLocalSourceRange().getBegin(); +} + +CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context, + Expr *Base, bool isArrow, SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType, + SourceLocation ColonColonLoc, SourceLocation TildeLoc, + PseudoDestructorTypeStorage DestroyedType) + : Expr(CXXPseudoDestructorExprClass, + Context.getPointerType(Context.getFunctionType( + Context.VoidTy, None, + FunctionProtoType::ExtProtoInfo( + Context.getDefaultCallingConvention(false, true)))), + VK_RValue, OK_Ordinary, + /*isTypeDependent=*/(Base->isTypeDependent() || + (DestroyedType.getTypeSourceInfo() && + DestroyedType.getTypeSourceInfo()->getType()->isDependentType())), + /*isValueDependent=*/Base->isValueDependent(), + (Base->isInstantiationDependent() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) || + (ScopeType && + ScopeType->getType()->isInstantiationDependentType()) || + (DestroyedType.getTypeSourceInfo() && + DestroyedType.getTypeSourceInfo()->getType() + ->isInstantiationDependentType())), + // ContainsUnexpandedParameterPack + (Base->containsUnexpandedParameterPack() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier() + ->containsUnexpandedParameterPack()) || + (ScopeType && + ScopeType->getType()->containsUnexpandedParameterPack()) || + (DestroyedType.getTypeSourceInfo() && + DestroyedType.getTypeSourceInfo()->getType() + ->containsUnexpandedParameterPack()))), + Base(static_cast<Stmt *>(Base)), IsArrow(isArrow), + OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc), + ScopeType(ScopeType), 
ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc), + DestroyedType(DestroyedType) { } + +QualType CXXPseudoDestructorExpr::getDestroyedType() const { + if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo()) + return TInfo->getType(); + + return QualType(); +} + +SourceLocation CXXPseudoDestructorExpr::getLocEnd() const { + SourceLocation End = DestroyedType.getLocation(); + if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo()) + End = TInfo->getTypeLoc().getLocalSourceRange().getEnd(); + return End; +} + +// UnresolvedLookupExpr +UnresolvedLookupExpr * +UnresolvedLookupExpr::Create(const ASTContext &C, + CXXRecordDecl *NamingClass, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &NameInfo, + bool ADL, + const TemplateArgumentListInfo *Args, + UnresolvedSetIterator Begin, + UnresolvedSetIterator End) +{ + assert(Args || TemplateKWLoc.isValid()); + unsigned num_args = Args ? Args->size() : 0; + void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) + + ASTTemplateKWAndArgsInfo::sizeFor(num_args)); + return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc, + TemplateKWLoc, NameInfo, + ADL, /*Overload*/ true, Args, + Begin, End); +} + +UnresolvedLookupExpr * +UnresolvedLookupExpr::CreateEmpty(const ASTContext &C, + bool HasTemplateKWAndArgsInfo, + unsigned NumTemplateArgs) { + std::size_t size = sizeof(UnresolvedLookupExpr); + if (HasTemplateKWAndArgsInfo) + size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + + void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>()); + UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell()); + E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo; + return E; +} + +OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &NameInfo, + const TemplateArgumentListInfo *TemplateArgs, + UnresolvedSetIterator Begin, + 
UnresolvedSetIterator End, + bool KnownDependent, + bool KnownInstantiationDependent, + bool KnownContainsUnexpandedParameterPack) + : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent, + KnownDependent, + (KnownInstantiationDependent || + NameInfo.isInstantiationDependent() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())), + (KnownContainsUnexpandedParameterPack || + NameInfo.containsUnexpandedParameterPack() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier() + ->containsUnexpandedParameterPack()))), + NameInfo(NameInfo), QualifierLoc(QualifierLoc), + Results(0), NumResults(End - Begin), + HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid()) +{ + NumResults = End - Begin; + if (NumResults) { + // Determine whether this expression is type-dependent. + for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) { + if ((*I)->getDeclContext()->isDependentContext() || + isa<UnresolvedUsingValueDecl>(*I)) { + ExprBits.TypeDependent = true; + ExprBits.ValueDependent = true; + ExprBits.InstantiationDependent = true; + } + } + + Results = static_cast<DeclAccessPair *>( + C.Allocate(sizeof(DeclAccessPair) * NumResults, + llvm::alignOf<DeclAccessPair>())); + memcpy(Results, &*Begin.getIterator(), + NumResults * sizeof(DeclAccessPair)); + } + + // If we have explicit template arguments, check for dependent + // template arguments and whether they contain any unexpanded pack + // expansions. 
+ if (TemplateArgs) { + bool Dependent = false; + bool InstantiationDependent = false; + bool ContainsUnexpandedParameterPack = false; + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs, + Dependent, + InstantiationDependent, + ContainsUnexpandedParameterPack); + + if (Dependent) { + ExprBits.TypeDependent = true; + ExprBits.ValueDependent = true; + } + if (InstantiationDependent) + ExprBits.InstantiationDependent = true; + if (ContainsUnexpandedParameterPack) + ExprBits.ContainsUnexpandedParameterPack = true; + } else if (TemplateKWLoc.isValid()) { + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc); + } + + if (isTypeDependent()) + setType(C.DependentTy); +} + +void OverloadExpr::initializeResults(const ASTContext &C, + UnresolvedSetIterator Begin, + UnresolvedSetIterator End) { + assert(Results == 0 && "Results already initialized!"); + NumResults = End - Begin; + if (NumResults) { + Results = static_cast<DeclAccessPair *>( + C.Allocate(sizeof(DeclAccessPair) * NumResults, + + llvm::alignOf<DeclAccessPair>())); + memcpy(Results, &*Begin.getIterator(), + NumResults * sizeof(DeclAccessPair)); + } +} + +CXXRecordDecl *OverloadExpr::getNamingClass() const { + if (isa<UnresolvedLookupExpr>(this)) + return cast<UnresolvedLookupExpr>(this)->getNamingClass(); + else + return cast<UnresolvedMemberExpr>(this)->getNamingClass(); +} + +// DependentScopeDeclRefExpr +DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &NameInfo, + const TemplateArgumentListInfo *Args) + : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary, + true, true, + (NameInfo.isInstantiationDependent() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())), + (NameInfo.containsUnexpandedParameterPack() || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier() + ->containsUnexpandedParameterPack()))), + 
QualifierLoc(QualifierLoc), NameInfo(NameInfo), + HasTemplateKWAndArgsInfo(Args != 0 || TemplateKWLoc.isValid()) +{ + if (Args) { + bool Dependent = true; + bool InstantiationDependent = true; + bool ContainsUnexpandedParameterPack + = ExprBits.ContainsUnexpandedParameterPack; + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args, + Dependent, + InstantiationDependent, + ContainsUnexpandedParameterPack); + ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack; + } else if (TemplateKWLoc.isValid()) { + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc); + } +} + +DependentScopeDeclRefExpr * +DependentScopeDeclRefExpr::Create(const ASTContext &C, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &NameInfo, + const TemplateArgumentListInfo *Args) { + assert(QualifierLoc && "should be created for dependent qualifiers"); + std::size_t size = sizeof(DependentScopeDeclRefExpr); + if (Args) + size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size()); + else if (TemplateKWLoc.isValid()) + size += ASTTemplateKWAndArgsInfo::sizeFor(0); + void *Mem = C.Allocate(size); + return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc, + TemplateKWLoc, NameInfo, Args); +} + +DependentScopeDeclRefExpr * +DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C, + bool HasTemplateKWAndArgsInfo, + unsigned NumTemplateArgs) { + std::size_t size = sizeof(DependentScopeDeclRefExpr); + if (HasTemplateKWAndArgsInfo) + size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + void *Mem = C.Allocate(size); + DependentScopeDeclRefExpr *E + = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(), + SourceLocation(), + DeclarationNameInfo(), 0); + E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo; + return E; +} + +SourceLocation CXXConstructExpr::getLocStart() const { + if (isa<CXXTemporaryObjectExpr>(this)) + return cast<CXXTemporaryObjectExpr>(this)->getLocStart(); + 
return Loc; +} + +SourceLocation CXXConstructExpr::getLocEnd() const { + if (isa<CXXTemporaryObjectExpr>(this)) + return cast<CXXTemporaryObjectExpr>(this)->getLocEnd(); + + if (ParenOrBraceRange.isValid()) + return ParenOrBraceRange.getEnd(); + + SourceLocation End = Loc; + for (unsigned I = getNumArgs(); I > 0; --I) { + const Expr *Arg = getArg(I-1); + if (!Arg->isDefaultArgument()) { + SourceLocation NewEnd = Arg->getLocEnd(); + if (NewEnd.isValid()) { + End = NewEnd; + break; + } + } + } + + return End; +} + +SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const { + OverloadedOperatorKind Kind = getOperator(); + if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) { + if (getNumArgs() == 1) + // Prefix operator + return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd()); + else + // Postfix operator + return SourceRange(getArg(0)->getLocStart(), getOperatorLoc()); + } else if (Kind == OO_Arrow) { + return getArg(0)->getSourceRange(); + } else if (Kind == OO_Call) { + return SourceRange(getArg(0)->getLocStart(), getRParenLoc()); + } else if (Kind == OO_Subscript) { + return SourceRange(getArg(0)->getLocStart(), getRParenLoc()); + } else if (getNumArgs() == 1) { + return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd()); + } else if (getNumArgs() == 2) { + return SourceRange(getArg(0)->getLocStart(), getArg(1)->getLocEnd()); + } else { + return getOperatorLoc(); + } +} + +Expr *CXXMemberCallExpr::getImplicitObjectArgument() const { + const Expr *Callee = getCallee()->IgnoreParens(); + if (const MemberExpr *MemExpr = dyn_cast<MemberExpr>(Callee)) + return MemExpr->getBase(); + if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Callee)) + if (BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI) + return BO->getLHS(); + + // FIXME: Will eventually need to cope with member pointers. 
+ return 0; +} + +CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const { + if (const MemberExpr *MemExpr = + dyn_cast<MemberExpr>(getCallee()->IgnoreParens())) + return cast<CXXMethodDecl>(MemExpr->getMemberDecl()); + + // FIXME: Will eventually need to cope with member pointers. + return 0; +} + + +CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() const { + Expr* ThisArg = getImplicitObjectArgument(); + if (!ThisArg) + return 0; + + if (ThisArg->getType()->isAnyPointerType()) + return ThisArg->getType()->getPointeeType()->getAsCXXRecordDecl(); + + return ThisArg->getType()->getAsCXXRecordDecl(); +} + + +//===----------------------------------------------------------------------===// +// Named casts +//===----------------------------------------------------------------------===// + +/// getCastName - Get the name of the C++ cast being used, e.g., +/// "static_cast", "dynamic_cast", "reinterpret_cast", or +/// "const_cast". The returned pointer must not be freed. +const char *CXXNamedCastExpr::getCastName() const { + switch (getStmtClass()) { + case CXXStaticCastExprClass: return "static_cast"; + case CXXDynamicCastExprClass: return "dynamic_cast"; + case CXXReinterpretCastExprClass: return "reinterpret_cast"; + case CXXConstCastExprClass: return "const_cast"; + default: return "<invalid cast>"; + } +} + +CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T, + ExprValueKind VK, + CastKind K, Expr *Op, + const CXXCastPath *BasePath, + TypeSourceInfo *WrittenTy, + SourceLocation L, + SourceLocation RParenLoc, + SourceRange AngleBrackets) { + unsigned PathSize = (BasePath ? 
BasePath->size() : 0); + void *Buffer = C.Allocate(sizeof(CXXStaticCastExpr) + + PathSize * sizeof(CXXBaseSpecifier*)); + CXXStaticCastExpr *E = + new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, + RParenLoc, AngleBrackets); + if (PathSize) E->setCastPath(*BasePath); + return E; +} + +CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C, + unsigned PathSize) { + void *Buffer = + C.Allocate(sizeof(CXXStaticCastExpr) + PathSize * sizeof(CXXBaseSpecifier*)); + return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize); +} + +CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T, + ExprValueKind VK, + CastKind K, Expr *Op, + const CXXCastPath *BasePath, + TypeSourceInfo *WrittenTy, + SourceLocation L, + SourceLocation RParenLoc, + SourceRange AngleBrackets) { + unsigned PathSize = (BasePath ? BasePath->size() : 0); + void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr) + + PathSize * sizeof(CXXBaseSpecifier*)); + CXXDynamicCastExpr *E = + new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, + RParenLoc, AngleBrackets); + if (PathSize) E->setCastPath(*BasePath); + return E; +} + +CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C, + unsigned PathSize) { + void *Buffer = + C.Allocate(sizeof(CXXDynamicCastExpr) + PathSize * sizeof(CXXBaseSpecifier*)); + return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize); +} + +/// isAlwaysNull - Return whether the result of the dynamic_cast is proven +/// to always be null. 
For example: +/// +/// struct A { }; +/// struct B final : A { }; +/// struct C { }; +/// +/// C *f(B* b) { return dynamic_cast<C*>(b); } +bool CXXDynamicCastExpr::isAlwaysNull() const +{ + QualType SrcType = getSubExpr()->getType(); + QualType DestType = getType(); + + if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) { + SrcType = SrcPTy->getPointeeType(); + DestType = DestType->castAs<PointerType>()->getPointeeType(); + } + + if (DestType->isVoidType()) + return false; + + const CXXRecordDecl *SrcRD = + cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl()); + + if (!SrcRD->hasAttr<FinalAttr>()) + return false; + + const CXXRecordDecl *DestRD = + cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl()); + + return !DestRD->isDerivedFrom(SrcRD); +} + +CXXReinterpretCastExpr * +CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T, + ExprValueKind VK, CastKind K, Expr *Op, + const CXXCastPath *BasePath, + TypeSourceInfo *WrittenTy, SourceLocation L, + SourceLocation RParenLoc, + SourceRange AngleBrackets) { + unsigned PathSize = (BasePath ? 
BasePath->size() : 0); + void *Buffer = + C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*)); + CXXReinterpretCastExpr *E = + new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, + RParenLoc, AngleBrackets); + if (PathSize) E->setCastPath(*BasePath); + return E; +} + +CXXReinterpretCastExpr * +CXXReinterpretCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) { + void *Buffer = C.Allocate(sizeof(CXXReinterpretCastExpr) + + PathSize * sizeof(CXXBaseSpecifier*)); + return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize); +} + +CXXConstCastExpr *CXXConstCastExpr::Create(const ASTContext &C, QualType T, + ExprValueKind VK, Expr *Op, + TypeSourceInfo *WrittenTy, + SourceLocation L, + SourceLocation RParenLoc, + SourceRange AngleBrackets) { + return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc, AngleBrackets); +} + +CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(const ASTContext &C) { + return new (C) CXXConstCastExpr(EmptyShell()); +} + +CXXFunctionalCastExpr * +CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK, + TypeSourceInfo *Written, CastKind K, Expr *Op, + const CXXCastPath *BasePath, + SourceLocation L, SourceLocation R) { + unsigned PathSize = (BasePath ? 
BasePath->size() : 0); + void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr) + + PathSize * sizeof(CXXBaseSpecifier*)); + CXXFunctionalCastExpr *E = + new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R); + if (PathSize) E->setCastPath(*BasePath); + return E; +} + +CXXFunctionalCastExpr * +CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) { + void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr) + + PathSize * sizeof(CXXBaseSpecifier*)); + return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize); +} + +SourceLocation CXXFunctionalCastExpr::getLocStart() const { + return getTypeInfoAsWritten()->getTypeLoc().getLocStart(); +} + +SourceLocation CXXFunctionalCastExpr::getLocEnd() const { + return RParenLoc.isValid() ? RParenLoc : getSubExpr()->getLocEnd(); +} + +UserDefinedLiteral::LiteralOperatorKind +UserDefinedLiteral::getLiteralOperatorKind() const { + if (getNumArgs() == 0) + return LOK_Template; + if (getNumArgs() == 2) + return LOK_String; + + assert(getNumArgs() == 1 && "unexpected #args in literal operator call"); + QualType ParamTy = + cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType(); + if (ParamTy->isPointerType()) + return LOK_Raw; + if (ParamTy->isAnyCharacterType()) + return LOK_Character; + if (ParamTy->isIntegerType()) + return LOK_Integer; + if (ParamTy->isFloatingType()) + return LOK_Floating; + + llvm_unreachable("unknown kind of literal operator"); +} + +Expr *UserDefinedLiteral::getCookedLiteral() { +#ifndef NDEBUG + LiteralOperatorKind LOK = getLiteralOperatorKind(); + assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal"); +#endif + return getArg(0); +} + +const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const { + return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier(); +} + +CXXDefaultArgExpr * +CXXDefaultArgExpr::Create(const ASTContext &C, SourceLocation Loc, + ParmVarDecl *Param, Expr *SubExpr) { + void *Mem = 
C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *)); + return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param, + SubExpr); +} + +CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &C, SourceLocation Loc, + FieldDecl *Field, QualType T) + : Expr(CXXDefaultInitExprClass, T.getNonLValueExprType(C), + T->isLValueReferenceType() ? VK_LValue : T->isRValueReferenceType() + ? VK_XValue + : VK_RValue, + /*FIXME*/ OK_Ordinary, false, false, false, false), + Field(Field), Loc(Loc) { + assert(Field->hasInClassInitializer()); +} + +CXXTemporary *CXXTemporary::Create(const ASTContext &C, + const CXXDestructorDecl *Destructor) { + return new (C) CXXTemporary(Destructor); +} + +CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(const ASTContext &C, + CXXTemporary *Temp, + Expr* SubExpr) { + assert((SubExpr->getType()->isRecordType() || + SubExpr->getType()->isArrayType()) && + "Expression bound to a temporary must have record or array type!"); + + return new (C) CXXBindTemporaryExpr(Temp, SubExpr); +} + +CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(const ASTContext &C, + CXXConstructorDecl *Cons, + TypeSourceInfo *Type, + ArrayRef<Expr*> Args, + SourceRange ParenOrBraceRange, + bool HadMultipleCandidates, + bool ListInitialization, + bool ZeroInitialization) + : CXXConstructExpr(C, CXXTemporaryObjectExprClass, + Type->getType().getNonReferenceType(), + Type->getTypeLoc().getBeginLoc(), + Cons, false, Args, + HadMultipleCandidates, + ListInitialization, ZeroInitialization, + CXXConstructExpr::CK_Complete, ParenOrBraceRange), + Type(Type) { +} + +SourceLocation CXXTemporaryObjectExpr::getLocStart() const { + return Type->getTypeLoc().getBeginLoc(); +} + +SourceLocation CXXTemporaryObjectExpr::getLocEnd() const { + SourceLocation Loc = getParenOrBraceRange().getEnd(); + if (Loc.isInvalid() && getNumArgs()) + Loc = getArg(getNumArgs()-1)->getLocEnd(); + return Loc; +} + +CXXConstructExpr *CXXConstructExpr::Create(const ASTContext &C, QualType T, + 
SourceLocation Loc, + CXXConstructorDecl *D, bool Elidable, + ArrayRef<Expr*> Args, + bool HadMultipleCandidates, + bool ListInitialization, + bool ZeroInitialization, + ConstructionKind ConstructKind, + SourceRange ParenOrBraceRange) { + return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D, + Elidable, Args, + HadMultipleCandidates, ListInitialization, + ZeroInitialization, ConstructKind, + ParenOrBraceRange); +} + +CXXConstructExpr::CXXConstructExpr(const ASTContext &C, StmtClass SC, + QualType T, SourceLocation Loc, + CXXConstructorDecl *D, bool elidable, + ArrayRef<Expr*> args, + bool HadMultipleCandidates, + bool ListInitialization, + bool ZeroInitialization, + ConstructionKind ConstructKind, + SourceRange ParenOrBraceRange) + : Expr(SC, T, VK_RValue, OK_Ordinary, + T->isDependentType(), T->isDependentType(), + T->isInstantiationDependentType(), + T->containsUnexpandedParameterPack()), + Constructor(D), Loc(Loc), ParenOrBraceRange(ParenOrBraceRange), + NumArgs(args.size()), + Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates), + ListInitialization(ListInitialization), + ZeroInitialization(ZeroInitialization), + ConstructKind(ConstructKind), Args(0) +{ + if (NumArgs) { + Args = new (C) Stmt*[args.size()]; + + for (unsigned i = 0; i != args.size(); ++i) { + assert(args[i] && "NULL argument in CXXConstructExpr"); + + if (args[i]->isValueDependent()) + ExprBits.ValueDependent = true; + if (args[i]->isInstantiationDependent()) + ExprBits.InstantiationDependent = true; + if (args[i]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + Args[i] = args[i]; + } + } +} + +LambdaExpr::Capture::Capture(SourceLocation Loc, bool Implicit, + LambdaCaptureKind Kind, VarDecl *Var, + SourceLocation EllipsisLoc) + : DeclAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc) +{ + unsigned Bits = 0; + if (Implicit) + Bits |= Capture_Implicit; + + switch (Kind) { + case LCK_This: + assert(Var == 0 && "'this' capture 
cannot have a variable!"); + break; + + case LCK_ByCopy: + Bits |= Capture_ByCopy; + // Fall through + case LCK_ByRef: + assert(Var && "capture must have a variable!"); + break; + } + DeclAndBits.setInt(Bits); +} + +LambdaCaptureKind LambdaExpr::Capture::getCaptureKind() const { + Decl *D = DeclAndBits.getPointer(); + if (!D) + return LCK_This; + + return (DeclAndBits.getInt() & Capture_ByCopy) ? LCK_ByCopy : LCK_ByRef; +} + +LambdaExpr::LambdaExpr(QualType T, + SourceRange IntroducerRange, + LambdaCaptureDefault CaptureDefault, + SourceLocation CaptureDefaultLoc, + ArrayRef<Capture> Captures, + bool ExplicitParams, + bool ExplicitResultType, + ArrayRef<Expr *> CaptureInits, + ArrayRef<VarDecl *> ArrayIndexVars, + ArrayRef<unsigned> ArrayIndexStarts, + SourceLocation ClosingBrace, + bool ContainsUnexpandedParameterPack) + : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, + T->isDependentType(), T->isDependentType(), T->isDependentType(), + ContainsUnexpandedParameterPack), + IntroducerRange(IntroducerRange), + CaptureDefaultLoc(CaptureDefaultLoc), + NumCaptures(Captures.size()), + CaptureDefault(CaptureDefault), + ExplicitParams(ExplicitParams), + ExplicitResultType(ExplicitResultType), + ClosingBrace(ClosingBrace) +{ + assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments"); + CXXRecordDecl *Class = getLambdaClass(); + CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData(); + + // FIXME: Propagate "has unexpanded parameter pack" bit. + + // Copy captures. + const ASTContext &Context = Class->getASTContext(); + Data.NumCaptures = NumCaptures; + Data.NumExplicitCaptures = 0; + Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures); + Capture *ToCapture = Data.Captures; + for (unsigned I = 0, N = Captures.size(); I != N; ++I) { + if (Captures[I].isExplicit()) + ++Data.NumExplicitCaptures; + + *ToCapture++ = Captures[I]; + } + + // Copy initialization expressions for the non-static data members. 
+ Stmt **Stored = getStoredStmts(); + for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I) + *Stored++ = CaptureInits[I]; + + // Copy the body of the lambda. + *Stored++ = getCallOperator()->getBody(); + + // Copy the array index variables, if any. + HasArrayIndexVars = !ArrayIndexVars.empty(); + if (HasArrayIndexVars) { + assert(ArrayIndexStarts.size() == NumCaptures); + memcpy(getArrayIndexVars(), ArrayIndexVars.data(), + sizeof(VarDecl *) * ArrayIndexVars.size()); + memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(), + sizeof(unsigned) * Captures.size()); + getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size(); + } +} + +LambdaExpr *LambdaExpr::Create(const ASTContext &Context, + CXXRecordDecl *Class, + SourceRange IntroducerRange, + LambdaCaptureDefault CaptureDefault, + SourceLocation CaptureDefaultLoc, + ArrayRef<Capture> Captures, + bool ExplicitParams, + bool ExplicitResultType, + ArrayRef<Expr *> CaptureInits, + ArrayRef<VarDecl *> ArrayIndexVars, + ArrayRef<unsigned> ArrayIndexStarts, + SourceLocation ClosingBrace, + bool ContainsUnexpandedParameterPack) { + // Determine the type of the expression (i.e., the type of the + // function object we're creating). + QualType T = Context.getTypeDeclType(Class); + + unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1); + if (!ArrayIndexVars.empty()) { + Size += sizeof(unsigned) * (Captures.size() + 1); + // Realign for following VarDecl array. 
Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>()); + Size += sizeof(VarDecl *) * ArrayIndexVars.size(); + } + void *Mem = Context.Allocate(Size); + return new (Mem) LambdaExpr(T, IntroducerRange, + CaptureDefault, CaptureDefaultLoc, Captures, + ExplicitParams, ExplicitResultType, + CaptureInits, ArrayIndexVars, ArrayIndexStarts, + ClosingBrace, ContainsUnexpandedParameterPack); +} + +LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C, + unsigned NumCaptures, + unsigned NumArrayIndexVars) { + unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1); + if (NumArrayIndexVars) + Size += sizeof(VarDecl *) * NumArrayIndexVars + + sizeof(unsigned) * (NumCaptures + 1); + void *Mem = C.Allocate(Size); + return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0); +} + +LambdaExpr::capture_iterator LambdaExpr::capture_begin() const { + return getLambdaClass()->getLambdaData().Captures; +} + +LambdaExpr::capture_iterator LambdaExpr::capture_end() const { + return capture_begin() + NumCaptures; +} + +LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const { + return capture_begin(); +} + +LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const { + struct CXXRecordDecl::LambdaDefinitionData &Data + = getLambdaClass()->getLambdaData(); + return Data.Captures + Data.NumExplicitCaptures; +} + +LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const { + return explicit_capture_end(); +} + +LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const { + return capture_end(); +} + +ArrayRef<VarDecl *> +LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const { + assert(HasArrayIndexVars && "No array index-var data?"); + + unsigned Index = Iter - capture_init_begin(); + assert(Index < getLambdaClass()->getLambdaData().NumCaptures && + "Capture index out-of-range"); + VarDecl **IndexVars = getArrayIndexVars(); + unsigned *IndexStarts = getArrayIndexStarts(); 
+ return ArrayRef<VarDecl *>(IndexVars + IndexStarts[Index], + IndexVars + IndexStarts[Index + 1]); +} + +CXXRecordDecl *LambdaExpr::getLambdaClass() const { + return getType()->getAsCXXRecordDecl(); +} + +CXXMethodDecl *LambdaExpr::getCallOperator() const { + CXXRecordDecl *Record = getLambdaClass(); + return Record->getLambdaCallOperator(); +} + +TemplateParameterList *LambdaExpr::getTemplateParameterList() const { + CXXRecordDecl *Record = getLambdaClass(); + return Record->getGenericLambdaTemplateParameterList(); + +} + +CompoundStmt *LambdaExpr::getBody() const { + if (!getStoredStmts()[NumCaptures]) + getStoredStmts()[NumCaptures] = getCallOperator()->getBody(); + + return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]); +} + +bool LambdaExpr::isMutable() const { + return !getCallOperator()->isConst(); +} + +ExprWithCleanups::ExprWithCleanups(Expr *subexpr, + ArrayRef<CleanupObject> objects) + : Expr(ExprWithCleanupsClass, subexpr->getType(), + subexpr->getValueKind(), subexpr->getObjectKind(), + subexpr->isTypeDependent(), subexpr->isValueDependent(), + subexpr->isInstantiationDependent(), + subexpr->containsUnexpandedParameterPack()), + SubExpr(subexpr) { + ExprWithCleanupsBits.NumObjects = objects.size(); + for (unsigned i = 0, e = objects.size(); i != e; ++i) + getObjectsBuffer()[i] = objects[i]; +} + +ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr, + ArrayRef<CleanupObject> objects) { + size_t size = sizeof(ExprWithCleanups) + + objects.size() * sizeof(CleanupObject); + void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>()); + return new (buffer) ExprWithCleanups(subexpr, objects); +} + +ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects) + : Expr(ExprWithCleanupsClass, empty) { + ExprWithCleanupsBits.NumObjects = numObjects; +} + +ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, + EmptyShell empty, + unsigned numObjects) { + size_t size = 
sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject); + void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>()); + return new (buffer) ExprWithCleanups(empty, numObjects); +} + +CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type, + SourceLocation LParenLoc, + ArrayRef<Expr*> Args, + SourceLocation RParenLoc) + : Expr(CXXUnresolvedConstructExprClass, + Type->getType().getNonReferenceType(), + (Type->getType()->isLValueReferenceType() ? VK_LValue + :Type->getType()->isRValueReferenceType()? VK_XValue + :VK_RValue), + OK_Ordinary, + Type->getType()->isDependentType(), true, true, + Type->getType()->containsUnexpandedParameterPack()), + Type(Type), + LParenLoc(LParenLoc), + RParenLoc(RParenLoc), + NumArgs(Args.size()) { + Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1); + for (unsigned I = 0; I != Args.size(); ++I) { + if (Args[I]->containsUnexpandedParameterPack()) + ExprBits.ContainsUnexpandedParameterPack = true; + + StoredArgs[I] = Args[I]; + } +} + +CXXUnresolvedConstructExpr * +CXXUnresolvedConstructExpr::Create(const ASTContext &C, + TypeSourceInfo *Type, + SourceLocation LParenLoc, + ArrayRef<Expr*> Args, + SourceLocation RParenLoc) { + void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) + + sizeof(Expr *) * Args.size()); + return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc); +} + +CXXUnresolvedConstructExpr * +CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) { + Stmt::EmptyShell Empty; + void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) + + sizeof(Expr *) * NumArgs); + return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs); +} + +SourceLocation CXXUnresolvedConstructExpr::getLocStart() const { + return Type->getTypeLoc().getBeginLoc(); +} + +CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C, + Expr *Base, QualType BaseType, + bool IsArrow, + SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, 
+ SourceLocation TemplateKWLoc, + NamedDecl *FirstQualifierFoundInScope, + DeclarationNameInfo MemberNameInfo, + const TemplateArgumentListInfo *TemplateArgs) + : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, + VK_LValue, OK_Ordinary, true, true, true, + ((Base && Base->containsUnexpandedParameterPack()) || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier() + ->containsUnexpandedParameterPack()) || + MemberNameInfo.containsUnexpandedParameterPack())), + Base(Base), BaseType(BaseType), IsArrow(IsArrow), + HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid()), + OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc), + FirstQualifierFoundInScope(FirstQualifierFoundInScope), + MemberNameInfo(MemberNameInfo) { + if (TemplateArgs) { + bool Dependent = true; + bool InstantiationDependent = true; + bool ContainsUnexpandedParameterPack = false; + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs, + Dependent, + InstantiationDependent, + ContainsUnexpandedParameterPack); + if (ContainsUnexpandedParameterPack) + ExprBits.ContainsUnexpandedParameterPack = true; + } else if (TemplateKWLoc.isValid()) { + getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc); + } +} + +CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C, + Expr *Base, QualType BaseType, + bool IsArrow, + SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, + NamedDecl *FirstQualifierFoundInScope, + DeclarationNameInfo MemberNameInfo) + : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, + VK_LValue, OK_Ordinary, true, true, true, + ((Base && Base->containsUnexpandedParameterPack()) || + (QualifierLoc && + QualifierLoc.getNestedNameSpecifier()-> + containsUnexpandedParameterPack()) || + MemberNameInfo.containsUnexpandedParameterPack())), + Base(Base), BaseType(BaseType), IsArrow(IsArrow), + HasTemplateKWAndArgsInfo(false), + OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc), + 
FirstQualifierFoundInScope(FirstQualifierFoundInScope), + MemberNameInfo(MemberNameInfo) { } + +CXXDependentScopeMemberExpr * +CXXDependentScopeMemberExpr::Create(const ASTContext &C, + Expr *Base, QualType BaseType, bool IsArrow, + SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + NamedDecl *FirstQualifierFoundInScope, + DeclarationNameInfo MemberNameInfo, + const TemplateArgumentListInfo *TemplateArgs) { + if (!TemplateArgs && !TemplateKWLoc.isValid()) + return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType, + IsArrow, OperatorLoc, + QualifierLoc, + FirstQualifierFoundInScope, + MemberNameInfo); + + unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0; + std::size_t size = sizeof(CXXDependentScopeMemberExpr) + + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + + void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>()); + return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType, + IsArrow, OperatorLoc, + QualifierLoc, + TemplateKWLoc, + FirstQualifierFoundInScope, + MemberNameInfo, TemplateArgs); +} + +CXXDependentScopeMemberExpr * +CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C, + bool HasTemplateKWAndArgsInfo, + unsigned NumTemplateArgs) { + if (!HasTemplateKWAndArgsInfo) + return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(), + 0, SourceLocation(), + NestedNameSpecifierLoc(), 0, + DeclarationNameInfo()); + + std::size_t size = sizeof(CXXDependentScopeMemberExpr) + + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>()); + CXXDependentScopeMemberExpr *E + = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(), + 0, SourceLocation(), + NestedNameSpecifierLoc(), + SourceLocation(), 0, + DeclarationNameInfo(), 0); + E->HasTemplateKWAndArgsInfo = true; + return E; +} + +bool CXXDependentScopeMemberExpr::isImplicitAccess() const { + if (Base == 0) + return true; + + 
return cast<Expr>(Base)->isImplicitCXXThis(); +} + +static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin, + UnresolvedSetIterator end) { + do { + NamedDecl *decl = *begin; + if (isa<UnresolvedUsingValueDecl>(decl)) + return false; + if (isa<UsingShadowDecl>(decl)) + decl = cast<UsingShadowDecl>(decl)->getUnderlyingDecl(); + + // Unresolved member expressions should only contain methods and + // method templates. + assert(isa<CXXMethodDecl>(decl) || isa<FunctionTemplateDecl>(decl)); + + if (isa<FunctionTemplateDecl>(decl)) + decl = cast<FunctionTemplateDecl>(decl)->getTemplatedDecl(); + if (cast<CXXMethodDecl>(decl)->isStatic()) + return false; + } while (++begin != end); + + return true; +} + +UnresolvedMemberExpr::UnresolvedMemberExpr(const ASTContext &C, + bool HasUnresolvedUsing, + Expr *Base, QualType BaseType, + bool IsArrow, + SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &MemberNameInfo, + const TemplateArgumentListInfo *TemplateArgs, + UnresolvedSetIterator Begin, + UnresolvedSetIterator End) + : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc, + MemberNameInfo, TemplateArgs, Begin, End, + // Dependent + ((Base && Base->isTypeDependent()) || + BaseType->isDependentType()), + ((Base && Base->isInstantiationDependent()) || + BaseType->isInstantiationDependentType()), + // Contains unexpanded parameter pack + ((Base && Base->containsUnexpandedParameterPack()) || + BaseType->containsUnexpandedParameterPack())), + IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing), + Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) { + + // Check whether all of the members are non-static member functions, + // and if so, mark give this bound-member type instead of overload type. 
+ if (hasOnlyNonStaticMemberFunctions(Begin, End)) + setType(C.BoundMemberTy); +} + +bool UnresolvedMemberExpr::isImplicitAccess() const { + if (Base == 0) + return true; + + return cast<Expr>(Base)->isImplicitCXXThis(); +} + +UnresolvedMemberExpr * +UnresolvedMemberExpr::Create(const ASTContext &C, bool HasUnresolvedUsing, + Expr *Base, QualType BaseType, bool IsArrow, + SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, + const DeclarationNameInfo &MemberNameInfo, + const TemplateArgumentListInfo *TemplateArgs, + UnresolvedSetIterator Begin, + UnresolvedSetIterator End) { + std::size_t size = sizeof(UnresolvedMemberExpr); + if (TemplateArgs) + size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size()); + else if (TemplateKWLoc.isValid()) + size += ASTTemplateKWAndArgsInfo::sizeFor(0); + + void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>()); + return new (Mem) UnresolvedMemberExpr(C, + HasUnresolvedUsing, Base, BaseType, + IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc, + MemberNameInfo, TemplateArgs, Begin, End); +} + +UnresolvedMemberExpr * +UnresolvedMemberExpr::CreateEmpty(const ASTContext &C, + bool HasTemplateKWAndArgsInfo, + unsigned NumTemplateArgs) { + std::size_t size = sizeof(UnresolvedMemberExpr); + if (HasTemplateKWAndArgsInfo) + size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs); + + void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>()); + UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell()); + E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo; + return E; +} + +CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const { + // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this. + + // If there was a nested name specifier, it names the naming class. + // It can't be dependent: after all, we were actually able to do the + // lookup. 
+ CXXRecordDecl *Record = 0; + if (getQualifier()) { + const Type *T = getQualifier()->getAsType(); + assert(T && "qualifier in member expression does not name type"); + Record = T->getAsCXXRecordDecl(); + assert(Record && "qualifier in member expression does not name record"); + } + // Otherwise the naming class must have been the base class. + else { + QualType BaseType = getBaseType().getNonReferenceType(); + if (isArrow()) { + const PointerType *PT = BaseType->getAs<PointerType>(); + assert(PT && "base of arrow member access is not pointer"); + BaseType = PT->getPointeeType(); + } + + Record = BaseType->getAsCXXRecordDecl(); + assert(Record && "base of member expression does not name record"); + } + + return Record; +} + +SubstNonTypeTemplateParmPackExpr:: +SubstNonTypeTemplateParmPackExpr(QualType T, + NonTypeTemplateParmDecl *Param, + SourceLocation NameLoc, + const TemplateArgument &ArgPack) + : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary, + true, true, true, true), + Param(Param), Arguments(ArgPack.pack_begin()), + NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { } + +TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const { + return TemplateArgument(Arguments, NumArguments); +} + +FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack, + SourceLocation NameLoc, + unsigned NumParams, + Decl * const *Params) + : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, + true, true, true, true), + ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) { + if (Params) + std::uninitialized_copy(Params, Params + NumParams, + reinterpret_cast<Decl**>(this+1)); +} + +FunctionParmPackExpr * +FunctionParmPackExpr::Create(const ASTContext &Context, QualType T, + ParmVarDecl *ParamPack, SourceLocation NameLoc, + ArrayRef<Decl *> Params) { + return new (Context.Allocate(sizeof(FunctionParmPackExpr) + + sizeof(ParmVarDecl*) * Params.size())) + FunctionParmPackExpr(T, ParamPack, NameLoc, 
Params.size(), Params.data()); +} + +FunctionParmPackExpr * +FunctionParmPackExpr::CreateEmpty(const ASTContext &Context, + unsigned NumParams) { + return new (Context.Allocate(sizeof(FunctionParmPackExpr) + + sizeof(ParmVarDecl*) * NumParams)) + FunctionParmPackExpr(QualType(), 0, SourceLocation(), 0, 0); +} + +TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind, + ArrayRef<TypeSourceInfo *> Args, + SourceLocation RParenLoc, + bool Value) + : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary, + /*TypeDependent=*/false, + /*ValueDependent=*/false, + /*InstantiationDependent=*/false, + /*ContainsUnexpandedParameterPack=*/false), + Loc(Loc), RParenLoc(RParenLoc) +{ + TypeTraitExprBits.Kind = Kind; + TypeTraitExprBits.Value = Value; + TypeTraitExprBits.NumArgs = Args.size(); + + TypeSourceInfo **ToArgs = getTypeSourceInfos(); + + for (unsigned I = 0, N = Args.size(); I != N; ++I) { + if (Args[I]->getType()->isDependentType()) + setValueDependent(true); + if (Args[I]->getType()->isInstantiationDependentType()) + setInstantiationDependent(true); + if (Args[I]->getType()->containsUnexpandedParameterPack()) + setContainsUnexpandedParameterPack(true); + + ToArgs[I] = Args[I]; + } +} + +TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T, + SourceLocation Loc, + TypeTrait Kind, + ArrayRef<TypeSourceInfo *> Args, + SourceLocation RParenLoc, + bool Value) { + unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size(); + void *Mem = C.Allocate(Size); + return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value); +} + +TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C, + unsigned NumArgs) { + unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs; + void *Mem = C.Allocate(Size); + return new (Mem) TypeTraitExpr(EmptyShell()); +} + +void ArrayTypeTraitExpr::anchor() { } diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp 
b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp new file mode 100644 index 000000000000..54f77efef0f9 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp @@ -0,0 +1,671 @@ +//===--- ExprClassification.cpp - Expression AST Node Implementation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements Expr::classify. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Expr.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "llvm/Support/ErrorHandling.h" +using namespace clang; + +typedef Expr::Classification Cl; + +static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E); +static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D); +static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T); +static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E); +static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E); +static Cl::Kinds ClassifyConditional(ASTContext &Ctx, + const Expr *trueExpr, + const Expr *falseExpr); +static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E, + Cl::Kinds Kind, SourceLocation &Loc); + +Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const { + assert(!TR->isReferenceType() && "Expressions can't have reference type."); + + Cl::Kinds kind = ClassifyInternal(Ctx, this); + // C99 6.3.2.1: An lvalue is an expression with an object type or an + // incomplete type other than void. + if (!Ctx.getLangOpts().CPlusPlus) { + // Thus, no functions. 
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy) + kind = Cl::CL_Function; + // No void either, but qualified void is OK because it is "other than void". + // Void "lvalues" are classified as addressable void values, which are void + // expressions whose address can be taken. + else if (TR->isVoidType() && !TR.hasQualifiers()) + kind = (kind == Cl::CL_LValue ? Cl::CL_AddressableVoid : Cl::CL_Void); + } + + // Enable this assertion for testing. + switch (kind) { + case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break; + case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break; + case Cl::CL_Function: + case Cl::CL_Void: + case Cl::CL_AddressableVoid: + case Cl::CL_DuplicateVectorComponents: + case Cl::CL_MemberFunction: + case Cl::CL_SubObjCPropertySetting: + case Cl::CL_ClassTemporary: + case Cl::CL_ArrayTemporary: + case Cl::CL_ObjCMessageRValue: + case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break; + } + + Cl::ModifiableType modifiable = Cl::CM_Untested; + if (Loc) + modifiable = IsModifiable(Ctx, this, kind, *Loc); + return Classification(kind, modifiable); +} + +/// Classify an expression which creates a temporary, based on its type. +static Cl::Kinds ClassifyTemporary(QualType T) { + if (T->isRecordType()) + return Cl::CL_ClassTemporary; + if (T->isArrayType()) + return Cl::CL_ArrayTemporary; + + // No special classification: these don't behave differently from normal + // prvalues. + return Cl::CL_PRValue; +} + +static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang, + const Expr *E, + ExprValueKind Kind) { + switch (Kind) { + case VK_RValue: + return Lang.CPlusPlus ? ClassifyTemporary(E->getType()) : Cl::CL_PRValue; + case VK_LValue: + return Cl::CL_LValue; + case VK_XValue: + return Cl::CL_XValue; + } + llvm_unreachable("Invalid value category of implicit cast."); +} + +static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { + // This function takes the first stab at classifying expressions. 
+ const LangOptions &Lang = Ctx.getLangOpts(); + + switch (E->getStmtClass()) { + case Stmt::NoStmtClass: +#define ABSTRACT_STMT(Kind) +#define STMT(Kind, Base) case Expr::Kind##Class: +#define EXPR(Kind, Base) +#include "clang/AST/StmtNodes.inc" + llvm_unreachable("cannot classify a statement"); + + // First come the expressions that are always lvalues, unconditionally. + case Expr::ObjCIsaExprClass: + // C++ [expr.prim.general]p1: A string literal is an lvalue. + case Expr::StringLiteralClass: + // @encode is equivalent to its string + case Expr::ObjCEncodeExprClass: + // __func__ and friends are too. + case Expr::PredefinedExprClass: + // Property references are lvalues + case Expr::ObjCSubscriptRefExprClass: + case Expr::ObjCPropertyRefExprClass: + // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of... + case Expr::CXXTypeidExprClass: + // Unresolved lookups get classified as lvalues. + // FIXME: Is this wise? Should they get their own kind? + case Expr::UnresolvedLookupExprClass: + case Expr::UnresolvedMemberExprClass: + case Expr::CXXDependentScopeMemberExprClass: + case Expr::DependentScopeDeclRefExprClass: + // ObjC instance variables are lvalues + // FIXME: ObjC++0x might have different rules + case Expr::ObjCIvarRefExprClass: + case Expr::FunctionParmPackExprClass: + case Expr::MSPropertyRefExprClass: + return Cl::CL_LValue; + + // C99 6.5.2.5p5 says that compound literals are lvalues. + // In C++, they're prvalue temporaries. + case Expr::CompoundLiteralExprClass: + return Ctx.getLangOpts().CPlusPlus ? ClassifyTemporary(E->getType()) + : Cl::CL_LValue; + + // Expressions that are prvalues. 
+ case Expr::CXXBoolLiteralExprClass: + case Expr::CXXPseudoDestructorExprClass: + case Expr::UnaryExprOrTypeTraitExprClass: + case Expr::CXXNewExprClass: + case Expr::CXXThisExprClass: + case Expr::CXXNullPtrLiteralExprClass: + case Expr::ImaginaryLiteralClass: + case Expr::GNUNullExprClass: + case Expr::OffsetOfExprClass: + case Expr::CXXThrowExprClass: + case Expr::ShuffleVectorExprClass: + case Expr::ConvertVectorExprClass: + case Expr::IntegerLiteralClass: + case Expr::CharacterLiteralClass: + case Expr::AddrLabelExprClass: + case Expr::CXXDeleteExprClass: + case Expr::ImplicitValueInitExprClass: + case Expr::BlockExprClass: + case Expr::FloatingLiteralClass: + case Expr::CXXNoexceptExprClass: + case Expr::CXXScalarValueInitExprClass: + case Expr::UnaryTypeTraitExprClass: + case Expr::BinaryTypeTraitExprClass: + case Expr::TypeTraitExprClass: + case Expr::ArrayTypeTraitExprClass: + case Expr::ExpressionTraitExprClass: + case Expr::ObjCSelectorExprClass: + case Expr::ObjCProtocolExprClass: + case Expr::ObjCStringLiteralClass: + case Expr::ObjCBoxedExprClass: + case Expr::ObjCArrayLiteralClass: + case Expr::ObjCDictionaryLiteralClass: + case Expr::ObjCBoolLiteralExprClass: + case Expr::ParenListExprClass: + case Expr::SizeOfPackExprClass: + case Expr::SubstNonTypeTemplateParmPackExprClass: + case Expr::AsTypeExprClass: + case Expr::ObjCIndirectCopyRestoreExprClass: + case Expr::AtomicExprClass: + return Cl::CL_PRValue; + + // Next come the complicated cases. + case Expr::SubstNonTypeTemplateParmExprClass: + return ClassifyInternal(Ctx, + cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); + + // C++ [expr.sub]p1: The result is an lvalue of type "T". + // However, subscripting vector types is more like member access. 
+ case Expr::ArraySubscriptExprClass: + if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType()) + return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase()); + return Cl::CL_LValue; + + // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a + // function or variable and a prvalue otherwise. + case Expr::DeclRefExprClass: + if (E->getType() == Ctx.UnknownAnyTy) + return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl()) + ? Cl::CL_PRValue : Cl::CL_LValue; + return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl()); + + // Member access is complex. + case Expr::MemberExprClass: + return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E)); + + case Expr::UnaryOperatorClass: + switch (cast<UnaryOperator>(E)->getOpcode()) { + // C++ [expr.unary.op]p1: The unary * operator performs indirection: + // [...] the result is an lvalue referring to the object or function + // to which the expression points. + case UO_Deref: + return Cl::CL_LValue; + + // GNU extensions, simply look through them. + case UO_Extension: + return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr()); + + // Treat _Real and _Imag basically as if they were member + // expressions: l-value only if the operand is a true l-value. + case UO_Real: + case UO_Imag: { + const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); + Cl::Kinds K = ClassifyInternal(Ctx, Op); + if (K != Cl::CL_LValue) return K; + + if (isa<ObjCPropertyRefExpr>(Op)) + return Cl::CL_SubObjCPropertySetting; + return Cl::CL_LValue; + } + + // C++ [expr.pre.incr]p1: The result is the updated operand; it is an + // lvalue, [...] + // Not so in C. + case UO_PreInc: + case UO_PreDec: + return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue; + + default: + return Cl::CL_PRValue; + } + + case Expr::OpaqueValueExprClass: + return ClassifyExprValueKind(Lang, E, E->getValueKind()); + + // Pseudo-object expressions can produce l-values with reference magic. 
+ case Expr::PseudoObjectExprClass: + return ClassifyExprValueKind(Lang, E, + cast<PseudoObjectExpr>(E)->getValueKind()); + + // Implicit casts are lvalues if they're lvalue casts. Other than that, we + // only specifically record class temporaries. + case Expr::ImplicitCastExprClass: + return ClassifyExprValueKind(Lang, E, E->getValueKind()); + + // C++ [expr.prim.general]p4: The presence of parentheses does not affect + // whether the expression is an lvalue. + case Expr::ParenExprClass: + return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr()); + + // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator, + // or a void expression if its result expression is, respectively, an + // lvalue, a function designator, or a void expression. + case Expr::GenericSelectionExprClass: + if (cast<GenericSelectionExpr>(E)->isResultDependent()) + return Cl::CL_PRValue; + return ClassifyInternal(Ctx,cast<GenericSelectionExpr>(E)->getResultExpr()); + + case Expr::BinaryOperatorClass: + case Expr::CompoundAssignOperatorClass: + // C doesn't have any binary expressions that are lvalues. + if (Lang.CPlusPlus) + return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E)); + return Cl::CL_PRValue; + + case Expr::CallExprClass: + case Expr::CXXOperatorCallExprClass: + case Expr::CXXMemberCallExprClass: + case Expr::UserDefinedLiteralClass: + case Expr::CUDAKernelCallExprClass: + return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType()); + + // __builtin_choose_expr is equivalent to the chosen expression. + case Expr::ChooseExprClass: + return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr()); + + // Extended vector element access is an lvalue unless there are duplicates + // in the shuffle expression. 
+ case Expr::ExtVectorElementExprClass: + if (cast<ExtVectorElementExpr>(E)->containsDuplicateElements()) + return Cl::CL_DuplicateVectorComponents; + if (cast<ExtVectorElementExpr>(E)->isArrow()) + return Cl::CL_LValue; + return ClassifyInternal(Ctx, cast<ExtVectorElementExpr>(E)->getBase()); + + // Simply look at the actual default argument. + case Expr::CXXDefaultArgExprClass: + return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr()); + + // Same idea for default initializers. + case Expr::CXXDefaultInitExprClass: + return ClassifyInternal(Ctx, cast<CXXDefaultInitExpr>(E)->getExpr()); + + // Same idea for temporary binding. + case Expr::CXXBindTemporaryExprClass: + return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr()); + + // And the cleanups guard. + case Expr::ExprWithCleanupsClass: + return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr()); + + // Casts depend completely on the target type. All casts work the same. + case Expr::CStyleCastExprClass: + case Expr::CXXFunctionalCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + case Expr::ObjCBridgedCastExprClass: + // Only in C++ can casts be interesting at all. + if (!Lang.CPlusPlus) return Cl::CL_PRValue; + return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten()); + + case Expr::CXXUnresolvedConstructExprClass: + return ClassifyUnnamed(Ctx, + cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten()); + + case Expr::BinaryConditionalOperatorClass: { + if (!Lang.CPlusPlus) return Cl::CL_PRValue; + const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E); + return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr()); + } + + case Expr::ConditionalOperatorClass: { + // Once again, only C++ is interesting. 
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue; + const ConditionalOperator *co = cast<ConditionalOperator>(E); + return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr()); + } + + // ObjC message sends are effectively function calls, if the target function + // is known. + case Expr::ObjCMessageExprClass: + if (const ObjCMethodDecl *Method = + cast<ObjCMessageExpr>(E)->getMethodDecl()) { + Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getResultType()); + return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind; + } + return Cl::CL_PRValue; + + // Some C++ expressions are always class temporaries. + case Expr::CXXConstructExprClass: + case Expr::CXXTemporaryObjectExprClass: + case Expr::LambdaExprClass: + case Expr::CXXStdInitializerListExprClass: + return Cl::CL_ClassTemporary; + + case Expr::VAArgExprClass: + return ClassifyUnnamed(Ctx, E->getType()); + + case Expr::DesignatedInitExprClass: + return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit()); + + case Expr::StmtExprClass: { + const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt(); + if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back())) + return ClassifyUnnamed(Ctx, LastExpr->getType()); + return Cl::CL_PRValue; + } + + case Expr::CXXUuidofExprClass: + return Cl::CL_LValue; + + case Expr::PackExpansionExprClass: + return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern()); + + case Expr::MaterializeTemporaryExprClass: + return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference() + ? Cl::CL_LValue + : Cl::CL_XValue; + + case Expr::InitListExprClass: + // An init list can be an lvalue if it is bound to a reference and + // contains only one element. In that case, we look at that element + // for an exact classification. Init list creation takes care of the + // value kind for us, so we only need to fine-tune. 
+ if (E->isRValue()) + return ClassifyExprValueKind(Lang, E, E->getValueKind()); + assert(cast<InitListExpr>(E)->getNumInits() == 1 && + "Only 1-element init lists can be glvalues."); + return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0)); + } + + llvm_unreachable("unhandled expression kind in classification"); +} + +/// ClassifyDecl - Return the classification of an expression referencing the +/// given declaration. +static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) { + // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a + // function, variable, or data member and a prvalue otherwise. + // In C, functions are not lvalues. + // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an + // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to + // special-case this. + + if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) + return Cl::CL_MemberFunction; + + bool islvalue; + if (const NonTypeTemplateParmDecl *NTTParm = + dyn_cast<NonTypeTemplateParmDecl>(D)) + islvalue = NTTParm->getType()->isReferenceType(); + else + islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) || + isa<IndirectFieldDecl>(D) || + (Ctx.getLangOpts().CPlusPlus && + (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D))); + + return islvalue ? Cl::CL_LValue : Cl::CL_PRValue; +} + +/// ClassifyUnnamed - Return the classification of an expression yielding an +/// unnamed value of the given type. This applies in particular to function +/// calls and casts. +static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) { + // In C, function calls are always rvalues. + if (!Ctx.getLangOpts().CPlusPlus) return Cl::CL_PRValue; + + // C++ [expr.call]p10: A function call is an lvalue if the result type is an + // lvalue reference type or an rvalue reference to function type, an xvalue + // if the result type is an rvalue reference to object type, and a prvalue + // otherwise. 
+ if (T->isLValueReferenceType()) + return Cl::CL_LValue; + const RValueReferenceType *RV = T->getAs<RValueReferenceType>(); + if (!RV) // Could still be a class temporary, though. + return ClassifyTemporary(T); + + return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue; +} + +static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) { + if (E->getType() == Ctx.UnknownAnyTy) + return (isa<FunctionDecl>(E->getMemberDecl()) + ? Cl::CL_PRValue : Cl::CL_LValue); + + // Handle C first, it's easier. + if (!Ctx.getLangOpts().CPlusPlus) { + // C99 6.5.2.3p3 + // For dot access, the expression is an lvalue if the first part is. For + // arrow access, it always is an lvalue. + if (E->isArrow()) + return Cl::CL_LValue; + // ObjC property accesses are not lvalues, but get special treatment. + Expr *Base = E->getBase()->IgnoreParens(); + if (isa<ObjCPropertyRefExpr>(Base)) + return Cl::CL_SubObjCPropertySetting; + return ClassifyInternal(Ctx, Base); + } + + NamedDecl *Member = E->getMemberDecl(); + // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2. + // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then + // E1.E2 is an lvalue. + if (ValueDecl *Value = dyn_cast<ValueDecl>(Member)) + if (Value->getType()->isReferenceType()) + return Cl::CL_LValue; + + // Otherwise, one of the following rules applies. + // -- If E2 is a static member [...] then E1.E2 is an lvalue. + if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord()) + return Cl::CL_LValue; + + // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then + // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue; + // otherwise, it is a prvalue. 
+ if (isa<FieldDecl>(Member)) { + // *E1 is an lvalue + if (E->isArrow()) + return Cl::CL_LValue; + Expr *Base = E->getBase()->IgnoreParenImpCasts(); + if (isa<ObjCPropertyRefExpr>(Base)) + return Cl::CL_SubObjCPropertySetting; + return ClassifyInternal(Ctx, E->getBase()); + } + + // -- If E2 is a [...] member function, [...] + // -- If it refers to a static member function [...], then E1.E2 is an + // lvalue; [...] + // -- Otherwise [...] E1.E2 is a prvalue. + if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) + return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction; + + // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue. + // So is everything else we haven't handled yet. + return Cl::CL_PRValue; +} + +static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) { + assert(Ctx.getLangOpts().CPlusPlus && + "This is only relevant for C++."); + // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand. + // Except we override this for writes to ObjC properties. + if (E->isAssignmentOp()) + return (E->getLHS()->getObjectKind() == OK_ObjCProperty + ? Cl::CL_PRValue : Cl::CL_LValue); + + // C++ [expr.comma]p1: the result is of the same value category as its right + // operand, [...]. + if (E->getOpcode() == BO_Comma) + return ClassifyInternal(Ctx, E->getRHS()); + + // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand + // is a pointer to a data member is of the same value category as its first + // operand. + if (E->getOpcode() == BO_PtrMemD) + return (E->getType()->isFunctionType() || + E->hasPlaceholderType(BuiltinType::BoundMember)) + ? Cl::CL_MemberFunction + : ClassifyInternal(Ctx, E->getLHS()); + + // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its + // second operand is a pointer to data member and a prvalue otherwise. 
+ if (E->getOpcode() == BO_PtrMemI) + return (E->getType()->isFunctionType() || + E->hasPlaceholderType(BuiltinType::BoundMember)) + ? Cl::CL_MemberFunction + : Cl::CL_LValue; + + // All other binary operations are prvalues. + return Cl::CL_PRValue; +} + +static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True, + const Expr *False) { + assert(Ctx.getLangOpts().CPlusPlus && + "This is only relevant for C++."); + + // C++ [expr.cond]p2 + // If either the second or the third operand has type (cv) void, [...] + // the result [...] is a prvalue. + if (True->getType()->isVoidType() || False->getType()->isVoidType()) + return Cl::CL_PRValue; + + // Note that at this point, we have already performed all conversions + // according to [expr.cond]p3. + // C++ [expr.cond]p4: If the second and third operands are glvalues of the + // same value category [...], the result is of that [...] value category. + // C++ [expr.cond]p5: Otherwise, the result is a prvalue. + Cl::Kinds LCl = ClassifyInternal(Ctx, True), + RCl = ClassifyInternal(Ctx, False); + return LCl == RCl ? LCl : Cl::CL_PRValue; +} + +static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E, + Cl::Kinds Kind, SourceLocation &Loc) { + // As a general rule, we only care about lvalues. But there are some rvalues + // for which we want to generate special results. + if (Kind == Cl::CL_PRValue) { + // For the sake of better diagnostics, we want to specifically recognize + // use of the GCC cast-as-lvalue extension. + if (const ExplicitCastExpr *CE = + dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) { + if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) { + Loc = CE->getExprLoc(); + return Cl::CM_LValueCast; + } + } + } + if (Kind != Cl::CL_LValue) + return Cl::CM_RValue; + + // This is the lvalue case. + // Functions are lvalues in C++, but not modifiable. 
(C++ [basic.lval]p6) + if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType()) + return Cl::CM_Function; + + // Assignment to a property in ObjC is an implicit setter access. But a + // setter might not exist. + if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) { + if (Expr->isImplicitProperty() && Expr->getImplicitPropertySetter() == 0) + return Cl::CM_NoSetterProperty; + } + + CanQualType CT = Ctx.getCanonicalType(E->getType()); + // Const stuff is obviously not modifiable. + if (CT.isConstQualified()) + return Cl::CM_ConstQualified; + if (CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant) + return Cl::CM_ConstQualified; + + // Arrays are not modifiable, only their elements are. + if (CT->isArrayType()) + return Cl::CM_ArrayType; + // Incomplete types are not modifiable. + if (CT->isIncompleteType()) + return Cl::CM_IncompleteType; + + // Records with any const fields (recursively) are not modifiable. + if (const RecordType *R = CT->getAs<RecordType>()) { + assert((E->getObjectKind() == OK_ObjCProperty || + !Ctx.getLangOpts().CPlusPlus) && + "C++ struct assignment should be resolved by the " + "copy assignment operator."); + if (R->hasConstFields()) + return Cl::CM_ConstQualified; + } + + return Cl::CM_Modifiable; +} + +Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const { + Classification VC = Classify(Ctx); + switch (VC.getKind()) { + case Cl::CL_LValue: return LV_Valid; + case Cl::CL_XValue: return LV_InvalidExpression; + case Cl::CL_Function: return LV_NotObjectType; + case Cl::CL_Void: return LV_InvalidExpression; + case Cl::CL_AddressableVoid: return LV_IncompleteVoidType; + case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents; + case Cl::CL_MemberFunction: return LV_MemberFunction; + case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting; + case Cl::CL_ClassTemporary: return LV_ClassTemporary; + case Cl::CL_ArrayTemporary: return LV_ArrayTemporary; + case 
Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression; + case Cl::CL_PRValue: return LV_InvalidExpression; + } + llvm_unreachable("Unhandled kind"); +} + +Expr::isModifiableLvalueResult +Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const { + SourceLocation dummy; + Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy); + switch (VC.getKind()) { + case Cl::CL_LValue: break; + case Cl::CL_XValue: return MLV_InvalidExpression; + case Cl::CL_Function: return MLV_NotObjectType; + case Cl::CL_Void: return MLV_InvalidExpression; + case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType; + case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents; + case Cl::CL_MemberFunction: return MLV_MemberFunction; + case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting; + case Cl::CL_ClassTemporary: return MLV_ClassTemporary; + case Cl::CL_ArrayTemporary: return MLV_ArrayTemporary; + case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression; + case Cl::CL_PRValue: + return VC.getModifiable() == Cl::CM_LValueCast ? 
+ MLV_LValueCast : MLV_InvalidExpression; + } + assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind"); + switch (VC.getModifiable()) { + case Cl::CM_Untested: llvm_unreachable("Did not test modifiability"); + case Cl::CM_Modifiable: return MLV_Valid; + case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match"); + case Cl::CM_Function: return MLV_NotObjectType; + case Cl::CM_LValueCast: + llvm_unreachable("CM_LValueCast and CL_LValue don't match"); + case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty; + case Cl::CM_ConstQualified: return MLV_ConstQualified; + case Cl::CM_ArrayType: return MLV_ArrayType; + case Cl::CM_IncompleteType: return MLV_IncompleteType; + } + llvm_unreachable("Unhandled modifiable type"); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp new file mode 100644 index 000000000000..390cfe9cd235 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp @@ -0,0 +1,8707 @@ +//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Expr constant evaluator. +// +// Constant expression evaluation produces four main results: +// +// * A success/failure flag indicating whether constant folding was successful. +// This is the 'bool' return value used by most of the code in this file. A +// 'false' return value indicates that constant folding has failed, and any +// appropriate diagnostic has already been produced. +// +// * An evaluated result, valid only if constant folding has not failed. +// +// * A flag indicating if evaluation encountered (unevaluated) side-effects. 
+// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1), +// where it is possible to determine the evaluated result regardless. +// +// * A set of notes indicating why the evaluation was not a constant expression +// (under the C++11 / C++1y rules only, at the moment), or, if folding failed +// too, why the expression could not be folded. +// +// If we are checking for a potential constant expression, failure to constant +// fold a potential constant sub-expression will be indicated by a 'false' +// return value (the expression could not be folded) and no diagnostic (the +// expression is not necessarily non-constant). +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTDiagnostic.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/Expr.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" +#include <cstring> +#include <functional> + +using namespace clang; +using llvm::APSInt; +using llvm::APFloat; + +static bool IsGlobalLValue(APValue::LValueBase B); + +namespace { + struct LValue; + struct CallStackFrame; + struct EvalInfo; + + static QualType getType(APValue::LValueBase B) { + if (!B) return QualType(); + if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) + return D->getType(); + + const Expr *Base = B.get<const Expr*>(); + + // For a materialized temporary, the type of the temporary we materialized + // may not be the type of the expression. 
+ if (const MaterializeTemporaryExpr *MTE = + dyn_cast<MaterializeTemporaryExpr>(Base)) { + SmallVector<const Expr *, 2> CommaLHSs; + SmallVector<SubobjectAdjustment, 2> Adjustments; + const Expr *Temp = MTE->GetTemporaryExpr(); + const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs, + Adjustments); + // Keep any cv-qualifiers from the reference if we generated a temporary + // for it. + if (Inner != Temp) + return Inner->getType(); + } + + return Base->getType(); + } + + /// Get an LValue path entry, which is known to not be an array index, as a + /// field or base class. + static + APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) { + APValue::BaseOrMemberType Value; + Value.setFromOpaqueValue(E.BaseOrMember); + return Value; + } + + /// Get an LValue path entry, which is known to not be an array index, as a + /// field declaration. + static const FieldDecl *getAsField(APValue::LValuePathEntry E) { + return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer()); + } + /// Get an LValue path entry, which is known to not be an array index, as a + /// base class declaration. + static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) { + return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer()); + } + /// Determine whether this LValue path entry for a base class names a virtual + /// base class. + static bool isVirtualBaseClass(APValue::LValuePathEntry E) { + return getAsBaseOrMember(E).getInt(); + } + + /// Find the path length and type of the most-derived subobject in the given + /// path, and find the size of the containing array, if any. 
+  static
+  unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
+                                    ArrayRef<APValue::LValuePathEntry> Path,
+                                    uint64_t &ArraySize, QualType &Type) {
+    unsigned MostDerivedLength = 0;
+    Type = Base;
+    for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+      if (Type->isArrayType()) {
+        const ConstantArrayType *CAT =
+          cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+        Type = CAT->getElementType();
+        ArraySize = CAT->getSize().getZExtValue();
+        MostDerivedLength = I + 1;
+      } else if (Type->isAnyComplexType()) {
+        const ComplexType *CT = Type->castAs<ComplexType>();
+        Type = CT->getElementType();
+        // A complex value is treated as an array of two elements.
+        ArraySize = 2;
+        MostDerivedLength = I + 1;
+      } else if (const FieldDecl *FD = getAsField(Path[I])) {
+        Type = FD->getType();
+        ArraySize = 0;
+        MostDerivedLength = I + 1;
+      } else {
+        // Path[I] describes a base class.
+        ArraySize = 0;
+      }
+    }
+    return MostDerivedLength;
+  }
+
+  // The order of this enum is important for diagnostics.
+  enum CheckSubobjectKind {
+    CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
+    CSK_This, CSK_Real, CSK_Imag
+  };
+
+  /// A path from a glvalue to a subobject of that glvalue.
+  struct SubobjectDesignator {
+    /// True if the subobject was named in a manner not supported by C++11. Such
+    /// lvalues can still be folded, but they are not core constant expressions
+    /// and we cannot perform lvalue-to-rvalue conversions on them.
+    bool Invalid : 1;
+
+    /// Is this a pointer one past the end of an object?
+    bool IsOnePastTheEnd : 1;
+
+    /// The length of the path to the most-derived object of which this is a
+    /// subobject.
+    unsigned MostDerivedPathLength : 30;
+
+    /// The size of the array of which the most-derived object is an element, or
+    /// 0 if the most-derived object is not an array element.
+    uint64_t MostDerivedArraySize;
+
+    /// The type of the most derived object referred to by this address.
+    QualType MostDerivedType;
+
+    typedef APValue::LValuePathEntry PathEntry;
+
+    /// The entries on the path from the glvalue to the designated subobject.
+    SmallVector<PathEntry, 8> Entries;
+
+    SubobjectDesignator() : Invalid(true) {}
+
+    explicit SubobjectDesignator(QualType T)
+      : Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
+        MostDerivedArraySize(0), MostDerivedType(T) {}
+
+    SubobjectDesignator(ASTContext &Ctx, const APValue &V)
+      : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+        MostDerivedPathLength(0), MostDerivedArraySize(0) {
+      if (!Invalid) {
+        IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+        ArrayRef<PathEntry> VEntries = V.getLValuePath();
+        Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
+        if (V.getLValueBase())
+          MostDerivedPathLength =
+              findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
+                                       V.getLValuePath(), MostDerivedArraySize,
+                                       MostDerivedType);
+      }
+    }
+
+    void setInvalid() {
+      Invalid = true;
+      Entries.clear();
+    }
+
+    /// Determine whether this is a one-past-the-end pointer.
+    bool isOnePastTheEnd() const {
+      if (IsOnePastTheEnd)
+        return true;
+      // An array index equal to the array bound also designates one past the
+      // end of the most-derived array element.
+      if (MostDerivedArraySize &&
+          Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+        return true;
+      return false;
+    }
+
+    /// Check that this refers to a valid subobject.
+    bool isValidSubobject() const {
+      if (Invalid)
+        return false;
+      return !isOnePastTheEnd();
+    }
+    /// Check that this refers to a valid subobject, and if not, produce a
+    /// relevant diagnostic and set the designator as invalid.
+    bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+    /// Update this designator to refer to the first element within this array.
+    void addArrayUnchecked(const ConstantArrayType *CAT) {
+      PathEntry Entry;
+      Entry.ArrayIndex = 0;
+      Entries.push_back(Entry);
+
+      // This is a most-derived object.
+      MostDerivedType = CAT->getElementType();
+      MostDerivedArraySize = CAT->getSize().getZExtValue();
+      MostDerivedPathLength = Entries.size();
+    }
+    /// Update this designator to refer to the given base or member of this
+    /// object.
+    void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+      PathEntry Entry;
+      APValue::BaseOrMemberType Value(D, Virtual);
+      Entry.BaseOrMember = Value.getOpaqueValue();
+      Entries.push_back(Entry);
+
+      // If this isn't a base class, it's a new most-derived object.
+      if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+        MostDerivedType = FD->getType();
+        MostDerivedArraySize = 0;
+        MostDerivedPathLength = Entries.size();
+      }
+    }
+    /// Update this designator to refer to the given complex component.
+    void addComplexUnchecked(QualType EltTy, bool Imag) {
+      PathEntry Entry;
+      Entry.ArrayIndex = Imag;
+      Entries.push_back(Entry);
+
+      // This is technically a most-derived object, though in practice this
+      // is unlikely to matter.
+      MostDerivedType = EltTy;
+      MostDerivedArraySize = 2;
+      MostDerivedPathLength = Entries.size();
+    }
+    void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+    /// Add N to the address of this subobject.
+    void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+      if (Invalid) return;
+      if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
+        // The designator points into an array: step the index, diagnosing if
+        // we go beyond one past the end.
+        Entries.back().ArrayIndex += N;
+        if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+          diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
+          setInvalid();
+        }
+        return;
+      }
+      // [expr.add]p4: For the purposes of these operators, a pointer to a
+      // nonarray object behaves the same as a pointer to the first element of
+      // an array of length one with the type of the object as its element type.
+      if (IsOnePastTheEnd && N == (uint64_t)-1)
+        IsOnePastTheEnd = false;
+      else if (!IsOnePastTheEnd && N == 1)
+        IsOnePastTheEnd = true;
+      else if (N != 0) {
+        diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+        setInvalid();
+      }
+    }
+  };
+
+  /// A stack frame in the constexpr call stack.
+  struct CallStackFrame {
+    EvalInfo &Info;
+
+    /// Parent - The caller of this stack frame.
+    CallStackFrame *Caller;
+
+    /// CallLoc - The location of the call expression for this call.
+    SourceLocation CallLoc;
+
+    /// Callee - The function which was called.
+    const FunctionDecl *Callee;
+
+    /// Index - The call index of this call.
+    unsigned Index;
+
+    /// This - The binding for the this pointer in this call, if any.
+    const LValue *This;
+
+    /// Arguments - Parameter bindings for this function call, indexed by
+    /// parameters' function scope indices.
+    APValue *Arguments;
+
+    // Note that we intentionally use std::map here so that references to
+    // values are stable.
+    typedef std::map<const void*, APValue> MapTy;
+    typedef MapTy::const_iterator temp_iterator;
+    /// Temporaries - Temporary lvalues materialized within this stack frame.
+    MapTy Temporaries;
+
+    CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+                   const FunctionDecl *Callee, const LValue *This,
+                   APValue *Arguments);
+    ~CallStackFrame();
+
+    /// Look up a temporary previously created in this frame; returns null if
+    /// none exists for the given key.
+    APValue *getTemporary(const void *Key) {
+      MapTy::iterator I = Temporaries.find(Key);
+      return I == Temporaries.end() ? 0 : &I->second;
+    }
+    APValue &createTemporary(const void *Key, bool IsLifetimeExtended);
+  };
+
+  /// Temporarily override 'this'.
+  class ThisOverrideRAII {
+  public:
+    ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
+      : Frame(Frame), OldThis(Frame.This) {
+      if (Enable)
+        Frame.This = NewThis;
+    }
+    // Restore the frame's previous 'this' binding on scope exit.
+    ~ThisOverrideRAII() {
+      Frame.This = OldThis;
+    }
+  private:
+    CallStackFrame &Frame;
+    const LValue *OldThis;
+  };
+
+  /// A partial diagnostic which we might know in advance that we are not going
+  /// to emit.
+  class OptionalDiagnostic {
+    PartialDiagnostic *Diag;
+
+  public:
+    explicit OptionalDiagnostic(PartialDiagnostic *Diag = 0) : Diag(Diag) {}
+
+    // Forward to the underlying diagnostic; a no-op when no diagnostic is
+    // being emitted.
+    template<typename T>
+    OptionalDiagnostic &operator<<(const T &v) {
+      if (Diag)
+        *Diag << v;
+      return *this;
+    }
+
+    OptionalDiagnostic &operator<<(const APSInt &I) {
+      if (Diag) {
+        SmallVector<char, 32> Buffer;
+        I.toString(Buffer);
+        *Diag << StringRef(Buffer.data(), Buffer.size());
+      }
+      return *this;
+    }
+
+    OptionalDiagnostic &operator<<(const APFloat &F) {
+      if (Diag) {
+        // FIXME: Force the precision of the source value down so we don't
+        // print digits which are usually useless (we don't really care here if
+        // we truncate a digit by accident in edge cases). Ideally,
+        // APFloat::toString would automatically print the shortest
+        // representation which rounds to the correct value, but it's a bit
+        // tricky to implement.
+        // 59/196 approximates log10(2): convert bits of precision to a count
+        // of decimal digits, rounding up.
+        unsigned precision =
+            llvm::APFloat::semanticsPrecision(F.getSemantics());
+        precision = (precision * 59 + 195) / 196;
+        SmallVector<char, 32> Buffer;
+        F.toString(Buffer, precision);
+        *Diag << StringRef(Buffer.data(), Buffer.size());
+      }
+      return *this;
+    }
+  };
+
+  /// A cleanup, and a flag indicating whether it is lifetime-extended.
+  class Cleanup {
+    llvm::PointerIntPair<APValue*, 1, bool> Value;
+
+  public:
+    Cleanup(APValue *Val, bool IsLifetimeExtended)
+      : Value(Val, IsLifetimeExtended) {}
+
+    bool isLifetimeExtended() const { return Value.getInt(); }
+    // End the tracked object's lifetime by resetting it to an empty APValue.
+    void endLifetime() {
+      *Value.getPointer() = APValue();
+    }
+  };
+
+  /// EvalInfo - This is a private struct used by the evaluator to capture
+  /// information about a subexpression as it is folded. It retains information
+  /// about the AST context, but also maintains information about the folded
+  /// expression.
+  ///
+  /// If an expression could be evaluated, it is still possible it is not a C
+  /// "integer constant expression" or constant expression. If not, this struct
+  /// captures information about how and why not.
+  ///
+  /// One bit of information passed *into* the request for constant folding
+  /// indicates whether the subexpression is "evaluated" or not according to C
+  /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+  /// evaluate the expression regardless of what the RHS is, but C only allows
+  /// certain things in certain situations.
+  struct EvalInfo {
+    ASTContext &Ctx;
+
+    /// EvalStatus - Contains information about the evaluation.
+    Expr::EvalStatus &EvalStatus;
+
+    /// CurrentCall - The top of the constexpr call stack.
+    CallStackFrame *CurrentCall;
+
+    /// CallStackDepth - The number of calls in the call stack right now.
+    unsigned CallStackDepth;
+
+    /// NextCallIndex - The next call index to assign.
+    unsigned NextCallIndex;
+
+    /// StepsLeft - The remaining number of evaluation steps we're permitted
+    /// to perform. This is essentially a limit for the number of statements
+    /// we will evaluate.
+    unsigned StepsLeft;
+
+    /// BottomFrame - The frame in which evaluation started. This must be
+    /// initialized after CurrentCall and CallStackDepth.
+    CallStackFrame BottomFrame;
+
+    /// A stack of values whose lifetimes end at the end of some surrounding
+    /// evaluation frame.
+    llvm::SmallVector<Cleanup, 16> CleanupStack;
+
+    /// EvaluatingDecl - This is the declaration whose initializer is being
+    /// evaluated, if any.
+    APValue::LValueBase EvaluatingDecl;
+
+    /// EvaluatingDeclValue - This is the value being constructed for the
+    /// declaration whose initializer is being evaluated, if any.
+    APValue *EvaluatingDeclValue;
+
+    /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+    /// notes attached to it will also be stored, otherwise they will not be.
+    bool HasActiveDiagnostic;
+
+    enum EvaluationMode {
+      /// Evaluate as a constant expression. Stop if we find that the expression
+      /// is not a constant expression.
+      EM_ConstantExpression,
+
+      /// Evaluate as a potential constant expression. Keep going if we hit a
+      /// construct that we can't evaluate yet (because we don't yet know the
+      /// value of something) but stop if we hit something that could never be
+      /// a constant expression.
+      EM_PotentialConstantExpression,
+
+      /// Fold the expression to a constant. Stop if we hit a side-effect that
+      /// we can't model.
+      EM_ConstantFold,
+
+      /// Evaluate the expression looking for integer overflow and similar
+      /// issues. Don't worry about side-effects, and try to visit all
+      /// subexpressions.
+      EM_EvaluateForOverflow,
+
+      /// Evaluate in any way we know how. Don't worry about side-effects that
+      /// can't be modeled.
+      EM_IgnoreSideEffects
+    } EvalMode;
+
+    /// Are we checking whether the expression is a potential constant
+    /// expression?
+    bool checkingPotentialConstantExpression() const {
+      return EvalMode == EM_PotentialConstantExpression;
+    }
+
+    /// Are we checking an expression for overflow?
+    // FIXME: We should check for any kind of undefined or suspicious behavior
+    // in such constructs, not just overflow.
+    bool checkingForOverflow() { return EvalMode == EM_EvaluateForOverflow; }
+
+    EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
+      : Ctx(const_cast<ASTContext&>(C)), EvalStatus(S), CurrentCall(0),
+        CallStackDepth(0), NextCallIndex(1),
+        StepsLeft(getLangOpts().ConstexprStepLimit),
+        BottomFrame(*this, SourceLocation(), 0, 0, 0),
+        EvaluatingDecl((const ValueDecl*)0), EvaluatingDeclValue(0),
+        HasActiveDiagnostic(false), EvalMode(Mode) {}
+
+    void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
+      EvaluatingDecl = Base;
+      EvaluatingDeclValue = &Value;
+    }
+
+    const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+    bool CheckCallLimit(SourceLocation Loc) {
+      // Don't perform any constexpr calls (other than the call we're checking)
+      // when checking a potential constant expression.
+      if (checkingPotentialConstantExpression() && CallStackDepth > 1)
+        return false;
+      if (NextCallIndex == 0) {
+        // NextCallIndex has wrapped around.
+        Diag(Loc, diag::note_constexpr_call_limit_exceeded);
+        return false;
+      }
+      if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+        return true;
+      Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
+        << getLangOpts().ConstexprCallDepth;
+      return false;
+    }
+
+    CallStackFrame *getCallFrame(unsigned CallIndex) {
+      assert(CallIndex && "no call index in getCallFrame");
+      // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+      // be null in this loop.
+      CallStackFrame *Frame = CurrentCall;
+      while (Frame->Index > CallIndex)
+        Frame = Frame->Caller;
+      return (Frame->Index == CallIndex) ? Frame : 0;
+    }
+
+    /// Account for one evaluation step; diagnose and fail once the
+    /// implementation-defined step limit is exhausted.
+    bool nextStep(const Stmt *S) {
+      if (!StepsLeft) {
+        Diag(S->getLocStart(), diag::note_constexpr_step_limit_exceeded);
+        return false;
+      }
+      --StepsLeft;
+      return true;
+    }
+
+  private:
+    /// Add a diagnostic to the diagnostics list.
+    PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
+      PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+      EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+      return EvalStatus.Diag->back().second;
+    }
+
+    /// Add notes containing a call stack to the current point of evaluation.
+    void addCallStack(unsigned Limit);
+
+  public:
+    /// Diagnose that the evaluation cannot be folded.
+    OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
+                              = diag::note_invalid_subexpr_in_const_expr,
+                            unsigned ExtraNotes = 0) {
+      if (EvalStatus.Diag) {
+        // If we have a prior diagnostic, it will be noting that the expression
+        // isn't a constant expression. This diagnostic is more important,
+        // unless we require this evaluation to produce a constant expression.
+        //
+        // FIXME: We might want to show both diagnostics to the user in
+        // EM_ConstantFold mode.
+        if (!EvalStatus.Diag->empty()) {
+          switch (EvalMode) {
+          case EM_ConstantFold:
+          case EM_IgnoreSideEffects:
+          case EM_EvaluateForOverflow:
+            if (!EvalStatus.HasSideEffects)
+              break;
+            // We've had side-effects; we want the diagnostic from them, not
+            // some later problem.
+            // (Intentional fall-through into the constant-expression cases.)
+          case EM_ConstantExpression:
+          case EM_PotentialConstantExpression:
+            HasActiveDiagnostic = false;
+            return OptionalDiagnostic();
+          }
+        }
+
+        unsigned CallStackNotes = CallStackDepth - 1;
+        unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
+        if (Limit)
+          CallStackNotes = std::min(CallStackNotes, Limit + 1);
+        if (checkingPotentialConstantExpression())
+          CallStackNotes = 0;
+
+        HasActiveDiagnostic = true;
+        EvalStatus.Diag->clear();
+        EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
+        addDiag(Loc, DiagId);
+        if (!checkingPotentialConstantExpression())
+          addCallStack(Limit);
+        return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
+      }
+      HasActiveDiagnostic = false;
+      return OptionalDiagnostic();
+    }
+
+    OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
+                              = diag::note_invalid_subexpr_in_const_expr,
+                            unsigned ExtraNotes = 0) {
+      if (EvalStatus.Diag)
+        return Diag(E->getExprLoc(), DiagId, ExtraNotes);
+      HasActiveDiagnostic = false;
+      return OptionalDiagnostic();
+    }
+
+    /// Diagnose that the evaluation does not produce a C++11 core constant
+    /// expression.
+    ///
+    /// FIXME: Stop evaluating if we're in EM_ConstantExpression or
+    /// EM_PotentialConstantExpression mode and we produce one of these.
+    template<typename LocArg>
+    OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
+                                 = diag::note_invalid_subexpr_in_const_expr,
+                               unsigned ExtraNotes = 0) {
+      // Don't override a previous diagnostic. Don't bother collecting
+      // diagnostics if we're evaluating for overflow.
+      if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
+        HasActiveDiagnostic = false;
+        return OptionalDiagnostic();
+      }
+      return Diag(Loc, DiagId, ExtraNotes);
+    }
+
+    /// Add a note to a prior diagnostic.
+    OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
+      if (!HasActiveDiagnostic)
+        return OptionalDiagnostic();
+      return OptionalDiagnostic(&addDiag(Loc, DiagId));
+    }
+
+    /// Add a stack of notes to a prior diagnostic.
+    void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
+      if (HasActiveDiagnostic) {
+        EvalStatus.Diag->insert(EvalStatus.Diag->end(),
+                                Diags.begin(), Diags.end());
+      }
+    }
+
+    /// Should we continue evaluation after encountering a side-effect that we
+    /// couldn't model?
+    bool keepEvaluatingAfterSideEffect() {
+      switch (EvalMode) {
+      case EM_PotentialConstantExpression:
+      case EM_EvaluateForOverflow:
+      case EM_IgnoreSideEffects:
+        return true;
+
+      case EM_ConstantExpression:
+      case EM_ConstantFold:
+        return false;
+      }
+      llvm_unreachable("Missed EvalMode case");
+    }
+
+    /// Note that we have had a side-effect, and determine whether we should
+    /// keep evaluating.
+    bool noteSideEffect() {
+      EvalStatus.HasSideEffects = true;
+      return keepEvaluatingAfterSideEffect();
+    }
+
+    /// Should we continue evaluation as much as possible after encountering a
+    /// construct which can't be reduced to a value?
+    bool keepEvaluatingAfterFailure() {
+      if (!StepsLeft)
+        return false;
+
+      switch (EvalMode) {
+      case EM_PotentialConstantExpression:
+      case EM_EvaluateForOverflow:
+        return true;
+
+      case EM_ConstantExpression:
+      case EM_ConstantFold:
+      case EM_IgnoreSideEffects:
+        return false;
+      }
+      llvm_unreachable("Missed EvalMode case");
+    }
+  };
+
+  /// Object used to treat all foldable expressions as constant expressions.
+  struct FoldConstant {
+    EvalInfo &Info;
+    bool Enabled;
+    bool HadNoPriorDiags;
+    EvalInfo::EvaluationMode OldMode;
+
+    explicit FoldConstant(EvalInfo &Info, bool Enabled)
+      : Info(Info),
+        Enabled(Enabled),
+        HadNoPriorDiags(Info.EvalStatus.Diag &&
+                        Info.EvalStatus.Diag->empty() &&
+                        !Info.EvalStatus.HasSideEffects),
+        OldMode(Info.EvalMode) {
+      if (Enabled && Info.EvalMode == EvalInfo::EM_ConstantExpression)
+        Info.EvalMode = EvalInfo::EM_ConstantFold;
+    }
+    // Prevent the destructor from discarding diagnostics produced while
+    // folding.
+    void keepDiagnostics() { Enabled = false; }
+    ~FoldConstant() {
+      if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
+          !Info.EvalStatus.HasSideEffects)
+        Info.EvalStatus.Diag->clear();
+      Info.EvalMode = OldMode;
+    }
+  };
+
+  /// RAII object used to suppress diagnostics and side-effects from a
+  /// speculative evaluation.
+  class SpeculativeEvaluationRAII {
+    EvalInfo &Info;
+    Expr::EvalStatus Old;
+
+  public:
+    SpeculativeEvaluationRAII(EvalInfo &Info,
+                              SmallVectorImpl<PartialDiagnosticAt> *NewDiag = 0)
+      : Info(Info), Old(Info.EvalStatus) {
+      Info.EvalStatus.Diag = NewDiag;
+      // If we're speculatively evaluating, we may have skipped over some
+      // evaluations and missed out a side effect.
+      Info.EvalStatus.HasSideEffects = true;
+    }
+    ~SpeculativeEvaluationRAII() {
+      Info.EvalStatus = Old;
+    }
+  };
+
+  /// RAII object wrapping a full-expression or block scope, and handling
+  /// the ending of the lifetime of temporaries created within it.
+  template<bool IsFullExpression>
+  class ScopeRAII {
+    EvalInfo &Info;
+    unsigned OldStackSize;
+  public:
+    ScopeRAII(EvalInfo &Info)
+      : Info(Info), OldStackSize(Info.CleanupStack.size()) {}
+    ~ScopeRAII() {
+      // Body moved to a static method to encourage the compiler to inline away
+      // instances of this class.
+      cleanup(Info, OldStackSize);
+    }
+  private:
+    static void cleanup(EvalInfo &Info, unsigned OldStackSize) {
+      unsigned NewEnd = OldStackSize;
+      for (unsigned I = OldStackSize, N = Info.CleanupStack.size();
+           I != N; ++I) {
+        if (IsFullExpression && Info.CleanupStack[I].isLifetimeExtended()) {
+          // Full-expression cleanup of a lifetime-extended temporary: nothing
+          // to do, just move this cleanup to the right place in the stack.
+          std::swap(Info.CleanupStack[I], Info.CleanupStack[NewEnd]);
+          ++NewEnd;
+        } else {
+          // End the lifetime of the object.
+          Info.CleanupStack[I].endLifetime();
+        }
+      }
+      Info.CleanupStack.erase(Info.CleanupStack.begin() + NewEnd,
+                              Info.CleanupStack.end());
+    }
+  };
+  typedef ScopeRAII<false> BlockScopeRAII;
+  typedef ScopeRAII<true> FullExpressionRAII;
+}
+
+bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
+                                         CheckSubobjectKind CSK) {
+  if (Invalid)
+    return false;
+  if (isOnePastTheEnd()) {
+    Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
+      << CSK;
+    setInvalid();
+    return false;
+  }
+  return true;
+}
+
+void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
+                                                    const Expr *E, uint64_t N) {
+  if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
+    Info.CCEDiag(E, diag::note_constexpr_array_index)
+      << static_cast<int>(N) << /*array*/ 0
+      << static_cast<unsigned>(MostDerivedArraySize);
+  else
+    Info.CCEDiag(E, diag::note_constexpr_array_index)
+      << static_cast<int>(N) << /*non-array*/ 1;
+  setInvalid();
+}
+
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+                               const FunctionDecl *Callee, const LValue *This,
+                               APValue *Arguments)
+  : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
+    Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+  // Push this frame onto the evaluator's call stack.
+  Info.CurrentCall = this;
+  ++Info.CallStackDepth;
+}
+
+CallStackFrame::~CallStackFrame() {
+  assert(Info.CurrentCall == this && "calls retired out of order");
+  --Info.CallStackDepth;
+  Info.CurrentCall = Caller;
+}
+
+APValue &CallStackFrame::createTemporary(const void *Key,
+                                         bool IsLifetimeExtended) {
+  APValue &Result = Temporaries[Key];
+  assert(Result.isUninit() && "temporary created multiple times");
+  Info.CleanupStack.push_back(Cleanup(&Result, IsLifetimeExtended));
+  return Result;
+}
+
+static void describeCall(CallStackFrame *Frame, raw_ostream &Out);
+
+void EvalInfo::addCallStack(unsigned Limit) {
+  // Determine which calls to skip, if any.
+  unsigned ActiveCalls = CallStackDepth - 1;
+  unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
+  if (Limit && Limit < ActiveCalls) {
+    SkipStart = Limit / 2 + Limit % 2;
+    SkipEnd = ActiveCalls - Limit / 2;
+  }
+
+  // Walk the call stack and add the diagnostics.
+  unsigned CallIdx = 0;
+  for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
+       Frame = Frame->Caller, ++CallIdx) {
+    // Skip this call?
+    if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
+      if (CallIdx == SkipStart) {
+        // Note that we're skipping calls.
+        addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
+          << unsigned(ActiveCalls - Limit);
+      }
+      continue;
+    }
+
+    SmallVector<char, 128> Buffer;
+    llvm::raw_svector_ostream Out(Buffer);
+    describeCall(Frame, Out);
+    addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
+  }
+}
+
+namespace {
+  /// A complex value, stored as either a pair of APSInts or a pair of
+  /// APFloats depending on the element type.
+  struct ComplexValue {
+  private:
+    bool IsInt;
+
+  public:
+    APSInt IntReal, IntImag;
+    APFloat FloatReal, FloatImag;
+
+    ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+
+    void makeComplexFloat() { IsInt = false; }
+    bool isComplexFloat() const { return !IsInt; }
+    APFloat &getComplexFloatReal() { return FloatReal; }
+    APFloat &getComplexFloatImag() { return FloatImag; }
+
+    void makeComplexInt() { IsInt = true; }
+    bool isComplexInt() const { return IsInt; }
+    APSInt &getComplexIntReal() { return IntReal; }
+    APSInt &getComplexIntImag() { return IntImag; }
+
+    void moveInto(APValue &v) const {
+      if (isComplexFloat())
+        v = APValue(FloatReal, FloatImag);
+      else
+        v = APValue(IntReal, IntImag);
+    }
+    void setFrom(const APValue &v) {
+      assert(v.isComplexFloat() || v.isComplexInt());
+      if (v.isComplexFloat()) {
+        makeComplexFloat();
+        FloatReal = v.getComplexFloatReal();
+        FloatImag = v.getComplexFloatImag();
+      } else {
+        makeComplexInt();
+        IntReal = v.getComplexIntReal();
+        IntImag = v.getComplexIntImag();
+      }
+    }
+  };
+
+  /// An lvalue as computed by the evaluator: a base, a byte offset, a call
+  /// index (for frame-local temporaries) and a subobject designator.
+  struct LValue {
+    APValue::LValueBase Base;
+    CharUnits Offset;
+    unsigned CallIndex;
+    SubobjectDesignator Designator;
+
+    const APValue::LValueBase getLValueBase() const { return Base; }
+    CharUnits &getLValueOffset() { return Offset; }
+    const CharUnits &getLValueOffset() const { return Offset; }
+    unsigned getLValueCallIndex() const { return CallIndex; }
+    SubobjectDesignator &getLValueDesignator() { return Designator; }
+    const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+
+    void moveInto(APValue &V) const {
+      if (Designator.Invalid)
+        V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
+      else
+        V = APValue(Base, Offset, Designator.Entries,
+                    Designator.IsOnePastTheEnd, CallIndex);
+    }
+    void setFrom(ASTContext &Ctx, const APValue &V) {
+      assert(V.isLValue());
+      Base = V.getLValueBase();
+      Offset = V.getLValueOffset();
+      CallIndex = V.getLValueCallIndex();
+      Designator = SubobjectDesignator(Ctx, V);
+    }
+
+    void set(APValue::LValueBase B, unsigned I = 0) {
+      Base = B;
+      Offset = CharUnits::Zero();
+      CallIndex = I;
+      Designator = SubobjectDesignator(getType(B));
+    }
+
+    // Check that this LValue is not based on a null pointer. If it is, produce
+    // a diagnostic and mark the designator as invalid.
+    bool checkNullPointer(EvalInfo &Info, const Expr *E,
+                          CheckSubobjectKind CSK) {
+      if (Designator.Invalid)
+        return false;
+      if (!Base) {
+        Info.CCEDiag(E, diag::note_constexpr_null_subobject)
+          << CSK;
+        Designator.setInvalid();
+        return false;
+      }
+      return true;
+    }
+
+    // Check this LValue refers to an object. If not, set the designator to be
+    // invalid and emit a diagnostic.
+    bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+      // Outside C++11, do not build a designator referring to a subobject of
+      // any object: we won't use such a designator for anything.
+      if (!Info.getLangOpts().CPlusPlus11)
+        Designator.setInvalid();
+      return checkNullPointer(Info, E, CSK) &&
+             Designator.checkSubobject(Info, E, CSK);
+    }
+
+    void addDecl(EvalInfo &Info, const Expr *E,
+                 const Decl *D, bool Virtual = false) {
+      if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+        Designator.addDeclUnchecked(D, Virtual);
+    }
+    void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+      if (checkSubobject(Info, E, CSK_ArrayToPointer))
+        Designator.addArrayUnchecked(CAT);
+    }
+    void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+      if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+        Designator.addComplexUnchecked(EltTy, Imag);
+    }
+    void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+      if (checkNullPointer(Info, E, CSK_ArrayIndex))
+        Designator.adjustIndex(Info, E, N);
+    }
+  };
+
+  /// A member pointer value as computed by the evaluator.
+  struct MemberPtr {
+    MemberPtr() {}
+    explicit MemberPtr(const ValueDecl *Decl) :
+      DeclAndIsDerivedMember(Decl, false), Path() {}
+
+    /// The member or (direct or indirect) field referred to by this member
+    /// pointer, or 0 if this is a null member pointer.
+    const ValueDecl *getDecl() const {
+      return DeclAndIsDerivedMember.getPointer();
+    }
+    /// Is this actually a member of some type derived from the relevant class?
+    bool isDerivedMember() const {
+      return DeclAndIsDerivedMember.getInt();
+    }
+    /// Get the class which the declaration actually lives in.
+    const CXXRecordDecl *getContainingRecord() const {
+      return cast<CXXRecordDecl>(
+          DeclAndIsDerivedMember.getPointer()->getDeclContext());
+    }
+
+    void moveInto(APValue &V) const {
+      V = APValue(getDecl(), isDerivedMember(), Path);
+    }
+    void setFrom(const APValue &V) {
+      assert(V.isMemberPointer());
+      DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+      DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+      Path.clear();
+      ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
+      Path.insert(Path.end(), P.begin(), P.end());
+    }
+
+    /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+    /// whether the member is a member of some class derived from the class type
+    /// of the member pointer.
+    llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
+    /// Path - The path of base/derived classes from the member declaration's
+    /// class (exclusive) to the class type of the member pointer (inclusive).
+    SmallVector<const CXXRecordDecl*, 4> Path;
+
+    /// Perform a cast towards the class of the Decl (either up or down the
+    /// hierarchy).
+    bool castBack(const CXXRecordDecl *Class) {
+      assert(!Path.empty());
+      const CXXRecordDecl *Expected;
+      if (Path.size() >= 2)
+        Expected = Path[Path.size() - 2];
+      else
+        Expected = getContainingRecord();
+      if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+        // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+        // if B does not contain the original member and is not a base or
+        // derived class of the class containing the original member, the result
+        // of the cast is undefined.
+        // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+        // (D::*). We consider that to be a language defect.
+        return false;
+      }
+      Path.pop_back();
+      return true;
+    }
+    /// Perform a base-to-derived member pointer cast.
+    bool castToDerived(const CXXRecordDecl *Derived) {
+      if (!getDecl())
+        return true;
+      if (!isDerivedMember()) {
+        Path.push_back(Derived);
+        return true;
+      }
+      if (!castBack(Derived))
+        return false;
+      if (Path.empty())
+        DeclAndIsDerivedMember.setInt(false);
+      return true;
+    }
+    /// Perform a derived-to-base member pointer cast.
+    bool castToBase(const CXXRecordDecl *Base) {
+      if (!getDecl())
+        return true;
+      if (Path.empty())
+        DeclAndIsDerivedMember.setInt(true);
+      if (isDerivedMember()) {
+        Path.push_back(Base);
+        return true;
+      }
+      return castBack(Base);
+    }
+  };
+
+  /// Compare two member pointers, which are assumed to be of the same type.
+  static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+    if (!LHS.getDecl() || !RHS.getDecl())
+      return !LHS.getDecl() && !RHS.getDecl();
+    if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+      return false;
+    return LHS.Path == RHS.Path;
+  }
+}
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
+                            const LValue &This, const Expr *E,
+                            bool AllowNonLiteralTypes = false);
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+                                  EvalInfo &Info);
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+                                    EvalInfo &Info);
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
+static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
+
+//===----------------------------------------------------------------------===//
+// Misc utilities
+//===----------------------------------------------------------------------===//
+
+/// Produce a string describing the given constexpr call.
+static void describeCall(CallStackFrame *Frame, raw_ostream &Out) {
+  unsigned ArgIndex = 0;
+  bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
+                      !isa<CXXConstructorDecl>(Frame->Callee) &&
+                      cast<CXXMethodDecl>(Frame->Callee)->isInstance();
+
+  if (!IsMemberCall)
+    Out << *Frame->Callee << '(';
+
+  if (Frame->This && IsMemberCall) {
+    APValue Val;
+    Frame->This->moveInto(Val);
+    Val.printPretty(Out, Frame->Info.Ctx,
+                    Frame->This->Designator.MostDerivedType);
+    // FIXME: Add parens around Val if needed.
+    Out << "->" << *Frame->Callee << '(';
+    IsMemberCall = false;
+  }
+
+  for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
+       E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
+    // NOTE(review): when IsMemberCall is still set here (no 'This' binding was
+    // printed above), the first argument slot appears to describe the object
+    // expression, so the comma is suppressed for it — confirm against callers.
+    if (ArgIndex > (unsigned)IsMemberCall)
+      Out << ", ";
+
+    const ParmVarDecl *Param = *I;
+    const APValue &Arg = Frame->Arguments[ArgIndex];
+    Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
+
+    if (ArgIndex == 0 && IsMemberCall)
+      Out << "->" << *Frame->Callee << '(';
+  }
+
+  Out << ')';
+}
+
+/// Evaluate an expression to see if it had side-effects, and discard its
+/// result.
+/// \return \c true if the caller should keep evaluating.
+static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
+  APValue Scratch;
+  if (!Evaluate(Scratch, Info, E))
+    // We don't need the value, but we might have skipped a side effect here.
+    return Info.noteSideEffect();
+  return true;
+}
+
+/// Sign- or zero-extend a value to 64 bits. If it's already 64 bits, just
+/// return its existing value.
+static int64_t getExtValue(const APSInt &Value) {
+  return Value.isSigned() ? Value.getSExtValue()
+                          : static_cast<int64_t>(Value.getZExtValue());
+}
+
+/// Should this call expression be treated as a string literal?
+static bool IsStringLiteralCall(const CallExpr *E) {
+  unsigned Builtin = E->isBuiltinCall();
+  return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+          Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+}
+
+/// Determine whether the given lvalue base may appear in an address constant
+/// expression (C++11 [expr.const]p3), i.e. names an entity with static
+/// storage duration (or is null).
+static bool IsGlobalLValue(APValue::LValueBase B) {
+  // C++11 [expr.const]p3 An address constant expression is a prvalue core
+  // constant expression of pointer type that evaluates to...
+
+  // ... a null pointer value, or a prvalue core constant expression of type
+  // std::nullptr_t.
+  if (!B) return true;
+
+  if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+    // ... the address of an object with static storage duration,
+    if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+      return VD->hasGlobalStorage();
+    // ... the address of a function,
+    return isa<FunctionDecl>(D);
+  }
+
+  const Expr *E = B.get<const Expr*>();
+  switch (E->getStmtClass()) {
+  default:
+    return false;
+  case Expr::CompoundLiteralExprClass: {
+    const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+    return CLE->isFileScope() && CLE->isLValue();
+  }
+  case Expr::MaterializeTemporaryExprClass:
+    // A materialized temporary might have been lifetime-extended to static
+    // storage duration.
+    return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
+  // A string literal has static storage duration.
+  case Expr::StringLiteralClass:
+  case Expr::PredefinedExprClass:
+  case Expr::ObjCStringLiteralClass:
+  case Expr::ObjCEncodeExprClass:
+  case Expr::CXXTypeidExprClass:
+  case Expr::CXXUuidofExprClass:
+    return true;
+  case Expr::CallExprClass:
+    return IsStringLiteralCall(cast<CallExpr>(E));
+  // For GCC compatibility, &&label has static storage duration.
+  case Expr::AddrLabelExprClass:
+    return true;
+  // A Block literal expression may be used as the initialization value for
+  // Block variables at global or local static scope.
+  case Expr::BlockExprClass:
+    return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+  case Expr::ImplicitValueInitExprClass:
+    // FIXME:
+    // We can never form an lvalue with an implicit value initialization as its
+    // base through expression evaluation, so these only appear in one case: the
+    // implicit variable declaration we invent when checking whether a constexpr
+    // constructor can produce a constant expression. We must assume that such
+    // an expression might be a global lvalue.
+ return true; + } +} + +static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) { + assert(Base && "no location for a null lvalue"); + const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); + if (VD) + Info.Note(VD->getLocation(), diag::note_declared_at); + else + Info.Note(Base.get<const Expr*>()->getExprLoc(), + diag::note_constexpr_temporary_here); +} + +/// Check that this reference or pointer core constant expression is a valid +/// value for an address or reference constant expression. Return true if we +/// can fold this expression, whether or not it's a constant expression. +static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, + QualType Type, const LValue &LVal) { + bool IsReferenceType = Type->isReferenceType(); + + APValue::LValueBase Base = LVal.getLValueBase(); + const SubobjectDesignator &Designator = LVal.getLValueDesignator(); + + // Check that the object is a global. Note that the fake 'this' object we + // manufacture when checking potential constant expressions is conservatively + // assumed to be global here. + if (!IsGlobalLValue(Base)) { + if (Info.getLangOpts().CPlusPlus11) { + const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); + Info.Diag(Loc, diag::note_constexpr_non_global, 1) + << IsReferenceType << !Designator.Entries.empty() + << !!VD << VD; + NoteLValueLocation(Info, Base); + } else { + Info.Diag(Loc); + } + // Don't allow references to temporaries to escape. + return false; + } + assert((Info.checkingPotentialConstantExpression() || + LVal.getLValueCallIndex() == 0) && + "have call index for global lvalue"); + + // Check if this is a thread-local variable. + if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) { + if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) { + if (Var->getTLSKind()) + return false; + } + } + + // Allow address constant expressions to be past-the-end pointers. This is + // an extension: the standard requires them to point to an object. 
+ if (!IsReferenceType) + return true; + + // A reference constant expression must refer to an object. + if (!Base) { + // FIXME: diagnostic + Info.CCEDiag(Loc); + return true; + } + + // Does this refer one past the end of some object? + if (Designator.isOnePastTheEnd()) { + const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); + Info.Diag(Loc, diag::note_constexpr_past_end, 1) + << !Designator.Entries.empty() << !!VD << VD; + NoteLValueLocation(Info, Base); + } + + return true; +} + +/// Check that this core constant expression is of literal type, and if not, +/// produce an appropriate diagnostic. +static bool CheckLiteralType(EvalInfo &Info, const Expr *E, + const LValue *This = 0) { + if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx)) + return true; + + // C++1y: A constant initializer for an object o [...] may also invoke + // constexpr constructors for o and its subobjects even if those objects + // are of non-literal class types. + if (Info.getLangOpts().CPlusPlus1y && This && + Info.EvaluatingDecl == This->getLValueBase()) + return true; + + // Prvalue constant expressions must be of literal types. + if (Info.getLangOpts().CPlusPlus11) + Info.Diag(E, diag::note_constexpr_nonliteral) + << E->getType(); + else + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; +} + +/// Check that this core constant expression value is a valid value for a +/// constant expression. If not, report an appropriate diagnostic. Does not +/// check that the expression is of literal type. +static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, + QualType Type, const APValue &Value) { + if (Value.isUninit()) { + Info.Diag(DiagLoc, diag::note_constexpr_uninitialized) + << true << Type; + return false; + } + + // Core issue 1454: For a literal constant expression of array or class type, + // each subobject of its value shall have been initialized by a constant + // expression. 
+ if (Value.isArray()) { + QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType(); + for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) { + if (!CheckConstantExpression(Info, DiagLoc, EltTy, + Value.getArrayInitializedElt(I))) + return false; + } + if (!Value.hasArrayFiller()) + return true; + return CheckConstantExpression(Info, DiagLoc, EltTy, + Value.getArrayFiller()); + } + if (Value.isUnion() && Value.getUnionField()) { + return CheckConstantExpression(Info, DiagLoc, + Value.getUnionField()->getType(), + Value.getUnionValue()); + } + if (Value.isStruct()) { + RecordDecl *RD = Type->castAs<RecordType>()->getDecl(); + if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) { + unsigned BaseIndex = 0; + for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(), + End = CD->bases_end(); I != End; ++I, ++BaseIndex) { + if (!CheckConstantExpression(Info, DiagLoc, I->getType(), + Value.getStructBase(BaseIndex))) + return false; + } + } + for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); + I != E; ++I) { + if (!CheckConstantExpression(Info, DiagLoc, I->getType(), + Value.getStructField(I->getFieldIndex()))) + return false; + } + } + + if (Value.isLValue()) { + LValue LVal; + LVal.setFrom(Info.Ctx, Value); + return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal); + } + + // Everything else is fine. + return true; +} + +const ValueDecl *GetLValueBaseDecl(const LValue &LVal) { + return LVal.Base.dyn_cast<const ValueDecl*>(); +} + +static bool IsLiteralLValue(const LValue &Value) { + if (Value.CallIndex) + return false; + const Expr *E = Value.Base.dyn_cast<const Expr*>(); + return E && !isa<MaterializeTemporaryExpr>(E); +} + +static bool IsWeakLValue(const LValue &Value) { + const ValueDecl *Decl = GetLValueBaseDecl(Value); + return Decl && Decl->isWeak(); +} + +static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) { + // A null base expression indicates a null pointer. 
These are always + // evaluatable, and they are false unless the offset is zero. + if (!Value.getLValueBase()) { + Result = !Value.getLValueOffset().isZero(); + return true; + } + + // We have a non-null base. These are generally known to be true, but if it's + // a weak declaration it can be null at runtime. + Result = true; + const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>(); + return !Decl || !Decl->isWeak(); +} + +static bool HandleConversionToBool(const APValue &Val, bool &Result) { + switch (Val.getKind()) { + case APValue::Uninitialized: + return false; + case APValue::Int: + Result = Val.getInt().getBoolValue(); + return true; + case APValue::Float: + Result = !Val.getFloat().isZero(); + return true; + case APValue::ComplexInt: + Result = Val.getComplexIntReal().getBoolValue() || + Val.getComplexIntImag().getBoolValue(); + return true; + case APValue::ComplexFloat: + Result = !Val.getComplexFloatReal().isZero() || + !Val.getComplexFloatImag().isZero(); + return true; + case APValue::LValue: + return EvalPointerValueAsBool(Val, Result); + case APValue::MemberPointer: + Result = Val.getMemberPointerDecl(); + return true; + case APValue::Vector: + case APValue::Array: + case APValue::Struct: + case APValue::Union: + case APValue::AddrLabelDiff: + return false; + } + + llvm_unreachable("unknown APValue kind"); +} + +static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result, + EvalInfo &Info) { + assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition"); + APValue Val; + if (!Evaluate(Val, Info, E)) + return false; + return HandleConversionToBool(Val, Result); +} + +template<typename T> +static void HandleOverflow(EvalInfo &Info, const Expr *E, + const T &SrcValue, QualType DestType) { + Info.CCEDiag(E, diag::note_constexpr_overflow) + << SrcValue << DestType; +} + +static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E, + QualType SrcType, const APFloat &Value, + QualType DestType, APSInt &Result) { + 
unsigned DestWidth = Info.Ctx.getIntWidth(DestType); + // Determine whether we are converting to unsigned or signed. + bool DestSigned = DestType->isSignedIntegerOrEnumerationType(); + + Result = APSInt(DestWidth, !DestSigned); + bool ignored; + if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored) + & APFloat::opInvalidOp) + HandleOverflow(Info, E, Value, DestType); + return true; +} + +static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E, + QualType SrcType, QualType DestType, + APFloat &Result) { + APFloat Value = Result; + bool ignored; + if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType), + APFloat::rmNearestTiesToEven, &ignored) + & APFloat::opOverflow) + HandleOverflow(Info, E, Value, DestType); + return true; +} + +static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E, + QualType DestType, QualType SrcType, + APSInt &Value) { + unsigned DestWidth = Info.Ctx.getIntWidth(DestType); + APSInt Result = Value; + // Figure out if this is a truncate, extend or noop cast. + // If the input is signed, do a sign extend, noop, or truncate. + Result = Result.extOrTrunc(DestWidth); + Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType()); + return Result; +} + +static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E, + QualType SrcType, const APSInt &Value, + QualType DestType, APFloat &Result) { + Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1); + if (Result.convertFromAPInt(Value, Value.isSigned(), + APFloat::rmNearestTiesToEven) + & APFloat::opOverflow) + HandleOverflow(Info, E, Value, DestType); + return true; +} + +static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, + APValue &Value, const FieldDecl *FD) { + assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield"); + + if (!Value.isInt()) { + // Trying to store a pointer-cast-to-integer into a bitfield. + // FIXME: In this case, we should provide the diagnostic for casting + // a pointer to an integer. 
+ assert(Value.isLValue() && "integral value neither int nor lvalue?"); + Info.Diag(E); + return false; + } + + APSInt &Int = Value.getInt(); + unsigned OldBitWidth = Int.getBitWidth(); + unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx); + if (NewBitWidth < OldBitWidth) + Int = Int.trunc(NewBitWidth).extend(OldBitWidth); + return true; +} + +static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E, + llvm::APInt &Res) { + APValue SVal; + if (!Evaluate(SVal, Info, E)) + return false; + if (SVal.isInt()) { + Res = SVal.getInt(); + return true; + } + if (SVal.isFloat()) { + Res = SVal.getFloat().bitcastToAPInt(); + return true; + } + if (SVal.isVector()) { + QualType VecTy = E->getType(); + unsigned VecSize = Info.Ctx.getTypeSize(VecTy); + QualType EltTy = VecTy->castAs<VectorType>()->getElementType(); + unsigned EltSize = Info.Ctx.getTypeSize(EltTy); + bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); + Res = llvm::APInt::getNullValue(VecSize); + for (unsigned i = 0; i < SVal.getVectorLength(); i++) { + APValue &Elt = SVal.getVectorElt(i); + llvm::APInt EltAsInt; + if (Elt.isInt()) { + EltAsInt = Elt.getInt(); + } else if (Elt.isFloat()) { + EltAsInt = Elt.getFloat().bitcastToAPInt(); + } else { + // Don't try to handle vectors of anything other than int or float + // (not sure if it's possible to hit this case). + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; + } + unsigned BaseEltSize = EltAsInt.getBitWidth(); + if (BigEndian) + Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize); + else + Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize); + } + return true; + } + // Give up if the input isn't an int, float, or vector. For example, we + // reject "(v4i16)(intptr_t)&a". 
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; +} + +/// Perform the given integer operation, which is known to need at most BitWidth +/// bits, and check for overflow in the original type (if that type was not an +/// unsigned type). +template<typename Operation> +static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E, + const APSInt &LHS, const APSInt &RHS, + unsigned BitWidth, Operation Op) { + if (LHS.isUnsigned()) + return Op(LHS, RHS); + + APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false); + APSInt Result = Value.trunc(LHS.getBitWidth()); + if (Result.extend(BitWidth) != Value) { + if (Info.checkingForOverflow()) + Info.Ctx.getDiagnostics().Report(E->getExprLoc(), + diag::warn_integer_constant_overflow) + << Result.toString(10) << E->getType(); + else + HandleOverflow(Info, E, Value, E->getType()); + } + return Result; +} + +/// Perform the given binary integer operation. +static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, + BinaryOperatorKind Opcode, APSInt RHS, + APSInt &Result) { + switch (Opcode) { + default: + Info.Diag(E); + return false; + case BO_Mul: + Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2, + std::multiplies<APSInt>()); + return true; + case BO_Add: + Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1, + std::plus<APSInt>()); + return true; + case BO_Sub: + Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1, + std::minus<APSInt>()); + return true; + case BO_And: Result = LHS & RHS; return true; + case BO_Xor: Result = LHS ^ RHS; return true; + case BO_Or: Result = LHS | RHS; return true; + case BO_Div: + case BO_Rem: + if (RHS == 0) { + Info.Diag(E, diag::note_expr_divide_by_zero); + return false; + } + // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. 
+ if (RHS.isNegative() && RHS.isAllOnesValue() && + LHS.isSigned() && LHS.isMinSignedValue()) + HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType()); + Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS); + return true; + case BO_Shl: { + if (Info.getLangOpts().OpenCL) + // OpenCL 6.3j: shift values are effectively % word size of LHS. + RHS &= APSInt(llvm::APInt(RHS.getBitWidth(), + static_cast<uint64_t>(LHS.getBitWidth() - 1)), + RHS.isUnsigned()); + else if (RHS.isSigned() && RHS.isNegative()) { + // During constant-folding, a negative shift is an opposite shift. Such + // a shift is not a constant expression. + Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS; + RHS = -RHS; + goto shift_right; + } + shift_left: + // C++11 [expr.shift]p1: Shift width must be less than the bit width of + // the shifted type. + unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1); + if (SA != RHS) { + Info.CCEDiag(E, diag::note_constexpr_large_shift) + << RHS << E->getType() << LHS.getBitWidth(); + } else if (LHS.isSigned()) { + // C++11 [expr.shift]p2: A signed left shift must have a non-negative + // operand, and must not overflow the corresponding unsigned type. + if (LHS.isNegative()) + Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS; + else if (LHS.countLeadingZeros() < SA) + Info.CCEDiag(E, diag::note_constexpr_lshift_discards); + } + Result = LHS << SA; + return true; + } + case BO_Shr: { + if (Info.getLangOpts().OpenCL) + // OpenCL 6.3j: shift values are effectively % word size of LHS. + RHS &= APSInt(llvm::APInt(RHS.getBitWidth(), + static_cast<uint64_t>(LHS.getBitWidth() - 1)), + RHS.isUnsigned()); + else if (RHS.isSigned() && RHS.isNegative()) { + // During constant-folding, a negative shift is an opposite shift. Such a + // shift is not a constant expression. 
+ Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS; + RHS = -RHS; + goto shift_left; + } + shift_right: + // C++11 [expr.shift]p1: Shift width must be less than the bit width of the + // shifted type. + unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1); + if (SA != RHS) + Info.CCEDiag(E, diag::note_constexpr_large_shift) + << RHS << E->getType() << LHS.getBitWidth(); + Result = LHS >> SA; + return true; + } + + case BO_LT: Result = LHS < RHS; return true; + case BO_GT: Result = LHS > RHS; return true; + case BO_LE: Result = LHS <= RHS; return true; + case BO_GE: Result = LHS >= RHS; return true; + case BO_EQ: Result = LHS == RHS; return true; + case BO_NE: Result = LHS != RHS; return true; + } +} + +/// Perform the given binary floating-point operation, in-place, on LHS. +static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E, + APFloat &LHS, BinaryOperatorKind Opcode, + const APFloat &RHS) { + switch (Opcode) { + default: + Info.Diag(E); + return false; + case BO_Mul: + LHS.multiply(RHS, APFloat::rmNearestTiesToEven); + break; + case BO_Add: + LHS.add(RHS, APFloat::rmNearestTiesToEven); + break; + case BO_Sub: + LHS.subtract(RHS, APFloat::rmNearestTiesToEven); + break; + case BO_Div: + LHS.divide(RHS, APFloat::rmNearestTiesToEven); + break; + } + + if (LHS.isInfinity() || LHS.isNaN()) + Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN(); + return true; +} + +/// Cast an lvalue referring to a base subobject to a derived class, by +/// truncating the lvalue's path to the given length. +static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result, + const RecordDecl *TruncatedType, + unsigned TruncatedElements) { + SubobjectDesignator &D = Result.Designator; + + // Check we actually point to a derived class object. 
+ if (TruncatedElements == D.Entries.size()) + return true; + assert(TruncatedElements >= D.MostDerivedPathLength && + "not casting to a derived class"); + if (!Result.checkSubobject(Info, E, CSK_Derived)) + return false; + + // Truncate the path to the subobject, and remove any derived-to-base offsets. + const RecordDecl *RD = TruncatedType; + for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) { + if (RD->isInvalidDecl()) return false; + const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); + const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]); + if (isVirtualBaseClass(D.Entries[I])) + Result.Offset -= Layout.getVBaseClassOffset(Base); + else + Result.Offset -= Layout.getBaseClassOffset(Base); + RD = Base; + } + D.Entries.resize(TruncatedElements); + return true; +} + +static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj, + const CXXRecordDecl *Derived, + const CXXRecordDecl *Base, + const ASTRecordLayout *RL = 0) { + if (!RL) { + if (Derived->isInvalidDecl()) return false; + RL = &Info.Ctx.getASTRecordLayout(Derived); + } + + Obj.getLValueOffset() += RL->getBaseClassOffset(Base); + Obj.addDecl(Info, E, Base, /*Virtual*/ false); + return true; +} + +static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj, + const CXXRecordDecl *DerivedDecl, + const CXXBaseSpecifier *Base) { + const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); + + if (!Base->isVirtual()) + return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl); + + SubobjectDesignator &D = Obj.Designator; + if (D.Invalid) + return false; + + // Extract most-derived object and corresponding type. + DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl(); + if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength)) + return false; + + // Find the virtual base class. 
+ if (DerivedDecl->isInvalidDecl()) return false; + const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl); + Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl); + Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true); + return true; +} + +static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E, + QualType Type, LValue &Result) { + for (CastExpr::path_const_iterator PathI = E->path_begin(), + PathE = E->path_end(); + PathI != PathE; ++PathI) { + if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(), + *PathI)) + return false; + Type = (*PathI)->getType(); + } + return true; +} + +/// Update LVal to refer to the given field, which must be a member of the type +/// currently described by LVal. +static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal, + const FieldDecl *FD, + const ASTRecordLayout *RL = 0) { + if (!RL) { + if (FD->getParent()->isInvalidDecl()) return false; + RL = &Info.Ctx.getASTRecordLayout(FD->getParent()); + } + + unsigned I = FD->getFieldIndex(); + LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I)); + LVal.addDecl(Info, E, FD); + return true; +} + +/// Update LVal to refer to the given indirect field. +static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E, + LValue &LVal, + const IndirectFieldDecl *IFD) { + for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(), + CE = IFD->chain_end(); C != CE; ++C) + if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C))) + return false; + return true; +} + +/// Get the size of the given type in char units. +static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, + QualType Type, CharUnits &Size) { + // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc + // extension. + if (Type->isVoidType() || Type->isFunctionType()) { + Size = CharUnits::One(); + return true; + } + + if (!Type->isConstantSizeType()) { + // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2. 
+ // FIXME: Better diagnostic. + Info.Diag(Loc); + return false; + } + + Size = Info.Ctx.getTypeSizeInChars(Type); + return true; +} + +/// Update a pointer value to model pointer arithmetic. +/// \param Info - Information about the ongoing evaluation. +/// \param E - The expression being evaluated, for diagnostic purposes. +/// \param LVal - The pointer value to be updated. +/// \param EltTy - The pointee type represented by LVal. +/// \param Adjustment - The adjustment, in objects of type EltTy, to add. +static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E, + LValue &LVal, QualType EltTy, + int64_t Adjustment) { + CharUnits SizeOfPointee; + if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee)) + return false; + + // Compute the new offset in the appropriate width. + LVal.Offset += Adjustment * SizeOfPointee; + LVal.adjustIndex(Info, E, Adjustment); + return true; +} + +/// Update an lvalue to refer to a component of a complex number. +/// \param Info - Information about the ongoing evaluation. +/// \param LVal - The lvalue to be updated. +/// \param EltTy - The complex number's component type. +/// \param Imag - False for the real component, true for the imaginary. +static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E, + LValue &LVal, QualType EltTy, + bool Imag) { + if (Imag) { + CharUnits SizeOfComponent; + if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent)) + return false; + LVal.Offset += SizeOfComponent; + } + LVal.addComplex(Info, E, EltTy, Imag); + return true; +} + +/// Try to evaluate the initializer for a variable declaration. +/// +/// \param Info Information about the ongoing evaluation. +/// \param E An expression to be used when printing diagnostics. +/// \param VD The variable whose initializer should be obtained. +/// \param Frame The frame in which the variable was created. Must be null +/// if this variable is not local to the evaluation. 
+/// \param Result Filled in with a pointer to the value of the variable. +static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E, + const VarDecl *VD, CallStackFrame *Frame, + APValue *&Result) { + // If this is a parameter to an active constexpr function call, perform + // argument substitution. + if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) { + // Assume arguments of a potential constant expression are unknown + // constant expressions. + if (Info.checkingPotentialConstantExpression()) + return false; + if (!Frame || !Frame->Arguments) { + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; + } + Result = &Frame->Arguments[PVD->getFunctionScopeIndex()]; + return true; + } + + // If this is a local variable, dig out its value. + if (Frame) { + Result = Frame->getTemporary(VD); + assert(Result && "missing value for local variable"); + return true; + } + + // Dig out the initializer, and use the declaration which it's attached to. + const Expr *Init = VD->getAnyInitializer(VD); + if (!Init || Init->isValueDependent()) { + // If we're checking a potential constant expression, the variable could be + // initialized later. + if (!Info.checkingPotentialConstantExpression()) + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; + } + + // If we're currently evaluating the initializer of this declaration, use that + // in-flight value. + if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) { + Result = Info.EvaluatingDeclValue; + return true; + } + + // Never evaluate the initializer of a weak variable. We can't be sure that + // this is the definition which will be used. + if (VD->isWeak()) { + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + return false; + } + + // Check that we can fold the initializer. In C++, we will have already done + // this in the cases where it matters for conformance. 
+ SmallVector<PartialDiagnosticAt, 8> Notes; + if (!VD->evaluateValue(Notes)) { + Info.Diag(E, diag::note_constexpr_var_init_non_constant, + Notes.size() + 1) << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + Info.addNotes(Notes); + return false; + } else if (!VD->checkInitIsICE()) { + Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant, + Notes.size() + 1) << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + Info.addNotes(Notes); + } + + Result = VD->getEvaluatedValue(); + return true; +} + +static bool IsConstNonVolatile(QualType T) { + Qualifiers Quals = T.getQualifiers(); + return Quals.hasConst() && !Quals.hasVolatile(); +} + +/// Get the base index of the given base class within an APValue representing +/// the given derived class. +static unsigned getBaseIndex(const CXXRecordDecl *Derived, + const CXXRecordDecl *Base) { + Base = Base->getCanonicalDecl(); + unsigned Index = 0; + for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(), + E = Derived->bases_end(); I != E; ++I, ++Index) { + if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base) + return Index; + } + + llvm_unreachable("base class missing from derived class's bases list"); +} + +/// Extract the value of a character from a string literal. 
+static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit, + uint64_t Index) { + // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant + const StringLiteral *S = cast<StringLiteral>(Lit); + const ConstantArrayType *CAT = + Info.Ctx.getAsConstantArrayType(S->getType()); + assert(CAT && "string literal isn't an array"); + QualType CharType = CAT->getElementType(); + assert(CharType->isIntegerType() && "unexpected character type"); + + APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), + CharType->isUnsignedIntegerType()); + if (Index < S->getLength()) + Value = S->getCodeUnit(Index); + return Value; +} + +// Expand a string literal into an array of characters. +static void expandStringLiteral(EvalInfo &Info, const Expr *Lit, + APValue &Result) { + const StringLiteral *S = cast<StringLiteral>(Lit); + const ConstantArrayType *CAT = + Info.Ctx.getAsConstantArrayType(S->getType()); + assert(CAT && "string literal isn't an array"); + QualType CharType = CAT->getElementType(); + assert(CharType->isIntegerType() && "unexpected character type"); + + unsigned Elts = CAT->getSize().getZExtValue(); + Result = APValue(APValue::UninitArray(), + std::min(S->getLength(), Elts), Elts); + APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), + CharType->isUnsignedIntegerType()); + if (Result.hasArrayFiller()) + Result.getArrayFiller() = APValue(Value); + for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) { + Value = S->getCodeUnit(I); + Result.getArrayInitializedElt(I) = APValue(Value); + } +} + +// Expand an array so that it has more than Index filled elements. +static void expandArray(APValue &Array, unsigned Index) { + unsigned Size = Array.getArraySize(); + assert(Index < Size); + + // Always at least double the number of elements for which we store a value. 
+ unsigned OldElts = Array.getArrayInitializedElts(); + unsigned NewElts = std::max(Index+1, OldElts * 2); + NewElts = std::min(Size, std::max(NewElts, 8u)); + + // Copy the data across. + APValue NewValue(APValue::UninitArray(), NewElts, Size); + for (unsigned I = 0; I != OldElts; ++I) + NewValue.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I)); + for (unsigned I = OldElts; I != NewElts; ++I) + NewValue.getArrayInitializedElt(I) = Array.getArrayFiller(); + if (NewValue.hasArrayFiller()) + NewValue.getArrayFiller() = Array.getArrayFiller(); + Array.swap(NewValue); +} + +/// Kinds of access we can perform on an object, for diagnostics. +enum AccessKinds { + AK_Read, + AK_Assign, + AK_Increment, + AK_Decrement +}; + +/// A handle to a complete object (an object that is not a subobject of +/// another object). +struct CompleteObject { + /// The value of the complete object. + APValue *Value; + /// The type of the complete object. + QualType Type; + + CompleteObject() : Value(0) {} + CompleteObject(APValue *Value, QualType Type) + : Value(Value), Type(Type) { + assert(Value && "missing value for complete object"); + } + + LLVM_EXPLICIT operator bool() const { return Value; } +}; + +/// Find the designated sub-object of an rvalue. +template<typename SubobjectHandler> +typename SubobjectHandler::result_type +findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj, + const SubobjectDesignator &Sub, SubobjectHandler &handler) { + if (Sub.Invalid) + // A diagnostic will have already been produced. + return handler.failed(); + if (Sub.isOnePastTheEnd()) { + if (Info.getLangOpts().CPlusPlus11) + Info.Diag(E, diag::note_constexpr_access_past_end) + << handler.AccessKind; + else + Info.Diag(E); + return handler.failed(); + } + + APValue *O = Obj.Value; + QualType ObjType = Obj.Type; + const FieldDecl *LastField = 0; + + // Walk the designator's path to find the subobject. 
+ for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) { + if (O->isUninit()) { + if (!Info.checkingPotentialConstantExpression()) + Info.Diag(E, diag::note_constexpr_access_uninit) << handler.AccessKind; + return handler.failed(); + } + + if (I == N) { + if (!handler.found(*O, ObjType)) + return false; + + // If we modified a bit-field, truncate it to the right width. + if (handler.AccessKind != AK_Read && + LastField && LastField->isBitField() && + !truncateBitfieldValue(Info, E, *O, LastField)) + return false; + + return true; + } + + LastField = 0; + if (ObjType->isArrayType()) { + // Next subobject is an array element. + const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType); + assert(CAT && "vla in literal type?"); + uint64_t Index = Sub.Entries[I].ArrayIndex; + if (CAT->getSize().ule(Index)) { + // Note, it should not be possible to form a pointer with a valid + // designator which points more than one past the end of the array. + if (Info.getLangOpts().CPlusPlus11) + Info.Diag(E, diag::note_constexpr_access_past_end) + << handler.AccessKind; + else + Info.Diag(E); + return handler.failed(); + } + + ObjType = CAT->getElementType(); + + // An array object is represented as either an Array APValue or as an + // LValue which refers to a string literal. + if (O->isLValue()) { + assert(I == N - 1 && "extracting subobject of character?"); + assert(!O->hasLValuePath() || O->getLValuePath().empty()); + if (handler.AccessKind != AK_Read) + expandStringLiteral(Info, O->getLValueBase().get<const Expr *>(), + *O); + else + return handler.foundString(*O, ObjType, Index); + } + + if (O->getArrayInitializedElts() > Index) + O = &O->getArrayInitializedElt(Index); + else if (handler.AccessKind != AK_Read) { + expandArray(*O, Index); + O = &O->getArrayInitializedElt(Index); + } else + O = &O->getArrayFiller(); + } else if (ObjType->isAnyComplexType()) { + // Next subobject is a complex number. 
+ uint64_t Index = Sub.Entries[I].ArrayIndex; + if (Index > 1) { + if (Info.getLangOpts().CPlusPlus11) + Info.Diag(E, diag::note_constexpr_access_past_end) + << handler.AccessKind; + else + Info.Diag(E); + return handler.failed(); + } + + bool WasConstQualified = ObjType.isConstQualified(); + ObjType = ObjType->castAs<ComplexType>()->getElementType(); + if (WasConstQualified) + ObjType.addConst(); + + assert(I == N - 1 && "extracting subobject of scalar?"); + if (O->isComplexInt()) { + return handler.found(Index ? O->getComplexIntImag() + : O->getComplexIntReal(), ObjType); + } else { + assert(O->isComplexFloat()); + return handler.found(Index ? O->getComplexFloatImag() + : O->getComplexFloatReal(), ObjType); + } + } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) { + if (Field->isMutable() && handler.AccessKind == AK_Read) { + Info.Diag(E, diag::note_constexpr_ltor_mutable, 1) + << Field; + Info.Note(Field->getLocation(), diag::note_declared_at); + return handler.failed(); + } + + // Next subobject is a class, struct or union field. + RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl(); + if (RD->isUnion()) { + const FieldDecl *UnionField = O->getUnionField(); + if (!UnionField || + UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) { + Info.Diag(E, diag::note_constexpr_access_inactive_union_member) + << handler.AccessKind << Field << !UnionField << UnionField; + return handler.failed(); + } + O = &O->getUnionValue(); + } else + O = &O->getStructField(Field->getFieldIndex()); + + bool WasConstQualified = ObjType.isConstQualified(); + ObjType = Field->getType(); + if (WasConstQualified && !Field->isMutable()) + ObjType.addConst(); + + if (ObjType.isVolatileQualified()) { + if (Info.getLangOpts().CPlusPlus) { + // FIXME: Include a description of the path to the volatile subobject. 
+ Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1) + << handler.AccessKind << 2 << Field; + Info.Note(Field->getLocation(), diag::note_declared_at); + } else { + Info.Diag(E, diag::note_invalid_subexpr_in_const_expr); + } + return handler.failed(); + } + + LastField = Field; + } else { + // Next subobject is a base class. + const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl(); + const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]); + O = &O->getStructBase(getBaseIndex(Derived, Base)); + + bool WasConstQualified = ObjType.isConstQualified(); + ObjType = Info.Ctx.getRecordType(Base); + if (WasConstQualified) + ObjType.addConst(); + } + } +} + +namespace { +struct ExtractSubobjectHandler { + EvalInfo &Info; + APValue &Result; + + static const AccessKinds AccessKind = AK_Read; + + typedef bool result_type; + bool failed() { return false; } + bool found(APValue &Subobj, QualType SubobjType) { + Result = Subobj; + return true; + } + bool found(APSInt &Value, QualType SubobjType) { + Result = APValue(Value); + return true; + } + bool found(APFloat &Value, QualType SubobjType) { + Result = APValue(Value); + return true; + } + bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) { + Result = APValue(extractStringLiteralCharacter( + Info, Subobj.getLValueBase().get<const Expr *>(), Character)); + return true; + } +}; +} // end anonymous namespace + +const AccessKinds ExtractSubobjectHandler::AccessKind; + +/// Extract the designated sub-object of an rvalue. 
+static bool extractSubobject(EvalInfo &Info, const Expr *E, + const CompleteObject &Obj, + const SubobjectDesignator &Sub, + APValue &Result) { + ExtractSubobjectHandler Handler = { Info, Result }; + return findSubobject(Info, E, Obj, Sub, Handler); +} + +namespace { +struct ModifySubobjectHandler { + EvalInfo &Info; + APValue &NewVal; + const Expr *E; + + typedef bool result_type; + static const AccessKinds AccessKind = AK_Assign; + + bool checkConst(QualType QT) { + // Assigning to a const object has undefined behavior. + if (QT.isConstQualified()) { + Info.Diag(E, diag::note_constexpr_modify_const_type) << QT; + return false; + } + return true; + } + + bool failed() { return false; } + bool found(APValue &Subobj, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + // We've been given ownership of NewVal, so just swap it in. + Subobj.swap(NewVal); + return true; + } + bool found(APSInt &Value, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + if (!NewVal.isInt()) { + // Maybe trying to write a cast pointer value into a complex? + Info.Diag(E); + return false; + } + Value = NewVal.getInt(); + return true; + } + bool found(APFloat &Value, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + Value = NewVal.getFloat(); + return true; + } + bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) { + llvm_unreachable("shouldn't encounter string elements with ExpandArrays"); + } +}; +} // end anonymous namespace + +const AccessKinds ModifySubobjectHandler::AccessKind; + +/// Update the designated sub-object of an rvalue to the given value. 
+static bool modifySubobject(EvalInfo &Info, const Expr *E, + const CompleteObject &Obj, + const SubobjectDesignator &Sub, + APValue &NewVal) { + ModifySubobjectHandler Handler = { Info, NewVal, E }; + return findSubobject(Info, E, Obj, Sub, Handler); +} + +/// Find the position where two subobject designators diverge, or equivalently +/// the length of the common initial subsequence. +static unsigned FindDesignatorMismatch(QualType ObjType, + const SubobjectDesignator &A, + const SubobjectDesignator &B, + bool &WasArrayIndex) { + unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size()); + for (/**/; I != N; ++I) { + if (!ObjType.isNull() && + (ObjType->isArrayType() || ObjType->isAnyComplexType())) { + // Next subobject is an array element. + if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) { + WasArrayIndex = true; + return I; + } + if (ObjType->isAnyComplexType()) + ObjType = ObjType->castAs<ComplexType>()->getElementType(); + else + ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType(); + } else { + if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) { + WasArrayIndex = false; + return I; + } + if (const FieldDecl *FD = getAsField(A.Entries[I])) + // Next subobject is a field. + ObjType = FD->getType(); + else + // Next subobject is a base class. + ObjType = QualType(); + } + } + WasArrayIndex = false; + return I; +} + +/// Determine whether the given subobject designators refer to elements of the +/// same array object. +static bool AreElementsOfSameArray(QualType ObjType, + const SubobjectDesignator &A, + const SubobjectDesignator &B) { + if (A.Entries.size() != B.Entries.size()) + return false; + + bool IsArray = A.MostDerivedArraySize != 0; + if (IsArray && A.MostDerivedPathLength != A.Entries.size()) + // A is a subobject of the array element. + return false; + + // If A (and B) designates an array element, the last entry will be the array + // index. That doesn't have to match. 
Otherwise, we're in the 'implicit array + // of length 1' case, and the entire path must match. + bool WasArrayIndex; + unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex); + return CommonLength >= A.Entries.size() - IsArray; +} + +/// Find the complete object to which an LValue refers. +CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, AccessKinds AK, + const LValue &LVal, QualType LValType) { + if (!LVal.Base) { + Info.Diag(E, diag::note_constexpr_access_null) << AK; + return CompleteObject(); + } + + CallStackFrame *Frame = 0; + if (LVal.CallIndex) { + Frame = Info.getCallFrame(LVal.CallIndex); + if (!Frame) { + Info.Diag(E, diag::note_constexpr_lifetime_ended, 1) + << AK << LVal.Base.is<const ValueDecl*>(); + NoteLValueLocation(Info, LVal.Base); + return CompleteObject(); + } + } + + // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type + // is not a constant expression (even if the object is non-volatile). We also + // apply this rule to C++98, in order to conform to the expected 'volatile' + // semantics. + if (LValType.isVolatileQualified()) { + if (Info.getLangOpts().CPlusPlus) + Info.Diag(E, diag::note_constexpr_access_volatile_type) + << AK << LValType; + else + Info.Diag(E); + return CompleteObject(); + } + + // Compute value storage location and type of base object. + APValue *BaseVal = 0; + QualType BaseType = getType(LVal.Base); + + if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) { + // In C++98, const, non-volatile integers initialized with ICEs are ICEs. + // In C++11, constexpr, non-volatile variables initialized with constant + // expressions are constant expressions too. Inside constexpr functions, + // parameters are constant expressions even if they're non-const. + // In C++1y, objects local to a constant expression (those with a Frame) are + // both readable and writable inside constant expressions. 
+ // In C, such things can also be folded, although they are not ICEs. + const VarDecl *VD = dyn_cast<VarDecl>(D); + if (VD) { + if (const VarDecl *VDef = VD->getDefinition(Info.Ctx)) + VD = VDef; + } + if (!VD || VD->isInvalidDecl()) { + Info.Diag(E); + return CompleteObject(); + } + + // Accesses of volatile-qualified objects are not allowed. + if (BaseType.isVolatileQualified()) { + if (Info.getLangOpts().CPlusPlus) { + Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1) + << AK << 1 << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + } else { + Info.Diag(E); + } + return CompleteObject(); + } + + // Unless we're looking at a local variable or argument in a constexpr call, + // the variable we're reading must be const. + if (!Frame) { + if (Info.getLangOpts().CPlusPlus1y && + VD == Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()) { + // OK, we can read and modify an object if we're in the process of + // evaluating its initializer, because its lifetime began in this + // evaluation. + } else if (AK != AK_Read) { + // All the remaining cases only permit reading. + Info.Diag(E, diag::note_constexpr_modify_global); + return CompleteObject(); + } else if (VD->isConstexpr()) { + // OK, we can read this variable. + } else if (BaseType->isIntegralOrEnumerationType()) { + if (!BaseType.isConstQualified()) { + if (Info.getLangOpts().CPlusPlus) { + Info.Diag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + } else { + Info.Diag(E); + } + return CompleteObject(); + } + } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) { + // We support folding of const floating-point types, in order to make + // static const data members of such types (supported as an extension) + // more useful. 
+ if (Info.getLangOpts().CPlusPlus11) { + Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + } else { + Info.CCEDiag(E); + } + } else { + // FIXME: Allow folding of values of any literal type in all languages. + if (Info.getLangOpts().CPlusPlus11) { + Info.Diag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD; + Info.Note(VD->getLocation(), diag::note_declared_at); + } else { + Info.Diag(E); + } + return CompleteObject(); + } + } + + if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal)) + return CompleteObject(); + } else { + const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); + + if (!Frame) { + if (const MaterializeTemporaryExpr *MTE = + dyn_cast<MaterializeTemporaryExpr>(Base)) { + assert(MTE->getStorageDuration() == SD_Static && + "should have a frame for a non-global materialized temporary"); + + // Per C++1y [expr.const]p2: + // an lvalue-to-rvalue conversion [is not allowed unless it applies to] + // - a [...] glvalue of integral or enumeration type that refers to + // a non-volatile const object [...] + // [...] + // - a [...] glvalue of literal type that refers to a non-volatile + // object whose lifetime began within the evaluation of e. + // + // C++11 misses the 'began within the evaluation of e' check and + // instead allows all temporaries, including things like: + // int &&r = 1; + // int x = ++r; + // constexpr int k = r; + // Therefore we use the C++1y rules in C++11 too. 
+ const ValueDecl *VD = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>(); + const ValueDecl *ED = MTE->getExtendingDecl(); + if (!(BaseType.isConstQualified() && + BaseType->isIntegralOrEnumerationType()) && + !(VD && VD->getCanonicalDecl() == ED->getCanonicalDecl())) { + Info.Diag(E, diag::note_constexpr_access_static_temporary, 1) << AK; + Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here); + return CompleteObject(); + } + + BaseVal = Info.Ctx.getMaterializedTemporaryValue(MTE, false); + assert(BaseVal && "got reference to unevaluated temporary"); + } else { + Info.Diag(E); + return CompleteObject(); + } + } else { + BaseVal = Frame->getTemporary(Base); + assert(BaseVal && "missing value for temporary"); + } + + // Volatile temporary objects cannot be accessed in constant expressions. + if (BaseType.isVolatileQualified()) { + if (Info.getLangOpts().CPlusPlus) { + Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1) + << AK << 0; + Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here); + } else { + Info.Diag(E); + } + return CompleteObject(); + } + } + + // During the construction of an object, it is not yet 'const'. + // FIXME: We don't set up EvaluatingDecl for local variables or temporaries, + // and this doesn't do quite the right thing for const subobjects of the + // object under construction. + if (LVal.getLValueBase() == Info.EvaluatingDecl) { + BaseType = Info.Ctx.getCanonicalType(BaseType); + BaseType.removeLocalConst(); + } + + // In C++1y, we can't safely access any mutable state when we might be + // evaluating after an unmodeled side effect or an evaluation failure. + // + // FIXME: Not all local state is mutable. Allow local constant subobjects + // to be read here (but take care with 'mutable' fields). 
+ if (Frame && Info.getLangOpts().CPlusPlus1y && + (Info.EvalStatus.HasSideEffects || Info.keepEvaluatingAfterFailure())) + return CompleteObject(); + + return CompleteObject(BaseVal, BaseType); +} + +/// \brief Perform an lvalue-to-rvalue conversion on the given glvalue. This +/// can also be used for 'lvalue-to-lvalue' conversions for looking up the +/// glvalue referred to by an entity of reference type. +/// +/// \param Info - Information about the ongoing evaluation. +/// \param Conv - The expression for which we are performing the conversion. +/// Used for diagnostics. +/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the +/// case of a non-class type). +/// \param LVal - The glvalue on which we are attempting to perform this action. +/// \param RVal - The produced value will be placed here. +static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, + QualType Type, + const LValue &LVal, APValue &RVal) { + if (LVal.Designator.Invalid) + return false; + + // Check for special cases where there is no existing APValue to look at. + const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); + if (!LVal.Designator.Invalid && Base && !LVal.CallIndex && + !Type.isVolatileQualified()) { + if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) { + // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the + // initializer until now for such expressions. Such an expression can't be + // an ICE in C, so this only matters for fold. 
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?"); + if (Type.isVolatileQualified()) { + Info.Diag(Conv); + return false; + } + APValue Lit; + if (!Evaluate(Lit, Info, CLE->getInitializer())) + return false; + CompleteObject LitObj(&Lit, Base->getType()); + return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal); + } else if (isa<StringLiteral>(Base)) { + // We represent a string literal array as an lvalue pointing at the + // corresponding expression, rather than building an array of chars. + // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant + APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0); + CompleteObject StrObj(&Str, Base->getType()); + return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal); + } + } + + CompleteObject Obj = findCompleteObject(Info, Conv, AK_Read, LVal, Type); + return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal); +} + +/// Perform an assignment of Val to LVal. Takes ownership of Val. +static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal, + QualType LValType, APValue &Val) { + if (LVal.Designator.Invalid) + return false; + + if (!Info.getLangOpts().CPlusPlus1y) { + Info.Diag(E); + return false; + } + + CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType); + return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val); +} + +static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) { + return T->isSignedIntegerType() && + Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy); +} + +namespace { +struct CompoundAssignSubobjectHandler { + EvalInfo &Info; + const Expr *E; + QualType PromotedLHSType; + BinaryOperatorKind Opcode; + const APValue &RHS; + + static const AccessKinds AccessKind = AK_Assign; + + typedef bool result_type; + + bool checkConst(QualType QT) { + // Assigning to a const object has undefined behavior. 
+ if (QT.isConstQualified()) { + Info.Diag(E, diag::note_constexpr_modify_const_type) << QT; + return false; + } + return true; + } + + bool failed() { return false; } + bool found(APValue &Subobj, QualType SubobjType) { + switch (Subobj.getKind()) { + case APValue::Int: + return found(Subobj.getInt(), SubobjType); + case APValue::Float: + return found(Subobj.getFloat(), SubobjType); + case APValue::ComplexInt: + case APValue::ComplexFloat: + // FIXME: Implement complex compound assignment. + Info.Diag(E); + return false; + case APValue::LValue: + return foundPointer(Subobj, SubobjType); + default: + // FIXME: can this happen? + Info.Diag(E); + return false; + } + } + bool found(APSInt &Value, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + + if (!SubobjType->isIntegerType() || !RHS.isInt()) { + // We don't support compound assignment on integer-cast-to-pointer + // values. + Info.Diag(E); + return false; + } + + APSInt LHS = HandleIntToIntCast(Info, E, PromotedLHSType, + SubobjType, Value); + if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS)) + return false; + Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS); + return true; + } + bool found(APFloat &Value, QualType SubobjType) { + return checkConst(SubobjType) && + HandleFloatToFloatCast(Info, E, SubobjType, PromotedLHSType, + Value) && + handleFloatFloatBinOp(Info, E, Value, Opcode, RHS.getFloat()) && + HandleFloatToFloatCast(Info, E, PromotedLHSType, SubobjType, Value); + } + bool foundPointer(APValue &Subobj, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + + QualType PointeeType; + if (const PointerType *PT = SubobjType->getAs<PointerType>()) + PointeeType = PT->getPointeeType(); + + if (PointeeType.isNull() || !RHS.isInt() || + (Opcode != BO_Add && Opcode != BO_Sub)) { + Info.Diag(E); + return false; + } + + int64_t Offset = getExtValue(RHS.getInt()); + if (Opcode == BO_Sub) + Offset = -Offset; + + LValue LVal; + 
LVal.setFrom(Info.Ctx, Subobj); + if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, Offset)) + return false; + LVal.moveInto(Subobj); + return true; + } + bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) { + llvm_unreachable("shouldn't encounter string elements here"); + } +}; +} // end anonymous namespace + +const AccessKinds CompoundAssignSubobjectHandler::AccessKind; + +/// Perform a compound assignment of LVal <op>= RVal. +static bool handleCompoundAssignment( + EvalInfo &Info, const Expr *E, + const LValue &LVal, QualType LValType, QualType PromotedLValType, + BinaryOperatorKind Opcode, const APValue &RVal) { + if (LVal.Designator.Invalid) + return false; + + if (!Info.getLangOpts().CPlusPlus1y) { + Info.Diag(E); + return false; + } + + CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType); + CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode, + RVal }; + return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler); +} + +namespace { +struct IncDecSubobjectHandler { + EvalInfo &Info; + const Expr *E; + AccessKinds AccessKind; + APValue *Old; + + typedef bool result_type; + + bool checkConst(QualType QT) { + // Assigning to a const object has undefined behavior. + if (QT.isConstQualified()) { + Info.Diag(E, diag::note_constexpr_modify_const_type) << QT; + return false; + } + return true; + } + + bool failed() { return false; } + bool found(APValue &Subobj, QualType SubobjType) { + // Stash the old value. Also clear Old, so we don't clobber it later + // if we're post-incrementing a complex. 
+ if (Old) { + *Old = Subobj; + Old = 0; + } + + switch (Subobj.getKind()) { + case APValue::Int: + return found(Subobj.getInt(), SubobjType); + case APValue::Float: + return found(Subobj.getFloat(), SubobjType); + case APValue::ComplexInt: + return found(Subobj.getComplexIntReal(), + SubobjType->castAs<ComplexType>()->getElementType() + .withCVRQualifiers(SubobjType.getCVRQualifiers())); + case APValue::ComplexFloat: + return found(Subobj.getComplexFloatReal(), + SubobjType->castAs<ComplexType>()->getElementType() + .withCVRQualifiers(SubobjType.getCVRQualifiers())); + case APValue::LValue: + return foundPointer(Subobj, SubobjType); + default: + // FIXME: can this happen? + Info.Diag(E); + return false; + } + } + bool found(APSInt &Value, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + + if (!SubobjType->isIntegerType()) { + // We don't support increment / decrement on integer-cast-to-pointer + // values. + Info.Diag(E); + return false; + } + + if (Old) *Old = APValue(Value); + + // bool arithmetic promotes to int, and the conversion back to bool + // doesn't reduce mod 2^n, so special-case it. 
+ if (SubobjType->isBooleanType()) { + if (AccessKind == AK_Increment) + Value = 1; + else + Value = !Value; + return true; + } + + bool WasNegative = Value.isNegative(); + if (AccessKind == AK_Increment) { + ++Value; + + if (!WasNegative && Value.isNegative() && + isOverflowingIntegerType(Info.Ctx, SubobjType)) { + APSInt ActualValue(Value, /*IsUnsigned*/true); + HandleOverflow(Info, E, ActualValue, SubobjType); + } + } else { + --Value; + + if (WasNegative && !Value.isNegative() && + isOverflowingIntegerType(Info.Ctx, SubobjType)) { + unsigned BitWidth = Value.getBitWidth(); + APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false); + ActualValue.setBit(BitWidth); + HandleOverflow(Info, E, ActualValue, SubobjType); + } + } + return true; + } + bool found(APFloat &Value, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + + if (Old) *Old = APValue(Value); + + APFloat One(Value.getSemantics(), 1); + if (AccessKind == AK_Increment) + Value.add(One, APFloat::rmNearestTiesToEven); + else + Value.subtract(One, APFloat::rmNearestTiesToEven); + return true; + } + bool foundPointer(APValue &Subobj, QualType SubobjType) { + if (!checkConst(SubobjType)) + return false; + + QualType PointeeType; + if (const PointerType *PT = SubobjType->getAs<PointerType>()) + PointeeType = PT->getPointeeType(); + else { + Info.Diag(E); + return false; + } + + LValue LVal; + LVal.setFrom(Info.Ctx, Subobj); + if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, + AccessKind == AK_Increment ? 1 : -1)) + return false; + LVal.moveInto(Subobj); + return true; + } + bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) { + llvm_unreachable("shouldn't encounter string elements here"); + } +}; +} // end anonymous namespace + +/// Perform an increment or decrement on LVal. 
+static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal, + QualType LValType, bool IsIncrement, APValue *Old) { + if (LVal.Designator.Invalid) + return false; + + if (!Info.getLangOpts().CPlusPlus1y) { + Info.Diag(E); + return false; + } + + AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement; + CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType); + IncDecSubobjectHandler Handler = { Info, E, AK, Old }; + return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler); +} + +/// Build an lvalue for the object argument of a member function call. +static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object, + LValue &This) { + if (Object->getType()->isPointerType()) + return EvaluatePointer(Object, This, Info); + + if (Object->isGLValue()) + return EvaluateLValue(Object, This, Info); + + if (Object->getType()->isLiteralType(Info.Ctx)) + return EvaluateTemporary(Object, This, Info); + + return false; +} + +/// HandleMemberPointerAccess - Evaluate a member access operation and build an +/// lvalue referring to the result. +/// +/// \param Info - Information about the ongoing evaluation. +/// \param LV - An lvalue referring to the base of the member pointer. +/// \param RHS - The member pointer expression. +/// \param IncludeMember - Specifies whether the member itself is included in +/// the resulting LValue subobject designator. This is not possible when +/// creating a bound member function. +/// \return The field or method declaration to which the member pointer refers, +/// or 0 if evaluation fails. +static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, + QualType LVType, + LValue &LV, + const Expr *RHS, + bool IncludeMember = true) { + MemberPtr MemPtr; + if (!EvaluateMemberPointer(RHS, MemPtr, Info)) + return 0; + + // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to + // member value, the behavior is undefined. + if (!MemPtr.getDecl()) { + // FIXME: Specific diagnostic. 
+ Info.Diag(RHS); + return 0; + } + + if (MemPtr.isDerivedMember()) { + // This is a member of some derived class. Truncate LV appropriately. + // The end of the derived-to-base path for the base object must match the + // derived-to-base path for the member pointer. + if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() > + LV.Designator.Entries.size()) { + Info.Diag(RHS); + return 0; + } + unsigned PathLengthToMember = + LV.Designator.Entries.size() - MemPtr.Path.size(); + for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) { + const CXXRecordDecl *LVDecl = getAsBaseClass( + LV.Designator.Entries[PathLengthToMember + I]); + const CXXRecordDecl *MPDecl = MemPtr.Path[I]; + if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) { + Info.Diag(RHS); + return 0; + } + } + + // Truncate the lvalue to the appropriate derived class. + if (!CastToDerivedClass(Info, RHS, LV, MemPtr.getContainingRecord(), + PathLengthToMember)) + return 0; + } else if (!MemPtr.Path.empty()) { + // Extend the LValue path with the member pointer's path. + LV.Designator.Entries.reserve(LV.Designator.Entries.size() + + MemPtr.Path.size() + IncludeMember); + + // Walk down to the appropriate base class. + if (const PointerType *PT = LVType->getAs<PointerType>()) + LVType = PT->getPointeeType(); + const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl(); + assert(RD && "member pointer access on non-class-type expression"); + // The first class in the path is that of the lvalue. + for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) { + const CXXRecordDecl *Base = MemPtr.Path[N - I - 1]; + if (!HandleLValueDirectBase(Info, RHS, LV, RD, Base)) + return 0; + RD = Base; + } + // Finally cast to the class containing the member. + if (!HandleLValueDirectBase(Info, RHS, LV, RD, + MemPtr.getContainingRecord())) + return 0; + } + + // Add the member. Note that we cannot build bound member functions here. 
+ if (IncludeMember) { + if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) { + if (!HandleLValueMember(Info, RHS, LV, FD)) + return 0; + } else if (const IndirectFieldDecl *IFD = + dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) { + if (!HandleLValueIndirectMember(Info, RHS, LV, IFD)) + return 0; + } else { + llvm_unreachable("can't construct reference to bound member function"); + } + } + + return MemPtr.getDecl(); +} + +static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, + const BinaryOperator *BO, + LValue &LV, + bool IncludeMember = true) { + assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI); + + if (!EvaluateObjectArgument(Info, BO->getLHS(), LV)) { + if (Info.keepEvaluatingAfterFailure()) { + MemberPtr MemPtr; + EvaluateMemberPointer(BO->getRHS(), MemPtr, Info); + } + return 0; + } + + return HandleMemberPointerAccess(Info, BO->getLHS()->getType(), LV, + BO->getRHS(), IncludeMember); +} + +/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on +/// the provided lvalue, which currently refers to the base object. +static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E, + LValue &Result) { + SubobjectDesignator &D = Result.Designator; + if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived)) + return false; + + QualType TargetQT = E->getType(); + if (const PointerType *PT = TargetQT->getAs<PointerType>()) + TargetQT = PT->getPointeeType(); + + // Check this cast lands within the final derived-to-base subobject path. + if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) { + Info.CCEDiag(E, diag::note_constexpr_invalid_downcast) + << D.MostDerivedType << TargetQT; + return false; + } + + // Check the type of the final cast. We don't need to check the path, + // since a cast can only be formed if the path is unique. 
+ unsigned NewEntriesSize = D.Entries.size() - E->path_size(); + const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl(); + const CXXRecordDecl *FinalType; + if (NewEntriesSize == D.MostDerivedPathLength) + FinalType = D.MostDerivedType->getAsCXXRecordDecl(); + else + FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]); + if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) { + Info.CCEDiag(E, diag::note_constexpr_invalid_downcast) + << D.MostDerivedType << TargetQT; + return false; + } + + // Truncate the lvalue to the appropriate derived class. + return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize); +} + +namespace { +enum EvalStmtResult { + /// Evaluation failed. + ESR_Failed, + /// Hit a 'return' statement. + ESR_Returned, + /// Evaluation succeeded. + ESR_Succeeded, + /// Hit a 'continue' statement. + ESR_Continue, + /// Hit a 'break' statement. + ESR_Break, + /// Still scanning for 'case' or 'default' statement. + ESR_CaseNotFound +}; +} + +static bool EvaluateDecl(EvalInfo &Info, const Decl *D) { + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { + // We don't need to evaluate the initializer for a static local. + if (!VD->hasLocalStorage()) + return true; + + LValue Result; + Result.set(VD, Info.CurrentCall->Index); + APValue &Val = Info.CurrentCall->createTemporary(VD, true); + + if (!VD->getInit()) { + Info.Diag(D->getLocStart(), diag::note_constexpr_uninitialized) + << false << VD->getType(); + Val = APValue(); + return false; + } + + if (!EvaluateInPlace(Val, Info, Result, VD->getInit())) { + // Wipe out any partially-computed value, to allow tracking that this + // evaluation failed. + Val = APValue(); + return false; + } + } + + return true; +} + +/// Evaluate a condition (either a variable declaration or an expression). 
/// EvaluateCond - Evaluate a condition (either a variable declaration or an
/// expression) as a single full-expression, storing the boolean outcome in
/// \p Result.
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
                         const Expr *Cond, bool &Result) {
  FullExpressionRAII Scope(Info);
  if (CondDecl && !EvaluateDecl(Info, CondDecl))
    return false;
  return EvaluateAsBooleanCondition(Cond, Result, Info);
}

static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
                                   const Stmt *S, const SwitchCase *SC = 0);

/// Evaluate the body of a loop, and translate the result as appropriate:
/// 'break' terminates the loop successfully, 'continue' (and normal
/// completion) continue it, everything else propagates to the caller.
static EvalStmtResult EvaluateLoopBody(APValue &Result, EvalInfo &Info,
                                       const Stmt *Body,
                                       const SwitchCase *Case = 0) {
  BlockScopeRAII Scope(Info);
  switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, Body, Case)) {
  case ESR_Break:
    return ESR_Succeeded;
  case ESR_Succeeded:
  case ESR_Continue:
    return ESR_Continue;
  case ESR_Failed:
  case ESR_Returned:
  case ESR_CaseNotFound:
    return ESR;
  }
  llvm_unreachable("Invalid EvalStmtResult!");
}

/// Evaluate a switch statement: evaluate the condition, locate the matching
/// 'case'/'default' label, then evaluate the body scanning for that label.
static EvalStmtResult EvaluateSwitch(APValue &Result, EvalInfo &Info,
                                     const SwitchStmt *SS) {
  BlockScopeRAII Scope(Info);

  // Evaluate the switch condition.
  APSInt Value;
  {
    FullExpressionRAII Scope(Info);
    if (SS->getConditionVariable() &&
        !EvaluateDecl(Info, SS->getConditionVariable()))
      return ESR_Failed;
    if (!EvaluateInteger(SS->getCond(), Value, Info))
      return ESR_Failed;
  }

  // Find the switch case corresponding to the value of the condition.
  // FIXME: Cache this lookup.
  const SwitchCase *Found = 0;
  for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    // Remember the default label but keep looking for an exact case match.
    if (isa<DefaultStmt>(SC)) {
      Found = SC;
      continue;
    }

    // A case with an RHS covers the inclusive range [LHS, RHS]; a plain
    // case covers just its single value (RHS falls back to LHS).
    const CaseStmt *CS = cast<CaseStmt>(SC);
    APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
    APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
                              : LHS;
    if (LHS <= Value && Value <= RHS) {
      Found = SC;
      break;
    }
  }

  // No matching label: the switch completes without executing anything.
  if (!Found)
    return ESR_Succeeded;

  // Search the switch body for the switch case and evaluate it from there.
  switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, SS->getBody(), Found)) {
  case ESR_Break:
    return ESR_Succeeded;
  case ESR_Succeeded:
  case ESR_Continue:
  case ESR_Failed:
  case ESR_Returned:
    return ESR;
  case ESR_CaseNotFound:
    // This can only happen if the switch case is nested within a statement
    // expression. We have no intention of supporting that.
    Info.Diag(Found->getLocStart(), diag::note_constexpr_stmt_expr_unsupported);
    return ESR_Failed;
  }
  llvm_unreachable("Invalid EvalStmtResult!");
}

// Evaluate a statement. When \p Case is non-null, we are still scanning for
// that 'case'/'default' label and only walk through statements that could
// contain it, without executing anything, until the label is reached.
static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
                                   const Stmt *S, const SwitchCase *Case) {
  if (!Info.nextStep(S))
    return ESR_Failed;

  // If we're hunting down a 'case' or 'default' label, recurse through
  // substatements until we hit the label.
  if (Case) {
    // FIXME: We don't start the lifetime of objects whose initialization we
    // jump over. However, such objects must be of class type with a trivial
    // default constructor that initialize all subobjects, so must be empty,
    // so this almost never matters.
    switch (S->getStmtClass()) {
    case Stmt::CompoundStmtClass:
      // FIXME: Precompute which substatement of a compound statement we
      // would jump to, and go straight there rather than performing a
      // linear scan each time.
    case Stmt::LabelStmtClass:
    case Stmt::AttributedStmtClass:
    case Stmt::DoStmtClass:
      break;

    case Stmt::CaseStmtClass:
    case Stmt::DefaultStmtClass:
      // Found the label; from here on evaluate normally.
      if (Case == S)
        Case = 0;
      break;

    case Stmt::IfStmtClass: {
      // FIXME: Precompute which side of an 'if' we would jump to, and go
      // straight there rather than scanning both sides.
      const IfStmt *IS = cast<IfStmt>(S);

      // Wrap the evaluation in a block scope, in case it's a DeclStmt
      // preceded by our switch label.
      BlockScopeRAII Scope(Info);

      EvalStmtResult ESR = EvaluateStmt(Result, Info, IS->getThen(), Case);
      if (ESR != ESR_CaseNotFound || !IS->getElse())
        return ESR;
      return EvaluateStmt(Result, Info, IS->getElse(), Case);
    }

    case Stmt::WhileStmtClass: {
      EvalStmtResult ESR =
          EvaluateLoopBody(Result, Info, cast<WhileStmt>(S)->getBody(), Case);
      if (ESR != ESR_Continue)
        return ESR;
      // Fall through to re-enter the loop normally via the switch below.
      break;
    }

    case Stmt::ForStmtClass: {
      const ForStmt *FS = cast<ForStmt>(S);
      EvalStmtResult ESR =
          EvaluateLoopBody(Result, Info, FS->getBody(), Case);
      if (ESR != ESR_Continue)
        return ESR;
      // Run the increment before re-entering the loop below.
      if (FS->getInc()) {
        FullExpressionRAII IncScope(Info);
        if (!EvaluateIgnoredValue(Info, FS->getInc()))
          return ESR_Failed;
      }
      break;
    }

    case Stmt::DeclStmtClass:
      // FIXME: If the variable has initialization that can't be jumped over,
      // bail out of any immediately-surrounding compound-statement too.
    default:
      return ESR_CaseNotFound;
    }
  }

  switch (S->getStmtClass()) {
  default:
    if (const Expr *E = dyn_cast<Expr>(S)) {
      // Don't bother evaluating beyond an expression-statement which couldn't
      // be evaluated.
      FullExpressionRAII Scope(Info);
      if (!EvaluateIgnoredValue(Info, E))
        return ESR_Failed;
      return ESR_Succeeded;
    }

    Info.Diag(S->getLocStart());
    return ESR_Failed;

  case Stmt::NullStmtClass:
    return ESR_Succeeded;

  case Stmt::DeclStmtClass: {
    const DeclStmt *DS = cast<DeclStmt>(S);
    for (DeclStmt::const_decl_iterator DclIt = DS->decl_begin(),
           DclEnd = DS->decl_end(); DclIt != DclEnd; ++DclIt) {
      // Each declaration initialization is its own full-expression.
      // FIXME: This isn't quite right; if we're performing aggregate
      // initialization, each braced subexpression is its own full-expression.
      FullExpressionRAII Scope(Info);
      if (!EvaluateDecl(Info, *DclIt) && !Info.keepEvaluatingAfterFailure())
        return ESR_Failed;
    }
    return ESR_Succeeded;
  }

  case Stmt::ReturnStmtClass: {
    const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
    FullExpressionRAII Scope(Info);
    if (RetExpr && !Evaluate(Result, Info, RetExpr))
      return ESR_Failed;
    return ESR_Returned;
  }

  case Stmt::CompoundStmtClass: {
    BlockScopeRAII Scope(Info);

    const CompoundStmt *CS = cast<CompoundStmt>(S);
    for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
           BE = CS->body_end(); BI != BE; ++BI) {
      EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI, Case);
      // Once a substatement succeeds while scanning, the label was found
      // inside it; stop scanning for subsequent substatements.
      if (ESR == ESR_Succeeded)
        Case = 0;
      else if (ESR != ESR_CaseNotFound)
        return ESR;
    }
    return Case ? ESR_CaseNotFound : ESR_Succeeded;
  }

  case Stmt::IfStmtClass: {
    const IfStmt *IS = cast<IfStmt>(S);

    // Evaluate the condition, as either a var decl or as an expression.
    BlockScopeRAII Scope(Info);
    bool Cond;
    if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond))
      return ESR_Failed;

    if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
      EvalStmtResult ESR = EvaluateStmt(Result, Info, SubStmt);
      if (ESR != ESR_Succeeded)
        return ESR;
    }
    return ESR_Succeeded;
  }

  case Stmt::WhileStmtClass: {
    const WhileStmt *WS = cast<WhileStmt>(S);
    while (true) {
      BlockScopeRAII Scope(Info);
      bool Continue;
      if (!EvaluateCond(Info, WS->getConditionVariable(), WS->getCond(),
                        Continue))
        return ESR_Failed;
      if (!Continue)
        break;

      EvalStmtResult ESR = EvaluateLoopBody(Result, Info, WS->getBody());
      if (ESR != ESR_Continue)
        return ESR;
    }
    return ESR_Succeeded;
  }

  case Stmt::DoStmtClass: {
    const DoStmt *DS = cast<DoStmt>(S);
    bool Continue;
    do {
      EvalStmtResult ESR = EvaluateLoopBody(Result, Info, DS->getBody(), Case);
      if (ESR != ESR_Continue)
        return ESR;
      // Any label scan applies to the first iteration only.
      Case = 0;

      FullExpressionRAII CondScope(Info);
      if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info))
        return ESR_Failed;
    } while (Continue);
    return ESR_Succeeded;
  }

  case Stmt::ForStmtClass: {
    const ForStmt *FS = cast<ForStmt>(S);
    BlockScopeRAII Scope(Info);
    if (FS->getInit()) {
      EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
      if (ESR != ESR_Succeeded)
        return ESR;
    }
    while (true) {
      BlockScopeRAII Scope(Info);
      bool Continue = true;
      if (FS->getCond() && !EvaluateCond(Info, FS->getConditionVariable(),
                                         FS->getCond(), Continue))
        return ESR_Failed;
      if (!Continue)
        break;

      EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody());
      if (ESR != ESR_Continue)
        return ESR;

      if (FS->getInc()) {
        FullExpressionRAII IncScope(Info);
        if (!EvaluateIgnoredValue(Info, FS->getInc()))
          return ESR_Failed;
      }
    }
    return ESR_Succeeded;
  }

  case Stmt::CXXForRangeStmtClass: {
    const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S);
    BlockScopeRAII Scope(Info);

    // Initialize the __range variable.
    EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt());
    if (ESR != ESR_Succeeded)
      return ESR;

    // Create the __begin and __end iterators.
    ESR = EvaluateStmt(Result, Info, FS->getBeginEndStmt());
    if (ESR != ESR_Succeeded)
      return ESR;

    while (true) {
      // Condition: __begin != __end.
      {
        bool Continue = true;
        FullExpressionRAII CondExpr(Info);
        if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info))
          return ESR_Failed;
        if (!Continue)
          break;
      }

      // User's variable declaration, initialized by *__begin.
      BlockScopeRAII InnerScope(Info);
      ESR = EvaluateStmt(Result, Info, FS->getLoopVarStmt());
      if (ESR != ESR_Succeeded)
        return ESR;

      // Loop body.
      ESR = EvaluateLoopBody(Result, Info, FS->getBody());
      if (ESR != ESR_Continue)
        return ESR;

      // Increment: ++__begin
      if (!EvaluateIgnoredValue(Info, FS->getInc()))
        return ESR_Failed;
    }

    return ESR_Succeeded;
  }

  case Stmt::SwitchStmtClass:
    return EvaluateSwitch(Result, Info, cast<SwitchStmt>(S));

  case Stmt::ContinueStmtClass:
    return ESR_Continue;

  case Stmt::BreakStmtClass:
    return ESR_Break;

  case Stmt::LabelStmtClass:
    return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case);

  case Stmt::AttributedStmtClass:
    // As a general principle, C++11 attributes can be ignored without
    // any semantic impact.
    return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(),
                        Case);

  case Stmt::CaseStmtClass:
  case Stmt::DefaultStmtClass:
    return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
  }
}

/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
/// default constructor. If so, we'll fold it whether or not it's marked as
/// constexpr. If it is marked as constexpr, we will never implicitly define it,
/// so we need special handling.
+static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc, + const CXXConstructorDecl *CD, + bool IsValueInitialization) { + if (!CD->isTrivial() || !CD->isDefaultConstructor()) + return false; + + // Value-initialization does not call a trivial default constructor, so such a + // call is a core constant expression whether or not the constructor is + // constexpr. + if (!CD->isConstexpr() && !IsValueInitialization) { + if (Info.getLangOpts().CPlusPlus11) { + // FIXME: If DiagDecl is an implicitly-declared special member function, + // we should be much more explicit about why it's not constexpr. + Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1) + << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD; + Info.Note(CD->getLocation(), diag::note_declared_at); + } else { + Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr); + } + } + return true; +} + +/// CheckConstexprFunction - Check that a function can be called in a constant +/// expression. +static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc, + const FunctionDecl *Declaration, + const FunctionDecl *Definition) { + // Potential constant expressions can contain calls to declared, but not yet + // defined, constexpr functions. + if (Info.checkingPotentialConstantExpression() && !Definition && + Declaration->isConstexpr()) + return false; + + // Bail out with no diagnostic if the function declaration itself is invalid. + // We will have produced a relevant diagnostic while parsing it. + if (Declaration->isInvalidDecl()) + return false; + + // Can we evaluate this function call? + if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl()) + return true; + + if (Info.getLangOpts().CPlusPlus11) { + const FunctionDecl *DiagDecl = Definition ? Definition : Declaration; + // FIXME: If DiagDecl is an implicitly-declared special member function, we + // should be much more explicit about why it's not constexpr. 
+ Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1) + << DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl) + << DiagDecl; + Info.Note(DiagDecl->getLocation(), diag::note_declared_at); + } else { + Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr); + } + return false; +} + +namespace { +typedef SmallVector<APValue, 8> ArgVector; +} + +/// EvaluateArgs - Evaluate the arguments to a function call. +static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues, + EvalInfo &Info) { + bool Success = true; + for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end(); + I != E; ++I) { + if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) { + // If we're checking for a potential constant expression, evaluate all + // initializers even if some of them fail. + if (!Info.keepEvaluatingAfterFailure()) + return false; + Success = false; + } + } + return Success; +} + +/// Evaluate a function call. +static bool HandleFunctionCall(SourceLocation CallLoc, + const FunctionDecl *Callee, const LValue *This, + ArrayRef<const Expr*> Args, const Stmt *Body, + EvalInfo &Info, APValue &Result) { + ArgVector ArgValues(Args.size()); + if (!EvaluateArgs(Args, ArgValues, Info)) + return false; + + if (!Info.CheckCallLimit(CallLoc)) + return false; + + CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data()); + + // For a trivial copy or move assignment, perform an APValue copy. This is + // essential for unions, where the operations performed by the assignment + // operator cannot be represented as statements. 
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee); + if (MD && MD->isDefaulted() && MD->isTrivial()) { + assert(This && + (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())); + LValue RHS; + RHS.setFrom(Info.Ctx, ArgValues[0]); + APValue RHSValue; + if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), + RHS, RHSValue)) + return false; + if (!handleAssignment(Info, Args[0], *This, MD->getThisType(Info.Ctx), + RHSValue)) + return false; + This->moveInto(Result); + return true; + } + + EvalStmtResult ESR = EvaluateStmt(Result, Info, Body); + if (ESR == ESR_Succeeded) { + if (Callee->getResultType()->isVoidType()) + return true; + Info.Diag(Callee->getLocEnd(), diag::note_constexpr_no_return); + } + return ESR == ESR_Returned; +} + +/// Evaluate a constructor call. +static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This, + ArrayRef<const Expr*> Args, + const CXXConstructorDecl *Definition, + EvalInfo &Info, APValue &Result) { + ArgVector ArgValues(Args.size()); + if (!EvaluateArgs(Args, ArgValues, Info)) + return false; + + if (!Info.CheckCallLimit(CallLoc)) + return false; + + const CXXRecordDecl *RD = Definition->getParent(); + if (RD->getNumVBases()) { + Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD; + return false; + } + + CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data()); + + // If it's a delegating constructor, just delegate. + if (Definition->isDelegatingConstructor()) { + CXXConstructorDecl::init_const_iterator I = Definition->init_begin(); + { + FullExpressionRAII InitScope(Info); + if (!EvaluateInPlace(Result, Info, This, (*I)->getInit())) + return false; + } + return EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed; + } + + // For a trivial copy or move constructor, perform an APValue copy. This is + // essential for unions, where the operations performed by the constructor + // cannot be represented by ctor-initializers. 
+ if (Definition->isDefaulted() && + ((Definition->isCopyConstructor() && Definition->isTrivial()) || + (Definition->isMoveConstructor() && Definition->isTrivial()))) { + LValue RHS; + RHS.setFrom(Info.Ctx, ArgValues[0]); + return handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), + RHS, Result); + } + + // Reserve space for the struct members. + if (!RD->isUnion() && Result.isUninit()) + Result = APValue(APValue::UninitStruct(), RD->getNumBases(), + std::distance(RD->field_begin(), RD->field_end())); + + if (RD->isInvalidDecl()) return false; + const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); + + // A scope for temporaries lifetime-extended by reference members. + BlockScopeRAII LifetimeExtendedScope(Info); + + bool Success = true; + unsigned BasesSeen = 0; +#ifndef NDEBUG + CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin(); +#endif + for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(), + E = Definition->init_end(); I != E; ++I) { + LValue Subobject = This; + APValue *Value = &Result; + + // Determine the subobject to initialize. + FieldDecl *FD = 0; + if ((*I)->isBaseInitializer()) { + QualType BaseType((*I)->getBaseClass(), 0); +#ifndef NDEBUG + // Non-virtual base classes are initialized in the order in the class + // definition. We have already checked for virtual base classes. 
+ assert(!BaseIt->isVirtual() && "virtual base for literal type"); + assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) && + "base class initializers not in expected order"); + ++BaseIt; +#endif + if (!HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD, + BaseType->getAsCXXRecordDecl(), &Layout)) + return false; + Value = &Result.getStructBase(BasesSeen++); + } else if ((FD = (*I)->getMember())) { + if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout)) + return false; + if (RD->isUnion()) { + Result = APValue(FD); + Value = &Result.getUnionValue(); + } else { + Value = &Result.getStructField(FD->getFieldIndex()); + } + } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) { + // Walk the indirect field decl's chain to find the object to initialize, + // and make sure we've initialized every step along it. + for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(), + CE = IFD->chain_end(); + C != CE; ++C) { + FD = cast<FieldDecl>(*C); + CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent()); + // Switch the union field if it differs. This happens if we had + // preceding zero-initialization, and we're now initializing a union + // subobject other than the first. + // FIXME: In this case, the values of the other subobjects are + // specified, since zero-initialization sets all padding bits to zero. 
+ if (Value->isUninit() || + (Value->isUnion() && Value->getUnionField() != FD)) { + if (CD->isUnion()) + *Value = APValue(FD); + else + *Value = APValue(APValue::UninitStruct(), CD->getNumBases(), + std::distance(CD->field_begin(), CD->field_end())); + } + if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD)) + return false; + if (CD->isUnion()) + Value = &Value->getUnionValue(); + else + Value = &Value->getStructField(FD->getFieldIndex()); + } + } else { + llvm_unreachable("unknown base initializer kind"); + } + + FullExpressionRAII InitScope(Info); + if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit()) || + (FD && FD->isBitField() && !truncateBitfieldValue(Info, (*I)->getInit(), + *Value, FD))) { + // If we're checking for a potential constant expression, evaluate all + // initializers even if some of them fail. + if (!Info.keepEvaluatingAfterFailure()) + return false; + Success = false; + } + } + + return Success && + EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed; +} + +//===----------------------------------------------------------------------===// +// Generic Evaluation +//===----------------------------------------------------------------------===// +namespace { + +// FIXME: RetTy is always bool. Remove it. +template <class Derived, typename RetTy=bool> +class ExprEvaluatorBase + : public ConstStmtVisitor<Derived, RetTy> { +private: + RetTy DerivedSuccess(const APValue &V, const Expr *E) { + return static_cast<Derived*>(this)->Success(V, E); + } + RetTy DerivedZeroInitialization(const Expr *E) { + return static_cast<Derived*>(this)->ZeroInitialization(E); + } + + // Check whether a conditional operator with a non-constant condition is a + // potential constant expression. If neither arm is a potential constant + // expression, then the conditional operator is not either. 
+ template<typename ConditionalOperator> + void CheckPotentialConstantConditional(const ConditionalOperator *E) { + assert(Info.checkingPotentialConstantExpression()); + + // Speculatively evaluate both arms. + { + SmallVector<PartialDiagnosticAt, 8> Diag; + SpeculativeEvaluationRAII Speculate(Info, &Diag); + + StmtVisitorTy::Visit(E->getFalseExpr()); + if (Diag.empty()) + return; + + Diag.clear(); + StmtVisitorTy::Visit(E->getTrueExpr()); + if (Diag.empty()) + return; + } + + Error(E, diag::note_constexpr_conditional_never_const); + } + + + template<typename ConditionalOperator> + bool HandleConditionalOperator(const ConditionalOperator *E) { + bool BoolResult; + if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) { + if (Info.checkingPotentialConstantExpression()) + CheckPotentialConstantConditional(E); + return false; + } + + Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr(); + return StmtVisitorTy::Visit(EvalExpr); + } + +protected: + EvalInfo &Info; + typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy; + typedef ExprEvaluatorBase ExprEvaluatorBaseTy; + + OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { + return Info.CCEDiag(E, D); + } + + RetTy ZeroInitialization(const Expr *E) { return Error(E); } + +public: + ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {} + + EvalInfo &getEvalInfo() { return Info; } + + /// Report an evaluation error. This should only be called when an error is + /// first discovered. When propagating an error, just return false. 
+ bool Error(const Expr *E, diag::kind D) { + Info.Diag(E, D); + return false; + } + bool Error(const Expr *E) { + return Error(E, diag::note_invalid_subexpr_in_const_expr); + } + + RetTy VisitStmt(const Stmt *) { + llvm_unreachable("Expression evaluator should not be called on stmts"); + } + RetTy VisitExpr(const Expr *E) { + return Error(E); + } + + RetTy VisitParenExpr(const ParenExpr *E) + { return StmtVisitorTy::Visit(E->getSubExpr()); } + RetTy VisitUnaryExtension(const UnaryOperator *E) + { return StmtVisitorTy::Visit(E->getSubExpr()); } + RetTy VisitUnaryPlus(const UnaryOperator *E) + { return StmtVisitorTy::Visit(E->getSubExpr()); } + RetTy VisitChooseExpr(const ChooseExpr *E) + { return StmtVisitorTy::Visit(E->getChosenSubExpr()); } + RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E) + { return StmtVisitorTy::Visit(E->getResultExpr()); } + RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) + { return StmtVisitorTy::Visit(E->getReplacement()); } + RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) + { return StmtVisitorTy::Visit(E->getExpr()); } + RetTy VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) { + // The initializer may not have been parsed yet, or might be erroneous. + if (!E->getExpr()) + return Error(E); + return StmtVisitorTy::Visit(E->getExpr()); + } + // We cannot create any objects for which cleanups are required, so there is + // nothing to do here; all cleanups must come from unevaluated subexpressions. 
+ RetTy VisitExprWithCleanups(const ExprWithCleanups *E) + { return StmtVisitorTy::Visit(E->getSubExpr()); } + + RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) { + CCEDiag(E, diag::note_constexpr_invalid_cast) << 0; + return static_cast<Derived*>(this)->VisitCastExpr(E); + } + RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) { + CCEDiag(E, diag::note_constexpr_invalid_cast) << 1; + return static_cast<Derived*>(this)->VisitCastExpr(E); + } + + RetTy VisitBinaryOperator(const BinaryOperator *E) { + switch (E->getOpcode()) { + default: + return Error(E); + + case BO_Comma: + VisitIgnoredValue(E->getLHS()); + return StmtVisitorTy::Visit(E->getRHS()); + + case BO_PtrMemD: + case BO_PtrMemI: { + LValue Obj; + if (!HandleMemberPointerAccess(Info, E, Obj)) + return false; + APValue Result; + if (!handleLValueToRValueConversion(Info, E, E->getType(), Obj, Result)) + return false; + return DerivedSuccess(Result, E); + } + } + } + + RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) { + // Evaluate and cache the common expression. We treat it as a temporary, + // even though it's not quite the same thing. + if (!Evaluate(Info.CurrentCall->createTemporary(E->getOpaqueValue(), false), + Info, E->getCommon())) + return false; + + return HandleConditionalOperator(E); + } + + RetTy VisitConditionalOperator(const ConditionalOperator *E) { + bool IsBcpCall = false; + // If the condition (ignoring parens) is a __builtin_constant_p call, + // the result is a constant expression if it can be folded without + // side-effects. This is an important GNU extension. See GCC PR38377 + // for discussion. + if (const CallExpr *CallCE = + dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts())) + if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p) + IsBcpCall = true; + + // Always assume __builtin_constant_p(...) ? ... : ... is a potential + // constant expression; we can't check whether it's potentially foldable. 
+ if (Info.checkingPotentialConstantExpression() && IsBcpCall) + return false; + + FoldConstant Fold(Info, IsBcpCall); + if (!HandleConditionalOperator(E)) { + Fold.keepDiagnostics(); + return false; + } + + return true; + } + + RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) { + if (APValue *Value = Info.CurrentCall->getTemporary(E)) + return DerivedSuccess(*Value, E); + + const Expr *Source = E->getSourceExpr(); + if (!Source) + return Error(E); + if (Source == E) { // sanity checking. + assert(0 && "OpaqueValueExpr recursively refers to itself"); + return Error(E); + } + return StmtVisitorTy::Visit(Source); + } + + RetTy VisitCallExpr(const CallExpr *E) { + const Expr *Callee = E->getCallee()->IgnoreParens(); + QualType CalleeType = Callee->getType(); + + const FunctionDecl *FD = 0; + LValue *This = 0, ThisVal; + ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs()); + bool HasQualifier = false; + + // Extract function decl and 'this' pointer from the callee. + if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) { + const ValueDecl *Member = 0; + if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) { + // Explicit bound member calls, such as x.f() or p->g(); + if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal)) + return false; + Member = ME->getMemberDecl(); + This = &ThisVal; + HasQualifier = ME->hasQualifier(); + } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) { + // Indirect bound member calls ('.*' or '->*'). 
+ Member = HandleMemberPointerAccess(Info, BE, ThisVal, false); + if (!Member) return false; + This = &ThisVal; + } else + return Error(Callee); + + FD = dyn_cast<FunctionDecl>(Member); + if (!FD) + return Error(Callee); + } else if (CalleeType->isFunctionPointerType()) { + LValue Call; + if (!EvaluatePointer(Callee, Call, Info)) + return false; + + if (!Call.getLValueOffset().isZero()) + return Error(Callee); + FD = dyn_cast_or_null<FunctionDecl>( + Call.getLValueBase().dyn_cast<const ValueDecl*>()); + if (!FD) + return Error(Callee); + + // Overloaded operator calls to member functions are represented as normal + // calls with '*this' as the first argument. + const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); + if (MD && !MD->isStatic()) { + // FIXME: When selecting an implicit conversion for an overloaded + // operator delete, we sometimes try to evaluate calls to conversion + // operators without a 'this' parameter! + if (Args.empty()) + return Error(E); + + if (!EvaluateObjectArgument(Info, Args[0], ThisVal)) + return false; + This = &ThisVal; + Args = Args.slice(1); + } + + // Don't call function pointers which have been cast to some other type. + if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType())) + return Error(E); + } else + return Error(E); + + if (This && !This->checkSubobject(Info, E, CSK_This)) + return false; + + // DR1358 allows virtual constexpr functions in some cases. Don't allow + // calls to such functions in constant expressions. 
+ if (This && !HasQualifier && + isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual()) + return Error(E, diag::note_constexpr_virtual_call); + + const FunctionDecl *Definition = 0; + Stmt *Body = FD->getBody(Definition); + APValue Result; + + if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) || + !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body, + Info, Result)) + return false; + + return DerivedSuccess(Result, E); + } + + RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { + return StmtVisitorTy::Visit(E->getInitializer()); + } + RetTy VisitInitListExpr(const InitListExpr *E) { + if (E->getNumInits() == 0) + return DerivedZeroInitialization(E); + if (E->getNumInits() == 1) + return StmtVisitorTy::Visit(E->getInit(0)); + return Error(E); + } + RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { + return DerivedZeroInitialization(E); + } + RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { + return DerivedZeroInitialization(E); + } + RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) { + return DerivedZeroInitialization(E); + } + + /// A member expression where the object is a prvalue is itself a prvalue. 
+ RetTy VisitMemberExpr(const MemberExpr *E) { + assert(!E->isArrow() && "missing call to bound member function?"); + + APValue Val; + if (!Evaluate(Val, Info, E->getBase())) + return false; + + QualType BaseTy = E->getBase()->getType(); + + const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl()); + if (!FD) return Error(E); + assert(!FD->getType()->isReferenceType() && "prvalue reference?"); + assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() == + FD->getParent()->getCanonicalDecl() && "record / field mismatch"); + + CompleteObject Obj(&Val, BaseTy); + SubobjectDesignator Designator(BaseTy); + Designator.addDeclUnchecked(FD); + + APValue Result; + return extractSubobject(Info, E, Obj, Designator, Result) && + DerivedSuccess(Result, E); + } + + RetTy VisitCastExpr(const CastExpr *E) { + switch (E->getCastKind()) { + default: + break; + + case CK_AtomicToNonAtomic: { + APValue AtomicVal; + if (!EvaluateAtomic(E->getSubExpr(), AtomicVal, Info)) + return false; + return DerivedSuccess(AtomicVal, E); + } + + case CK_NoOp: + case CK_UserDefinedConversion: + return StmtVisitorTy::Visit(E->getSubExpr()); + + case CK_LValueToRValue: { + LValue LVal; + if (!EvaluateLValue(E->getSubExpr(), LVal, Info)) + return false; + APValue RVal; + // Note, we use the subexpression's type in order to retain cv-qualifiers. 
+ if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(), + LVal, RVal)) + return false; + return DerivedSuccess(RVal, E); + } + } + + return Error(E); + } + + RetTy VisitUnaryPostInc(const UnaryOperator *UO) { + return VisitUnaryPostIncDec(UO); + } + RetTy VisitUnaryPostDec(const UnaryOperator *UO) { + return VisitUnaryPostIncDec(UO); + } + RetTy VisitUnaryPostIncDec(const UnaryOperator *UO) { + if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure()) + return Error(UO); + + LValue LVal; + if (!EvaluateLValue(UO->getSubExpr(), LVal, Info)) + return false; + APValue RVal; + if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(), + UO->isIncrementOp(), &RVal)) + return false; + return DerivedSuccess(RVal, UO); + } + + RetTy VisitStmtExpr(const StmtExpr *E) { + // We will have checked the full-expressions inside the statement expression + // when they were completed, and don't need to check them again now. + if (Info.checkingForOverflow()) + return Error(E); + + BlockScopeRAII Scope(Info); + const CompoundStmt *CS = E->getSubStmt(); + for (CompoundStmt::const_body_iterator BI = CS->body_begin(), + BE = CS->body_end(); + /**/; ++BI) { + if (BI + 1 == BE) { + const Expr *FinalExpr = dyn_cast<Expr>(*BI); + if (!FinalExpr) { + Info.Diag((*BI)->getLocStart(), + diag::note_constexpr_stmt_expr_unsupported); + return false; + } + return this->Visit(FinalExpr); + } + + APValue ReturnValue; + EvalStmtResult ESR = EvaluateStmt(ReturnValue, Info, *BI); + if (ESR != ESR_Succeeded) { + // FIXME: If the statement-expression terminated due to 'return', + // 'break', or 'continue', it would be nice to propagate that to + // the outer statement evaluation rather than bailing out. + if (ESR != ESR_Failed) + Info.Diag((*BI)->getLocStart(), + diag::note_constexpr_stmt_expr_unsupported); + return false; + } + } + } + + /// Visit a value which is evaluated, but whose value is ignored. 
  void VisitIgnoredValue(const Expr *E) {
    EvaluateIgnoredValue(Info, E);
  }
};

}

//===----------------------------------------------------------------------===//
// Common base class for lvalue and temporary evaluation.
//===----------------------------------------------------------------------===//
namespace {
// CRTP base shared by the lvalue and temporary evaluators: both produce an
// LValue describing a glvalue (base + designator), and both handle member
// access, member-pointer binary operators, and derived-to-base casts the
// same way.
template<class Derived>
class LValueExprEvaluatorBase
  : public ExprEvaluatorBase<Derived, bool> {
protected:
  LValue &Result;
  typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
  typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;

  // Succeed with an lvalue designating the start of object B.
  bool Success(APValue::LValueBase B) {
    Result.set(B);
    return true;
  }

public:
  LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
    ExprEvaluatorBaseTy(Info), Result(Result) {}

  // Succeed by adopting a previously-computed lvalue value V.
  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(this->Info.Ctx, V);
    return true;
  }

  bool VisitMemberExpr(const MemberExpr *E) {
    // Handle non-static data members.
    // First, evaluate the base as an lvalue. For 'a->b' the base is a
    // pointer; for 'a.b' on an rvalue base, materialize a temporary.
    QualType BaseTy;
    if (E->isArrow()) {
      if (!EvaluatePointer(E->getBase(), Result, this->Info))
        return false;
      BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
    } else if (E->getBase()->isRValue()) {
      assert(E->getBase()->getType()->isRecordType());
      if (!EvaluateTemporary(E->getBase(), Result, this->Info))
        return false;
      BaseTy = E->getBase()->getType();
    } else {
      if (!this->Visit(E->getBase()))
        return false;
      BaseTy = E->getBase()->getType();
    }

    // Then adjust the lvalue to designate the member itself.
    const ValueDecl *MD = E->getMemberDecl();
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
      assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
             FD->getParent()->getCanonicalDecl() && "record / field mismatch");
      (void)BaseTy;
      if (!HandleLValueMember(this->Info, E, Result, FD))
        return false;
    } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
      // Member of an anonymous struct/union: walk the chain of fields.
      if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
        return false;
    } else
      return this->Error(E);

    // A member of reference type is immediately dereferenced: the result is
    // the lvalue the reference is bound to, not the reference member itself.
    if (MD->getType()->isReferenceType()) {
      APValue RefValue;
      if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
                                          RefValue))
        return false;
      return Success(RefValue, E);
    }
    return true;
  }

  bool VisitBinaryOperator(const BinaryOperator *E) {
    switch (E->getOpcode()) {
    default:
      return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

    // '.*' and '->*': bind a member pointer to an object, producing the
    // lvalue of the designated member.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return HandleMemberPointerAccess(this->Info, E, Result);
    }
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
      if (!this->Visit(E->getSubExpr()))
        return false;

      // Now figure out the necessary offset to add to the base LV to get from
      // the derived class to the base class.
      return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
                                  Result);
    }
  }
};
}

//===----------------------------------------------------------------------===//
// LValue Evaluation
//
// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
// function designators (in C), decl references to void objects (in C), and
// temporaries (if building with -Wno-address-of-temporary).
//
// LValue evaluation produces values comprising a base expression of one of the
// following types:
// - Declarations
//   * VarDecl
//   * FunctionDecl
// - Literals
//   * CompoundLiteralExpr in C
//   * StringLiteral
//   * CXXTypeidExpr
//   * PredefinedExpr
//   * ObjCStringLiteralExpr
//   * ObjCEncodeExpr
//   * AddrLabelExpr
//   * BlockExpr
//   * CallExpr for a MakeStringConstant builtin
// - Locals and temporaries
//   * MaterializeTemporaryExpr
//   * Any Expr, with a CallIndex indicating the function in which the temporary
//     was evaluated, for cases where the MaterializeTemporaryExpr is missing
//     from the AST (FIXME).
//   * A MaterializeTemporaryExpr that has static storage duration, with no
//     CallIndex, for a lifetime-extended temporary.
// plus an offset in bytes.
//===----------------------------------------------------------------------===//
namespace {
class LValueExprEvaluator
  : public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
  LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
    LValueExprEvaluatorBaseTy(Info, Result) {}

  bool VisitVarDecl(const Expr *E, const VarDecl *VD);
  bool VisitUnaryPreIncDec(const UnaryOperator *UO);

  bool VisitDeclRefExpr(const DeclRefExpr *E);
  bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
  bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  bool VisitMemberExpr(const MemberExpr *E);
  bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
  bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
  bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
  bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
  bool VisitUnaryDeref(const UnaryOperator *E);
  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);
  bool VisitUnaryPreInc(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitUnaryPreDec(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitBinAssign(const BinaryOperator *BO);
  bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_LValueBitCast:
      // Not a constant expression; diagnose but keep folding, with the
      // designator invalidated since we no longer know the subobject.
      this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
      if (!Visit(E->getSubExpr()))
        return false;
      Result.Designator.setInvalid();
      return true;

    case CK_BaseToDerived:
      if (!Visit(E->getSubExpr()))
        return false;
      return HandleBaseToDerivedCast(Info, E, Result);
    }
  }
};
} // end anonymous namespace

/// Evaluate an expression as an lvalue. This can be legitimately called on
/// expressions which are not glvalues, in two cases:
///  * function designators in C, and
///  * "extern void" objects
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info) {
  assert(E->isGLValue() || E->getType()->isFunctionType() ||
         E->getType()->isVoidType());
  return LValueExprEvaluator(Info, Result).Visit(E);
}

// A DeclRefExpr used as an lvalue names either a function or a variable;
// anything else cannot be constant-evaluated here.
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
    return Success(FD);
  if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
    return VisitVarDecl(E, VD);
  return Error(E);
}

bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
  // A local variable evaluated inside a function call refers to the current
  // frame's instance of that variable (frame index 1 is the top level).
  CallStackFrame *Frame = 0;
  if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1)
    Frame = Info.CurrentCall;

  if (!VD->getType()->isReferenceType()) {
    // Non-reference: the lvalue designates the variable itself.
    if (Frame) {
      Result.set(VD, Frame->Index);
      return true;
    }
    return Success(VD);
  }

  // Reference: the lvalue is whatever the reference's initializer bound to,
  // so evaluate the initializer and use its value.
  APValue *V;
  if (!evaluateVarDeclInit(Info, E, VD, Frame, V))
    return false;
  if (V->isUninit()) {
    if (!Info.checkingPotentialConstantExpression())
      Info.Diag(E, diag::note_constexpr_use_uninit_reference);
    return false;
  }
  return Success(*V, E);
}

bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  // Walk through the expression to find the materialized temporary itself.
  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  const Expr *Inner = E->GetTemporaryExpr()->
      skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  // If we passed any comma operators, evaluate their LHSs.
  for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
    if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
      return false;

  // A materialized temporary with static storage duration can appear within the
  // result of a constant expression evaluation, so we need to preserve its
  // value for use outside this evaluation.
  APValue *Value;
  if (E->getStorageDuration() == SD_Static) {
    Value = Info.Ctx.getMaterializedTemporaryValue(E, true);
    *Value = APValue();
    Result.set(E);
  } else {
    Value = &Info.CurrentCall->
        createTemporary(E, E->getStorageDuration() == SD_Automatic);
    Result.set(E, Info.CurrentCall->Index);
  }

  QualType Type = Inner->getType();

  // Materialize the temporary itself.
  if (!EvaluateInPlace(*Value, Info, Result, Inner) ||
      (E->getStorageDuration() == SD_Static &&
       !CheckConstantExpression(Info, E->getExprLoc(), Type, *Value))) {
    *Value = APValue();
    return false;
  }

  // Adjust our lvalue to refer to the desired subobject.
  // The adjustments were collected outermost-first, so apply them in reverse.
  for (unsigned I = Adjustments.size(); I != 0; /**/) {
    --I;
    switch (Adjustments[I].Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      if (!HandleLValueBasePath(Info, Adjustments[I].DerivedToBase.BasePath,
                                Type, Result))
        return false;
      Type = Adjustments[I].DerivedToBase.BasePath->getType();
      break;

    case SubobjectAdjustment::FieldAdjustment:
      if (!HandleLValueMember(Info, E, Result, Adjustments[I].Field))
        return false;
      Type = Adjustments[I].Field->getType();
      break;

    case SubobjectAdjustment::MemberPointerAdjustment:
      if (!HandleMemberPointerAccess(this->Info, Type, Result,
                                     Adjustments[I].Ptr.RHS))
        return false;
      Type = Adjustments[I].Ptr.MPT->getPointeeType();
      break;
    }
  }

  return true;
}

bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
  assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
  // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
  // only see this when folding in C, so there's no standard to follow here.
  return Success(E);
}

bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
  // typeid on a type, or on a non-polymorphic glvalue, is a constant.
  if (!E->isPotentiallyEvaluated())
    return Success(E);

  // typeid on a polymorphic glvalue would require evaluating the operand's
  // dynamic type, which is not a constant expression.
  Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
    << E->getExprOperand()->getType()
    << E->getExprOperand()->getSourceRange();
  return false;
}

bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Success(E);
}

bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
  // Handle static data members.
  if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
    VisitIgnoredValue(E->getBase());
    return VisitVarDecl(E, VD);
  }

  // Handle static member functions.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
    if (MD->isStatic()) {
      VisitIgnoredValue(E->getBase());
      return Success(MD);
    }
  }

  // Handle non-static data members.
  return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
}

bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // FIXME: Deal with vectors as array subscript bases.
  if (E->getBase()->getType()->isVectorType())
    return Error(E);

  if (!EvaluatePointer(E->getBase(), Result, Info))
    return false;

  APSInt Index;
  if (!EvaluateInteger(E->getIdx(), Index, Info))
    return false;

  // a[i] is equivalent to *(a + i): adjust the pointer by Index elements.
  return HandleLValueArrayAdjustment(Info, E, Result, E->getType(),
                                     getExtValue(Index));
}

bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
  return EvaluatePointer(E->getSubExpr(), Result, Info);
}

bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
  if (!Visit(E->getSubExpr()))
    return false;
  // __real is a no-op on scalar lvalues.
  if (E->getSubExpr()->getType()->isAnyComplexType())
    HandleLValueComplexElement(Info, E, Result, E->getType(), false);
  return true;
}

bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
  assert(E->getSubExpr()->getType()->isAnyComplexType() &&
         "lvalue __imag__ on scalar?");
  if (!Visit(E->getSubExpr()))
    return false;
  HandleLValueComplexElement(Info, E, Result, E->getType(), true);
  return true;
}

// Prefix ++/--: mutate the operand in place; the result is the operand
// lvalue itself. Only valid under C++1y rules (or when continuing after
// failure to find more diagnostics).
bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
  if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
    return Error(UO);

  if (!this->Visit(UO->getSubExpr()))
    return false;

  return handleIncDec(
      this->Info, UO, Result, UO->getSubExpr()->getType(),
      UO->isIncrementOp(), 0);
}

bool LValueExprEvaluator::VisitCompoundAssignOperator(
    const CompoundAssignOperator *CAO) {
  if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
    return Error(CAO);

  APValue RHS;

  // The overall lvalue result is the result of evaluating the LHS.
  if (!this->Visit(CAO->getLHS())) {
    // Still evaluate the RHS so its diagnostics are produced.
    if (Info.keepEvaluatingAfterFailure())
      Evaluate(RHS, this->Info, CAO->getRHS());
    return false;
  }

  if (!Evaluate(RHS, this->Info, CAO->getRHS()))
    return false;

  return handleCompoundAssignment(
      this->Info, CAO,
      Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(),
      CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS);
}

bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
  if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
    return Error(E);

  APValue NewVal;

  if (!this->Visit(E->getLHS())) {
    // Still evaluate the RHS so its diagnostics are produced.
    if (Info.keepEvaluatingAfterFailure())
      Evaluate(NewVal, this->Info, E->getRHS());
    return false;
  }

  if (!Evaluate(NewVal, this->Info, E->getRHS()))
    return false;

  return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
                          NewVal);
}

//===----------------------------------------------------------------------===//
// Pointer Evaluation
//===----------------------------------------------------------------------===//

namespace {
class PointerExprEvaluator
  : public ExprEvaluatorBase<PointerExprEvaluator, bool> {
  LValue &Result;

  bool Success(const Expr *E) {
    Result.set(E);
    return true;
  }
public:

  PointerExprEvaluator(EvalInfo &info, LValue &Result)
    : ExprEvaluatorBaseTy(info), Result(Result) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(Info.Ctx, V);
    return true;
  }
  // A zero-initialized pointer is a null pointer: no base, zero offset.
  bool ZeroInitialization(const Expr *E) {
    return Success((Expr*)0);
  }

  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitCastExpr(const CastExpr* E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
  bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
      { return Success(E); }
  bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E)
      { return Success(E); }
  bool VisitAddrLabelExpr(const AddrLabelExpr *E)
      { return Success(E); }
  bool VisitCallExpr(const CallExpr *E);
  bool VisitBlockExpr(const BlockExpr *E) {
    // A block without captures is a constant; one with captures needs
    // runtime state and cannot be evaluated here.
    if (!E->getBlockDecl()->hasCaptures())
      return Success(E);
    return Error(E);
  }
  bool VisitCXXThisExpr(const CXXThisExpr *E) {
    // Can't look at 'this' when checking a potential constant expression.
    if (Info.checkingPotentialConstantExpression())
      return false;
    if (!Info.CurrentCall->This)
      return Error(E);
    Result = *Info.CurrentCall->This;
    return true;
  }

  // FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace

/// Evaluate an rvalue of pointer type into an LValue describing what it
/// points to.
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
  assert(E->isRValue() && E->getType()->hasPointerRepresentation());
  return PointerExprEvaluator(Info, Result).Visit(E);
}

bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // Only pointer arithmetic (ptr + int, ptr - int) is handled here.
  if (E->getOpcode() != BO_Add &&
      E->getOpcode() != BO_Sub)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  // Normalize so PExp is the pointer operand (int + ptr is also valid).
  const Expr *PExp = E->getLHS();
  const Expr *IExp = E->getRHS();
  if (IExp->getType()->isPointerType())
    std::swap(PExp, IExp);

  // Evaluate both operands even if the pointer fails, when we are trying to
  // produce as many diagnostics as possible.
  bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
  if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
    return false;

  llvm::APSInt Offset;
  if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
    return false;

  int64_t AdditionalOffset = getExtValue(Offset);
  if (E->getOpcode() == BO_Sub)
    AdditionalOffset = -AdditionalOffset;

  QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
  return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
                                     AdditionalOffset);
}

bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
  // &x: the pointer value is just the lvalue of the operand.
  return EvaluateLValue(E->getSubExpr(), Result, Info);
}

bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
  const Expr* SubExpr = E->getSubExpr();

  switch (E->getCastKind()) {
  default:
    break;

  case CK_BitCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
    if (!Visit(SubExpr))
      return false;
    // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
    // permitted in constant expressions in C++11. Bitcasts from cv void* are
    // also static_casts, but we disallow them as a resolution to DR1312.
    if (!E->getType()->isVoidPointerType()) {
      Result.Designator.setInvalid();
      if (SubExpr->getType()->isVoidPointerType())
        CCEDiag(E, diag::note_constexpr_invalid_cast)
          << 3 << SubExpr->getType();
      else
        CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
    }
    return true;

  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
    if (!EvaluatePointer(E->getSubExpr(), Result, Info))
      return false;
    // A null pointer stays null across derived-to-base conversion.
    if (!Result.Base && Result.Offset.isZero())
      return true;

    // Now figure out the necessary offset to add to the base LV to get from
    // the derived class to the base class.
    return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()->
                                  castAs<PointerType>()->getPointeeType(),
                                Result);

  case CK_BaseToDerived:
    if (!Visit(E->getSubExpr()))
      return false;
    // A null pointer stays null across base-to-derived conversion.
    if (!Result.Base && Result.Offset.isZero())
      return true;
    return HandleBaseToDerivedCast(Info, E, Result);

  case CK_NullToPointer:
    VisitIgnoredValue(E->getSubExpr());
    return ZeroInitialization(E);

  case CK_IntegralToPointer: {
    // Not a constant expression, but we can still fold it.
    CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;

    APValue Value;
    if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
      break;

    if (Value.isInt()) {
      // Fold the integer to a baseless pointer at that raw offset.
      unsigned Size = Info.Ctx.getTypeSize(E->getType());
      uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
      Result.Base = (Expr*)0;
      Result.Offset = CharUnits::fromQuantity(N);
      Result.CallIndex = 0;
      Result.Designator.setInvalid();
      return true;
    } else {
      // Cast is of an lvalue, no need to change value.
      Result.setFrom(Info.Ctx, Value);
      return true;
    }
  }
  case CK_ArrayToPointerDecay:
    if (SubExpr->isGLValue()) {
      if (!EvaluateLValue(SubExpr, Result, Info))
        return false;
    } else {
      // Decay of an rvalue array: materialize it as a temporary first.
      Result.set(SubExpr, Info.CurrentCall->Index);
      if (!EvaluateInPlace(Info.CurrentCall->createTemporary(SubExpr, false),
                           Info, Result, SubExpr))
        return false;
    }
    // The result is a pointer to the first element of the array.
    if (const ConstantArrayType *CAT
          = Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
      Result.addArray(Info, E, CAT);
    else
      Result.Designator.setInvalid();
    return true;

  case CK_FunctionToPointerDecay:
    return EvaluateLValue(SubExpr, Result, Info);
  }

  return ExprEvaluatorBaseTy::VisitCastExpr(E);
}

bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (IsStringLiteralCall(E))
    return Success(E);

  switch (E->isBuiltinCall()) {
  case Builtin::BI__builtin_addressof:
    // __builtin_addressof(x) is '&x' without overloaded operator& lookup.
    return EvaluateLValue(E->getArg(0), Result, Info);

  default:
    return ExprEvaluatorBaseTy::VisitCallExpr(E);
  }
}

//===----------------------------------------------------------------------===//
// Member Pointer Evaluation
//===----------------------------------------------------------------------===//

namespace {
class MemberPointerExprEvaluator
  : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
  MemberPtr &Result;

  bool Success(const ValueDecl *D) {
    Result = MemberPtr(D);
    return true;
  }
public:

  MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
    : ExprEvaluatorBaseTy(Info), Result(Result) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(V);
    return true;
  }
  // A zero-initialized member pointer is a null member pointer.
  bool ZeroInitialization(const Expr *E) {
    return Success((const ValueDecl*)0);
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
};
} // end anonymous namespace

/// Evaluate an rvalue of member-pointer type.
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
                                  EvalInfo &Info) {
  assert(E->isRValue() && E->getType()->isMemberPointerType());
  return MemberPointerExprEvaluator(Info, Result).Visit(E);
}

bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_NullToMemberPointer:
    VisitIgnoredValue(E->getSubExpr());
    return ZeroInitialization(E);

  case CK_BaseToDerivedMemberPointer: {
    if (!Visit(E->getSubExpr()))
      return false;
    if (E->path_empty())
      return true;
    // Base-to-derived member pointer casts store the path in derived-to-base
    // order, so iterate backwards. The CXXBaseSpecifier also provides us with
    // the wrong end of the derived->base arc, so stagger the path by one class.
    typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
    for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
         PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToDerived(Derived))
        return Error(E);
    }
    // The final class in the staggered walk comes from the cast's result type.
    const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
    if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
      return Error(E);
    return true;
  }

  case CK_DerivedToBaseMemberPointer:
    if (!Visit(E->getSubExpr()))
      return false;
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
         PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToBase(Base))
        return Error(E);
    }
    return true;
  }
}

bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
  // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
  // member can be formed.
  return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
}

//===----------------------------------------------------------------------===//
// Record Evaluation
//===----------------------------------------------------------------------===//

namespace {
  // Evaluates a prvalue of class type into Result. 'This' is the lvalue of
  // the object being initialized, used to locate subobjects being filled in.
  class RecordExprEvaluator
  : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
    const LValue &This;
    APValue &Result;
  public:

    RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
      : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}

    bool Success(const APValue &V, const Expr *E) {
      Result = V;
      return true;
    }
    bool ZeroInitialization(const Expr *E);

    bool VisitCastExpr(const CastExpr *E);
    bool VisitInitListExpr(const InitListExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E);
    bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
  };
}

/// Perform zero-initialization on an object of non-union class type.
/// C++11 [dcl.init]p5:
///  To zero-initialize an object or reference of type T means:
///  [...]
///  -- if T is a (possibly cv-qualified) non-union class type,
///     each non-static data member and each base-class subobject is
///     zero-initialized
static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
                                          const RecordDecl *RD,
                                          const LValue &This, APValue &Result) {
  assert(!RD->isUnion() && "Expected non-union class type");
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
  Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
                   std::distance(RD->field_begin(), RD->field_end()));

  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);

  // Recursively zero-initialize each base-class subobject.
  if (CD) {
    unsigned Index = 0;
    for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
           End = CD->bases_end(); I != End; ++I, ++Index) {
      const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
      LValue Subobject = This;
      if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
        return false;
      if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
                                         Result.getStructBase(Index)))
        return false;
    }
  }

  // Zero-initialize each non-static data member via an implicit
  // value-initialization expression of the member's type.
  for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
       I != End; ++I) {
    // -- if T is a reference type, no initialization is performed.
    if (I->getType()->isReferenceType())
      continue;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, Subobject, *I, &Layout))
      return false;

    ImplicitValueInitExpr VIE(I->getType());
    if (!EvaluateInPlace(
          Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
      return false;
  }

  return true;
}

bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
  const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
  if (RD->isInvalidDecl()) return false;
  if (RD->isUnion()) {
    // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
    // object's first non-static named data member is zero-initialized
    RecordDecl::field_iterator I = RD->field_begin();
    if (I == RD->field_end()) {
      Result = APValue((const FieldDecl*)0);
      return true;
    }

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, Subobject, *I))
      return false;
    Result = APValue(*I);
    ImplicitValueInitExpr VIE(I->getType());
    return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
  }

  // Classes with virtual bases are not supported by this evaluator.
  if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
    Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  return HandleClassZeroInitialization(Info, E, RD, This, Result);
}

bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_ConstructorConversion:
    return Visit(E->getSubExpr());

  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase: {
    APValue DerivedObject;
    if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
      return false;
    if (!DerivedObject.isStruct())
      return Error(E->getSubExpr());

    // Derived-to-base rvalue conversion: just slice off the derived part.
    APValue *Value = &DerivedObject;
    const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
         PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      Value = &Value->getStructBase(getBaseIndex(RD, Base));
      RD = Base;
    }
    Result = *Value;
    return true;
  }
  }
}

bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
  const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);

  if (RD->isUnion()) {
    // Only one member of a union is initialized; Sema recorded which.
    const FieldDecl *Field = E->getInitializedFieldInUnion();
    Result = APValue(Field);
    if (!Field)
      return true;

    // If the initializer list for a union does not contain any elements, the
    // first element of the union is value-initialized.
    // FIXME: The element should be initialized from an initializer list.
    //        Is this difference ever observable for initializer lists which
    //        we don't build?
    ImplicitValueInitExpr VIE(Field->getType());
    const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
      return false;

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(InitExpr));

    return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
  }

  assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
         "initializer list for class with base classes");
  Result = APValue(APValue::UninitStruct(), 0,
                   std::distance(RD->field_begin(), RD->field_end()));
  unsigned ElementNo = 0;
  bool Success = true;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
    // Anonymous bit-fields are not considered members of the class for
    // purposes of aggregate initialization.
    if (Field->isUnnamedBitfield())
      continue;

    LValue Subobject = This;

    bool HaveInit = ElementNo < E->getNumInits();

    // FIXME: Diagnostics here should point to the end of the initializer
    // list, not the start.
    if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
                            Subobject, *Field, &Layout))
      return false;

    // Perform an implicit value-initialization for members beyond the end of
    // the initializer list.
    ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
    const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(Init));

    APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
    if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
        (Field->isBitField() && !truncateBitfieldValue(Info, Init,
                                                       FieldVal, *Field))) {
      // Keep going over remaining fields if asked to accumulate diagnostics.
      if (!Info.keepEvaluatingAfterFailure())
        return false;
      Success = false;
    }
  }

  return Success;
}

bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  const CXXConstructorDecl *FD = E->getConstructor();
  if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;

  bool ZeroInit = E->requiresZeroInitialization();
  if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
    // Trivial default construction: no constructor body to run.
    // If we've already performed zero-initialization, we're already done.
    if (!Result.isUninit())
      return true;

    if (ZeroInit)
      return ZeroInitialization(E);

    // Otherwise the object is left uninitialized.
    const CXXRecordDecl *RD = FD->getParent();
    if (RD->isUnion())
      Result = APValue((FieldDecl*)0);
    else
      Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
                       std::distance(RD->field_begin(), RD->field_end()));
    return true;
  }

  const FunctionDecl *Definition = 0;
  FD->getBody(Definition);

  if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
    return false;

  // Avoid materializing a temporary for an elidable copy/move constructor.
  if (E->isElidable() && !ZeroInit)
    if (const MaterializeTemporaryExpr *ME
          = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
      return Visit(ME->GetTemporaryExpr());

  if (ZeroInit && !ZeroInitialization(E))
    return false;

  ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs());
  return HandleConstructorCall(E->getExprLoc(), This, Args,
                               cast<CXXConstructorDecl>(Definition), Info,
                               Result);
}

// Build a std::initializer_list value: a {start, end} or {start, length}
// pair referring to the backing constant array.
bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
    const CXXStdInitializerListExpr *E) {
  const ConstantArrayType *ArrayType =
      Info.Ctx.getAsConstantArrayType(E->getSubExpr()->getType());

  LValue Array;
  if (!EvaluateLValue(E->getSubExpr(), Array, Info))
    return false;

  // Get a pointer to the first element of the array.
  Array.addArray(Info, E, ArrayType);

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end())
    return Error(E);

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                            ArrayType->getElementType()))
    return Error(E);

  // FIXME: What if the initializer_list type has base classes, etc?
  Result = APValue(APValue::UninitStruct(), 0, 2);
  Array.moveInto(Result.getStructField(0));

  if (++Field == Record->field_end())
    return Error(E);

  if (Field->getType()->isPointerType() &&
      Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType())) {
    // End pointer.
    if (!HandleLValueArrayAdjustment(Info, E, Array,
                                     ArrayType->getElementType(),
                                     ArrayType->getSize().getZExtValue()))
      return false;
    Array.moveInto(Result.getStructField(1));
  } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
    // Length.
    Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
  else
    return Error(E);

  if (++Field != Record->field_end())
    return Error(E);

  return true;
}

/// Evaluate an rvalue of record (class/struct/union) type.
static bool EvaluateRecord(const Expr *E, const LValue &This,
                           APValue &Result, EvalInfo &Info) {
  assert(E->isRValue() && E->getType()->isRecordType() &&
         "can't evaluate expression as a record rvalue");
  return RecordExprEvaluator(Info, This, Result).Visit(E);
}

//===----------------------------------------------------------------------===//
// Temporary Evaluation
//
// Temporaries are represented in the AST as rvalues, but generally behave like
// lvalues. The full-object of which the temporary is a subobject is implicitly
// materialized so that a reference can bind to it.
//===----------------------------------------------------------------------===//
namespace {
class TemporaryExprEvaluator
  : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
  TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
    LValueExprEvaluatorBaseTy(Info, Result) {}

  /// Visit an expression which constructs the value of this temporary.
  bool VisitConstructExpr(const Expr *E) {
    Result.set(E, Info.CurrentCall->Index);
    return EvaluateInPlace(Info.CurrentCall->createTemporary(E, false),
                           Info, Result, E);
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_ConstructorConversion:
      return VisitConstructExpr(E->getSubExpr());
    }
  }
  bool VisitInitListExpr(const InitListExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCallExpr(const CallExpr *E) {
    return VisitConstructExpr(E);
  }
};
} // end anonymous namespace

/// Evaluate an expression of record type as a temporary.
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isRecordType()); + return TemporaryExprEvaluator(Info, Result).Visit(E); +} + +//===----------------------------------------------------------------------===// +// Vector Evaluation +//===----------------------------------------------------------------------===// + +namespace { + class VectorExprEvaluator + : public ExprEvaluatorBase<VectorExprEvaluator, bool> { + APValue &Result; + public: + + VectorExprEvaluator(EvalInfo &info, APValue &Result) + : ExprEvaluatorBaseTy(info), Result(Result) {} + + bool Success(const ArrayRef<APValue> &V, const Expr *E) { + assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements()); + // FIXME: remove this APValue copy. + Result = APValue(V.data(), V.size()); + return true; + } + bool Success(const APValue &V, const Expr *E) { + assert(V.isVector()); + Result = V; + return true; + } + bool ZeroInitialization(const Expr *E); + + bool VisitUnaryReal(const UnaryOperator *E) + { return Visit(E->getSubExpr()); } + bool VisitCastExpr(const CastExpr* E); + bool VisitInitListExpr(const InitListExpr *E); + bool VisitUnaryImag(const UnaryOperator *E); + // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div, + // binary comparisons, binary and/or/xor, + // shufflevector, ExtVectorElementExpr + }; +} // end anonymous namespace + +static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isVectorType() &&"not a vector rvalue"); + return VectorExprEvaluator(Info, Result).Visit(E); +} + +bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) { + const VectorType *VTy = E->getType()->castAs<VectorType>(); + unsigned NElts = VTy->getNumElements(); + + const Expr *SE = E->getSubExpr(); + QualType SETy = SE->getType(); + + switch (E->getCastKind()) { + case CK_VectorSplat: { + APValue Val = APValue(); + if (SETy->isIntegerType()) { + APSInt IntResult; 
+ if (!EvaluateInteger(SE, IntResult, Info)) + return false; + Val = APValue(IntResult); + } else if (SETy->isRealFloatingType()) { + APFloat F(0.0); + if (!EvaluateFloat(SE, F, Info)) + return false; + Val = APValue(F); + } else { + return Error(E); + } + + // Splat and create vector APValue. + SmallVector<APValue, 4> Elts(NElts, Val); + return Success(Elts, E); + } + case CK_BitCast: { + // Evaluate the operand into an APInt we can extract from. + llvm::APInt SValInt; + if (!EvalAndBitcastToAPInt(Info, SE, SValInt)) + return false; + // Extract the elements + QualType EltTy = VTy->getElementType(); + unsigned EltSize = Info.Ctx.getTypeSize(EltTy); + bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); + SmallVector<APValue, 4> Elts; + if (EltTy->isRealFloatingType()) { + const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy); + unsigned FloatEltSize = EltSize; + if (&Sem == &APFloat::x87DoubleExtended) + FloatEltSize = 80; + for (unsigned i = 0; i < NElts; i++) { + llvm::APInt Elt; + if (BigEndian) + Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize); + else + Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize); + Elts.push_back(APValue(APFloat(Sem, Elt))); + } + } else if (EltTy->isIntegerType()) { + for (unsigned i = 0; i < NElts; i++) { + llvm::APInt Elt; + if (BigEndian) + Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize); + else + Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize); + Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType()))); + } + } else { + return Error(E); + } + return Success(Elts, E); + } + default: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + } +} + +bool +VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) { + const VectorType *VT = E->getType()->castAs<VectorType>(); + unsigned NumInits = E->getNumInits(); + unsigned NumElements = VT->getNumElements(); + + QualType EltTy = VT->getElementType(); + SmallVector<APValue, 4> Elements; + + // The number of initializers can be 
less than the number of + // vector elements. For OpenCL, this can be due to nested vector + // initialization. For GCC compatibility, missing trailing elements + // should be initialized with zeroes. + unsigned CountInits = 0, CountElts = 0; + while (CountElts < NumElements) { + // Handle nested vector initialization. + if (CountInits < NumInits + && E->getInit(CountInits)->getType()->isVectorType()) { + APValue v; + if (!EvaluateVector(E->getInit(CountInits), v, Info)) + return Error(E); + unsigned vlen = v.getVectorLength(); + for (unsigned j = 0; j < vlen; j++) + Elements.push_back(v.getVectorElt(j)); + CountElts += vlen; + } else if (EltTy->isIntegerType()) { + llvm::APSInt sInt(32); + if (CountInits < NumInits) { + if (!EvaluateInteger(E->getInit(CountInits), sInt, Info)) + return false; + } else // trailing integer zero. + sInt = Info.Ctx.MakeIntValue(0, EltTy); + Elements.push_back(APValue(sInt)); + CountElts++; + } else { + llvm::APFloat f(0.0); + if (CountInits < NumInits) { + if (!EvaluateFloat(E->getInit(CountInits), f, Info)) + return false; + } else // trailing float zero. 
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)); + Elements.push_back(APValue(f)); + CountElts++; + } + CountInits++; + } + return Success(Elements, E); +} + +bool +VectorExprEvaluator::ZeroInitialization(const Expr *E) { + const VectorType *VT = E->getType()->getAs<VectorType>(); + QualType EltTy = VT->getElementType(); + APValue ZeroElement; + if (EltTy->isIntegerType()) + ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy)); + else + ZeroElement = + APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy))); + + SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement); + return Success(Elements, E); +} + +bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { + VisitIgnoredValue(E->getSubExpr()); + return ZeroInitialization(E); +} + +//===----------------------------------------------------------------------===// +// Array Evaluation +//===----------------------------------------------------------------------===// + +namespace { + class ArrayExprEvaluator + : public ExprEvaluatorBase<ArrayExprEvaluator, bool> { + const LValue &This; + APValue &Result; + public: + + ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result) + : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {} + + bool Success(const APValue &V, const Expr *E) { + assert((V.isArray() || V.isLValue()) && + "expected array or string literal"); + Result = V; + return true; + } + + bool ZeroInitialization(const Expr *E) { + const ConstantArrayType *CAT = + Info.Ctx.getAsConstantArrayType(E->getType()); + if (!CAT) + return Error(E); + + Result = APValue(APValue::UninitArray(), 0, + CAT->getSize().getZExtValue()); + if (!Result.hasArrayFiller()) return true; + + // Zero-initialize all elements. 
+ LValue Subobject = This; + Subobject.addArray(Info, E, CAT); + ImplicitValueInitExpr VIE(CAT->getElementType()); + return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE); + } + + bool VisitInitListExpr(const InitListExpr *E); + bool VisitCXXConstructExpr(const CXXConstructExpr *E); + bool VisitCXXConstructExpr(const CXXConstructExpr *E, + const LValue &Subobject, + APValue *Value, QualType Type); + }; +} // end anonymous namespace + +static bool EvaluateArray(const Expr *E, const LValue &This, + APValue &Result, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue"); + return ArrayExprEvaluator(Info, This, Result).Visit(E); +} + +bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) { + const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType()); + if (!CAT) + return Error(E); + + // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...] + // an appropriately-typed string literal enclosed in braces. + if (E->isStringLiteralInit()) { + LValue LV; + if (!EvaluateLValue(E->getInit(0), LV, Info)) + return false; + APValue Val; + LV.moveInto(Val); + return Success(Val, E); + } + + bool Success = true; + + assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) && + "zero-initialized array shouldn't have any initialized elts"); + APValue Filler; + if (Result.isArray() && Result.hasArrayFiller()) + Filler = Result.getArrayFiller(); + + unsigned NumEltsToInit = E->getNumInits(); + unsigned NumElts = CAT->getSize().getZExtValue(); + const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : 0; + + // If the initializer might depend on the array index, run it for each + // array element. For now, just whitelist non-class value-initialization. 
+ if (NumEltsToInit != NumElts && !isa<ImplicitValueInitExpr>(FillerExpr)) + NumEltsToInit = NumElts; + + Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts); + + // If the array was previously zero-initialized, preserve the + // zero-initialized values. + if (!Filler.isUninit()) { + for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I) + Result.getArrayInitializedElt(I) = Filler; + if (Result.hasArrayFiller()) + Result.getArrayFiller() = Filler; + } + + LValue Subobject = This; + Subobject.addArray(Info, E, CAT); + for (unsigned Index = 0; Index != NumEltsToInit; ++Index) { + const Expr *Init = + Index < E->getNumInits() ? E->getInit(Index) : FillerExpr; + if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), + Info, Subobject, Init) || + !HandleLValueArrayAdjustment(Info, Init, Subobject, + CAT->getElementType(), 1)) { + if (!Info.keepEvaluatingAfterFailure()) + return false; + Success = false; + } + } + + if (!Result.hasArrayFiller()) + return Success; + + // If we get here, we have a trivial filler, which we can just evaluate + // once and splat over the rest of the array elements. + assert(FillerExpr && "no array filler for incomplete init list"); + return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, + FillerExpr) && Success; +} + +bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) { + return VisitCXXConstructExpr(E, This, &Result, E->getType()); +} + +bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, + const LValue &Subobject, + APValue *Value, + QualType Type) { + bool HadZeroInit = !Value->isUninit(); + + if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) { + unsigned N = CAT->getSize().getZExtValue(); + + // Preserve the array filler if we had prior zero-initialization. + APValue Filler = + HadZeroInit && Value->hasArrayFiller() ? 
Value->getArrayFiller() + : APValue(); + + *Value = APValue(APValue::UninitArray(), N, N); + + if (HadZeroInit) + for (unsigned I = 0; I != N; ++I) + Value->getArrayInitializedElt(I) = Filler; + + // Initialize the elements. + LValue ArrayElt = Subobject; + ArrayElt.addArray(Info, E, CAT); + for (unsigned I = 0; I != N; ++I) + if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I), + CAT->getElementType()) || + !HandleLValueArrayAdjustment(Info, E, ArrayElt, + CAT->getElementType(), 1)) + return false; + + return true; + } + + if (!Type->isRecordType()) + return Error(E); + + const CXXConstructorDecl *FD = E->getConstructor(); + + bool ZeroInit = E->requiresZeroInitialization(); + if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) { + if (HadZeroInit) + return true; + + if (ZeroInit) { + ImplicitValueInitExpr VIE(Type); + return EvaluateInPlace(*Value, Info, Subobject, &VIE); + } + + const CXXRecordDecl *RD = FD->getParent(); + if (RD->isUnion()) + *Value = APValue((FieldDecl*)0); + else + *Value = + APValue(APValue::UninitStruct(), RD->getNumBases(), + std::distance(RD->field_begin(), RD->field_end())); + return true; + } + + const FunctionDecl *Definition = 0; + FD->getBody(Definition); + + if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition)) + return false; + + if (ZeroInit && !HadZeroInit) { + ImplicitValueInitExpr VIE(Type); + if (!EvaluateInPlace(*Value, Info, Subobject, &VIE)) + return false; + } + + ArrayRef<const Expr *> Args(E->getArgs(), E->getNumArgs()); + return HandleConstructorCall(E->getExprLoc(), Subobject, Args, + cast<CXXConstructorDecl>(Definition), + Info, *Value); +} + +//===----------------------------------------------------------------------===// +// Integer Evaluation +// +// As a GNU extension, we support casting pointers to sufficiently-wide integer +// types and back in constant folding. 
Integer values are thus represented +// either as an integer-valued APValue, or as an lvalue-valued APValue. +//===----------------------------------------------------------------------===// + +namespace { +class IntExprEvaluator + : public ExprEvaluatorBase<IntExprEvaluator, bool> { + APValue &Result; +public: + IntExprEvaluator(EvalInfo &info, APValue &result) + : ExprEvaluatorBaseTy(info), Result(result) {} + + bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) { + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); + assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() && + "Invalid evaluation result."); + assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && + "Invalid evaluation result."); + Result = APValue(SI); + return true; + } + bool Success(const llvm::APSInt &SI, const Expr *E) { + return Success(SI, E, Result); + } + + bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) { + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); + assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && + "Invalid evaluation result."); + Result = APValue(APSInt(I)); + Result.getInt().setIsUnsigned( + E->getType()->isUnsignedIntegerOrEnumerationType()); + return true; + } + bool Success(const llvm::APInt &I, const Expr *E) { + return Success(I, E, Result); + } + + bool Success(uint64_t Value, const Expr *E, APValue &Result) { + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); + Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType())); + return true; + } + bool Success(uint64_t Value, const Expr *E) { + return Success(Value, E, Result); + } + + bool Success(CharUnits Size, const Expr *E) { + return Success(Size.getQuantity(), E); + } + + bool Success(const APValue &V, const Expr *E) { + if (V.isLValue() || V.isAddrLabelDiff()) { + Result = V; + return true; + } + return Success(V.getInt(), E); 
+ } + + bool ZeroInitialization(const Expr *E) { return Success(0, E); } + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + bool VisitIntegerLiteral(const IntegerLiteral *E) { + return Success(E->getValue(), E); + } + bool VisitCharacterLiteral(const CharacterLiteral *E) { + return Success(E->getValue(), E); + } + + bool CheckReferencedDecl(const Expr *E, const Decl *D); + bool VisitDeclRefExpr(const DeclRefExpr *E) { + if (CheckReferencedDecl(E, E->getDecl())) + return true; + + return ExprEvaluatorBaseTy::VisitDeclRefExpr(E); + } + bool VisitMemberExpr(const MemberExpr *E) { + if (CheckReferencedDecl(E, E->getMemberDecl())) { + VisitIgnoredValue(E->getBase()); + return true; + } + + return ExprEvaluatorBaseTy::VisitMemberExpr(E); + } + + bool VisitCallExpr(const CallExpr *E); + bool VisitBinaryOperator(const BinaryOperator *E); + bool VisitOffsetOfExpr(const OffsetOfExpr *E); + bool VisitUnaryOperator(const UnaryOperator *E); + + bool VisitCastExpr(const CastExpr* E); + bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); + + bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { + return Success(E->getValue(), E); + } + + // Note, GNU defines __null as an integer, not a pointer. 
+ bool VisitGNUNullExpr(const GNUNullExpr *E) { + return ZeroInitialization(E); + } + + bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitTypeTraitExpr(const TypeTraitExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { + return Success(E->getValue(), E); + } + + bool VisitUnaryReal(const UnaryOperator *E); + bool VisitUnaryImag(const UnaryOperator *E); + + bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E); + bool VisitSizeOfPackExpr(const SizeOfPackExpr *E); + +private: + CharUnits GetAlignOfExpr(const Expr *E); + CharUnits GetAlignOfType(QualType T); + static QualType GetObjectType(APValue::LValueBase B); + bool TryEvaluateBuiltinObjectSize(const CallExpr *E); + // FIXME: Missing: array subscript of vector, member of vector +}; +} // end anonymous namespace + +/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and +/// produce either the integer value or a pointer. +/// +/// GCC has a heinous extension which folds casts between pointer types and +/// pointer-sized integral types. We support this by allowing the evaluation of +/// an integer rvalue to produce a pointer (represented as an lvalue) instead. +/// Some simple arithmetic on such values is supported (they are treated much +/// like char*). 
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+                                    EvalInfo &Info) {
+  assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
+  return IntExprEvaluator(Info, Result).Visit(E);
+}
+
+/// Evaluate an integral-typed rvalue and require a genuine integer result.
+/// Fails (with a diagnostic) if the evaluation produced a folded pointer
+/// value (lvalue) via the pointer<->integer cast extension.
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
+  APValue Val;
+  if (!EvaluateIntegerOrLValue(E, Val, Info))
+    return false;
+  if (!Val.isInt()) {
+    // FIXME: It would be better to produce the diagnostic for casting
+    // a pointer to an integer.
+    Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+    return false;
+  }
+  Result = Val.getInt();
+  return true;
+}
+
+/// Check whether the given declaration can be directly converted to an integral
+/// rvalue. If not, no diagnostic is produced; there are other things we can
+/// try.
+bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
+  // Enums are integer constant exprs.
+  if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+    // Check for signedness/width mismatches between E type and ECD value.
+    bool SameSign = (ECD->getInitVal().isSigned()
+                     == E->getType()->isSignedIntegerOrEnumerationType());
+    bool SameWidth = (ECD->getInitVal().getBitWidth()
+                      == Info.Ctx.getIntWidth(E->getType()));
+    if (SameSign && SameWidth)
+      return Success(ECD->getInitVal(), E);
+
+    // Get rid of mismatch (otherwise Success assertions will fail)
+    // by computing a new value matching the type of E.
+    llvm::APSInt Val = ECD->getInitVal();
+    if (!SameSign)
+      Val.setIsSigned(!ECD->getInitVal().isSigned());
+    if (!SameWidth)
+      Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+    return Success(Val, E);
+  }
+  return false;
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
+static int EvaluateBuiltinClassifyType(const CallExpr *E) {
+  // The following enum mimics the values returned by GCC.
+  // FIXME: Does GCC differ between lvalue and rvalue references here?
+  enum gcc_type_class {
+    no_type_class = -1,
+    void_type_class, integer_type_class, char_type_class,
+    enumeral_type_class, boolean_type_class,
+    pointer_type_class, reference_type_class, offset_type_class,
+    real_type_class, complex_type_class,
+    function_type_class, method_type_class,
+    record_type_class, union_type_class,
+    array_type_class, string_type_class,
+    lang_type_class
+  };
+
+  // If no argument was supplied, default to "no_type_class". This isn't
+  // ideal, however it is what gcc does.
+  if (E->getNumArgs() == 0)
+    return no_type_class;
+
+  // The checks below are ordered: e.g. char must be classified before the
+  // general integer check, and class/union before the general record checks.
+  QualType ArgTy = E->getArg(0)->getType();
+  if (ArgTy->isVoidType())
+    return void_type_class;
+  else if (ArgTy->isEnumeralType())
+    return enumeral_type_class;
+  else if (ArgTy->isBooleanType())
+    return boolean_type_class;
+  else if (ArgTy->isCharType())
+    return string_type_class; // gcc doesn't appear to use char_type_class
+  else if (ArgTy->isIntegerType())
+    return integer_type_class;
+  else if (ArgTy->isPointerType())
+    return pointer_type_class;
+  else if (ArgTy->isReferenceType())
+    return reference_type_class;
+  else if (ArgTy->isRealType())
+    return real_type_class;
+  else if (ArgTy->isComplexType())
+    return complex_type_class;
+  else if (ArgTy->isFunctionType())
+    return function_type_class;
+  else if (ArgTy->isStructureOrClassType())
+    return record_type_class;
+  else if (ArgTy->isUnionType())
+    return union_type_class;
+  else if (ArgTy->isArrayType())
+    return array_type_class;
+  else // FIXME: offset_type_class, method_type_class, & lang_type_class?
+    llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+}
+
+/// EvaluateBuiltinConstantPForLValue - Determine the result of
+/// __builtin_constant_p when applied to the given lvalue.
+///
+/// An lvalue is only "constant" if it is a pointer or reference to the first
+/// character of a string literal.
+// Templated so it can be applied both to APValue (which exposes
+// getLValueBase/getLValueOffset) and to the evaluator's local LValue type.
+template<typename LValue>
+static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
+  const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
+  // Constant iff the base is a string literal and we point at its first byte.
+  return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+}
+
+/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
+/// GCC as we can manage.
+static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+  QualType ArgType = Arg->getType();
+
+  // __builtin_constant_p always has one operand. The rules which gcc follows
+  // are not precisely documented, but are as follows:
+  //
+  //  - If the operand is of integral, floating, complex or enumeration type,
+  //    and can be folded to a known value of that type, it returns 1.
+  //  - If the operand can be folded to a pointer to the first character
+  //    of a string literal (or such a pointer cast to an integral type), it
+  //    returns 1.
+  //
+  // Otherwise, it returns 0.
+  //
+  // FIXME: GCC also intends to return 1 for literals of aggregate types, but
+  // its support for this does not currently work.
+  if (ArgType->isIntegralOrEnumerationType()) {
+    Expr::EvalResult Result;
+    // Side effects disqualify the operand even if it folds to a value.
+    if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+      return false;
+
+    APValue &V = Result.Val;
+    if (V.getKind() == APValue::Int)
+      return true;
+
+    // An integral-typed operand may still fold to a pointer (lvalue) via the
+    // pointer-to-integer cast extension; accept string-literal pointers.
+    return EvaluateBuiltinConstantPForLValue(V);
+  } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
+    return Arg->isEvaluatable(Ctx);
+  } else if (ArgType->isPointerType() || Arg->isGLValue()) {
+    LValue LV;
+    Expr::EvalStatus Status;
+    // Use constant-folding mode: GCC's semantics here are folding semantics,
+    // not strict C++11 constant-expression semantics.
+    EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+    if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
+                          : EvaluatePointer(Arg, LV, Info)) &&
+        !Status.HasSideEffects)
+      return EvaluateBuiltinConstantPForLValue(LV);
+  }
+
+  // Anything else isn't considered to be sufficiently constant.
+  return false;
+}
+
+/// Retrieves the "underlying object type" of the given expression,
+/// as used by __builtin_object_size.
+QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) { + if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) { + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) + return VD->getType(); + } else if (const Expr *E = B.get<const Expr*>()) { + if (isa<CompoundLiteralExpr>(E)) + return E->getType(); + } + + return QualType(); +} + +bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) { + LValue Base; + + { + // The operand of __builtin_object_size is never evaluated for side-effects. + // If there are any, but we can determine the pointed-to object anyway, then + // ignore the side-effects. + SpeculativeEvaluationRAII SpeculativeEval(Info); + if (!EvaluatePointer(E->getArg(0), Base, Info)) + return false; + } + + // If we can prove the base is null, lower to zero now. + if (!Base.getLValueBase()) return Success(0, E); + + QualType T = GetObjectType(Base.getLValueBase()); + if (T.isNull() || + T->isIncompleteType() || + T->isFunctionType() || + T->isVariablyModifiedType() || + T->isDependentType()) + return Error(E); + + CharUnits Size = Info.Ctx.getTypeSizeInChars(T); + CharUnits Offset = Base.getLValueOffset(); + + if (!Offset.isNegative() && Offset <= Size) + Size -= Offset; + else + Size = CharUnits::Zero(); + return Success(Size, E); +} + +bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) { + switch (unsigned BuiltinOp = E->isBuiltinCall()) { + default: + return ExprEvaluatorBaseTy::VisitCallExpr(E); + + case Builtin::BI__builtin_object_size: { + if (TryEvaluateBuiltinObjectSize(E)) + return true; + + // If evaluating the argument has side-effects, we can't determine the size + // of the object, and so we lower it to unknown now. CodeGen relies on us to + // handle all cases where the expression has side-effects. 
+ if (E->getArg(0)->HasSideEffects(Info.Ctx)) { + if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1) + return Success(-1ULL, E); + return Success(0, E); + } + + // Expression had no side effects, but we couldn't statically determine the + // size of the referenced object. + return Error(E); + } + + case Builtin::BI__builtin_bswap16: + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + + return Success(Val.byteSwap(), E); + } + + case Builtin::BI__builtin_classify_type: + return Success(EvaluateBuiltinClassifyType(E), E); + + // FIXME: BI__builtin_clrsb + // FIXME: BI__builtin_clrsbl + // FIXME: BI__builtin_clrsbll + + case Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + if (!Val) + return Error(E); + + return Success(Val.countLeadingZeros(), E); + } + + case Builtin::BI__builtin_constant_p: + return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E); + + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + if (!Val) + return Error(E); + + return Success(Val.countTrailingZeros(), E); + } + + case Builtin::BI__builtin_eh_return_data_regno: { + int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue(); + Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand); + return Success(Operand, E); + } + + case Builtin::BI__builtin_expect: + return Visit(E->getArg(0)); + + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + + unsigned N = Val.countTrailingZeros(); + return Success(N == Val.getBitWidth() ? 
0 : N + 1, E); + } + + case Builtin::BI__builtin_fpclassify: { + APFloat Val(0.0); + if (!EvaluateFloat(E->getArg(5), Val, Info)) + return false; + unsigned Arg; + switch (Val.getCategory()) { + case APFloat::fcNaN: Arg = 0; break; + case APFloat::fcInfinity: Arg = 1; break; + case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break; + case APFloat::fcZero: Arg = 4; break; + } + return Visit(E->getArg(Arg)); + } + + case Builtin::BI__builtin_isinf_sign: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E); + } + + case Builtin::BI__builtin_isinf: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isInfinity() ? 1 : 0, E); + } + + case Builtin::BI__builtin_isfinite: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isFinite() ? 1 : 0, E); + } + + case Builtin::BI__builtin_isnan: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isNaN() ? 1 : 0, E); + } + + case Builtin::BI__builtin_isnormal: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isNormal() ? 1 : 0, E); + } + + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + + return Success(Val.countPopulation() % 2, E); + } + + case Builtin::BI__builtin_popcount: + case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: { + APSInt Val; + if (!EvaluateInteger(E->getArg(0), Val, Info)) + return false; + + return Success(Val.countPopulation(), E); + } + + case Builtin::BIstrlen: + // A call to strlen is not a constant expression. 
+ if (Info.getLangOpts().CPlusPlus11) + Info.CCEDiag(E, diag::note_constexpr_invalid_function) + << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'"; + else + Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); + // Fall through. + case Builtin::BI__builtin_strlen: { + // As an extension, we support __builtin_strlen() as a constant expression, + // and support folding strlen() to a constant. + LValue String; + if (!EvaluatePointer(E->getArg(0), String, Info)) + return false; + + // Fast path: if it's a string literal, search the string value. + if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>( + String.getLValueBase().dyn_cast<const Expr *>())) { + // The string literal may have embedded null characters. Find the first + // one and truncate there. + StringRef Str = S->getBytes(); + int64_t Off = String.Offset.getQuantity(); + if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() && + S->getCharByteWidth() == 1) { + Str = Str.substr(Off); + + StringRef::size_type Pos = Str.find(0); + if (Pos != StringRef::npos) + Str = Str.substr(0, Pos); + + return Success(Str.size(), E); + } + + // Fall through to slow path to issue appropriate diagnostic. + } + + // Slow path: scan the bytes of the string looking for the terminating 0. 
+ QualType CharTy = E->getArg(0)->getType()->getPointeeType(); + for (uint64_t Strlen = 0; /**/; ++Strlen) { + APValue Char; + if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) || + !Char.isInt()) + return false; + if (!Char.getInt()) + return Success(Strlen, E); + if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1)) + return false; + } + } + + case Builtin::BI__atomic_always_lock_free: + case Builtin::BI__atomic_is_lock_free: + case Builtin::BI__c11_atomic_is_lock_free: { + APSInt SizeVal; + if (!EvaluateInteger(E->getArg(0), SizeVal, Info)) + return false; + + // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power + // of two less than the maximum inline atomic width, we know it is + // lock-free. If the size isn't a power of two, or greater than the + // maximum alignment where we promote atomics, we know it is not lock-free + // (at least not in the sense of atomic_is_lock_free). Otherwise, + // the answer can only be determined at runtime; for example, 16-byte + // atomics have lock-free implementations on some, but not all, + // x86-64 processors. + + // Check power-of-two. + CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue()); + if (Size.isPowerOfTwo()) { + // Check against inlining width. + unsigned InlineWidthBits = + Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth(); + if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) { + if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free || + Size == CharUnits::One() || + E->getArg(1)->isNullPointerConstant(Info.Ctx, + Expr::NPC_NeverValueDependent)) + // OK, we will inline appropriately-aligned operations of this size, + // and _Atomic(T) is appropriately-aligned. + return Success(1, E); + + QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()-> + castAs<PointerType>()->getPointeeType(); + if (!PointeeType->isIncompleteType() && + Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) { + // OK, we will inline operations on this object. 
+ return Success(1, E); + } + } + } + + return BuiltinOp == Builtin::BI__atomic_always_lock_free ? + Success(0, E) : Error(E); + } + } +} + +static bool HasSameBase(const LValue &A, const LValue &B) { + if (!A.getLValueBase()) + return !B.getLValueBase(); + if (!B.getLValueBase()) + return false; + + if (A.getLValueBase().getOpaqueValue() != + B.getLValueBase().getOpaqueValue()) { + const Decl *ADecl = GetLValueBaseDecl(A); + if (!ADecl) + return false; + const Decl *BDecl = GetLValueBaseDecl(B); + if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl()) + return false; + } + + return IsGlobalLValue(A.getLValueBase()) || + A.getLValueCallIndex() == B.getLValueCallIndex(); +} + +namespace { + +/// \brief Data recursive integer evaluator of certain binary operators. +/// +/// We use a data recursive algorithm for binary operators so that we are able +/// to handle extreme cases of chained binary operators without causing stack +/// overflow. +class DataRecursiveIntBinOpEvaluator { + struct EvalResult { + APValue Val; + bool Failed; + + EvalResult() : Failed(false) { } + + void swap(EvalResult &RHS) { + Val.swap(RHS.Val); + Failed = RHS.Failed; + RHS.Failed = false; + } + }; + + struct Job { + const Expr *E; + EvalResult LHSResult; // meaningful only for binary operator expression. + enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind; + + Job() : StoredInfo(0) { } + void startSpeculativeEval(EvalInfo &Info) { + OldEvalStatus = Info.EvalStatus; + Info.EvalStatus.Diag = 0; + StoredInfo = &Info; + } + ~Job() { + if (StoredInfo) { + StoredInfo->EvalStatus = OldEvalStatus; + } + } + private: + EvalInfo *StoredInfo; // non-null if status changed. 
+ Expr::EvalStatus OldEvalStatus; + }; + + SmallVector<Job, 16> Queue; + + IntExprEvaluator &IntEval; + EvalInfo &Info; + APValue &FinalResult; + +public: + DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result) + : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { } + + /// \brief True if \param E is a binary operator that we are going to handle + /// data recursively. + /// We handle binary operators that are comma, logical, or that have operands + /// with integral or enumeration type. + static bool shouldEnqueue(const BinaryOperator *E) { + return E->getOpcode() == BO_Comma || + E->isLogicalOp() || + (E->getLHS()->getType()->isIntegralOrEnumerationType() && + E->getRHS()->getType()->isIntegralOrEnumerationType()); + } + + bool Traverse(const BinaryOperator *E) { + enqueue(E); + EvalResult PrevResult; + while (!Queue.empty()) + process(PrevResult); + + if (PrevResult.Failed) return false; + + FinalResult.swap(PrevResult.Val); + return true; + } + +private: + bool Success(uint64_t Value, const Expr *E, APValue &Result) { + return IntEval.Success(Value, E, Result); + } + bool Success(const APSInt &Value, const Expr *E, APValue &Result) { + return IntEval.Success(Value, E, Result); + } + bool Error(const Expr *E) { + return IntEval.Error(E); + } + bool Error(const Expr *E, diag::kind D) { + return IntEval.Error(E, D); + } + + OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { + return Info.CCEDiag(E, D); + } + + // \brief Returns true if visiting the RHS is necessary, false otherwise. 
+ bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, + bool &SuppressRHSDiags); + + bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, + const BinaryOperator *E, APValue &Result); + + void EvaluateExpr(const Expr *E, EvalResult &Result) { + Result.Failed = !Evaluate(Result.Val, Info, E); + if (Result.Failed) + Result.Val = APValue(); + } + + void process(EvalResult &Result); + + void enqueue(const Expr *E) { + E = E->IgnoreParens(); + Queue.resize(Queue.size()+1); + Queue.back().E = E; + Queue.back().Kind = Job::AnyExprKind; + } +}; + +} + +bool DataRecursiveIntBinOpEvaluator:: + VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, + bool &SuppressRHSDiags) { + if (E->getOpcode() == BO_Comma) { + // Ignore LHS but note if we could not evaluate it. + if (LHSResult.Failed) + return Info.noteSideEffect(); + return true; + } + + if (E->isLogicalOp()) { + bool LHSAsBool; + if (!LHSResult.Failed && HandleConversionToBool(LHSResult.Val, LHSAsBool)) { + // We were able to evaluate the LHS, see if we can get away with not + // evaluating the RHS: 0 && X -> 0, 1 || X -> 1 + if (LHSAsBool == (E->getOpcode() == BO_LOr)) { + Success(LHSAsBool, E, LHSResult.Val); + return false; // Ignore RHS + } + } else { + LHSResult.Failed = true; + + // Since we weren't able to evaluate the left hand side, it + // must have had side effects. + if (!Info.noteSideEffect()) + return false; + + // We can't evaluate the LHS; however, sometimes the result + // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. + // Don't ignore RHS and suppress diagnostics from this arm. 
+ SuppressRHSDiags = true; + } + + return true; + } + + assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && + E->getRHS()->getType()->isIntegralOrEnumerationType()); + + if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure()) + return false; // Ignore RHS; + + return true; +} + +bool DataRecursiveIntBinOpEvaluator:: + VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, + const BinaryOperator *E, APValue &Result) { + if (E->getOpcode() == BO_Comma) { + if (RHSResult.Failed) + return false; + Result = RHSResult.Val; + return true; + } + + if (E->isLogicalOp()) { + bool lhsResult, rhsResult; + bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult); + bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult); + + if (LHSIsOK) { + if (RHSIsOK) { + if (E->getOpcode() == BO_LOr) + return Success(lhsResult || rhsResult, E, Result); + else + return Success(lhsResult && rhsResult, E, Result); + } + } else { + if (RHSIsOK) { + // We can't evaluate the LHS; however, sometimes the result + // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. + if (rhsResult == (E->getOpcode() == BO_LOr)) + return Success(rhsResult, E, Result); + } + } + + return false; + } + + assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && + E->getRHS()->getType()->isIntegralOrEnumerationType()); + + if (LHSResult.Failed || RHSResult.Failed) + return false; + + const APValue &LHSVal = LHSResult.Val; + const APValue &RHSVal = RHSResult.Val; + + // Handle cases like (unsigned long)&a + 4. 
+ if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) { + Result = LHSVal; + CharUnits AdditionalOffset = + CharUnits::fromQuantity(RHSVal.getInt().getZExtValue()); + if (E->getOpcode() == BO_Add) + Result.getLValueOffset() += AdditionalOffset; + else + Result.getLValueOffset() -= AdditionalOffset; + return true; + } + + // Handle cases like 4 + (unsigned long)&a + if (E->getOpcode() == BO_Add && + RHSVal.isLValue() && LHSVal.isInt()) { + Result = RHSVal; + Result.getLValueOffset() += + CharUnits::fromQuantity(LHSVal.getInt().getZExtValue()); + return true; + } + + if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) { + // Handle (intptr_t)&&A - (intptr_t)&&B. + if (!LHSVal.getLValueOffset().isZero() || + !RHSVal.getLValueOffset().isZero()) + return false; + const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>(); + const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>(); + if (!LHSExpr || !RHSExpr) + return false; + const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr); + const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr); + if (!LHSAddrExpr || !RHSAddrExpr) + return false; + // Make sure both labels come from the same function. + if (LHSAddrExpr->getLabel()->getDeclContext() != + RHSAddrExpr->getLabel()->getDeclContext()) + return false; + Result = APValue(LHSAddrExpr, RHSAddrExpr); + return true; + } + + // All the remaining cases expect both operands to be an integer + if (!LHSVal.isInt() || !RHSVal.isInt()) + return Error(E); + + // Set up the width and signedness manually, in case it can't be deduced + // from the operation we're performing. + // FIXME: Don't do this in the cases where we can deduce it. 
+ APSInt Value(Info.Ctx.getIntWidth(E->getType()), + E->getType()->isUnsignedIntegerOrEnumerationType()); + if (!handleIntIntBinOp(Info, E, LHSVal.getInt(), E->getOpcode(), + RHSVal.getInt(), Value)) + return false; + return Success(Value, E, Result); +} + +void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) { + Job &job = Queue.back(); + + switch (job.Kind) { + case Job::AnyExprKind: { + if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) { + if (shouldEnqueue(Bop)) { + job.Kind = Job::BinOpKind; + enqueue(Bop->getLHS()); + return; + } + } + + EvaluateExpr(job.E, Result); + Queue.pop_back(); + return; + } + + case Job::BinOpKind: { + const BinaryOperator *Bop = cast<BinaryOperator>(job.E); + bool SuppressRHSDiags = false; + if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) { + Queue.pop_back(); + return; + } + if (SuppressRHSDiags) + job.startSpeculativeEval(Info); + job.LHSResult.swap(Result); + job.Kind = Job::BinOpVisitedLHSKind; + enqueue(Bop->getRHS()); + return; + } + + case Job::BinOpVisitedLHSKind: { + const BinaryOperator *Bop = cast<BinaryOperator>(job.E); + EvalResult RHS; + RHS.swap(Result); + Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val); + Queue.pop_back(); + return; + } + } + + llvm_unreachable("Invalid Job::Kind!"); +} + +bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { + if (E->isAssignmentOp()) + return Error(E); + + if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E)) + return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E); + + QualType LHSTy = E->getLHS()->getType(); + QualType RHSTy = E->getRHS()->getType(); + + if (LHSTy->isAnyComplexType()) { + assert(RHSTy->isAnyComplexType() && "Invalid comparison"); + ComplexValue LHS, RHS; + + bool LHSOK = EvaluateComplex(E->getLHS(), LHS, Info); + if (!LHSOK && !Info.keepEvaluatingAfterFailure()) + return false; + + if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK) + return false; + + if (LHS.isComplexFloat()) { + 
APFloat::cmpResult CR_r = + LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal()); + APFloat::cmpResult CR_i = + LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag()); + + if (E->getOpcode() == BO_EQ) + return Success((CR_r == APFloat::cmpEqual && + CR_i == APFloat::cmpEqual), E); + else { + assert(E->getOpcode() == BO_NE && + "Invalid complex comparison."); + return Success(((CR_r == APFloat::cmpGreaterThan || + CR_r == APFloat::cmpLessThan || + CR_r == APFloat::cmpUnordered) || + (CR_i == APFloat::cmpGreaterThan || + CR_i == APFloat::cmpLessThan || + CR_i == APFloat::cmpUnordered)), E); + } + } else { + if (E->getOpcode() == BO_EQ) + return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() && + LHS.getComplexIntImag() == RHS.getComplexIntImag()), E); + else { + assert(E->getOpcode() == BO_NE && + "Invalid compex comparison."); + return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() || + LHS.getComplexIntImag() != RHS.getComplexIntImag()), E); + } + } + } + + if (LHSTy->isRealFloatingType() && + RHSTy->isRealFloatingType()) { + APFloat RHS(0.0), LHS(0.0); + + bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info); + if (!LHSOK && !Info.keepEvaluatingAfterFailure()) + return false; + + if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK) + return false; + + APFloat::cmpResult CR = LHS.compare(RHS); + + switch (E->getOpcode()) { + default: + llvm_unreachable("Invalid binary operator!"); + case BO_LT: + return Success(CR == APFloat::cmpLessThan, E); + case BO_GT: + return Success(CR == APFloat::cmpGreaterThan, E); + case BO_LE: + return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E); + case BO_GE: + return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual, + E); + case BO_EQ: + return Success(CR == APFloat::cmpEqual, E); + case BO_NE: + return Success(CR == APFloat::cmpGreaterThan + || CR == APFloat::cmpLessThan + || CR == APFloat::cmpUnordered, E); + } + } + + if (LHSTy->isPointerType() && 
RHSTy->isPointerType()) { + if (E->getOpcode() == BO_Sub || E->isComparisonOp()) { + LValue LHSValue, RHSValue; + + bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info); + if (!LHSOK && Info.keepEvaluatingAfterFailure()) + return false; + + if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK) + return false; + + // Reject differing bases from the normal codepath; we special-case + // comparisons to null. + if (!HasSameBase(LHSValue, RHSValue)) { + if (E->getOpcode() == BO_Sub) { + // Handle &&A - &&B. + if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero()) + return false; + const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>(); + const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>(); + if (!LHSExpr || !RHSExpr) + return false; + const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr); + const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr); + if (!LHSAddrExpr || !RHSAddrExpr) + return false; + // Make sure both labels come from the same function. + if (LHSAddrExpr->getLabel()->getDeclContext() != + RHSAddrExpr->getLabel()->getDeclContext()) + return false; + Result = APValue(LHSAddrExpr, RHSAddrExpr); + return true; + } + // Inequalities and subtractions between unrelated pointers have + // unspecified or undefined behavior. + if (!E->isEqualityOp()) + return Error(E); + // A constant address may compare equal to the address of a symbol. + // The one exception is that address of an object cannot compare equal + // to a null pointer constant. + if ((!LHSValue.Base && !LHSValue.Offset.isZero()) || + (!RHSValue.Base && !RHSValue.Offset.isZero())) + return Error(E); + // It's implementation-defined whether distinct literals will have + // distinct addresses. In clang, the result of such a comparison is + // unspecified, so it is not a constant expression. However, we do know + // that the address of a literal will be non-null. 
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) && + LHSValue.Base && RHSValue.Base) + return Error(E); + // We can't tell whether weak symbols will end up pointing to the same + // object. + if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue)) + return Error(E); + // Pointers with different bases cannot represent the same object. + // (Note that clang defaults to -fmerge-all-constants, which can + // lead to inconsistent results for comparisons involving the address + // of a constant; this generally doesn't matter in practice.) + return Success(E->getOpcode() == BO_NE, E); + } + + const CharUnits &LHSOffset = LHSValue.getLValueOffset(); + const CharUnits &RHSOffset = RHSValue.getLValueOffset(); + + SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator(); + SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator(); + + if (E->getOpcode() == BO_Sub) { + // C++11 [expr.add]p6: + // Unless both pointers point to elements of the same array object, or + // one past the last element of the array object, the behavior is + // undefined. + if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && + !AreElementsOfSameArray(getType(LHSValue.Base), + LHSDesignator, RHSDesignator)) + CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array); + + QualType Type = E->getLHS()->getType(); + QualType ElementType = Type->getAs<PointerType>()->getPointeeType(); + + CharUnits ElementSize; + if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize)) + return false; + + // As an extension, a type may have zero size (empty struct or union in + // C, array of zero length). Pointer subtraction in such cases has + // undefined behavior, so is not constant. + if (ElementSize.isZero()) { + Info.Diag(E, diag::note_constexpr_pointer_subtraction_zero_size) + << ElementType; + return false; + } + + // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime, + // and produce incorrect results when it overflows. 
Such behavior + // appears to be non-conforming, but is common, so perhaps we should + // assume the standard intended for such cases to be undefined behavior + // and check for them. + + // Compute (LHSOffset - RHSOffset) / Size carefully, checking for + // overflow in the final conversion to ptrdiff_t. + APSInt LHS( + llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false); + APSInt RHS( + llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false); + APSInt ElemSize( + llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false); + APSInt TrueResult = (LHS - RHS) / ElemSize; + APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType())); + + if (Result.extend(65) != TrueResult) + HandleOverflow(Info, E, TrueResult, E->getType()); + return Success(Result, E); + } + + // C++11 [expr.rel]p3: + // Pointers to void (after pointer conversions) can be compared, with a + // result defined as follows: If both pointers represent the same + // address or are both the null pointer value, the result is true if the + // operator is <= or >= and false otherwise; otherwise the result is + // unspecified. + // We interpret this as applying to pointers to *cv* void. + if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset && + E->isRelationalOp()) + CCEDiag(E, diag::note_constexpr_void_comparison); + + // C++11 [expr.rel]p2: + // - If two pointers point to non-static data members of the same object, + // or to subobjects or array elements fo such members, recursively, the + // pointer to the later declared member compares greater provided the + // two members have the same access control and provided their class is + // not a union. + // [...] + // - Otherwise pointer comparisons are unspecified. 
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && + E->isRelationalOp()) { + bool WasArrayIndex; + unsigned Mismatch = + FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator, + RHSDesignator, WasArrayIndex); + // At the point where the designators diverge, the comparison has a + // specified value if: + // - we are comparing array indices + // - we are comparing fields of a union, or fields with the same access + // Otherwise, the result is unspecified and thus the comparison is not a + // constant expression. + if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() && + Mismatch < RHSDesignator.Entries.size()) { + const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]); + const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]); + if (!LF && !RF) + CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes); + else if (!LF) + CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field) + << getAsBaseClass(LHSDesignator.Entries[Mismatch]) + << RF->getParent() << RF; + else if (!RF) + CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field) + << getAsBaseClass(RHSDesignator.Entries[Mismatch]) + << LF->getParent() << LF; + else if (!LF->getParent()->isUnion() && + LF->getAccess() != RF->getAccess()) + CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access) + << LF << LF->getAccess() << RF << RF->getAccess() + << LF->getParent(); + } + } + + // The comparison here must be unsigned, and performed with the same + // width as the pointer. 
+ unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy); + uint64_t CompareLHS = LHSOffset.getQuantity(); + uint64_t CompareRHS = RHSOffset.getQuantity(); + assert(PtrSize <= 64 && "Unexpected pointer width"); + uint64_t Mask = ~0ULL >> (64 - PtrSize); + CompareLHS &= Mask; + CompareRHS &= Mask; + + // If there is a base and this is a relational operator, we can only + // compare pointers within the object in question; otherwise, the result + // depends on where the object is located in memory. + if (!LHSValue.Base.isNull() && E->isRelationalOp()) { + QualType BaseTy = getType(LHSValue.Base); + if (BaseTy->isIncompleteType()) + return Error(E); + CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy); + uint64_t OffsetLimit = Size.getQuantity(); + if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit) + return Error(E); + } + + switch (E->getOpcode()) { + default: llvm_unreachable("missing comparison operator"); + case BO_LT: return Success(CompareLHS < CompareRHS, E); + case BO_GT: return Success(CompareLHS > CompareRHS, E); + case BO_LE: return Success(CompareLHS <= CompareRHS, E); + case BO_GE: return Success(CompareLHS >= CompareRHS, E); + case BO_EQ: return Success(CompareLHS == CompareRHS, E); + case BO_NE: return Success(CompareLHS != CompareRHS, E); + } + } + } + + if (LHSTy->isMemberPointerType()) { + assert(E->isEqualityOp() && "unexpected member pointer operation"); + assert(RHSTy->isMemberPointerType() && "invalid comparison"); + + MemberPtr LHSValue, RHSValue; + + bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info); + if (!LHSOK && Info.keepEvaluatingAfterFailure()) + return false; + + if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK) + return false; + + // C++11 [expr.eq]p2: + // If both operands are null, they compare equal. Otherwise if only one is + // null, they compare unequal. 
+ if (!LHSValue.getDecl() || !RHSValue.getDecl()) { + bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl(); + return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E); + } + + // Otherwise if either is a pointer to a virtual member function, the + // result is unspecified. + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl())) + if (MD->isVirtual()) + CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD; + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl())) + if (MD->isVirtual()) + CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD; + + // Otherwise they compare equal if and only if they would refer to the + // same member of the same most derived object or the same subobject if + // they were dereferenced with a hypothetical object of the associated + // class type. + bool Equal = LHSValue == RHSValue; + return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E); + } + + if (LHSTy->isNullPtrType()) { + assert(E->isComparisonOp() && "unexpected nullptr operation"); + assert(RHSTy->isNullPtrType() && "missing pointer conversion"); + // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t + // are compared, the result is true of the operator is <=, >= or ==, and + // false otherwise. + BinaryOperator::Opcode Opcode = E->getOpcode(); + return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E); + } + + assert((!LHSTy->isIntegralOrEnumerationType() || + !RHSTy->isIntegralOrEnumerationType()) && + "DataRecursiveIntBinOpEvaluator should have handled integral types"); + // We can't continue from here for non-integral types. + return ExprEvaluatorBaseTy::VisitBinaryOperator(E); +} + +CharUnits IntExprEvaluator::GetAlignOfType(QualType T) { + // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the + // result shall be the alignment of the referenced type." 
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>()) + T = Ref->getPointeeType(); + + // __alignof is defined to return the preferred alignment. + return Info.Ctx.toCharUnitsFromBits( + Info.Ctx.getPreferredTypeAlign(T.getTypePtr())); +} + +CharUnits IntExprEvaluator::GetAlignOfExpr(const Expr *E) { + E = E->IgnoreParens(); + + // The kinds of expressions that we have special-case logic here for + // should be kept up to date with the special checks for those + // expressions in Sema. + + // alignof decl is always accepted, even if it doesn't make sense: we default + // to 1 in those cases. + if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) + return Info.Ctx.getDeclAlign(DRE->getDecl(), + /*RefAsPointee*/true); + + if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) + return Info.Ctx.getDeclAlign(ME->getMemberDecl(), + /*RefAsPointee*/true); + + return GetAlignOfType(E->getType()); +} + + +/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with +/// a result as the expression's type. +bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( + const UnaryExprOrTypeTraitExpr *E) { + switch(E->getKind()) { + case UETT_AlignOf: { + if (E->isArgumentType()) + return Success(GetAlignOfType(E->getArgumentType()), E); + else + return Success(GetAlignOfExpr(E->getArgumentExpr()), E); + } + + case UETT_VecStep: { + QualType Ty = E->getTypeOfArgument(); + + if (Ty->isVectorType()) { + unsigned n = Ty->castAs<VectorType>()->getNumElements(); + + // The vec_step built-in functions that take a 3-component + // vector return 4. (OpenCL 1.1 spec 6.11.12) + if (n == 3) + n = 4; + + return Success(n, E); + } else + return Success(1, E); + } + + case UETT_SizeOf: { + QualType SrcTy = E->getTypeOfArgument(); + // C++ [expr.sizeof]p2: "When applied to a reference or a reference type, + // the result is the size of the referenced type." 
+ if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>()) + SrcTy = Ref->getPointeeType(); + + CharUnits Sizeof; + if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof)) + return false; + return Success(Sizeof, E); + } + } + + llvm_unreachable("unknown expr/type trait"); +} + +bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) { + CharUnits Result; + unsigned n = OOE->getNumComponents(); + if (n == 0) + return Error(OOE); + QualType CurrentType = OOE->getTypeSourceInfo()->getType(); + for (unsigned i = 0; i != n; ++i) { + OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i); + switch (ON.getKind()) { + case OffsetOfExpr::OffsetOfNode::Array: { + const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex()); + APSInt IdxResult; + if (!EvaluateInteger(Idx, IdxResult, Info)) + return false; + const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType); + if (!AT) + return Error(OOE); + CurrentType = AT->getElementType(); + CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType); + Result += IdxResult.getSExtValue() * ElementSize; + break; + } + + case OffsetOfExpr::OffsetOfNode::Field: { + FieldDecl *MemberDecl = ON.getField(); + const RecordType *RT = CurrentType->getAs<RecordType>(); + if (!RT) + return Error(OOE); + RecordDecl *RD = RT->getDecl(); + if (RD->isInvalidDecl()) return false; + const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD); + unsigned i = MemberDecl->getFieldIndex(); + assert(i < RL.getFieldCount() && "offsetof field in wrong type"); + Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i)); + CurrentType = MemberDecl->getType().getNonReferenceType(); + break; + } + + case OffsetOfExpr::OffsetOfNode::Identifier: + llvm_unreachable("dependent __builtin_offsetof"); + + case OffsetOfExpr::OffsetOfNode::Base: { + CXXBaseSpecifier *BaseSpec = ON.getBase(); + if (BaseSpec->isVirtual()) + return Error(OOE); + + // Find the layout of the class whose base we are looking into. 
+ const RecordType *RT = CurrentType->getAs<RecordType>(); + if (!RT) + return Error(OOE); + RecordDecl *RD = RT->getDecl(); + if (RD->isInvalidDecl()) return false; + const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD); + + // Find the base class itself. + CurrentType = BaseSpec->getType(); + const RecordType *BaseRT = CurrentType->getAs<RecordType>(); + if (!BaseRT) + return Error(OOE); + + // Add the offset to the base. + Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl())); + break; + } + } + } + return Success(Result, OOE); +} + +bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { + switch (E->getOpcode()) { + default: + // Address, indirect, pre/post inc/dec, etc are not valid constant exprs. + // See C99 6.6p3. + return Error(E); + case UO_Extension: + // FIXME: Should extension allow i-c-e extension expressions in its scope? + // If so, we could clear the diagnostic ID. + return Visit(E->getSubExpr()); + case UO_Plus: + // The result is just the value. + return Visit(E->getSubExpr()); + case UO_Minus: { + if (!Visit(E->getSubExpr())) + return false; + if (!Result.isInt()) return Error(E); + const APSInt &Value = Result.getInt(); + if (Value.isSigned() && Value.isMinSignedValue()) + HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1), + E->getType()); + return Success(-Value, E); + } + case UO_Not: { + if (!Visit(E->getSubExpr())) + return false; + if (!Result.isInt()) return Error(E); + return Success(~Result.getInt(), E); + } + case UO_LNot: { + bool bres; + if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info)) + return false; + return Success(!bres, E); + } + } +} + +/// HandleCast - This is used to evaluate implicit or explicit casts where the +/// result type is integer. 
+bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { + const Expr *SubExpr = E->getSubExpr(); + QualType DestType = E->getType(); + QualType SrcType = SubExpr->getType(); + + switch (E->getCastKind()) { + case CK_BaseToDerived: + case CK_DerivedToBase: + case CK_UncheckedDerivedToBase: + case CK_Dynamic: + case CK_ToUnion: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_NullToPointer: + case CK_NullToMemberPointer: + case CK_BaseToDerivedMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_ReinterpretMemberPointer: + case CK_ConstructorConversion: + case CK_IntegralToPointer: + case CK_ToVoid: + case CK_VectorSplat: + case CK_IntegralToFloating: + case CK_FloatingCast: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_ObjCObjectLValueCast: + case CK_FloatingRealToComplex: + case CK_FloatingComplexToReal: + case CK_FloatingComplexCast: + case CK_FloatingComplexToIntegralComplex: + case CK_IntegralRealToComplex: + case CK_IntegralComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_BuiltinFnToFnPtr: + case CK_ZeroToOCLEvent: + case CK_NonAtomicToAtomic: + llvm_unreachable("invalid cast kind for integral value"); + + case CK_BitCast: + case CK_Dependent: + case CK_LValueBitCast: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + return Error(E); + + case CK_UserDefinedConversion: + case CK_LValueToRValue: + case CK_AtomicToNonAtomic: + case CK_NoOp: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + + case CK_MemberPointerToBoolean: + case CK_PointerToBoolean: + case CK_IntegralToBoolean: + case CK_FloatingToBoolean: + case CK_FloatingComplexToBoolean: + case CK_IntegralComplexToBoolean: { + bool BoolResult; + if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info)) + return false; + return Success(BoolResult, E); + } + 
+ case CK_IntegralCast: { + if (!Visit(SubExpr)) + return false; + + if (!Result.isInt()) { + // Allow casts of address-of-label differences if they are no-ops + // or narrowing. (The narrowing case isn't actually guaranteed to + // be constant-evaluatable except in some narrow cases which are hard + // to detect here. We let it through on the assumption the user knows + // what they are doing.) + if (Result.isAddrLabelDiff()) + return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType); + // Only allow casts of lvalues if they are lossless. + return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType); + } + + return Success(HandleIntToIntCast(Info, E, DestType, SrcType, + Result.getInt()), E); + } + + case CK_PointerToIntegral: { + CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; + + LValue LV; + if (!EvaluatePointer(SubExpr, LV, Info)) + return false; + + if (LV.getLValueBase()) { + // Only allow based lvalue casts if they are lossless. + // FIXME: Allow a larger integer size than the pointer size, and allow + // narrowing back down to pointer width in subsequent integral casts. + // FIXME: Check integer type's active bits, not its type size. 
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType)) + return Error(E); + + LV.Designator.setInvalid(); + LV.moveInto(Result); + return true; + } + + APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(), + SrcType); + return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E); + } + + case CK_IntegralComplexToReal: { + ComplexValue C; + if (!EvaluateComplex(SubExpr, C, Info)) + return false; + return Success(C.getComplexIntReal(), E); + } + + case CK_FloatingToIntegral: { + APFloat F(0.0); + if (!EvaluateFloat(SubExpr, F, Info)) + return false; + + APSInt Value; + if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value)) + return false; + return Success(Value, E); + } + } + + llvm_unreachable("unknown cast resulting in integral value"); +} + +bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { + if (E->getSubExpr()->getType()->isAnyComplexType()) { + ComplexValue LV; + if (!EvaluateComplex(E->getSubExpr(), LV, Info)) + return false; + if (!LV.isComplexInt()) + return Error(E); + return Success(LV.getComplexIntReal(), E); + } + + return Visit(E->getSubExpr()); +} + +bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { + if (E->getSubExpr()->getType()->isComplexIntegerType()) { + ComplexValue LV; + if (!EvaluateComplex(E->getSubExpr(), LV, Info)) + return false; + if (!LV.isComplexInt()) + return Error(E); + return Success(LV.getComplexIntImag(), E); + } + + VisitIgnoredValue(E->getSubExpr()); + return Success(0, E); +} + +bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) { + return Success(E->getPackLength(), E); +} + +bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) { + return Success(E->getValue(), E); +} + +//===----------------------------------------------------------------------===// +// Float Evaluation +//===----------------------------------------------------------------------===// + +namespace { +class FloatExprEvaluator + : public 
ExprEvaluatorBase<FloatExprEvaluator, bool> { + APFloat &Result; +public: + FloatExprEvaluator(EvalInfo &info, APFloat &result) + : ExprEvaluatorBaseTy(info), Result(result) {} + + bool Success(const APValue &V, const Expr *e) { + Result = V.getFloat(); + return true; + } + + bool ZeroInitialization(const Expr *E) { + Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType())); + return true; + } + + bool VisitCallExpr(const CallExpr *E); + + bool VisitUnaryOperator(const UnaryOperator *E); + bool VisitBinaryOperator(const BinaryOperator *E); + bool VisitFloatingLiteral(const FloatingLiteral *E); + bool VisitCastExpr(const CastExpr *E); + + bool VisitUnaryReal(const UnaryOperator *E); + bool VisitUnaryImag(const UnaryOperator *E); + + // FIXME: Missing: array subscript of vector, member of vector +}; +} // end anonymous namespace + +static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isRealFloatingType()); + return FloatExprEvaluator(Info, Result).Visit(E); +} + +static bool TryEvaluateBuiltinNaN(const ASTContext &Context, + QualType ResultTy, + const Expr *Arg, + bool SNaN, + llvm::APFloat &Result) { + const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); + if (!S) return false; + + const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy); + + llvm::APInt fill; + + // Treat empty strings as if they were zero. 
+ if (S->getString().empty()) + fill = llvm::APInt(32, 0); + else if (S->getString().getAsInteger(0, fill)) + return false; + + if (SNaN) + Result = llvm::APFloat::getSNaN(Sem, false, &fill); + else + Result = llvm::APFloat::getQNaN(Sem, false, &fill); + return true; +} + +bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { + switch (E->isBuiltinCall()) { + default: + return ExprEvaluatorBaseTy::VisitCallExpr(E); + + case Builtin::BI__builtin_huge_val: + case Builtin::BI__builtin_huge_valf: + case Builtin::BI__builtin_huge_vall: + case Builtin::BI__builtin_inf: + case Builtin::BI__builtin_inff: + case Builtin::BI__builtin_infl: { + const llvm::fltSemantics &Sem = + Info.Ctx.getFloatTypeSemantics(E->getType()); + Result = llvm::APFloat::getInf(Sem); + return true; + } + + case Builtin::BI__builtin_nans: + case Builtin::BI__builtin_nansf: + case Builtin::BI__builtin_nansl: + if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), + true, Result)) + return Error(E); + return true; + + case Builtin::BI__builtin_nan: + case Builtin::BI__builtin_nanf: + case Builtin::BI__builtin_nanl: + // If this is __builtin_nan() turn this into a nan, otherwise we + // can't constant fold it. 
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), + false, Result)) + return Error(E); + return true; + + case Builtin::BI__builtin_fabs: + case Builtin::BI__builtin_fabsf: + case Builtin::BI__builtin_fabsl: + if (!EvaluateFloat(E->getArg(0), Result, Info)) + return false; + + if (Result.isNegative()) + Result.changeSign(); + return true; + + // FIXME: Builtin::BI__builtin_powi + // FIXME: Builtin::BI__builtin_powif + // FIXME: Builtin::BI__builtin_powil + + case Builtin::BI__builtin_copysign: + case Builtin::BI__builtin_copysignf: + case Builtin::BI__builtin_copysignl: { + APFloat RHS(0.); + if (!EvaluateFloat(E->getArg(0), Result, Info) || + !EvaluateFloat(E->getArg(1), RHS, Info)) + return false; + Result.copySign(RHS); + return true; + } + } +} + +bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { + if (E->getSubExpr()->getType()->isAnyComplexType()) { + ComplexValue CV; + if (!EvaluateComplex(E->getSubExpr(), CV, Info)) + return false; + Result = CV.FloatReal; + return true; + } + + return Visit(E->getSubExpr()); +} + +bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { + if (E->getSubExpr()->getType()->isAnyComplexType()) { + ComplexValue CV; + if (!EvaluateComplex(E->getSubExpr(), CV, Info)) + return false; + Result = CV.FloatImag; + return true; + } + + VisitIgnoredValue(E->getSubExpr()); + const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType()); + Result = llvm::APFloat::getZero(Sem); + return true; +} + +bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { + switch (E->getOpcode()) { + default: return Error(E); + case UO_Plus: + return EvaluateFloat(E->getSubExpr(), Result, Info); + case UO_Minus: + if (!EvaluateFloat(E->getSubExpr(), Result, Info)) + return false; + Result.changeSign(); + return true; + } +} + +bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { + if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) + return 
ExprEvaluatorBaseTy::VisitBinaryOperator(E); + + APFloat RHS(0.0); + bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info); + if (!LHSOK && !Info.keepEvaluatingAfterFailure()) + return false; + return EvaluateFloat(E->getRHS(), RHS, Info) && LHSOK && + handleFloatFloatBinOp(Info, E, Result, E->getOpcode(), RHS); +} + +bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) { + Result = E->getValue(); + return true; +} + +bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) { + const Expr* SubExpr = E->getSubExpr(); + + switch (E->getCastKind()) { + default: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + + case CK_IntegralToFloating: { + APSInt IntResult; + return EvaluateInteger(SubExpr, IntResult, Info) && + HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult, + E->getType(), Result); + } + + case CK_FloatingCast: { + if (!Visit(SubExpr)) + return false; + return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(), + Result); + } + + case CK_FloatingComplexToReal: { + ComplexValue V; + if (!EvaluateComplex(SubExpr, V, Info)) + return false; + Result = V.getComplexFloatReal(); + return true; + } + } +} + +//===----------------------------------------------------------------------===// +// Complex Evaluation (for float and integer) +//===----------------------------------------------------------------------===// + +namespace { +class ComplexExprEvaluator + : public ExprEvaluatorBase<ComplexExprEvaluator, bool> { + ComplexValue &Result; + +public: + ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result) + : ExprEvaluatorBaseTy(info), Result(Result) {} + + bool Success(const APValue &V, const Expr *e) { + Result.setFrom(V); + return true; + } + + bool ZeroInitialization(const Expr *E); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + bool VisitImaginaryLiteral(const ImaginaryLiteral 
*E); + bool VisitCastExpr(const CastExpr *E); + bool VisitBinaryOperator(const BinaryOperator *E); + bool VisitUnaryOperator(const UnaryOperator *E); + bool VisitInitListExpr(const InitListExpr *E); +}; +} // end anonymous namespace + +static bool EvaluateComplex(const Expr *E, ComplexValue &Result, + EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isAnyComplexType()); + return ComplexExprEvaluator(Info, Result).Visit(E); +} + +bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) { + QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); + if (ElemTy->isRealFloatingType()) { + Result.makeComplexFloat(); + APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy)); + Result.FloatReal = Zero; + Result.FloatImag = Zero; + } else { + Result.makeComplexInt(); + APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy); + Result.IntReal = Zero; + Result.IntImag = Zero; + } + return true; +} + +bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) { + const Expr* SubExpr = E->getSubExpr(); + + if (SubExpr->getType()->isRealFloatingType()) { + Result.makeComplexFloat(); + APFloat &Imag = Result.FloatImag; + if (!EvaluateFloat(SubExpr, Imag, Info)) + return false; + + Result.FloatReal = APFloat(Imag.getSemantics()); + return true; + } else { + assert(SubExpr->getType()->isIntegerType() && + "Unexpected imaginary literal."); + + Result.makeComplexInt(); + APSInt &Imag = Result.IntImag; + if (!EvaluateInteger(SubExpr, Imag, Info)) + return false; + + Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned()); + return true; + } +} + +bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { + + switch (E->getCastKind()) { + case CK_BitCast: + case CK_BaseToDerived: + case CK_DerivedToBase: + case CK_UncheckedDerivedToBase: + case CK_Dynamic: + case CK_ToUnion: + case CK_ArrayToPointerDecay: + case CK_FunctionToPointerDecay: + case CK_NullToPointer: + case CK_NullToMemberPointer: + case 
CK_BaseToDerivedMemberPointer: + case CK_DerivedToBaseMemberPointer: + case CK_MemberPointerToBoolean: + case CK_ReinterpretMemberPointer: + case CK_ConstructorConversion: + case CK_IntegralToPointer: + case CK_PointerToIntegral: + case CK_PointerToBoolean: + case CK_ToVoid: + case CK_VectorSplat: + case CK_IntegralCast: + case CK_IntegralToBoolean: + case CK_IntegralToFloating: + case CK_FloatingToIntegral: + case CK_FloatingToBoolean: + case CK_FloatingCast: + case CK_CPointerToObjCPointerCast: + case CK_BlockPointerToObjCPointerCast: + case CK_AnyPointerToBlockPointerCast: + case CK_ObjCObjectLValueCast: + case CK_FloatingComplexToReal: + case CK_FloatingComplexToBoolean: + case CK_IntegralComplexToReal: + case CK_IntegralComplexToBoolean: + case CK_ARCProduceObject: + case CK_ARCConsumeObject: + case CK_ARCReclaimReturnedObject: + case CK_ARCExtendBlockObject: + case CK_CopyAndAutoreleaseBlockObject: + case CK_BuiltinFnToFnPtr: + case CK_ZeroToOCLEvent: + case CK_NonAtomicToAtomic: + llvm_unreachable("invalid cast kind for complex value"); + + case CK_LValueToRValue: + case CK_AtomicToNonAtomic: + case CK_NoOp: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + + case CK_Dependent: + case CK_LValueBitCast: + case CK_UserDefinedConversion: + return Error(E); + + case CK_FloatingRealToComplex: { + APFloat &Real = Result.FloatReal; + if (!EvaluateFloat(E->getSubExpr(), Real, Info)) + return false; + + Result.makeComplexFloat(); + Result.FloatImag = APFloat(Real.getSemantics()); + return true; + } + + case CK_FloatingComplexCast: { + if (!Visit(E->getSubExpr())) + return false; + + QualType To = E->getType()->getAs<ComplexType>()->getElementType(); + QualType From + = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType(); + + return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) && + HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag); + } + + case CK_FloatingComplexToIntegralComplex: { + if (!Visit(E->getSubExpr())) + return false; 
+ + QualType To = E->getType()->getAs<ComplexType>()->getElementType(); + QualType From + = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType(); + Result.makeComplexInt(); + return HandleFloatToIntCast(Info, E, From, Result.FloatReal, + To, Result.IntReal) && + HandleFloatToIntCast(Info, E, From, Result.FloatImag, + To, Result.IntImag); + } + + case CK_IntegralRealToComplex: { + APSInt &Real = Result.IntReal; + if (!EvaluateInteger(E->getSubExpr(), Real, Info)) + return false; + + Result.makeComplexInt(); + Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned()); + return true; + } + + case CK_IntegralComplexCast: { + if (!Visit(E->getSubExpr())) + return false; + + QualType To = E->getType()->getAs<ComplexType>()->getElementType(); + QualType From + = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType(); + + Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal); + Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag); + return true; + } + + case CK_IntegralComplexToFloatingComplex: { + if (!Visit(E->getSubExpr())) + return false; + + QualType To = E->getType()->castAs<ComplexType>()->getElementType(); + QualType From + = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); + Result.makeComplexFloat(); + return HandleIntToFloatCast(Info, E, From, Result.IntReal, + To, Result.FloatReal) && + HandleIntToFloatCast(Info, E, From, Result.IntImag, + To, Result.FloatImag); + } + } + + llvm_unreachable("unknown cast resulting in complex value"); +} + +bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { + if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) + return ExprEvaluatorBaseTy::VisitBinaryOperator(E); + + bool LHSOK = Visit(E->getLHS()); + if (!LHSOK && !Info.keepEvaluatingAfterFailure()) + return false; + + ComplexValue RHS; + if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK) + return false; + + assert(Result.isComplexFloat() == 
RHS.isComplexFloat() && + "Invalid operands to binary operator."); + switch (E->getOpcode()) { + default: return Error(E); + case BO_Add: + if (Result.isComplexFloat()) { + Result.getComplexFloatReal().add(RHS.getComplexFloatReal(), + APFloat::rmNearestTiesToEven); + Result.getComplexFloatImag().add(RHS.getComplexFloatImag(), + APFloat::rmNearestTiesToEven); + } else { + Result.getComplexIntReal() += RHS.getComplexIntReal(); + Result.getComplexIntImag() += RHS.getComplexIntImag(); + } + break; + case BO_Sub: + if (Result.isComplexFloat()) { + Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(), + APFloat::rmNearestTiesToEven); + Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(), + APFloat::rmNearestTiesToEven); + } else { + Result.getComplexIntReal() -= RHS.getComplexIntReal(); + Result.getComplexIntImag() -= RHS.getComplexIntImag(); + } + break; + case BO_Mul: + if (Result.isComplexFloat()) { + ComplexValue LHS = Result; + APFloat &LHS_r = LHS.getComplexFloatReal(); + APFloat &LHS_i = LHS.getComplexFloatImag(); + APFloat &RHS_r = RHS.getComplexFloatReal(); + APFloat &RHS_i = RHS.getComplexFloatImag(); + + APFloat Tmp = LHS_r; + Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven); + Result.getComplexFloatReal() = Tmp; + Tmp = LHS_i; + Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven); + Result.getComplexFloatReal().subtract(Tmp, APFloat::rmNearestTiesToEven); + + Tmp = LHS_r; + Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven); + Result.getComplexFloatImag() = Tmp; + Tmp = LHS_i; + Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven); + Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven); + } else { + ComplexValue LHS = Result; + Result.getComplexIntReal() = + (LHS.getComplexIntReal() * RHS.getComplexIntReal() - + LHS.getComplexIntImag() * RHS.getComplexIntImag()); + Result.getComplexIntImag() = + (LHS.getComplexIntReal() * RHS.getComplexIntImag() + + LHS.getComplexIntImag() * RHS.getComplexIntReal()); + } + break; + case 
BO_Div: + if (Result.isComplexFloat()) { + ComplexValue LHS = Result; + APFloat &LHS_r = LHS.getComplexFloatReal(); + APFloat &LHS_i = LHS.getComplexFloatImag(); + APFloat &RHS_r = RHS.getComplexFloatReal(); + APFloat &RHS_i = RHS.getComplexFloatImag(); + APFloat &Res_r = Result.getComplexFloatReal(); + APFloat &Res_i = Result.getComplexFloatImag(); + + APFloat Den = RHS_r; + Den.multiply(RHS_r, APFloat::rmNearestTiesToEven); + APFloat Tmp = RHS_i; + Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven); + Den.add(Tmp, APFloat::rmNearestTiesToEven); + + Res_r = LHS_r; + Res_r.multiply(RHS_r, APFloat::rmNearestTiesToEven); + Tmp = LHS_i; + Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven); + Res_r.add(Tmp, APFloat::rmNearestTiesToEven); + Res_r.divide(Den, APFloat::rmNearestTiesToEven); + + Res_i = LHS_i; + Res_i.multiply(RHS_r, APFloat::rmNearestTiesToEven); + Tmp = LHS_r; + Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven); + Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven); + Res_i.divide(Den, APFloat::rmNearestTiesToEven); + } else { + if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0) + return Error(E, diag::note_expr_divide_by_zero); + + ComplexValue LHS = Result; + APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() + + RHS.getComplexIntImag() * RHS.getComplexIntImag(); + Result.getComplexIntReal() = + (LHS.getComplexIntReal() * RHS.getComplexIntReal() + + LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den; + Result.getComplexIntImag() = + (LHS.getComplexIntImag() * RHS.getComplexIntReal() - + LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den; + } + break; + } + + return true; +} + +bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { + // Get the operand value into 'Result'. + if (!Visit(E->getSubExpr())) + return false; + + switch (E->getOpcode()) { + default: + return Error(E); + case UO_Extension: + return true; + case UO_Plus: + // The result is always just the subexpr. 
+ return true; + case UO_Minus: + if (Result.isComplexFloat()) { + Result.getComplexFloatReal().changeSign(); + Result.getComplexFloatImag().changeSign(); + } + else { + Result.getComplexIntReal() = -Result.getComplexIntReal(); + Result.getComplexIntImag() = -Result.getComplexIntImag(); + } + return true; + case UO_Not: + if (Result.isComplexFloat()) + Result.getComplexFloatImag().changeSign(); + else + Result.getComplexIntImag() = -Result.getComplexIntImag(); + return true; + } +} + +bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) { + if (E->getNumInits() == 2) { + if (E->getType()->isComplexType()) { + Result.makeComplexFloat(); + if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info)) + return false; + if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info)) + return false; + } else { + Result.makeComplexInt(); + if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info)) + return false; + if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info)) + return false; + } + return true; + } + return ExprEvaluatorBaseTy::VisitInitListExpr(E); +} + +//===----------------------------------------------------------------------===// +// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic +// implicit conversion. 
+//===----------------------------------------------------------------------===// + +namespace { +class AtomicExprEvaluator : + public ExprEvaluatorBase<AtomicExprEvaluator, bool> { + APValue &Result; +public: + AtomicExprEvaluator(EvalInfo &Info, APValue &Result) + : ExprEvaluatorBaseTy(Info), Result(Result) {} + + bool Success(const APValue &V, const Expr *E) { + Result = V; + return true; + } + + bool ZeroInitialization(const Expr *E) { + ImplicitValueInitExpr VIE( + E->getType()->castAs<AtomicType>()->getValueType()); + return Evaluate(Result, Info, &VIE); + } + + bool VisitCastExpr(const CastExpr *E) { + switch (E->getCastKind()) { + default: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + case CK_NonAtomicToAtomic: + return Evaluate(Result, Info, E->getSubExpr()); + } + } +}; +} // end anonymous namespace + +static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isAtomicType()); + return AtomicExprEvaluator(Info, Result).Visit(E); +} + +//===----------------------------------------------------------------------===// +// Void expression evaluation, primarily for a cast to void on the LHS of a +// comma operator +//===----------------------------------------------------------------------===// + +namespace { +class VoidExprEvaluator + : public ExprEvaluatorBase<VoidExprEvaluator, bool> { +public: + VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {} + + bool Success(const APValue &V, const Expr *e) { return true; } + + bool VisitCastExpr(const CastExpr *E) { + switch (E->getCastKind()) { + default: + return ExprEvaluatorBaseTy::VisitCastExpr(E); + case CK_ToVoid: + VisitIgnoredValue(E->getSubExpr()); + return true; + } + } +}; +} // end anonymous namespace + +static bool EvaluateVoid(const Expr *E, EvalInfo &Info) { + assert(E->isRValue() && E->getType()->isVoidType()); + return VoidExprEvaluator(Info).Visit(E); +} + 
//===----------------------------------------------------------------------===//
// Top level Expr::EvaluateAsRValue method.
//===----------------------------------------------------------------------===//

/// Evaluate E into Result, dispatching on E's value category and type to the
/// matching specialized evaluator. Returns false (with diagnostics recorded in
/// Info) on failure.
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
  // In C, function designators are not lvalues, but we evaluate them as if they
  // are.
  QualType T = E->getType();
  if (E->isGLValue() || T->isFunctionType()) {
    // GLValues (and function designators) evaluate to an lvalue denotation.
    LValue LV;
    if (!EvaluateLValue(E, LV, Info))
      return false;
    LV.moveInto(Result);
  } else if (T->isVectorType()) {
    if (!EvaluateVector(E, Result, Info))
      return false;
  } else if (T->isIntegralOrEnumerationType()) {
    if (!IntExprEvaluator(Info, Result).Visit(E))
      return false;
  } else if (T->hasPointerRepresentation()) {
    // Pointer-like rvalues are represented as lvalue denotations too.
    LValue LV;
    if (!EvaluatePointer(E, LV, Info))
      return false;
    LV.moveInto(Result);
  } else if (T->isRealFloatingType()) {
    llvm::APFloat F(0.0);
    if (!EvaluateFloat(E, F, Info))
      return false;
    Result = APValue(F);
  } else if (T->isAnyComplexType()) {
    ComplexValue C;
    if (!EvaluateComplex(E, C, Info))
      return false;
    C.moveInto(Result);
  } else if (T->isMemberPointerType()) {
    MemberPtr P;
    if (!EvaluateMemberPointer(E, P, Info))
      return false;
    P.moveInto(Result);
    return true;
  } else if (T->isArrayType()) {
    // Arrays are evaluated in place into a temporary owned by the current
    // call frame, then copied out.
    LValue LV;
    LV.set(E, Info.CurrentCall->Index);
    APValue &Value = Info.CurrentCall->createTemporary(E, false);
    if (!EvaluateArray(E, LV, Value, Info))
      return false;
    Result = Value;
  } else if (T->isRecordType()) {
    // Records are likewise evaluated in place via a call-frame temporary.
    LValue LV;
    LV.set(E, Info.CurrentCall->Index);
    APValue &Value = Info.CurrentCall->createTemporary(E, false);
    if (!EvaluateRecord(E, LV, Value, Info))
      return false;
    Result = Value;
  } else if (T->isVoidType()) {
    // Outside C++11, note that a void expression is not of literal type
    // (core-constant-expression diagnostic), but still evaluate side effects.
    if (!Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, diag::note_constexpr_nonliteral)
        << E->getType();
    if (!EvaluateVoid(E, Info))
      return false;
  } else if (T->isAtomicType()) {
    if (!EvaluateAtomic(E, Result, Info))
      return false;
  } else if (Info.getLangOpts().CPlusPlus11) {
    Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
    return false;
  } else {
    Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  return true;
}

/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
/// cases, the in-place evaluation is essential, since later initializers for
/// an object can indirectly refer to subobjects which were initialized earlier.
/// \param This the object (subobject designator) being initialized.
/// \param AllowNonLiteralTypes if false, non-literal types are rejected up
///        front via CheckLiteralType.
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
                            const Expr *E, bool AllowNonLiteralTypes) {
  if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
    return false;

  if (E->isRValue()) {
    // Evaluate arrays and record types in-place, so that later initializers can
    // refer to earlier-initialized members of the object.
    if (E->getType()->isArrayType())
      return EvaluateArray(E, This, Result, Info);
    else if (E->getType()->isRecordType())
      return EvaluateRecord(E, This, Result, Info);
  }

  // For any other type, in-place evaluation is unimportant.
  return Evaluate(Result, Info, E);
}

/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
  if (!CheckLiteralType(Info, E))
    return false;

  if (!::Evaluate(Result, Info, E))
    return false;

  if (E->isGLValue()) {
    // We evaluated a glvalue; load from the resulting lvalue to get an rvalue.
    LValue LV;
    LV.setFrom(Info.Ctx, Result);
    if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
      return false;
  }

  // Check this core constant expression is a constant expression.
  return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
}

/// Fast-path attempt at folding Exp without building full evaluation state.
/// Returns true when a decision was reached (IsConst then says whether Exp
/// folded); returns false when the caller must do a full evaluation.
static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
                                 const ASTContext &Ctx, bool &IsConst) {
  // Fast-path evaluations of integer literals, since we sometimes see files
  // containing vast quantities of these.
  if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(Exp)) {
    Result.Val = APValue(APSInt(L->getValue(),
                                L->getType()->isUnsignedIntegerType()));
    IsConst = true;
    return true;
  }

  // FIXME: Evaluating values of large array and record types can cause
  // performance problems. Only do so in C++11 for now.
  if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
                          Exp->getType()->isRecordType()) &&
      !Ctx.getLangOpts().CPlusPlus11) {
    IsConst = false;
    return true;
  }
  return false;
}


/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result.
bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
  // Try the literal/large-aggregate fast path before constructing EvalInfo.
  bool IsConst;
  if (FastEvaluateAsRValue(this, Result, Ctx, IsConst))
    return IsConst;

  // Folding mode: side effects are noted in Result but do not abort evaluation.
  EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
  return ::EvaluateAsRValue(Info, this, Result.Val);
}

/// Fold this expression to an rvalue and convert it to bool.
/// Returns false if either the fold or the conversion fails.
bool Expr::EvaluateAsBooleanCondition(bool &Result,
                                      const ASTContext &Ctx) const {
  EvalResult Scratch;
  return EvaluateAsRValue(Scratch, Ctx) &&
         HandleConversionToBool(Scratch.Val, Result);
}

/// Fold this expression to an integer rvalue.
/// \param AllowSideEffects when zero (the "no side effects" kind), a fold that
///        noted side effects is rejected.
// NOTE(review): SideEffectsKind is tested in a boolean context below — any
// nonzero kind permits side effects; confirm against the enum's definition.
bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
                         SideEffectsKind AllowSideEffects) const {
  if (!getType()->isIntegralOrEnumerationType())
    return false;

  EvalResult ExprResult;
  if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
      (!AllowSideEffects && ExprResult.HasSideEffects))
    return false;

  Result = ExprResult.Val.getInt();
  return true;
}

/// Fold this glvalue expression to an lvalue denotation (base + designator).
/// Fails if evaluation had side effects or the result is not a permitted
/// lvalue constant expression for a reference of this type.
bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
  EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);

  LValue LV;
  if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
      !CheckLValueConstantExpression(Info, getExprLoc(),
                                     Ctx.getLValueReferenceType(getType()), LV))
    return false;

  LV.moveInto(Result.Val);
  return true;
}

/// Evaluate this expression as the initializer of variable VD, producing the
/// variable's value in Value. Diagnostic notes explaining a failure are
/// appended to Notes. Non-literal types are permitted (this is an
/// initializer, not a core constant expression check).
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
                                 const VarDecl *VD,
                            SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
  // FIXME: Evaluating initializers for large array and record types can cause
  // performance problems. Only do so in C++11 for now.
  if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
      !Ctx.getLangOpts().CPlusPlus11)
    return false;

  Expr::EvalStatus EStatus;
  EStatus.Diag = &Notes;

  EvalInfo InitInfo(Ctx, EStatus, EvalInfo::EM_ConstantFold);
  // Mark VD as the object under construction so self-references resolve.
  InitInfo.setEvaluatingDecl(VD, Value);

  LValue LVal;
  LVal.set(VD);

  // C++11 [basic.start.init]p2:
  //  Variables with static storage duration or thread storage duration shall be
  //  zero-initialized before any other initialization takes place.
  // This behavior is not present in C.
  if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
      !VD->getType()->isReferenceType()) {
    ImplicitValueInitExpr VIE(VD->getType());
    if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE,
                         /*AllowNonLiteralTypes=*/true))
      return false;
  }

  // Now run the actual initializer in place over the zero-initialized value.
  if (!EvaluateInPlace(Value, InitInfo, LVal, this,
                       /*AllowNonLiteralTypes=*/true) ||
      EStatus.HasSideEffects)
    return false;

  return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
                                 Value);
}

/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
/// constant folded, but discard the result.
+bool Expr::isEvaluatable(const ASTContext &Ctx) const { + EvalResult Result; + return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects; +} + +APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx, + SmallVectorImpl<PartialDiagnosticAt> *Diag) const { + EvalResult EvalResult; + EvalResult.Diag = Diag; + bool Result = EvaluateAsRValue(EvalResult, Ctx); + (void)Result; + assert(Result && "Could not evaluate expression"); + assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer"); + + return EvalResult.Val.getInt(); +} + +void Expr::EvaluateForOverflow(const ASTContext &Ctx) const { + bool IsConst; + EvalResult EvalResult; + if (!FastEvaluateAsRValue(this, EvalResult, Ctx, IsConst)) { + EvalInfo Info(Ctx, EvalResult, EvalInfo::EM_EvaluateForOverflow); + (void)::EvaluateAsRValue(Info, this, EvalResult.Val); + } +} + +bool Expr::EvalResult::isGlobalLValue() const { + assert(Val.isLValue()); + return IsGlobalLValue(Val.getLValueBase()); +} + + +/// isIntegerConstantExpr - this recursive routine will test if an expression is +/// an integer constant expression. + +/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero, +/// comma, etc + +// CheckICE - This function does the fundamental ICE checking: the returned +// ICEDiag contains an ICEKind indicating whether the expression is an ICE, +// and a (possibly null) SourceLocation indicating the location of the problem. +// +// Note that to reduce code duplication, this helper does no evaluation +// itself; the caller checks whether the expression is evaluatable, and +// in the rare cases where CheckICE actually cares about the evaluated +// value, it calls into Evalute. + +namespace { + +enum ICEKind { + /// This expression is an ICE. + IK_ICE, + /// This expression is not an ICE, but if it isn't evaluated, it's + /// a legal subexpression for an ICE. This return value is used to handle + /// the comma operator in C99 mode, and non-constant subexpressions. 
+ IK_ICEIfUnevaluated, + /// This expression is not an ICE, and is not a legal subexpression for one. + IK_NotICE +}; + +struct ICEDiag { + ICEKind Kind; + SourceLocation Loc; + + ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {} +}; + +} + +static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); } + +static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; } + +static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) { + Expr::EvalResult EVResult; + if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects || + !EVResult.Val.isInt()) + return ICEDiag(IK_NotICE, E->getLocStart()); + + return NoDiag(); +} + +static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { + assert(!E->isValueDependent() && "Should not see value dependent exprs!"); + if (!E->getType()->isIntegralOrEnumerationType()) + return ICEDiag(IK_NotICE, E->getLocStart()); + + switch (E->getStmtClass()) { +#define ABSTRACT_STMT(Node) +#define STMT(Node, Base) case Expr::Node##Class: +#define EXPR(Node, Base) +#include "clang/AST/StmtNodes.inc" + case Expr::PredefinedExprClass: + case Expr::FloatingLiteralClass: + case Expr::ImaginaryLiteralClass: + case Expr::StringLiteralClass: + case Expr::ArraySubscriptExprClass: + case Expr::MemberExprClass: + case Expr::CompoundAssignOperatorClass: + case Expr::CompoundLiteralExprClass: + case Expr::ExtVectorElementExprClass: + case Expr::DesignatedInitExprClass: + case Expr::ImplicitValueInitExprClass: + case Expr::ParenListExprClass: + case Expr::VAArgExprClass: + case Expr::AddrLabelExprClass: + case Expr::StmtExprClass: + case Expr::CXXMemberCallExprClass: + case Expr::CUDAKernelCallExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXTypeidExprClass: + case Expr::CXXUuidofExprClass: + case Expr::MSPropertyRefExprClass: + case Expr::CXXNullPtrLiteralExprClass: + case Expr::UserDefinedLiteralClass: + case Expr::CXXThisExprClass: + case Expr::CXXThrowExprClass: + case 
Expr::CXXNewExprClass: + case Expr::CXXDeleteExprClass: + case Expr::CXXPseudoDestructorExprClass: + case Expr::UnresolvedLookupExprClass: + case Expr::DependentScopeDeclRefExprClass: + case Expr::CXXConstructExprClass: + case Expr::CXXStdInitializerListExprClass: + case Expr::CXXBindTemporaryExprClass: + case Expr::ExprWithCleanupsClass: + case Expr::CXXTemporaryObjectExprClass: + case Expr::CXXUnresolvedConstructExprClass: + case Expr::CXXDependentScopeMemberExprClass: + case Expr::UnresolvedMemberExprClass: + case Expr::ObjCStringLiteralClass: + case Expr::ObjCBoxedExprClass: + case Expr::ObjCArrayLiteralClass: + case Expr::ObjCDictionaryLiteralClass: + case Expr::ObjCEncodeExprClass: + case Expr::ObjCMessageExprClass: + case Expr::ObjCSelectorExprClass: + case Expr::ObjCProtocolExprClass: + case Expr::ObjCIvarRefExprClass: + case Expr::ObjCPropertyRefExprClass: + case Expr::ObjCSubscriptRefExprClass: + case Expr::ObjCIsaExprClass: + case Expr::ShuffleVectorExprClass: + case Expr::ConvertVectorExprClass: + case Expr::BlockExprClass: + case Expr::NoStmtClass: + case Expr::OpaqueValueExprClass: + case Expr::PackExpansionExprClass: + case Expr::SubstNonTypeTemplateParmPackExprClass: + case Expr::FunctionParmPackExprClass: + case Expr::AsTypeExprClass: + case Expr::ObjCIndirectCopyRestoreExprClass: + case Expr::MaterializeTemporaryExprClass: + case Expr::PseudoObjectExprClass: + case Expr::AtomicExprClass: + case Expr::InitListExprClass: + case Expr::LambdaExprClass: + return ICEDiag(IK_NotICE, E->getLocStart()); + + case Expr::SizeOfPackExprClass: + case Expr::GNUNullExprClass: + // GCC considers the GNU __null value to be an integral constant expression. 
+ return NoDiag(); + + case Expr::SubstNonTypeTemplateParmExprClass: + return + CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx); + + case Expr::ParenExprClass: + return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx); + case Expr::GenericSelectionExprClass: + return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx); + case Expr::IntegerLiteralClass: + case Expr::CharacterLiteralClass: + case Expr::ObjCBoolLiteralExprClass: + case Expr::CXXBoolLiteralExprClass: + case Expr::CXXScalarValueInitExprClass: + case Expr::UnaryTypeTraitExprClass: + case Expr::BinaryTypeTraitExprClass: + case Expr::TypeTraitExprClass: + case Expr::ArrayTypeTraitExprClass: + case Expr::ExpressionTraitExprClass: + case Expr::CXXNoexceptExprClass: + return NoDiag(); + case Expr::CallExprClass: + case Expr::CXXOperatorCallExprClass: { + // C99 6.6/3 allows function calls within unevaluated subexpressions of + // constant expressions, but they can never be ICEs because an ICE cannot + // contain an operand of (pointer to) function type. + const CallExpr *CE = cast<CallExpr>(E); + if (CE->isBuiltinCall()) + return CheckEvalInICE(E, Ctx); + return ICEDiag(IK_NotICE, E->getLocStart()); + } + case Expr::DeclRefExprClass: { + if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl())) + return NoDiag(); + const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl()); + if (Ctx.getLangOpts().CPlusPlus && + D && IsConstNonVolatile(D->getType())) { + // Parameter variables are never constants. Without this check, + // getAnyInitializer() can find a default argument, which leads + // to chaos. + if (isa<ParmVarDecl>(D)) + return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation()); + + // C++ 7.1.5.1p2 + // A variable of non-volatile const-qualified integral or enumeration + // type initialized by an ICE can be used in ICEs. 
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) { + if (!Dcl->getType()->isIntegralOrEnumerationType()) + return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation()); + + const VarDecl *VD; + // Look for a declaration of this variable that has an initializer, and + // check whether it is an ICE. + if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE()) + return NoDiag(); + else + return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation()); + } + } + return ICEDiag(IK_NotICE, E->getLocStart()); + } + case Expr::UnaryOperatorClass: { + const UnaryOperator *Exp = cast<UnaryOperator>(E); + switch (Exp->getOpcode()) { + case UO_PostInc: + case UO_PostDec: + case UO_PreInc: + case UO_PreDec: + case UO_AddrOf: + case UO_Deref: + // C99 6.6/3 allows increment and decrement within unevaluated + // subexpressions of constant expressions, but they can never be ICEs + // because an ICE cannot contain an lvalue operand. + return ICEDiag(IK_NotICE, E->getLocStart()); + case UO_Extension: + case UO_LNot: + case UO_Plus: + case UO_Minus: + case UO_Not: + case UO_Real: + case UO_Imag: + return CheckICE(Exp->getSubExpr(), Ctx); + } + + // OffsetOf falls through here. + } + case Expr::OffsetOfExprClass: { + // Note that per C99, offsetof must be an ICE. And AFAIK, using + // EvaluateAsRValue matches the proposed gcc behavior for cases like + // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect + // compliance: we should warn earlier for offsetof expressions with + // array subscripts that aren't ICEs, and if the array subscripts + // are ICEs, the value of the offsetof must be an integer constant. 
+ return CheckEvalInICE(E, Ctx); + } + case Expr::UnaryExprOrTypeTraitExprClass: { + const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E); + if ((Exp->getKind() == UETT_SizeOf) && + Exp->getTypeOfArgument()->isVariableArrayType()) + return ICEDiag(IK_NotICE, E->getLocStart()); + return NoDiag(); + } + case Expr::BinaryOperatorClass: { + const BinaryOperator *Exp = cast<BinaryOperator>(E); + switch (Exp->getOpcode()) { + case BO_PtrMemD: + case BO_PtrMemI: + case BO_Assign: + case BO_MulAssign: + case BO_DivAssign: + case BO_RemAssign: + case BO_AddAssign: + case BO_SubAssign: + case BO_ShlAssign: + case BO_ShrAssign: + case BO_AndAssign: + case BO_XorAssign: + case BO_OrAssign: + // C99 6.6/3 allows assignments within unevaluated subexpressions of + // constant expressions, but they can never be ICEs because an ICE cannot + // contain an lvalue operand. + return ICEDiag(IK_NotICE, E->getLocStart()); + + case BO_Mul: + case BO_Div: + case BO_Rem: + case BO_Add: + case BO_Sub: + case BO_Shl: + case BO_Shr: + case BO_LT: + case BO_GT: + case BO_LE: + case BO_GE: + case BO_EQ: + case BO_NE: + case BO_And: + case BO_Xor: + case BO_Or: + case BO_Comma: { + ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx); + ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx); + if (Exp->getOpcode() == BO_Div || + Exp->getOpcode() == BO_Rem) { + // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure + // we don't evaluate one. 
+ if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) { + llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx); + if (REval == 0) + return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart()); + if (REval.isSigned() && REval.isAllOnesValue()) { + llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx); + if (LEval.isMinSignedValue()) + return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart()); + } + } + } + if (Exp->getOpcode() == BO_Comma) { + if (Ctx.getLangOpts().C99) { + // C99 6.6p3 introduces a strange edge case: comma can be in an ICE + // if it isn't evaluated. + if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) + return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart()); + } else { + // In both C89 and C++, commas in ICEs are illegal. + return ICEDiag(IK_NotICE, E->getLocStart()); + } + } + return Worst(LHSResult, RHSResult); + } + case BO_LAnd: + case BO_LOr: { + ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx); + ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx); + if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) { + // Rare case where the RHS has a comma "side-effect"; we need + // to actually check the condition to see whether the side + // with the comma is evaluated. 
+ if ((Exp->getOpcode() == BO_LAnd) != + (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0)) + return RHSResult; + return NoDiag(); + } + + return Worst(LHSResult, RHSResult); + } + } + } + case Expr::ImplicitCastExprClass: + case Expr::CStyleCastExprClass: + case Expr::CXXFunctionalCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + case Expr::ObjCBridgedCastExprClass: { + const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr(); + if (isa<ExplicitCastExpr>(E)) { + if (const FloatingLiteral *FL + = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) { + unsigned DestWidth = Ctx.getIntWidth(E->getType()); + bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType(); + APSInt IgnoredVal(DestWidth, !DestSigned); + bool Ignored; + // If the value does not fit in the destination type, the behavior is + // undefined, so we are not required to treat it as a constant + // expression. + if (FL->getValue().convertToInteger(IgnoredVal, + llvm::APFloat::rmTowardZero, + &Ignored) & APFloat::opInvalidOp) + return ICEDiag(IK_NotICE, E->getLocStart()); + return NoDiag(); + } + } + switch (cast<CastExpr>(E)->getCastKind()) { + case CK_LValueToRValue: + case CK_AtomicToNonAtomic: + case CK_NonAtomicToAtomic: + case CK_NoOp: + case CK_IntegralToBoolean: + case CK_IntegralCast: + return CheckICE(SubExpr, Ctx); + default: + return ICEDiag(IK_NotICE, E->getLocStart()); + } + } + case Expr::BinaryConditionalOperatorClass: { + const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E); + ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx); + if (CommonResult.Kind == IK_NotICE) return CommonResult; + ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx); + if (FalseResult.Kind == IK_NotICE) return FalseResult; + if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult; + if (FalseResult.Kind == IK_ICEIfUnevaluated && + Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) 
return NoDiag(); + return FalseResult; + } + case Expr::ConditionalOperatorClass: { + const ConditionalOperator *Exp = cast<ConditionalOperator>(E); + // If the condition (ignoring parens) is a __builtin_constant_p call, + // then only the true side is actually considered in an integer constant + // expression, and it is fully evaluated. This is an important GNU + // extension. See GCC PR38377 for discussion. + if (const CallExpr *CallCE + = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts())) + if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p) + return CheckEvalInICE(E, Ctx); + ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx); + if (CondResult.Kind == IK_NotICE) + return CondResult; + + ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx); + ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx); + + if (TrueResult.Kind == IK_NotICE) + return TrueResult; + if (FalseResult.Kind == IK_NotICE) + return FalseResult; + if (CondResult.Kind == IK_ICEIfUnevaluated) + return CondResult; + if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE) + return NoDiag(); + // Rare case where the diagnostics depend on which side is evaluated + // Note that if we get here, CondResult is 0, and at least one of + // TrueResult and FalseResult is non-zero. + if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0) + return FalseResult; + return TrueResult; + } + case Expr::CXXDefaultArgExprClass: + return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx); + case Expr::CXXDefaultInitExprClass: + return CheckICE(cast<CXXDefaultInitExpr>(E)->getExpr(), Ctx); + case Expr::ChooseExprClass: { + return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx); + } + } + + llvm_unreachable("Invalid StmtClass!"); +} + +/// Evaluate an expression as a C++11 integral constant expression. 
+static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx, + const Expr *E, + llvm::APSInt *Value, + SourceLocation *Loc) { + if (!E->getType()->isIntegralOrEnumerationType()) { + if (Loc) *Loc = E->getExprLoc(); + return false; + } + + APValue Result; + if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc)) + return false; + + assert(Result.isInt() && "pointer cast to int is not an ICE"); + if (Value) *Value = Result.getInt(); + return true; +} + +bool Expr::isIntegerConstantExpr(const ASTContext &Ctx, + SourceLocation *Loc) const { + if (Ctx.getLangOpts().CPlusPlus11) + return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, 0, Loc); + + ICEDiag D = CheckICE(this, Ctx); + if (D.Kind != IK_ICE) { + if (Loc) *Loc = D.Loc; + return false; + } + return true; +} + +bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx, + SourceLocation *Loc, bool isEvaluated) const { + if (Ctx.getLangOpts().CPlusPlus11) + return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc); + + if (!isIntegerConstantExpr(Ctx, Loc)) + return false; + if (!EvaluateAsInt(Value, Ctx)) + llvm_unreachable("ICE cannot be evaluated!"); + return true; +} + +bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const { + return CheckICE(this, Ctx).Kind == IK_ICE; +} + +bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result, + SourceLocation *Loc) const { + // We support this checking in C++98 mode in order to diagnose compatibility + // issues. + assert(Ctx.getLangOpts().CPlusPlus); + + // Build evaluation settings. + Expr::EvalStatus Status; + SmallVector<PartialDiagnosticAt, 8> Diags; + Status.Diag = &Diags; + EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression); + + APValue Scratch; + bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch); + + if (!Diags.empty()) { + IsConstExpr = false; + if (Loc) *Loc = Diags[0].first; + } else if (!IsConstExpr) { + // FIXME: This shouldn't happen. 
+ if (Loc) *Loc = getExprLoc(); + } + + return IsConstExpr; +} + +bool Expr::isPotentialConstantExpr(const FunctionDecl *FD, + SmallVectorImpl< + PartialDiagnosticAt> &Diags) { + // FIXME: It would be useful to check constexpr function templates, but at the + // moment the constant expression evaluator cannot cope with the non-rigorous + // ASTs which we build for dependent expressions. + if (FD->isDependentContext()) + return true; + + Expr::EvalStatus Status; + Status.Diag = &Diags; + + EvalInfo Info(FD->getASTContext(), Status, + EvalInfo::EM_PotentialConstantExpression); + + const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); + const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : 0; + + // Fabricate an arbitrary expression on the stack and pretend that it + // is a temporary being used as the 'this' pointer. + LValue This; + ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy); + This.set(&VIE, Info.CurrentCall->Index); + + ArrayRef<const Expr*> Args; + + SourceLocation Loc = FD->getLocation(); + + APValue Scratch; + if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) { + // Evaluate the call as a constant initializer, to allow the construction + // of objects of non-literal types. + Info.setEvaluatingDecl(This.getLValueBase(), Scratch); + HandleConstructorCall(Loc, This, Args, CD, Info, Scratch); + } else + HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? 
&This : 0, + Args, FD->getBody(), Info, Scratch); + + return Diags.empty(); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp new file mode 100644 index 000000000000..96ebe92ce3ab --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp @@ -0,0 +1,62 @@ +//===- ExternalASTSource.cpp - Abstract External AST Interface --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides the default implementation of the ExternalASTSource +// interface, which enables construction of AST nodes from some external +// source. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ExternalASTSource.h" +#include "clang/AST/DeclarationName.h" + +using namespace clang; + +ExternalASTSource::~ExternalASTSource() { } + +void ExternalASTSource::PrintStats() { } + +Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) { + return 0; +} + +Selector ExternalASTSource::GetExternalSelector(uint32_t ID) { + return Selector(); +} + +uint32_t ExternalASTSource::GetNumExternalSelectors() { + return 0; +} + +Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) { + return 0; +} + +CXXBaseSpecifier * +ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) { + return 0; +} + +bool +ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC, + DeclarationName Name) { + return false; +} + +void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) { +} + +ExternalLoadResult +ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC, + bool (*isKindWeWant)(Decl::Kind), + SmallVectorImpl<Decl*> &Result) { + return ELR_AlreadyLoaded; +} + +void 
ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { } diff --git a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp new file mode 100644 index 000000000000..3d64310dc56b --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp @@ -0,0 +1,164 @@ +//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements CXXRecordDecl::viewInheritance, which +// generates a GraphViz DOT file that depicts the class inheritance +// diagram and then calls Graphviz/dot+gv on it. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/TypeOrdering.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/GraphWriter.h" +#include "llvm/Support/raw_ostream.h" +#include <map> +#include <set> + +using namespace llvm; + +namespace clang { + +/// InheritanceHierarchyWriter - Helper class that writes out a +/// GraphViz file that diagrams the inheritance hierarchy starting at +/// a given C++ class type. Note that we do not use LLVM's +/// GraphWriter, because the interface does not permit us to properly +/// differentiate between uses of types as virtual bases +/// vs. non-virtual bases. 
+class InheritanceHierarchyWriter { + ASTContext& Context; + raw_ostream &Out; + std::map<QualType, int, QualTypeOrdering> DirectBaseCount; + std::set<QualType, QualTypeOrdering> KnownVirtualBases; + +public: + InheritanceHierarchyWriter(ASTContext& Context, raw_ostream& Out) + : Context(Context), Out(Out) { } + + void WriteGraph(QualType Type) { + Out << "digraph \"" << DOT::EscapeString(Type.getAsString()) << "\" {\n"; + WriteNode(Type, false); + Out << "}\n"; + } + +protected: + /// WriteNode - Write out the description of node in the inheritance + /// diagram, which may be a base class or it may be the root node. + void WriteNode(QualType Type, bool FromVirtual); + + /// WriteNodeReference - Write out a reference to the given node, + /// using a unique identifier for each direct base and for the + /// (only) virtual base. + raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual); +}; + +void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) { + QualType CanonType = Context.getCanonicalType(Type); + + if (FromVirtual) { + if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end()) + return; + + // We haven't seen this virtual base before, so display it and + // its bases. + KnownVirtualBases.insert(CanonType); + } + + // Declare the node itself. + Out << " "; + WriteNodeReference(Type, FromVirtual); + + // Give the node a label based on the name of the class. + std::string TypeName = Type.getAsString(); + Out << " [ shape=\"box\", label=\"" << DOT::EscapeString(TypeName); + + // If the name of the class was a typedef or something different + // from the "real" class name, show the real class name in + // parentheses so we don't confuse ourselves. + if (TypeName != CanonType.getAsString()) { + Out << "\\n(" << CanonType.getAsString() << ")"; + } + + // Finished describing the node. + Out << " \"];\n"; + + // Display the base classes. 
+ const CXXRecordDecl *Decl + = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl()); + for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin(); + Base != Decl->bases_end(); ++Base) { + QualType CanonBaseType = Context.getCanonicalType(Base->getType()); + + // If this is not virtual inheritance, bump the direct base + // count for the type. + if (!Base->isVirtual()) + ++DirectBaseCount[CanonBaseType]; + + // Write out the node (if we need to). + WriteNode(Base->getType(), Base->isVirtual()); + + // Write out the edge. + Out << " "; + WriteNodeReference(Type, FromVirtual); + Out << " -> "; + WriteNodeReference(Base->getType(), Base->isVirtual()); + + // Write out edge attributes to show the kind of inheritance. + if (Base->isVirtual()) { + Out << " [ style=\"dashed\" ]"; + } + Out << ";"; + } +} + +/// WriteNodeReference - Write out a reference to the given node, +/// using a unique identifier for each direct base and for the +/// (only) virtual base. +raw_ostream& +InheritanceHierarchyWriter::WriteNodeReference(QualType Type, + bool FromVirtual) { + QualType CanonType = Context.getCanonicalType(Type); + + Out << "Class_" << CanonType.getAsOpaquePtr(); + if (!FromVirtual) + Out << "_" << DirectBaseCount[CanonType]; + return Out; +} + +/// viewInheritance - Display the inheritance hierarchy of this C++ +/// class using GraphViz. +void CXXRecordDecl::viewInheritance(ASTContext& Context) const { + QualType Self = Context.getTypeDeclType(this); + + int FD; + SmallString<128> Filename; + error_code EC = + sys::fs::createTemporaryFile(Self.getAsString(), "dot", FD, Filename); + if (EC) { + llvm::errs() << "Error: " << EC.message() << "\n"; + return; + } + + llvm::errs() << "Writing '" << Filename << "'... "; + + llvm::raw_fd_ostream O(FD, true); + + InheritanceHierarchyWriter Writer(Context, O); + Writer.WriteGraph(Self); + llvm::errs() << " done. 
\n"; + + O.close(); + + // Display the graph + DisplayGraph(Filename); +} + +} diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp new file mode 100644 index 000000000000..578466028ce8 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp @@ -0,0 +1,96 @@ +//===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides C++ AST support targeting the Itanium C++ ABI, which is +// documented at: +// http://www.codesourcery.com/public/cxx-abi/abi.html +// http://www.codesourcery.com/public/cxx-abi/abi-eh.html +// +// It also supports the closely-related ARM C++ ABI, documented at: +// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf +// +//===----------------------------------------------------------------------===// + +#include "CXXABI.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/MangleNumberingContext.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/Type.h" +#include "clang/Basic/TargetInfo.h" + +using namespace clang; + +namespace { + +/// \brief Keeps track of the mangled names of lambda expressions and block +/// literals within a particular context. +class ItaniumNumberingContext : public MangleNumberingContext { + llvm::DenseMap<IdentifierInfo*, unsigned> VarManglingNumbers; + +public: + /// Variable decls are numbered by identifier. 
+ virtual unsigned getManglingNumber(const VarDecl *VD) { + return ++VarManglingNumbers[VD->getIdentifier()]; + } +}; + +class ItaniumCXXABI : public CXXABI { +protected: + ASTContext &Context; +public: + ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { } + + std::pair<uint64_t, unsigned> + getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const { + const TargetInfo &Target = Context.getTargetInfo(); + TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0); + uint64_t Width = Target.getTypeWidth(PtrDiff); + unsigned Align = Target.getTypeAlign(PtrDiff); + if (MPT->getPointeeType()->isFunctionType()) + Width = 2 * Width; + return std::make_pair(Width, Align); + } + + CallingConv getDefaultMethodCallConv(bool isVariadic) const { + return CC_C; + } + + // We cheat and just check that the class has a vtable pointer, and that it's + // only big enough to have a vtable pointer and nothing more (or less). + bool isNearlyEmpty(const CXXRecordDecl *RD) const { + + // Check that the class has a vtable pointer. 
+ if (!RD->isDynamicClass()) + return false; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + CharUnits PointerSize = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + return Layout.getNonVirtualSize() == PointerSize; + } + + virtual MangleNumberingContext *createMangleNumberingContext() const { + return new ItaniumNumberingContext(); + } +}; + +class ARMCXXABI : public ItaniumCXXABI { +public: + ARMCXXABI(ASTContext &Ctx) : ItaniumCXXABI(Ctx) { } +}; +} + +CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) { + return new ItaniumCXXABI(Ctx); +} + +CXXABI *clang::CreateARMCXXABI(ASTContext &Ctx) { + return new ARMCXXABI(Ctx); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp new file mode 100644 index 000000000000..0621d7b1ad86 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp @@ -0,0 +1,3795 @@ +//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// Implements C++ name mangling according to the Itanium C++ ABI, +// which is used in GCC 3.2 and newer (and many compilers that are +// ABI-compatible with GCC): +// +// http://www.codesourcery.com/public/cxx-abi/abi.html +// +//===----------------------------------------------------------------------===// +#include "clang/AST/Mangle.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/ABI.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#define MANGLE_CHECKER 0 + +#if MANGLE_CHECKER +#include <cxxabi.h> +#endif + +using namespace clang; + +namespace { + +/// \brief Retrieve the declaration context that should be used when mangling +/// the given declaration. +static const DeclContext *getEffectiveDeclContext(const Decl *D) { + // The ABI assumes that lambda closure types that occur within + // default arguments live in the context of the function. However, due to + // the way in which Clang parses and creates function declarations, this is + // not the case: the lambda closure type ends up living in the context + // where the function itself resides, because the function declaration itself + // had not yet been created. Fix the context here. + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + if (RD->isLambda()) + if (ParmVarDecl *ContextParam + = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) + return ContextParam->getDeclContext(); + } + + // Perform the same check for block literals. 
  // (Tail of getEffectiveDeclContext, whose signature appears above.)
  // A block whose mangling context is a function parameter is mangled in
  // that parameter's DeclContext rather than in the block's own lexical
  // context (NOTE(review): presumably blocks in default arguments — confirm).
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    if (ParmVarDecl *ContextParam
          = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
      return ContextParam->getDeclContext();
  }

  const DeclContext *DC = D->getDeclContext();
  // Captured regions are transparent for mangling purposes; recurse on the
  // CapturedDecl itself.
  if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC))
    return getEffectiveDeclContext(CD);

  return DC;
}

/// Return the effective parent of a DeclContext, applying the same
/// block/captured-region adjustments as getEffectiveDeclContext.
static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
  return getEffectiveDeclContext(cast<Decl>(DC));
}

/// Is this a context that contains function-local entities: a function,
/// an Objective-C method, or a block?
static bool isLocalContainerContext(const DeclContext *DC) {
  return isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC) || isa<BlockDecl>(DC);
}

/// Walk up the effective contexts of D; if the outermost declaration below
/// a local container (function/method/block) is a record, return it.
/// Returns null when D is not inside a local container, or when the
/// declaration found there is not a record.
static const RecordDecl *GetLocalClassDecl(const Decl *D) {
  const DeclContext *DC = getEffectiveDeclContext(D);
  while (!DC->isNamespace() && !DC->isTranslationUnit()) {
    if (isLocalContainerContext(DC))
      return dyn_cast<RecordDecl>(D);
    D = cast<Decl>(DC);
    DC = getEffectiveDeclContext(D);
  }
  return 0;
}

/// For a function template specialization, mangling decisions are made
/// against the template pattern rather than the specialization.
static const FunctionDecl *getStructor(const FunctionDecl *fn) {
  if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate())
    return ftd->getTemplatedDecl();

  return fn;
}

static const NamedDecl *getStructor(const NamedDecl *decl) {
  const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
  return (fn ? getStructor(fn) : decl);
}

/// Sentinel value meaning "the arity of the call is not known".
static const unsigned UnknownArity = ~0U;

class ItaniumMangleContextImpl : public ItaniumMangleContext {
  /// Ids handed out for otherwise-anonymous tag types, in order of first use.
  llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
  typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
  /// Per-(context, identifier) counters used to invent discriminators for
  /// internal declarations.
  llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
  /// Cache of the discriminator assigned to each internal declaration.
  llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;

public:
  explicit ItaniumMangleContextImpl(ASTContext &Context,
                                    DiagnosticsEngine &Diags)
      : ItaniumMangleContext(Context, Diags) {}

  /// Return a stable id for the given anonymous tag, allocating the next
  /// sequential id on first use.
  uint64_t getAnonymousStructId(const TagDecl *TD) {
    std::pair<llvm::DenseMap<const TagDecl *,
      uint64_t>::iterator, bool> Result =
      AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
    return Result.first->second;
  }

  /// @name Mangler Entry Points
  /// @{

  bool shouldMangleCXXName(const NamedDecl *D);
  void mangleCXXName(const NamedDecl *D, raw_ostream &);
  void mangleThunk(const CXXMethodDecl *MD,
                   const ThunkInfo &Thunk,
                   raw_ostream &);
  void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
                          const ThisAdjustment &ThisAdjustment,
                          raw_ostream &);
  void mangleReferenceTemporary(const VarDecl *D,
                                raw_ostream &);
  void mangleCXXVTable(const CXXRecordDecl *RD,
                       raw_ostream &);
  void mangleCXXVTT(const CXXRecordDecl *RD,
                    raw_ostream &);
  void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
                           const CXXRecordDecl *Type,
                           raw_ostream &);
  void mangleCXXRTTI(QualType T, raw_ostream &);
  void mangleCXXRTTIName(QualType T, raw_ostream &);
  void mangleTypeName(QualType T, raw_ostream &);
  void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
                     raw_ostream &);
  void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
                     raw_ostream &);

  void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &);
  void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out);
  void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out);
  void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &);
  void mangleItaniumThreadLocalWrapper(const VarDecl *D, raw_ostream &);

  /// Compute the discriminator, if any, needed to disambiguate ND from
  /// same-named siblings. Returns true and sets \p disc when a
  /// discriminator is required; returns false when none is needed.
  bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
    // Lambda closure types are already numbered.
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
      if (RD->isLambda())
        return false;

    // Anonymous tags are already numbered.
    if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
      if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
        return false;
    }

    // Use the canonical number for externally visible decls.
    if (ND->isExternallyVisible()) {
      unsigned discriminator = getASTContext().getManglingNumber(ND);
      // A mangling number of 1 means "first of its name": no discriminator.
      if (discriminator == 1)
        return false;
      disc = discriminator - 2;
      return true;
    }

    // Make up a reasonable number for internal decls.
    unsigned &discriminator = Uniquifier[ND];
    if (!discriminator) {
      const DeclContext *DC = getEffectiveDeclContext(ND);
      discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
    }
    if (discriminator == 1)
      return false;
    disc = discriminator-2;
    return true;
  }
  /// @}
};

/// CXXNameMangler - Manage the mangling of a single name.
class CXXNameMangler {
  ItaniumMangleContextImpl &Context;
  raw_ostream &Out;

  /// The "structor" is the top-level declaration being mangled, if
  /// that's not a template specialization; otherwise it's the pattern
  /// for that specialization.
  const NamedDecl *Structor;
  /// The CXXCtorType/CXXDtorType of Structor, or 0 when not a structor.
  unsigned StructorType;

  /// SeqID - The next substitution sequence number.
  unsigned SeqID;

  /// Tracks how deeply nested in function types the mangler currently is,
  /// and whether it is inside the innermost function type's result type.
  /// Both facts are packed into a single word: bit 0 is the in-result-type
  /// flag, the remaining bits are the depth.
  class FunctionTypeDepthState {
    unsigned Bits;

    enum { InResultTypeMask = 1 };

  public:
    FunctionTypeDepthState() : Bits(0) {}

    /// The number of function types we're inside.
    unsigned getDepth() const {
      return Bits >> 1;
    }

    /// True if we're in the return type of the innermost function type.
    bool isInResultType() const {
      return Bits & InResultTypeMask;
    }

    /// Enter a nested function type; returns the state to restore on pop().
    FunctionTypeDepthState push() {
      FunctionTypeDepthState tmp = *this;
      Bits = (Bits & ~InResultTypeMask) + 2;
      return tmp;
    }

    void enterResultType() {
      Bits |= InResultTypeMask;
    }

    void leaveResultType() {
      Bits &= ~InResultTypeMask;
    }

    void pop(FunctionTypeDepthState saved) {
      assert(getDepth() == saved.getDepth() + 1);
      Bits = saved.Bits;
    }

  } FunctionTypeDepth;

  /// Entities already emitted, keyed by canonical pointer, mapped to their
  /// substitution sequence number.
  llvm::DenseMap<uintptr_t, unsigned> Substitutions;

  ASTContext &getASTContext() const { return Context.getASTContext(); }

public:
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const NamedDecl *D = 0)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(0),
      SeqID(0) {
    // These can't be mangled without a ctor type or dtor type.
    assert(!D || (!isa<CXXDestructorDecl>(D) &&
                  !isa<CXXConstructorDecl>(D)));
  }
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const CXXConstructorDecl *D, CXXCtorType Type)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
      SeqID(0) { }
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const CXXDestructorDecl *D, CXXDtorType Type)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
      SeqID(0) { }

#if MANGLE_CHECKER
  // Debug-only self-check: round-trip the produced mangling through the
  // runtime demangler to catch malformed output.
  ~CXXNameMangler() {
    if (Out.str()[0] == '\01')
      return;

    int status = 0;
    char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
    assert(status == 0 && "Could not demangle mangled name!");
    free(result);
  }
#endif
  raw_ostream &getStream() { return Out; }

  void mangle(const NamedDecl *D, StringRef Prefix = "_Z");
  void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
  void mangleNumber(const llvm::APSInt &I);
  void mangleNumber(int64_t Number);
  void mangleFloat(const llvm::APFloat &F);
  void mangleFunctionEncoding(const FunctionDecl *FD);
  void mangleName(const NamedDecl *ND);
  void mangleType(QualType T);
  void mangleNameOrStandardSubstitution(const NamedDecl *ND);

private:
  bool mangleSubstitution(const NamedDecl *ND);
  bool mangleSubstitution(QualType T);
  bool mangleSubstitution(TemplateName Template);
  bool mangleSubstitution(uintptr_t Ptr);

  void mangleExistingSubstitution(QualType type);
  void mangleExistingSubstitution(TemplateName name);

  bool mangleStandardSubstitution(const NamedDecl *ND);

  void addSubstitution(const NamedDecl *ND) {
    // Substitutions are keyed on the canonical declaration.
    ND = cast<NamedDecl>(ND->getCanonicalDecl());

    addSubstitution(reinterpret_cast<uintptr_t>(ND));
  }
  void addSubstitution(QualType T);
  void addSubstitution(TemplateName Template);
  void addSubstitution(uintptr_t Ptr);

  void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
                              NamedDecl *firstQualifierLookup,
                              bool recursive = false);
  void mangleUnresolvedName(NestedNameSpecifier *qualifier,
                            NamedDecl *firstQualifierLookup,
                            DeclarationName name,
                            unsigned KnownArity = UnknownArity);

  void mangleName(const TemplateDecl *TD,
                  const TemplateArgument *TemplateArgs,
                  unsigned NumTemplateArgs);
  void mangleUnqualifiedName(const NamedDecl *ND) {
    mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
  }
  void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
                             unsigned KnownArity);
  void mangleUnscopedName(const NamedDecl *ND);
  void mangleUnscopedTemplateName(const TemplateDecl *ND);
  void mangleUnscopedTemplateName(TemplateName);
  void mangleSourceName(const IdentifierInfo *II);
  void mangleLocalName(const Decl *D);
  void mangleBlockForPrefix(const BlockDecl *Block);
  void mangleUnqualifiedBlock(const BlockDecl *Block);
  void mangleLambda(const CXXRecordDecl *Lambda);
  void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
                        bool NoFunction=false);
  void mangleNestedName(const TemplateDecl *TD,
                        const TemplateArgument *TemplateArgs,
                        unsigned NumTemplateArgs);
  void manglePrefix(NestedNameSpecifier *qualifier);
  void manglePrefix(const DeclContext *DC, bool NoFunction=false);
  void manglePrefix(QualType type);
  void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false);
  void mangleTemplatePrefix(TemplateName Template);
  void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
  void mangleQualifiers(Qualifiers Quals);
  void mangleRefQualifier(RefQualifierKind RefQualifier);

  void mangleObjCMethodName(const ObjCMethodDecl *MD);

  // Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
#include "clang/AST/TypeNodes.def"

  void mangleType(const TagType*);
  void mangleType(TemplateName);
  void mangleBareFunctionType(const FunctionType *T,
                              bool MangleReturnType);
  void mangleNeonVectorType(const VectorType *T);
  void mangleAArch64NeonVectorType(const VectorType *T);

  void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
  void mangleMemberExpr(const Expr *base, bool isArrow,
                        NestedNameSpecifier *qualifier,
                        NamedDecl *firstQualifierLookup,
                        DeclarationName name,
                        unsigned knownArity);
  void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
  void mangleCXXCtorType(CXXCtorType T);
  void mangleCXXDtorType(CXXDtorType T);

  void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
  void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
                          unsigned NumTemplateArgs);
  void mangleTemplateArgs(const TemplateArgumentList &AL);
  void mangleTemplateArg(TemplateArgument A);

  void mangleTemplateParameter(unsigned Index);

  void mangleFunctionParam(const ParmVarDecl *parm);
};

}

/// Decide whether D needs a mangled name at all; declarations with C-level
/// naming (main, extern "C" functions, global C variables) keep their
/// source name.
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (FD) {
    LanguageLinkage L = FD->getLanguageLinkage();
    // Overloadable functions need mangling.
    if (FD->hasAttr<OverloadableAttr>())
      return true;

    // "main" is not mangled.
    if (FD->isMain())
      return false;

    // C++ functions and those whose names are not a simple identifier need
    // mangling.
    if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
      return true;

    // C functions are not mangled.
    if (L == CLanguageLinkage)
      return false;
  }

  // Otherwise, no mangling is done outside C++ mode.
  if (!getASTContext().getLangOpts().CPlusPlus)
    return false;

  const VarDecl *VD = dyn_cast<VarDecl>(D);
  if (VD) {
    // C variables are not mangled.
    if (VD->isExternC())
      return false;

    // Variables at global scope with non-internal linkage are not mangled
    const DeclContext *DC = getEffectiveDeclContext(D);
    // Check for extern variable declared locally.
    if (DC->isFunctionOrMethod() && D->hasLinkage())
      while (!DC->isNamespace() && !DC->isTranslationUnit())
        DC = getEffectiveParentContext(DC);
    if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
        !isa<VarTemplateSpecializationDecl>(D))
      return false;
  }

  return true;
}

/// Emit the fully mangled name for D: the prefix (normally "_Z") followed
/// by the encoding appropriate to the declaration kind.
void CXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
  // <mangled-name> ::= _Z <encoding>
  //            ::= <data name>
  //            ::= <special-name>
  Out << Prefix;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
    mangleFunctionEncoding(FD);
  else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
    mangleName(VD);
  else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
    mangleName(IFD->getAnonField());
  else
    mangleName(cast<FieldDecl>(D));
}

void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
  // <encoding> ::= <function name> <bare-function-type>
  mangleName(FD);

  // Don't mangle in the type if this isn't a decl we should typically mangle.
  if (!Context.shouldMangleDeclName(FD))
    return;

  // Whether the mangling of a function type includes the return type depends on
  // the context and the nature of the function. The rules for deciding whether
  // the return type is included are:
  //
  //   1. Template functions (names or types) have return types encoded, with
  //   the exceptions listed below.
  //   2. Function types not appearing as part of a function name mangling,
  //   e.g. parameters, pointer types, etc., have return type encoded, with the
  //   exceptions listed below.
  //   3. Non-template function names do not have return types encoded.
  //
  // The exceptions mentioned in (1) and (2) above, for which the return type is
  // never included, are
  //   1. Constructors.
  //   2. Destructors.
  //   3. Conversion operator functions, e.g. operator int.
  bool MangleReturnType = false;
  if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
    if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
          isa<CXXConversionDecl>(FD)))
      MangleReturnType = true;

    // Mangle the type of the primary template.
    FD = PrimaryTemplate->getTemplatedDecl();
  }

  mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
                         MangleReturnType);
}

/// Skip over any enclosing linkage-specification (extern "C"/"C++")
/// contexts, which are transparent for mangling purposes.
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
  while (isa<LinkageSpecDecl>(DC)) {
    DC = getEffectiveParentContext(DC);
  }

  return DC;
}

/// isStd - Return whether a given namespace is the 'std' namespace.
static bool isStd(const NamespaceDecl *NS) {
  if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
                                ->isTranslationUnit())
    return false;

  const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
  return II && II->isStr("std");
}

// isStdNamespace - Return whether a given decl context is a toplevel 'std'
// namespace.
static bool isStdNamespace(const DeclContext *DC) {
  if (!DC->isNamespace())
    return false;

  return isStd(cast<NamespaceDecl>(DC));
}

/// If ND is a template specialization (function, class, or variable),
/// return the template it specializes and set \p TemplateArgs to the
/// specialization's arguments; otherwise return null.
static const TemplateDecl *
isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
  // Check if we have a function template.
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
    if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
      TemplateArgs = FD->getTemplateSpecializationArgs();
      return TD;
    }
  }

  // Check if we have a class template.
  if (const ClassTemplateSpecializationDecl *Spec =
        dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
    TemplateArgs = &Spec->getTemplateArgs();
    return Spec->getSpecializedTemplate();
  }

  // Check if we have a variable template.
  if (const VarTemplateSpecializationDecl *Spec =
          dyn_cast<VarTemplateSpecializationDecl>(ND)) {
    TemplateArgs = &Spec->getTemplateArgs();
    return Spec->getSpecializedTemplate();
  }

  return 0;
}

/// Is ND a lambda closure type?
static bool isLambda(const NamedDecl *ND) {
  const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
  if (!Record)
    return false;

  return Record->isLambda();
}

void CXXNameMangler::mangleName(const NamedDecl *ND) {
  //  <name> ::= <nested-name>
  //         ::= <unscoped-name>
  //         ::= <unscoped-template-name> <template-args>
  //         ::= <local-name>
  //
  const DeclContext *DC = getEffectiveDeclContext(ND);

  // If this is an extern variable declared locally, the relevant DeclContext
  // is that of the containing namespace, or the translation unit.
  // FIXME: This is a hack; extern variables declared locally should have
  // a proper semantic declaration context!
  if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND))
    while (!DC->isNamespace() && !DC->isTranslationUnit())
      DC = getEffectiveParentContext(DC);
  else if (GetLocalClassDecl(ND)) {
    // Members of local classes get <local-name> manglings.
    mangleLocalName(ND);
    return;
  }

  DC = IgnoreLinkageSpecDecls(DC);

  if (DC->isTranslationUnit() || isStdNamespace(DC)) {
    // Check if we have a template.
    const TemplateArgumentList *TemplateArgs = 0;
    if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
      mangleUnscopedTemplateName(TD);
      mangleTemplateArgs(*TemplateArgs);
      return;
    }

    mangleUnscopedName(ND);
    return;
  }

  if (isLocalContainerContext(DC)) {
    mangleLocalName(ND);
    return;
  }

  mangleNestedName(ND, DC);
}

/// Mangle a template name together with an explicit argument list.
void CXXNameMangler::mangleName(const TemplateDecl *TD,
                                const TemplateArgument *TemplateArgs,
                                unsigned NumTemplateArgs) {
  const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));

  if (DC->isTranslationUnit() || isStdNamespace(DC)) {
    mangleUnscopedTemplateName(TD);
    mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
  } else {
    mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
  }
}

void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
  //  <unscoped-name> ::= <unqualified-name>
  //                  ::= St <unqualified-name>   # ::std::

  if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
    Out << "St";

  mangleUnqualifiedName(ND);
}

void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
  //     <unscoped-template-name> ::= <unscoped-name>
  //                              ::= <substitution>
  if (mangleSubstitution(ND))
    return;

  // <template-template-param> ::= <template-param>
  if (const TemplateTemplateParmDecl *TTP
                              = dyn_cast<TemplateTemplateParmDecl>(ND)) {
    mangleTemplateParameter(TTP->getIndex());
    return;
  }

  mangleUnscopedName(ND->getTemplatedDecl());
  addSubstitution(ND);
}

void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
  //     <unscoped-template-name> ::= <unscoped-name>
  //                              ::= <substitution>
  if (TemplateDecl *TD = Template.getAsTemplateDecl())
    return mangleUnscopedTemplateName(TD);

  if (mangleSubstitution(Template))
    return;

  DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
  assert(Dependent && "Not a dependent template name?");
  if (const IdentifierInfo *Id = Dependent->getIdentifier())
    mangleSourceName(Id);
  else
    mangleOperatorName(Dependent->getOperator(), UnknownArity);

  addSubstitution(Template);
}

void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
  // ABI:
  //   Floating-point literals are encoded using a fixed-length
  //   lowercase hexadecimal string corresponding to the internal
  //   representation (IEEE on Itanium), high-order bytes first,
  //   without leading zeroes. For example: "Lf bf800000 E" is -1.0f
  //   on Itanium.
  // The 'without leading zeroes' thing seems to be an editorial
  // mistake; see the discussion on cxx-abi-dev beginning on
  // 2012-01-16.

  // Our requirements here are just barely weird enough to justify
  // using a custom algorithm instead of post-processing APInt::toString().

  llvm::APInt valueBits = f.bitcastToAPInt();
  unsigned numCharacters = (valueBits.getBitWidth() + 3) / 4;
  assert(numCharacters != 0);

  // Allocate a buffer of the right number of characters.
  SmallVector<char, 20> buffer;
  buffer.set_size(numCharacters);

  // Fill the buffer left-to-right.
  for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
    // The bit-index of the next hex digit.
    unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);

    // Project out 4 bits starting at 'digitIndex'.
    llvm::integerPart hexDigit
      = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
    hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
    hexDigit &= 0xF;

    // Map that over to a lowercase hex digit.
    static const char charForHex[16] = {
      '0', '1', '2', '3', '4', '5', '6', '7',
      '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
    };
    buffer[stringIndex] = charForHex[hexDigit];
  }

  Out.write(buffer.data(), numCharacters);
}

/// Mangle an arbitrary-precision integer: negative values are prefixed
/// with 'n' and printed as their absolute value.
void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
  if (Value.isSigned() && Value.isNegative()) {
    Out << 'n';
    Value.abs().print(Out, /*signed*/ false);
  } else {
    Value.print(Out, /*signed*/ false);
  }
}

void CXXNameMangler::mangleNumber(int64_t Number) {
  //  <number> ::= [n] <non-negative decimal integer>
  if (Number < 0) {
    Out << 'n';
    Number = -Number;
  }

  Out << Number;
}

void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
  //  <call-offset>  ::= h <nv-offset> _
  //                 ::= v <v-offset> _
  //  <nv-offset>    ::= <offset number>        # non-virtual base override
  //  <v-offset>     ::= <offset number> _ <virtual offset number>
  //                      # virtual base override, with vcall offset
  if (!Virtual) {
    Out << 'h';
    mangleNumber(NonVirtual);
    Out << '_';
    return;
  }

  Out << 'v';
  mangleNumber(NonVirtual);
  Out << '_';
  mangleNumber(Virtual);
  Out << '_';
}

/// Mangle a type used as the prefix of a nested name (e.g. the T in T::x).
void CXXNameMangler::manglePrefix(QualType type) {
  if (const TemplateSpecializationType *TST =
        type->getAs<TemplateSpecializationType>()) {
    if (!mangleSubstitution(QualType(TST, 0))) {
      mangleTemplatePrefix(TST->getTemplateName());

      // FIXME: GCC does not appear to mangle the template arguments when
      // the template in question is a dependent template name. Should we
      // emulate that badness?
      mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
      addSubstitution(QualType(TST, 0));
    }
  } else if (const DependentTemplateSpecializationType *DTST
               = type->getAs<DependentTemplateSpecializationType>()) {
    TemplateName Template
      = getASTContext().getDependentTemplateName(DTST->getQualifier(),
                                                 DTST->getIdentifier());
    mangleTemplatePrefix(Template);

    // FIXME: GCC does not appear to mangle the template arguments when
    // the template in question is a dependent template name. Should we
    // emulate that badness?
    mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
  } else {
    // We use the QualType mangle type variant here because it handles
    // substitutions.
    mangleType(type);
  }
}

/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
///
/// \param firstQualifierLookup - the entity found by unqualified lookup
///   for the first name in the qualifier, if this is for a member expression
/// \param recursive - true if this is being called recursively,
///   i.e. if there is more prefix "to the right".
void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
                                            NamedDecl *firstQualifierLookup,
                                            bool recursive) {

  // x, ::x
  // <unresolved-name> ::= [gs] <base-unresolved-name>

  // T::x / decltype(p)::x
  // <unresolved-name> ::= sr <unresolved-type> <base-unresolved-name>

  // T::N::x /decltype(p)::N::x
  // <unresolved-name> ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
  //                       <base-unresolved-name>

  // A::x, N::y, A<T>::z; "gs" means leading "::"
  // <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
  //                       <base-unresolved-name>

  switch (qualifier->getKind()) {
  case NestedNameSpecifier::Global:
    Out << "gs";

    // We want an 'sr' unless this is the entire NNS.
    if (recursive)
      Out << "sr";

    // We never want an 'E' here.
    return;

  case NestedNameSpecifier::Namespace:
    if (qualifier->getPrefix())
      mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
                             /*recursive*/ true);
    else
      Out << "sr";
    mangleSourceName(qualifier->getAsNamespace()->getIdentifier());
    break;
  case NestedNameSpecifier::NamespaceAlias:
    if (qualifier->getPrefix())
      mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
                             /*recursive*/ true);
    else
      Out << "sr";
    mangleSourceName(qualifier->getAsNamespaceAlias()->getIdentifier());
    break;

  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *type = qualifier->getAsType();

    // We only want to use an unresolved-type encoding if this is one of:
    //   - a decltype
    //   - a template type parameter
    //   - a template template parameter with arguments
    // In all of these cases, we should have no prefix.
    if (qualifier->getPrefix()) {
      mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
                             /*recursive*/ true);
    } else {
      // Otherwise, all the cases want this.
      Out << "sr";
    }

    // Only certain other types are valid as prefixes;  enumerate them.
    switch (type->getTypeClass()) {
    case Type::Builtin:
    case Type::Complex:
    case Type::Decayed:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::DependentSizedArray:
    case Type::DependentSizedExtVector:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::Paren:
    case Type::Elaborated:
    case Type::Attributed:
    case Type::Auto:
    case Type::PackExpansion:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::Atomic:
      llvm_unreachable("type is illegal as a nested name specifier");

    case Type::SubstTemplateTypeParmPack:
      // FIXME: not clear how to mangle this!
      // template <class T...> class A {
      //   template <class U...> void foo(decltype(T::foo(U())) x...);
      // };
      Out << "_SUBSTPACK_";
      break;

    // <unresolved-type> ::= <template-param>
    //                   ::= <decltype>
    //                   ::= <template-template-param> <template-args>
    // (this last is not official yet)
    case Type::TypeOfExpr:
    case Type::TypeOf:
    case Type::Decltype:
    case Type::TemplateTypeParm:
    case Type::UnaryTransform:
    case Type::SubstTemplateTypeParm:
    unresolvedType:
      assert(!qualifier->getPrefix());

      // We only get here recursively if we're followed by identifiers.
      if (recursive) Out << 'N';

      // This seems to do everything we want.  It's not really
      // sanctioned for a substituted template parameter, though.
      mangleType(QualType(type, 0));

      // We never want to print 'E' directly after an unresolved-type,
      // so we return directly.
      return;

    case Type::Typedef:
      mangleSourceName(cast<TypedefType>(type)->getDecl()->getIdentifier());
      break;

    case Type::UnresolvedUsing:
      mangleSourceName(cast<UnresolvedUsingType>(type)->getDecl()
                         ->getIdentifier());
      break;

    case Type::Record:
      mangleSourceName(cast<RecordType>(type)->getDecl()->getIdentifier());
      break;

    case Type::TemplateSpecialization: {
      const TemplateSpecializationType *tst
        = cast<TemplateSpecializationType>(type);
      TemplateName name = tst->getTemplateName();
      switch (name.getKind()) {
      case TemplateName::Template:
      case TemplateName::QualifiedTemplate: {
        TemplateDecl *temp = name.getAsTemplateDecl();

        // If the base is a template template parameter, this is an
        // unresolved type.
        assert(temp && "no template for template specialization type");
        if (isa<TemplateTemplateParmDecl>(temp)) goto unresolvedType;

        mangleSourceName(temp->getIdentifier());
        break;
      }

      case TemplateName::OverloadedTemplate:
      case TemplateName::DependentTemplate:
        llvm_unreachable("invalid base for a template specialization type");

      case TemplateName::SubstTemplateTemplateParm: {
        SubstTemplateTemplateParmStorage *subst
          = name.getAsSubstTemplateTemplateParm();
        mangleExistingSubstitution(subst->getReplacement());
        break;
      }

      case TemplateName::SubstTemplateTemplateParmPack: {
        // FIXME: not clear how to mangle this!
        // template <template <class U> class T...> class A {
        //   template <class U...> void foo(decltype(T<U>::foo) x...);
        // };
        Out << "_SUBSTPACK_";
        break;
      }
      }

      mangleTemplateArgs(tst->getArgs(), tst->getNumArgs());
      break;
    }

    case Type::InjectedClassName:
      mangleSourceName(cast<InjectedClassNameType>(type)->getDecl()
                         ->getIdentifier());
      break;

    case Type::DependentName:
      mangleSourceName(cast<DependentNameType>(type)->getIdentifier());
      break;

    case Type::DependentTemplateSpecialization: {
      const DependentTemplateSpecializationType *tst
        = cast<DependentTemplateSpecializationType>(type);
      mangleSourceName(tst->getIdentifier());
      mangleTemplateArgs(tst->getArgs(), tst->getNumArgs());
      break;
    }
    }
    break;
  }

  case NestedNameSpecifier::Identifier:
    // Member expressions can have these without prefixes.
    if (qualifier->getPrefix()) {
      mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
                             /*recursive*/ true);
    } else if (firstQualifierLookup) {

      // Try to make a proper qualifier out of the lookup result, and
      // then just recurse on that.
      NestedNameSpecifier *newQualifier;
      if (TypeDecl *typeDecl = dyn_cast<TypeDecl>(firstQualifierLookup)) {
        QualType type = getASTContext().getTypeDeclType(typeDecl);

        // Pretend we had a different nested name specifier.
        newQualifier = NestedNameSpecifier::Create(getASTContext(),
                                                   /*prefix*/ 0,
                                                   /*template*/ false,
                                                   type.getTypePtr());
      } else if (NamespaceDecl *nspace =
                   dyn_cast<NamespaceDecl>(firstQualifierLookup)) {
        newQualifier = NestedNameSpecifier::Create(getASTContext(),
                                                   /*prefix*/ 0,
                                                   nspace);
      } else if (NamespaceAliasDecl *alias =
                   dyn_cast<NamespaceAliasDecl>(firstQualifierLookup)) {
        newQualifier = NestedNameSpecifier::Create(getASTContext(),
                                                   /*prefix*/ 0,
                                                   alias);
      } else {
        // No sensible mangling to do here.
        newQualifier = 0;
      }

      if (newQualifier)
        return mangleUnresolvedPrefix(newQualifier, /*lookup*/ 0, recursive);

    } else {
      Out << "sr";
    }

    mangleSourceName(qualifier->getAsIdentifier());
    break;
  }

  // If this was the innermost part of the NNS, and we fell out to
  // here, append an 'E'.
  if (!recursive)
    Out << 'E';
}

/// Mangle an unresolved-name, which is generally used for names which
/// weren't resolved to specific entities.
void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
                                          NamedDecl *firstQualifierLookup,
                                          DeclarationName name,
                                          unsigned knownArity) {
  if (qualifier) mangleUnresolvedPrefix(qualifier, firstQualifierLookup);
  mangleUnqualifiedName(0, name, knownArity);
}

/// Pre-order, depth-first, declaration-order search for the first named
/// field of an anonymous struct/union (descending into nested anonymous
/// records); null if every member is unnamed. See Itanium C++ ABI 5.1.2.
static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
  assert(RD->isAnonymousStructOrUnion() &&
         "Expected anonymous struct or union!");

  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I) {
    if (I->getIdentifier())
      return *I;

    if (const RecordType *RT = I->getType()->getAs<RecordType>())
      if (const FieldDecl *NamedDataMember =
          FindFirstNamedDataMember(RT->getDecl()))
        return NamedDataMember;
  }

  // We didn't find a named data member.
  return 0;
}

void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
                                           DeclarationName Name,
                                           unsigned KnownArity) {
  //  <unqualified-name> ::= <operator-name>
  //                     ::= <ctor-dtor-name>
  //                     ::= <source-name>
  switch (Name.getNameKind()) {
  case DeclarationName::Identifier: {
    if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
      // We must avoid conflicts between internally- and externally-
      // linked variable and function declaration names in the same TU:
      //   void test() { extern void foo(); }
      //   static void foo();
      // This naming convention is the same as that followed by GCC,
      // though it shouldn't actually matter.
      if (ND && ND->getFormalLinkage() == InternalLinkage &&
          getEffectiveDeclContext(ND)->isFileContext())
        Out << 'L';

      mangleSourceName(II);
      break;
    }

    // Otherwise, an anonymous entity.  We must have a declaration.
    assert(ND && "mangling empty name without declaration");

    if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
      if (NS->isAnonymousNamespace()) {
        // This is how gcc mangles these names.
        Out << "12_GLOBAL__N_1";
        break;
      }
    }

    if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
      // We must have an anonymous union or struct declaration.
      const RecordDecl *RD =
        cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());

      // Itanium C++ ABI 5.1.2:
      //
      //   For the purposes of mangling, the name of an anonymous union is
      //   considered to be the name of the first named data member found by a
      //   pre-order, depth-first, declaration-order walk of the data members of
      //   the anonymous union. If there is no such data member (i.e., if all of
      //   the data members in the union are unnamed), then there is no way for
      //   a program to refer to the anonymous union, and there is therefore no
      //   need to mangle its name.
      const FieldDecl *FD = FindFirstNamedDataMember(RD);

      // It's actually possible for various reasons for us to get here
      // with an empty anonymous struct / union.  Fortunately, it
      // doesn't really matter what name we generate.
      if (!FD) break;
      assert(FD->getIdentifier() && "Data member name isn't an identifier!");

      mangleSourceName(FD->getIdentifier());
      break;
    }

    // Class extensions have no name as a category, and it's possible
    // for them to be the semantic parent of certain declarations
    // (primarily, tag decls defined within declarations).  Such
    // declarations will always have internal linkage, so the name
    // doesn't really matter, but we shouldn't crash on them.  For
    // safety, just handle all ObjC containers here.
    if (isa<ObjCContainerDecl>(ND))
      break;

    // We must have an anonymous struct.
    const TagDecl *TD = cast<TagDecl>(ND);
    if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
      assert(TD->getDeclContext() == D->getDeclContext() &&
             "Typedef should not be in another decl context!");
      assert(D->getDeclName().getAsIdentifierInfo() &&
             "Typedef was not named!");
      mangleSourceName(D->getDeclName().getAsIdentifierInfo());
      break;
    }

    // <unnamed-type-name> ::= <closure-type-name>
    //
    // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
    // <lambda-sig> ::= <parameter-type>+   # Parameter types or 'v' for 'void'.
    if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
      if (Record->isLambda() && Record->getLambdaManglingNumber()) {
        mangleLambda(Record);
        break;
      }
    }

    if (TD->isExternallyVisible()) {
      unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
      Out << "Ut";
      // The first unnamed tag gets a bare "Ut_"; later ones get "Ut<N-2>_".
      if (UnnamedMangle > 1)
        Out << llvm::utostr(UnnamedMangle - 2);
      Out << '_';
      break;
    }

    // Get a unique id for the anonymous struct.
    uint64_t AnonStructId = Context.getAnonymousStructId(TD);

    // Mangle it as a source name in the form
    // [n] $_<id>
    // where n is the length of the string.
    SmallString<8> Str;
    Str += "$_";
    Str += llvm::utostr(AnonStructId);

    Out << Str.size();
    Out << Str.str();
    break;
  }

  case DeclarationName::ObjCZeroArgSelector:
  case DeclarationName::ObjCOneArgSelector:
  case DeclarationName::ObjCMultiArgSelector:
    llvm_unreachable("Can't mangle Objective-C selector names here!");

  case DeclarationName::CXXConstructorName:
    if (ND == Structor)
      // If the named decl is the C++ constructor we're mangling, use the type
      // we were given.
      mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
    else
      // Otherwise, use the complete constructor name. This is relevant if a
      // class with a constructor is declared within a constructor.
      mangleCXXCtorType(Ctor_Complete);
    break;

  case DeclarationName::CXXDestructorName:
    if (ND == Structor)
      // If the named decl is the C++ destructor we're mangling, use the type we
      // were given.
      mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
    else
      // Otherwise, use the complete destructor name. This is relevant if a
      // class with a destructor is declared within a destructor.
      mangleCXXDtorType(Dtor_Complete);
    break;

  case DeclarationName::CXXConversionFunctionName:
    // <operator-name> ::= cv <type>    # (cast)
    Out << "cv";
    mangleType(Name.getCXXNameType());
    break;

  case DeclarationName::CXXOperatorName: {
    unsigned Arity;
    if (ND) {
      Arity = cast<FunctionDecl>(ND)->getNumParams();

      // If we have a C++ member function, we need to include the 'this' pointer.
      // FIXME: This does not make sense for operators that are static, but their
      // names stay the same regardless of the arity (operator new for instance).
      if (isa<CXXMethodDecl>(ND))
        Arity++;
    } else
      Arity = KnownArity;

    mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
    break;
  }

  case DeclarationName::CXXLiteralOperatorName:
    // FIXME: This mangling is not yet official.
    Out << "li";
    mangleSourceName(Name.getCXXLiteralIdentifier());
    break;

  case DeclarationName::CXXUsingDirective:
    llvm_unreachable("Can't mangle a using directive name!");
  }
}

void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
  // <source-name> ::= <positive length number> <identifier>
  // <number> ::= [n] <non-negative decimal integer>
  // <identifier> ::= <unqualified source code identifier>
  Out << II->getLength() << II->getName();
}

// (Continues past the end of this view.)
void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
                                      const DeclContext *DC,
                                      bool NoFunction) {
  // <nested-name>
  //   ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
  //   ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
  //       <template-args> E

  Out << 'N';
  if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
    Qualifiers MethodQuals =
        Qualifiers::fromCVRMask(Method->getTypeQualifiers());
    // We do not consider restrict a distinguishing attribute for overloading
    // purposes so we must not mangle it.
    MethodQuals.removeRestrict();
    mangleQualifiers(MethodQuals);
    mangleRefQualifier(Method->getRefQualifier());
  }

  // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0; + if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { + mangleTemplatePrefix(TD, NoFunction); + mangleTemplateArgs(*TemplateArgs); + } + else { + manglePrefix(DC, NoFunction); + mangleUnqualifiedName(ND); + } + + Out << 'E'; +} +void CXXNameMangler::mangleNestedName(const TemplateDecl *TD, + const TemplateArgument *TemplateArgs, + unsigned NumTemplateArgs) { + // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E + + Out << 'N'; + + mangleTemplatePrefix(TD); + mangleTemplateArgs(TemplateArgs, NumTemplateArgs); + + Out << 'E'; +} + +void CXXNameMangler::mangleLocalName(const Decl *D) { + // <local-name> := Z <function encoding> E <entity name> [<discriminator>] + // := Z <function encoding> E s [<discriminator>] + // <local-name> := Z <function encoding> E d [ <parameter number> ] + // _ <entity name> + // <discriminator> := _ <non-negative number> + assert(isa<NamedDecl>(D) || isa<BlockDecl>(D)); + const RecordDecl *RD = GetLocalClassDecl(D); + const DeclContext *DC = getEffectiveDeclContext(RD ? RD : D); + + Out << 'Z'; + + if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) + mangleObjCMethodName(MD); + else if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) + mangleBlockForPrefix(BD); + else + mangleFunctionEncoding(cast<FunctionDecl>(DC)); + + Out << 'E'; + + if (RD) { + // The parameter number is omitted for the last parameter, 0 for the + // second-to-last parameter, 1 for the third-to-last parameter, etc. The + // <entity name> will of course contain a <closure-type-name>: Its + // numbering will be local to the particular argument in which it appears + // -- other default arguments do not affect its encoding. 
+ const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD); + if (CXXRD->isLambda()) { + if (const ParmVarDecl *Parm + = dyn_cast_or_null<ParmVarDecl>(CXXRD->getLambdaContextDecl())) { + if (const FunctionDecl *Func + = dyn_cast<FunctionDecl>(Parm->getDeclContext())) { + Out << 'd'; + unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex(); + if (Num > 1) + mangleNumber(Num - 2); + Out << '_'; + } + } + } + + // Mangle the name relative to the closest enclosing function. + // equality ok because RD derived from ND above + if (D == RD) { + mangleUnqualifiedName(RD); + } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { + manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/); + mangleUnqualifiedBlock(BD); + } else { + const NamedDecl *ND = cast<NamedDecl>(D); + mangleNestedName(ND, getEffectiveDeclContext(ND), true /*NoFunction*/); + } + } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { + // Mangle a block in a default parameter; see above explanation for + // lambdas. + if (const ParmVarDecl *Parm + = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) { + if (const FunctionDecl *Func + = dyn_cast<FunctionDecl>(Parm->getDeclContext())) { + Out << 'd'; + unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex(); + if (Num > 1) + mangleNumber(Num - 2); + Out << '_'; + } + } + + mangleUnqualifiedBlock(BD); + } else { + mangleUnqualifiedName(cast<NamedDecl>(D)); + } + + if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? 
RD : D)) { + unsigned disc; + if (Context.getNextDiscriminator(ND, disc)) { + if (disc < 10) + Out << '_' << disc; + else + Out << "__" << disc << '_'; + } + } +} + +void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) { + if (GetLocalClassDecl(Block)) { + mangleLocalName(Block); + return; + } + const DeclContext *DC = getEffectiveDeclContext(Block); + if (isLocalContainerContext(DC)) { + mangleLocalName(Block); + return; + } + manglePrefix(getEffectiveDeclContext(Block)); + mangleUnqualifiedBlock(Block); +} + +void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) { + if (Decl *Context = Block->getBlockManglingContextDecl()) { + if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) && + Context->getDeclContext()->isRecord()) { + if (const IdentifierInfo *Name + = cast<NamedDecl>(Context)->getIdentifier()) { + mangleSourceName(Name); + Out << 'M'; + } + } + } + + // If we have a block mangling number, use it. + unsigned Number = Block->getBlockManglingNumber(); + // Otherwise, just make up a number. It doesn't matter what it is because + // the symbol in question isn't externally visible. + if (!Number) + Number = Context.getBlockId(Block, false); + Out << "Ub"; + if (Number > 1) + Out << Number - 2; + Out << '_'; +} + +void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) { + // If the context of a closure type is an initializer for a class member + // (static or nonstatic), it is encoded in a qualified name with a final + // <prefix> of the form: + // + // <data-member-prefix> := <member source-name> M + // + // Technically, the data-member-prefix is part of the <prefix>. However, + // since a closure type will always be mangled with a prefix, it's easier + // to emit that last part of the prefix here. 
+ if (Decl *Context = Lambda->getLambdaContextDecl()) { + if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) && + Context->getDeclContext()->isRecord()) { + if (const IdentifierInfo *Name + = cast<NamedDecl>(Context)->getIdentifier()) { + mangleSourceName(Name); + Out << 'M'; + } + } + } + + Out << "Ul"; + const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()-> + getAs<FunctionProtoType>(); + mangleBareFunctionType(Proto, /*MangleReturnType=*/false); + Out << "E"; + + // The number is omitted for the first closure type with a given + // <lambda-sig> in a given context; it is n-2 for the nth closure type + // (in lexical order) with that same <lambda-sig> and context. + // + // The AST keeps track of the number for us. + unsigned Number = Lambda->getLambdaManglingNumber(); + assert(Number > 0 && "Lambda should be mangled as an unnamed class"); + if (Number > 1) + mangleNumber(Number - 2); + Out << '_'; +} + +void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) { + switch (qualifier->getKind()) { + case NestedNameSpecifier::Global: + // nothing + return; + + case NestedNameSpecifier::Namespace: + mangleName(qualifier->getAsNamespace()); + return; + + case NestedNameSpecifier::NamespaceAlias: + mangleName(qualifier->getAsNamespaceAlias()->getNamespace()); + return; + + case NestedNameSpecifier::TypeSpec: + case NestedNameSpecifier::TypeSpecWithTemplate: + manglePrefix(QualType(qualifier->getAsType(), 0)); + return; + + case NestedNameSpecifier::Identifier: + // Member expressions can have these without prefixes, but that + // should end up in mangleUnresolvedPrefix instead. 
+ assert(qualifier->getPrefix()); + manglePrefix(qualifier->getPrefix()); + + mangleSourceName(qualifier->getAsIdentifier()); + return; + } + + llvm_unreachable("unexpected nested name specifier"); +} + +void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) { + // <prefix> ::= <prefix> <unqualified-name> + // ::= <template-prefix> <template-args> + // ::= <template-param> + // ::= # empty + // ::= <substitution> + + DC = IgnoreLinkageSpecDecls(DC); + + if (DC->isTranslationUnit()) + return; + + if (NoFunction && isLocalContainerContext(DC)) + return; + + assert(!isLocalContainerContext(DC)); + + const NamedDecl *ND = cast<NamedDecl>(DC); + if (mangleSubstitution(ND)) + return; + + // Check if we have a template. + const TemplateArgumentList *TemplateArgs = 0; + if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { + mangleTemplatePrefix(TD); + mangleTemplateArgs(*TemplateArgs); + } else { + manglePrefix(getEffectiveDeclContext(ND), NoFunction); + mangleUnqualifiedName(ND); + } + + addSubstitution(ND); +} + +void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) { + // <template-prefix> ::= <prefix> <template unqualified-name> + // ::= <template-param> + // ::= <substitution> + if (TemplateDecl *TD = Template.getAsTemplateDecl()) + return mangleTemplatePrefix(TD); + + if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName()) + manglePrefix(Qualified->getQualifier()); + + if (OverloadedTemplateStorage *Overloaded + = Template.getAsOverloadedTemplate()) { + mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(), + UnknownArity); + return; + } + + DependentTemplateName *Dependent = Template.getAsDependentTemplateName(); + assert(Dependent && "Unknown template name kind?"); + manglePrefix(Dependent->getQualifier()); + mangleUnscopedTemplateName(Template); +} + +void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND, + bool NoFunction) { + // <template-prefix> ::= <prefix> <template 
/// Mangles a template name under the production <type>. Required for
/// template template arguments.
///   <type> ::= <class-enum-type>
///          ::= <template-param>
///          ::= <substitution>
void CXXNameMangler::mangleType(TemplateName TN) {
  // Template names participate in substitution like types do.
  if (mangleSubstitution(TN))
    return;

  TemplateDecl *TD = 0;

  switch (TN.getKind()) {
  case TemplateName::QualifiedTemplate:
    // A qualified template name mangles as the underlying declaration;
    // the qualifier itself does not appear in the <type> encoding.
    TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
    goto HaveDecl;

  case TemplateName::Template:
    TD = TN.getAsTemplateDecl();
    goto HaveDecl;

  HaveDecl:
    // A template template parameter mangles by its parameter index;
    // any other declaration mangles by name.
    if (isa<TemplateTemplateParmDecl>(TD))
      mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
    else
      mangleName(TD);
    break;

  case TemplateName::OverloadedTemplate:
    llvm_unreachable("can't mangle an overloaded template name as a <type>");

  case TemplateName::DependentTemplate: {
    const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
    assert(Dependent->isIdentifier());

    // <class-enum-type> ::= <name>
    // <name> ::= <nested-name>
    mangleUnresolvedPrefix(Dependent->getQualifier(), 0);
    mangleSourceName(Dependent->getIdentifier());
    break;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Substituted template parameters are mangled as the substituted
    // template. This will check for the substitution twice, which is
    // fine, but we have to return early so that we don't try to *add*
    // the substitution twice.
    SubstTemplateTemplateParmStorage *subst
      = TN.getAsSubstTemplateTemplateParm();
    mangleType(subst->getReplacement());
    return;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // FIXME: not clear how to mangle this!
    // template <template <class> class T...> class A {
    //   template <template <class> class U...> void foo(B<T,U> x...);
    // };
    Out << "_SUBSTPACK_";
    break;
  }
  }

  // Record this name so later occurrences can use a <substitution>.
  addSubstitution(TN);
}
"de" : "ml"); break; + // ::= co # ~ + case OO_Tilde: Out << "co"; break; + // ::= dv # / + case OO_Slash: Out << "dv"; break; + // ::= rm # % + case OO_Percent: Out << "rm"; break; + // ::= or # | + case OO_Pipe: Out << "or"; break; + // ::= eo # ^ + case OO_Caret: Out << "eo"; break; + // ::= aS # = + case OO_Equal: Out << "aS"; break; + // ::= pL # += + case OO_PlusEqual: Out << "pL"; break; + // ::= mI # -= + case OO_MinusEqual: Out << "mI"; break; + // ::= mL # *= + case OO_StarEqual: Out << "mL"; break; + // ::= dV # /= + case OO_SlashEqual: Out << "dV"; break; + // ::= rM # %= + case OO_PercentEqual: Out << "rM"; break; + // ::= aN # &= + case OO_AmpEqual: Out << "aN"; break; + // ::= oR # |= + case OO_PipeEqual: Out << "oR"; break; + // ::= eO # ^= + case OO_CaretEqual: Out << "eO"; break; + // ::= ls # << + case OO_LessLess: Out << "ls"; break; + // ::= rs # >> + case OO_GreaterGreater: Out << "rs"; break; + // ::= lS # <<= + case OO_LessLessEqual: Out << "lS"; break; + // ::= rS # >>= + case OO_GreaterGreaterEqual: Out << "rS"; break; + // ::= eq # == + case OO_EqualEqual: Out << "eq"; break; + // ::= ne # != + case OO_ExclaimEqual: Out << "ne"; break; + // ::= lt # < + case OO_Less: Out << "lt"; break; + // ::= gt # > + case OO_Greater: Out << "gt"; break; + // ::= le # <= + case OO_LessEqual: Out << "le"; break; + // ::= ge # >= + case OO_GreaterEqual: Out << "ge"; break; + // ::= nt # ! + case OO_Exclaim: Out << "nt"; break; + // ::= aa # && + case OO_AmpAmp: Out << "aa"; break; + // ::= oo # || + case OO_PipePipe: Out << "oo"; break; + // ::= pp # ++ + case OO_PlusPlus: Out << "pp"; break; + // ::= mm # -- + case OO_MinusMinus: Out << "mm"; break; + // ::= cm # , + case OO_Comma: Out << "cm"; break; + // ::= pm # ->* + case OO_ArrowStar: Out << "pm"; break; + // ::= pt # -> + case OO_Arrow: Out << "pt"; break; + // ::= cl # () + case OO_Call: Out << "cl"; break; + // ::= ix # [] + case OO_Subscript: Out << "ix"; break; + + // ::= qu # ? 
/// Mangle a set of qualifiers: CV-qualifiers, then (as vendor-extended
/// qualifiers) the address space and the Objective-C ARC lifetime.
/// The emission order here is ABI-significant; do not reorder.
void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
  // <CV-qualifiers> ::= [r] [V] [K] 	# restrict (C99), volatile, const
  if (Quals.hasRestrict())
    Out << 'r';
  if (Quals.hasVolatile())
    Out << 'V';
  if (Quals.hasConst())
    Out << 'K';

  if (Quals.hasAddressSpace()) {
    // Address space extension:
    //
    //   <type> ::= U <target-addrspace>
    //   <type> ::= U <OpenCL-addrspace>
    //   <type> ::= U <CUDA-addrspace>

    SmallString<64> ASString;
    unsigned AS = Quals.getAddressSpace();

    if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
      //  <target-addrspace> ::= "AS" <address-space-number>
      unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
      ASString = "AS" + llvm::utostr_32(TargetAS);
    } else {
      switch (AS) {
      default: llvm_unreachable("Not a language specific address space");
      //  <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" ]
      case LangAS::opencl_global:   ASString = "CLglobal";   break;
      case LangAS::opencl_local:    ASString = "CLlocal";    break;
      case LangAS::opencl_constant: ASString = "CLconstant"; break;
      //  <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
      case LangAS::cuda_device:     ASString = "CUdevice";   break;
      case LangAS::cuda_constant:   ASString = "CUconstant"; break;
      case LangAS::cuda_shared:     ASString = "CUshared";   break;
      }
    }
    // Emitted as a vendor-extended qualifier: U <length> <string>.
    Out << 'U' << ASString.size() << ASString;
  }

  StringRef LifetimeName;
  switch (Quals.getObjCLifetime()) {
  // Objective-C ARC Extension:
  //
  //   <type> ::= U "__strong"
  //   <type> ::= U "__weak"
  //   <type> ::= U "__autoreleasing"
  case Qualifiers::OCL_None:
    break;

  case Qualifiers::OCL_Weak:
    LifetimeName = "__weak";
    break;

  case Qualifiers::OCL_Strong:
    LifetimeName = "__strong";
    break;

  case Qualifiers::OCL_Autoreleasing:
    LifetimeName = "__autoreleasing";
    break;

  case Qualifiers::OCL_ExplicitNone:
    // The __unsafe_unretained qualifier is *not* mangled, so that
    // __unsafe_unretained types in ARC produce the same manglings as the
    // equivalent (but, naturally, unqualified) types in non-ARC, providing
    // better ABI compatibility.
    //
    // It's safe to do this because unqualified 'id' won't show up
    // in any type signatures that need to be mangled.
    break;
  }
  if (!LifetimeName.empty())
    Out << 'U' << LifetimeName.size() << LifetimeName;
}
+ // - Conversions on non-type template arguments need to be expressed, since + // they can affect the mangling of sizeof/alignof. + if (!T->isInstantiationDependentType() || T->isDependentType()) + T = T.getCanonicalType(); + else { + // Desugar any types that are purely sugar. + do { + // Don't desugar through template specialization types that aren't + // type aliases. We need to mangle the template arguments as written. + if (const TemplateSpecializationType *TST + = dyn_cast<TemplateSpecializationType>(T)) + if (!TST->isTypeAlias()) + break; + + QualType Desugared + = T.getSingleStepDesugaredType(Context.getASTContext()); + if (Desugared == T) + break; + + T = Desugared; + } while (true); + } + SplitQualType split = T.split(); + Qualifiers quals = split.Quals; + const Type *ty = split.Ty; + + bool isSubstitutable = quals || !isa<BuiltinType>(T); + if (isSubstitutable && mangleSubstitution(T)) + return; + + // If we're mangling a qualified array type, push the qualifiers to + // the element type. + if (quals && isa<ArrayType>(T)) { + ty = Context.getASTContext().getAsArrayType(T); + quals = Qualifiers(); + + // Note that we don't update T: we want to add the + // substitution at the original type. + } + + if (quals) { + mangleQualifiers(quals); + // Recurse: even if the qualified type isn't yet substitutable, + // the unqualified type might be. + mangleType(QualType(ty, 0)); + } else { + switch (ty->getTypeClass()) { +#define ABSTRACT_TYPE(CLASS, PARENT) +#define NON_CANONICAL_TYPE(CLASS, PARENT) \ + case Type::CLASS: \ + llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \ + return; +#define TYPE(CLASS, PARENT) \ + case Type::CLASS: \ + mangleType(static_cast<const CLASS##Type*>(ty)); \ + break; +#include "clang/AST/TypeNodes.def" + } + } + + // Add the substitution. 
+ if (isSubstitutable) + addSubstitution(T); +} + +void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) { + if (!mangleStandardSubstitution(ND)) + mangleName(ND); +} + +void CXXNameMangler::mangleType(const BuiltinType *T) { + // <type> ::= <builtin-type> + // <builtin-type> ::= v # void + // ::= w # wchar_t + // ::= b # bool + // ::= c # char + // ::= a # signed char + // ::= h # unsigned char + // ::= s # short + // ::= t # unsigned short + // ::= i # int + // ::= j # unsigned int + // ::= l # long + // ::= m # unsigned long + // ::= x # long long, __int64 + // ::= y # unsigned long long, __int64 + // ::= n # __int128 + // UNSUPPORTED: ::= o # unsigned __int128 + // ::= f # float + // ::= d # double + // ::= e # long double, __float80 + // UNSUPPORTED: ::= g # __float128 + // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits) + // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits) + // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits) + // ::= Dh # IEEE 754r half-precision floating point (16 bits) + // ::= Di # char32_t + // ::= Ds # char16_t + // ::= Dn # std::nullptr_t (i.e., decltype(nullptr)) + // ::= u <source-name> # vendor extended type + switch (T->getKind()) { + case BuiltinType::Void: Out << 'v'; break; + case BuiltinType::Bool: Out << 'b'; break; + case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break; + case BuiltinType::UChar: Out << 'h'; break; + case BuiltinType::UShort: Out << 't'; break; + case BuiltinType::UInt: Out << 'j'; break; + case BuiltinType::ULong: Out << 'm'; break; + case BuiltinType::ULongLong: Out << 'y'; break; + case BuiltinType::UInt128: Out << 'o'; break; + case BuiltinType::SChar: Out << 'a'; break; + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: Out << 'w'; break; + case BuiltinType::Char16: Out << "Ds"; break; + case BuiltinType::Char32: Out << "Di"; break; + case BuiltinType::Short: Out << 's'; break; + case BuiltinType::Int: Out << 
'i'; break; + case BuiltinType::Long: Out << 'l'; break; + case BuiltinType::LongLong: Out << 'x'; break; + case BuiltinType::Int128: Out << 'n'; break; + case BuiltinType::Half: Out << "Dh"; break; + case BuiltinType::Float: Out << 'f'; break; + case BuiltinType::Double: Out << 'd'; break; + case BuiltinType::LongDouble: Out << 'e'; break; + case BuiltinType::NullPtr: Out << "Dn"; break; + +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) \ + case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + case BuiltinType::Dependent: + llvm_unreachable("mangling a placeholder type"); + case BuiltinType::ObjCId: Out << "11objc_object"; break; + case BuiltinType::ObjCClass: Out << "10objc_class"; break; + case BuiltinType::ObjCSel: Out << "13objc_selector"; break; + case BuiltinType::OCLImage1d: Out << "11ocl_image1d"; break; + case BuiltinType::OCLImage1dArray: Out << "16ocl_image1darray"; break; + case BuiltinType::OCLImage1dBuffer: Out << "17ocl_image1dbuffer"; break; + case BuiltinType::OCLImage2d: Out << "11ocl_image2d"; break; + case BuiltinType::OCLImage2dArray: Out << "16ocl_image2darray"; break; + case BuiltinType::OCLImage3d: Out << "11ocl_image3d"; break; + case BuiltinType::OCLSampler: Out << "11ocl_sampler"; break; + case BuiltinType::OCLEvent: Out << "9ocl_event"; break; + } +} + +// <type> ::= <function-type> +// <function-type> ::= [<CV-qualifiers>] F [Y] +// <bare-function-type> [<ref-qualifier>] E +void CXXNameMangler::mangleType(const FunctionProtoType *T) { + // Mangle CV-qualifiers, if present. These are 'this' qualifiers, + // e.g. "const" in "int (A::*)() const". + mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals())); + + Out << 'F'; + + // FIXME: We don't have enough information in the AST to produce the 'Y' + // encoding for extern "C" function types. + mangleBareFunctionType(T, /*MangleReturnType=*/true); + + // Mangle the ref-qualifier, if present. 
+ mangleRefQualifier(T->getRefQualifier()); + + Out << 'E'; +} +void CXXNameMangler::mangleType(const FunctionNoProtoType *T) { + llvm_unreachable("Can't mangle K&R function prototypes"); +} +void CXXNameMangler::mangleBareFunctionType(const FunctionType *T, + bool MangleReturnType) { + // We should never be mangling something without a prototype. + const FunctionProtoType *Proto = cast<FunctionProtoType>(T); + + // Record that we're in a function type. See mangleFunctionParam + // for details on what we're trying to achieve here. + FunctionTypeDepthState saved = FunctionTypeDepth.push(); + + // <bare-function-type> ::= <signature type>+ + if (MangleReturnType) { + FunctionTypeDepth.enterResultType(); + mangleType(Proto->getResultType()); + FunctionTypeDepth.leaveResultType(); + } + + if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) { + // <builtin-type> ::= v # void + Out << 'v'; + + FunctionTypeDepth.pop(saved); + return; + } + + for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), + ArgEnd = Proto->arg_type_end(); + Arg != ArgEnd; ++Arg) + mangleType(Context.getASTContext().getSignatureParameterType(*Arg)); + + FunctionTypeDepth.pop(saved); + + // <builtin-type> ::= z # ellipsis + if (Proto->isVariadic()) + Out << 'z'; +} + +// <type> ::= <class-enum-type> +// <class-enum-type> ::= <name> +void CXXNameMangler::mangleType(const UnresolvedUsingType *T) { + mangleName(T->getDecl()); +} + +// <type> ::= <class-enum-type> +// <class-enum-type> ::= <name> +void CXXNameMangler::mangleType(const EnumType *T) { + mangleType(static_cast<const TagType*>(T)); +} +void CXXNameMangler::mangleType(const RecordType *T) { + mangleType(static_cast<const TagType*>(T)); +} +void CXXNameMangler::mangleType(const TagType *T) { + mangleName(T->getDecl()); +} + +// <type> ::= <array-type> +// <array-type> ::= A <positive dimension number> _ <element type> +// ::= A [<dimension expression>] _ <element type> +void CXXNameMangler::mangleType(const 
ConstantArrayType *T) { + Out << 'A' << T->getSize() << '_'; + mangleType(T->getElementType()); +} +void CXXNameMangler::mangleType(const VariableArrayType *T) { + Out << 'A'; + // decayed vla types (size 0) will just be skipped. + if (T->getSizeExpr()) + mangleExpression(T->getSizeExpr()); + Out << '_'; + mangleType(T->getElementType()); +} +void CXXNameMangler::mangleType(const DependentSizedArrayType *T) { + Out << 'A'; + mangleExpression(T->getSizeExpr()); + Out << '_'; + mangleType(T->getElementType()); +} +void CXXNameMangler::mangleType(const IncompleteArrayType *T) { + Out << "A_"; + mangleType(T->getElementType()); +} + +// <type> ::= <pointer-to-member-type> +// <pointer-to-member-type> ::= M <class type> <member type> +void CXXNameMangler::mangleType(const MemberPointerType *T) { + Out << 'M'; + mangleType(QualType(T->getClass(), 0)); + QualType PointeeType = T->getPointeeType(); + if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) { + mangleType(FPT); + + // Itanium C++ ABI 5.1.8: + // + // The type of a non-static member function is considered to be different, + // for the purposes of substitution, from the type of a namespace-scope or + // static member function whose type appears similar. The types of two + // non-static member functions are considered to be different, for the + // purposes of substitution, if the functions are members of different + // classes. In other words, for the purposes of substitution, the class of + // which the function is a member is considered part of the type of + // function. + + // Given that we already substitute member function pointers as a + // whole, the net effect of this rule is just to unconditionally + // suppress substitution on the function type in a member pointer. + // We increment the SeqID here to emulate adding an entry to the + // substitution table. 
    ++SeqID;
  } else
    mangleType(PointeeType);
}

// <type> ::= <template-param>
void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
  mangleTemplateParameter(T->getIndex());
}

// <type> ::= <template-param>
void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
  // FIXME: not clear how to mangle this!
  // template <class T...> class A {
  //   template <class U...> void foo(T(*)(U) x...);
  // };
  Out << "_SUBSTPACK_";
}

// <type> ::= P <type>   # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
  Out << 'P';
  mangleType(T->getPointeeType());
}
// Objective-C object pointers mangle exactly like plain pointers to the
// underlying object type.
void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
  Out << 'P';
  mangleType(T->getPointeeType());
}

// <type> ::= R <type>   # reference-to
void CXXNameMangler::mangleType(const LValueReferenceType *T) {
  Out << 'R';
  mangleType(T->getPointeeType());
}

// <type> ::= O <type>   # rvalue reference-to (C++0x)
void CXXNameMangler::mangleType(const RValueReferenceType *T) {
  Out << 'O';
  mangleType(T->getPointeeType());
}

// <type> ::= C <type>   # complex pair (C 2000)
void CXXNameMangler::mangleType(const ComplexType *T) {
  Out << 'C';
  mangleType(T->getElementType());
}

// ARM's ABI for Neon vector types specifies that they should be mangled as
// if they are structs (to match ARM's initial implementation).  The
// vector type must be one of the special types predefined by ARM.
void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
  QualType EltType = T->getElementType();
  assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
  const char *EltName = 0;
  if (T->getVectorKind() == VectorType::NeonPolyVector) {
    // Polynomial vectors use the poly*_t typedef names.
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::SChar:     EltName = "poly8_t"; break;
    case BuiltinType::Short:     EltName = "poly16_t"; break;
    default: llvm_unreachable("unexpected Neon polynomial vector element type");
    }
  } else {
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::SChar:     EltName = "int8_t"; break;
    case BuiltinType::UChar:     EltName = "uint8_t"; break;
    case BuiltinType::Short:     EltName = "int16_t"; break;
    case BuiltinType::UShort:    EltName = "uint16_t"; break;
    case BuiltinType::Int:       EltName = "int32_t"; break;
    case BuiltinType::UInt:      EltName = "uint32_t"; break;
    case BuiltinType::LongLong:  EltName = "int64_t"; break;
    case BuiltinType::ULongLong: EltName = "uint64_t"; break;
    case BuiltinType::Float:     EltName = "float32_t"; break;
    case BuiltinType::Half:      EltName = "float16_t"; break;
    default:
      llvm_unreachable("unexpected Neon vector element type");
    }
  }
  const char *BaseName = 0;
  unsigned BitSize = (T->getNumElements() *
                      getASTContext().getTypeSize(EltType));
  if (BitSize == 64)
    BaseName = "__simd64_";
  else {
    assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
    BaseName = "__simd128_";
  }
  // Emit as a <source-name>: decimal length followed by the struct-style name.
  Out << strlen(BaseName) + strlen(EltName);
  Out << BaseName << EltName;
}

// Map a Neon vector element type to the AArch64 ACLE internal name fragment
// (e.g. Int8 for the __Int8x8_t typedef) used by mangleAArch64NeonVectorType.
static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
  switch (EltType->getKind()) {
  case BuiltinType::SChar:
    return "Int8";
  case BuiltinType::Short:
    return "Int16";
  case BuiltinType::Int:
    return "Int32";
  case BuiltinType::LongLong:
    return "Int64";
  case BuiltinType::UChar:
    return "Uint8";
  case BuiltinType::UShort:
    return "Uint16";
  case BuiltinType::UInt:
    return "Uint32";
  case BuiltinType::ULongLong:
    return "Uint64";
  case BuiltinType::Half:
    return "Float16";
  case BuiltinType::Float:
    return "Float32";
  case BuiltinType::Double:
    return "Float64";
  default:
    llvm_unreachable("Unexpected vector element base type");
  }
}

// AArch64's ABI for Neon vector types specifies that they should be mangled as
// the equivalent internal name. The vector type must be one of the special
// types predefined by ARM.
void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
  QualType EltType = T->getElementType();
  assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
  unsigned BitSize =
      (T->getNumElements() * getASTContext().getTypeSize(EltType));
  (void)BitSize; // Silence warning.

  assert((BitSize == 64 || BitSize == 128) &&
         "Neon vector type not 64 or 128 bits");

  StringRef EltName;
  if (T->getVectorKind() == VectorType::NeonPolyVector) {
    // Note: unlike the 32-bit ARM mangling above, AArch64 polynomial
    // elements are the unsigned builtin types.
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::UChar:
      EltName = "Poly8";
      break;
    case BuiltinType::UShort:
      EltName = "Poly16";
      break;
    case BuiltinType::ULongLong:
      EltName = "Poly64";
      break;
    default:
      llvm_unreachable("unexpected Neon polynomial vector element type");
    }
  } else
    EltName = mangleAArch64VectorBase(cast<BuiltinType>(EltType));

  // Emit as a <source-name> of the ACLE internal typedef, e.g. 11__Int8x8_t.
  std::string TypeName =
      ("__" + EltName + "x" + llvm::utostr(T->getNumElements()) + "_t").str();
  Out << TypeName.length() << TypeName;
}

// GNU extension: vector types
// <type>                  ::= <vector-type>
// <vector-type>           ::= Dv <positive dimension number> _
//                                    <extended element type>
//                         ::= Dv [<dimension expression>] _ <element type>
// <extended element type> ::= <element type>
//                         ::= p # AltiVec vector pixel
//                         ::= b # Altivec vector bool
void CXXNameMangler::mangleType(const VectorType *T) {
  if ((T->getVectorKind() == VectorType::NeonVector ||
       T->getVectorKind() == VectorType::NeonPolyVector)) {
    // Neon vectors use the target-specific manglings above rather than the
    // generic GNU Dv form.
    if (getASTContext().getTargetInfo().getTriple().getArch() ==
        llvm::Triple::aarch64)
      mangleAArch64NeonVectorType(T);
    else
      mangleNeonVectorType(T);
    return;
  }
  Out << "Dv" << T->getNumElements() << '_';
  if (T->getVectorKind() == VectorType::AltiVecPixel)
    Out << 'p';
  else if (T->getVectorKind() == VectorType::AltiVecBool)
    Out << 'b';
  else
    mangleType(T->getElementType());
}
// Extended vectors mangle identically to the plain GNU vector form.
void CXXNameMangler::mangleType(const ExtVectorType *T) {
  mangleType(static_cast<const VectorType*>(T));
}
void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
  Out << "Dv";
  mangleExpression(T->getSizeExpr());
  Out << '_';
  mangleType(T->getElementType());
}

void CXXNameMangler::mangleType(const PackExpansionType *T) {
  // <type>  ::= Dp <type>          # pack expansion (C++0x)
  Out << "Dp";
  mangleType(T->getPattern());
}

void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
  mangleSourceName(T->getDecl()->getIdentifier());
}

void CXXNameMangler::mangleType(const ObjCObjectType *T) {
  if (!T->qual_empty()) {
    // Mangle protocol qualifiers as a vendor-extended qualifier 'U' whose
    // source-name is "objcproto" followed by each protocol's length-prefixed
    // name.
    SmallString<64> QualStr;
    llvm::raw_svector_ostream QualOS(QualStr);
    QualOS << "objcproto";
    ObjCObjectType::qual_iterator i = T->qual_begin(), e = T->qual_end();
    for ( ; i != e; ++i) {
      StringRef name = (*i)->getName();
      QualOS << name.size() << name;
    }
    QualOS.flush();
    Out << 'U' << QualStr.size() << QualStr;
  }
  mangleType(T->getBaseType());
}

void CXXNameMangler::mangleType(const BlockPointerType *T) {
  // Blocks use a vendor-extended qualifier on the pointee type.
  Out << "U13block_pointer";
  mangleType(T->getPointeeType());
}

void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
  // Mangle injected class name types as if the user had written the
  // specialization out fully.  It may not actually be possible to see
  // this mangling, though.
  mangleType(T->getInjectedSpecializationType());
}

void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
  if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
    mangleName(TD, T->getArgs(), T->getNumArgs());
  } else {
    // Dependent template names are eligible for substitution.
    if (mangleSubstitution(QualType(T, 0)))
      return;

    mangleTemplatePrefix(T->getTemplateName());

    // FIXME: GCC does not appear to mangle the template arguments when
    // the template in question is a dependent template name. Should we
    // emulate that badness?
    mangleTemplateArgs(T->getArgs(), T->getNumArgs());
    addSubstitution(QualType(T, 0));
  }
}

void CXXNameMangler::mangleType(const DependentNameType *T) {
  // Typename types are always nested
  Out << 'N';
  manglePrefix(T->getQualifier());
  mangleSourceName(T->getIdentifier());
  Out << 'E';
}

void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
  // Dependently-scoped template types are nested if they have a prefix.
  Out << 'N';

  // TODO: avoid making this TemplateName.
  TemplateName Prefix =
    getASTContext().getDependentTemplateName(T->getQualifier(),
                                             T->getIdentifier());
  mangleTemplatePrefix(Prefix);

  // FIXME: GCC does not appear to mangle the template arguments when
  // the template in question is a dependent template name. Should we
  // emulate that badness?
  mangleTemplateArgs(T->getArgs(), T->getNumArgs());
  Out << 'E';
}

void CXXNameMangler::mangleType(const TypeOfType *T) {
  // FIXME: this is pretty unsatisfactory, but there isn't an obvious
  // "extension with parameters" mangling.
  Out << "u6typeof";
}

void CXXNameMangler::mangleType(const TypeOfExprType *T) {
  // FIXME: this is pretty unsatisfactory, but there isn't an obvious
  // "extension with parameters" mangling.
  Out << "u6typeof";
}

void CXXNameMangler::mangleType(const DecltypeType *T) {
  Expr *E = T->getUnderlyingExpr();

  // type ::= Dt <expression> E  # decltype of an id-expression
  //                             #   or class member access
  //      ::= DT <expression> E  # decltype of an expression

  // This purports to be an exhaustive list of id-expressions and
  // class member accesses.  Note that we do not ignore parentheses;
  // parentheses change the semantics of decltype for these
  // expressions (and cause the mangler to use the other form).
  if (isa<DeclRefExpr>(E) ||
      isa<MemberExpr>(E) ||
      isa<UnresolvedLookupExpr>(E) ||
      isa<DependentScopeDeclRefExpr>(E) ||
      isa<CXXDependentScopeMemberExpr>(E) ||
      isa<UnresolvedMemberExpr>(E))
    Out << "Dt";
  else
    Out << "DT";
  mangleExpression(E);
  Out << 'E';
}

void CXXNameMangler::mangleType(const UnaryTransformType *T) {
  // If this is dependent, we need to record that. If not, we simply
  // mangle it as the underlying type since they are equivalent.
  if (T->isDependentType()) {
    Out << 'U';

    switch (T->getUTTKind()) {
      case UnaryTransformType::EnumUnderlyingType:
        Out << "3eut";
        break;
    }
  }

  mangleType(T->getUnderlyingType());
}

void CXXNameMangler::mangleType(const AutoType *T) {
  QualType D = T->getDeducedType();
  // <builtin-type> ::= Da  # dependent auto
  if (D.isNull())
    Out << (T->isDecltypeAuto() ? "Dc" : "Da");
  else
    mangleType(D);
}

void CXXNameMangler::mangleType(const AtomicType *T) {
  // <type> ::= U <source-name> <type>  # vendor extended type qualifier
  // (Until there's a standardized mangling...)
  Out << "U7_Atomic";
  mangleType(T->getValueType());
}

void CXXNameMangler::mangleIntegerLiteral(QualType T,
                                          const llvm::APSInt &Value) {
  //  <expr-primary> ::= L <type> <value number> E # integer literal
  Out << 'L';

  mangleType(T);
  if (T->isBooleanType()) {
    // Boolean values are encoded as 0/1.
    Out << (Value.getBoolValue() ? '1' : '0');
  } else {
    mangleNumber(Value);
  }
  Out << 'E';

}

/// Mangles a member expression.
void CXXNameMangler::mangleMemberExpr(const Expr *base,
                                      bool isArrow,
                                      NestedNameSpecifier *qualifier,
                                      NamedDecl *firstQualifierLookup,
                                      DeclarationName member,
                                      unsigned arity) {
  // <expression> ::= dt <expression> <unresolved-name>
  //              ::= pt <expression> <unresolved-name>
  if (base) {
    if (base->isImplicitCXXThis()) {
      // Note: GCC mangles member expressions to the implicit 'this' as
      // *this., whereas we represent them as this->. The Itanium C++ ABI
      // does not specify anything here, so we follow GCC.
      Out << "dtdefpT";
    } else {
      Out << (isArrow ? "pt" : "dt");
      mangleExpression(base);
    }
  }
  mangleUnresolvedName(qualifier, firstQualifierLookup, member, arity);
}

/// Look at the callee of the given call expression and determine if
/// it's a parenthesized id-expression which would have triggered ADL
/// otherwise.
static bool isParenthesizedADLCallee(const CallExpr *call) {
  const Expr *callee = call->getCallee();
  const Expr *fn = callee->IgnoreParens();

  // Must be parenthesized.  IgnoreParens() skips __extension__ nodes,
  // too, but for those to appear in the callee, it would have to be
  // parenthesized.
  if (callee == fn) return false;

  // Must be an unresolved lookup.
  const UnresolvedLookupExpr *lookup = dyn_cast<UnresolvedLookupExpr>(fn);
  if (!lookup) return false;

  assert(!lookup->requiresADL());

  // Must be an unqualified lookup.
  if (lookup->getQualifier()) return false;

  // Must not have found a class member.  Note that if one is a class
  // member, they're all class members.
  if (lookup->getNumDecls() > 0 &&
      (*lookup->decls_begin())->isCXXClassMember())
    return false;

  // Otherwise, ADL would have been triggered.
+ return true; +} + +void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) { + // <expression> ::= <unary operator-name> <expression> + // ::= <binary operator-name> <expression> <expression> + // ::= <trinary operator-name> <expression> <expression> <expression> + // ::= cv <type> expression # conversion with one argument + // ::= cv <type> _ <expression>* E # conversion with a different number of arguments + // ::= st <type> # sizeof (a type) + // ::= at <type> # alignof (a type) + // ::= <template-param> + // ::= <function-param> + // ::= sr <type> <unqualified-name> # dependent name + // ::= sr <type> <unqualified-name> <template-args> # dependent template-id + // ::= ds <expression> <expression> # expr.*expr + // ::= sZ <template-param> # size of a parameter pack + // ::= sZ <function-param> # size of a function parameter pack + // ::= <expr-primary> + // <expr-primary> ::= L <type> <value number> E # integer literal + // ::= L <type <value float> E # floating literal + // ::= L <mangled-name> E # external name + // ::= fpT # 'this' expression + QualType ImplicitlyConvertedToType; + +recurse: + switch (E->getStmtClass()) { + case Expr::NoStmtClass: +#define ABSTRACT_STMT(Type) +#define EXPR(Type, Base) +#define STMT(Type, Base) \ + case Expr::Type##Class: +#include "clang/AST/StmtNodes.inc" + // fallthrough + + // These all can only appear in local or variable-initialization + // contexts and so should never appear in a mangling. + case Expr::AddrLabelExprClass: + case Expr::DesignatedInitExprClass: + case Expr::ImplicitValueInitExprClass: + case Expr::ParenListExprClass: + case Expr::LambdaExprClass: + case Expr::MSPropertyRefExprClass: + llvm_unreachable("unexpected statement kind"); + + // FIXME: invent manglings for all these. 
+ case Expr::BlockExprClass: + case Expr::CXXPseudoDestructorExprClass: + case Expr::ChooseExprClass: + case Expr::CompoundLiteralExprClass: + case Expr::ExtVectorElementExprClass: + case Expr::GenericSelectionExprClass: + case Expr::ObjCEncodeExprClass: + case Expr::ObjCIsaExprClass: + case Expr::ObjCIvarRefExprClass: + case Expr::ObjCMessageExprClass: + case Expr::ObjCPropertyRefExprClass: + case Expr::ObjCProtocolExprClass: + case Expr::ObjCSelectorExprClass: + case Expr::ObjCStringLiteralClass: + case Expr::ObjCBoxedExprClass: + case Expr::ObjCArrayLiteralClass: + case Expr::ObjCDictionaryLiteralClass: + case Expr::ObjCSubscriptRefExprClass: + case Expr::ObjCIndirectCopyRestoreExprClass: + case Expr::OffsetOfExprClass: + case Expr::PredefinedExprClass: + case Expr::ShuffleVectorExprClass: + case Expr::ConvertVectorExprClass: + case Expr::StmtExprClass: + case Expr::UnaryTypeTraitExprClass: + case Expr::BinaryTypeTraitExprClass: + case Expr::TypeTraitExprClass: + case Expr::ArrayTypeTraitExprClass: + case Expr::ExpressionTraitExprClass: + case Expr::VAArgExprClass: + case Expr::CXXUuidofExprClass: + case Expr::CUDAKernelCallExprClass: + case Expr::AsTypeExprClass: + case Expr::PseudoObjectExprClass: + case Expr::AtomicExprClass: + { + // As bad as this diagnostic is, it's better than crashing. + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot yet mangle expression type %0"); + Diags.Report(E->getExprLoc(), DiagID) + << E->getStmtClassName() << E->getSourceRange(); + break; + } + + // Even gcc-4.5 doesn't mangle this. 
+ case Expr::BinaryConditionalOperatorClass: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = + Diags.getCustomDiagID(DiagnosticsEngine::Error, + "?: operator with omitted middle operand cannot be mangled"); + Diags.Report(E->getExprLoc(), DiagID) + << E->getStmtClassName() << E->getSourceRange(); + break; + } + + // These are used for internal purposes and cannot be meaningfully mangled. + case Expr::OpaqueValueExprClass: + llvm_unreachable("cannot mangle opaque value; mangling wrong thing?"); + + case Expr::InitListExprClass: { + // Proposal by Jason Merrill, 2012-01-03 + Out << "il"; + const InitListExpr *InitList = cast<InitListExpr>(E); + for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i) + mangleExpression(InitList->getInit(i)); + Out << "E"; + break; + } + + case Expr::CXXDefaultArgExprClass: + mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity); + break; + + case Expr::CXXDefaultInitExprClass: + mangleExpression(cast<CXXDefaultInitExpr>(E)->getExpr(), Arity); + break; + + case Expr::CXXStdInitializerListExprClass: + mangleExpression(cast<CXXStdInitializerListExpr>(E)->getSubExpr(), Arity); + break; + + case Expr::SubstNonTypeTemplateParmExprClass: + mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), + Arity); + break; + + case Expr::UserDefinedLiteralClass: + // We follow g++'s approach of mangling a UDL as a call to the literal + // operator. + case Expr::CXXMemberCallExprClass: // fallthrough + case Expr::CallExprClass: { + const CallExpr *CE = cast<CallExpr>(E); + + // <expression> ::= cp <simple-id> <expression>* E + // We use this mangling only when the call would use ADL except + // for being parenthesized. Per discussion with David + // Vandervoorde, 2011.04.25. + if (isParenthesizedADLCallee(CE)) { + Out << "cp"; + // The callee here is a parenthesized UnresolvedLookupExpr with + // no qualifier and should always get mangled as a <simple-id> + // anyway. 
+ + // <expression> ::= cl <expression>* E + } else { + Out << "cl"; + } + + mangleExpression(CE->getCallee(), CE->getNumArgs()); + for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I) + mangleExpression(CE->getArg(I)); + Out << 'E'; + break; + } + + case Expr::CXXNewExprClass: { + const CXXNewExpr *New = cast<CXXNewExpr>(E); + if (New->isGlobalNew()) Out << "gs"; + Out << (New->isArray() ? "na" : "nw"); + for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(), + E = New->placement_arg_end(); I != E; ++I) + mangleExpression(*I); + Out << '_'; + mangleType(New->getAllocatedType()); + if (New->hasInitializer()) { + // Proposal by Jason Merrill, 2012-01-03 + if (New->getInitializationStyle() == CXXNewExpr::ListInit) + Out << "il"; + else + Out << "pi"; + const Expr *Init = New->getInitializer(); + if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) { + // Directly inline the initializers. + for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), + E = CCE->arg_end(); + I != E; ++I) + mangleExpression(*I); + } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) { + for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i) + mangleExpression(PLE->getExpr(i)); + } else if (New->getInitializationStyle() == CXXNewExpr::ListInit && + isa<InitListExpr>(Init)) { + // Only take InitListExprs apart for list-initialization. 
+ const InitListExpr *InitList = cast<InitListExpr>(Init); + for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i) + mangleExpression(InitList->getInit(i)); + } else + mangleExpression(Init); + } + Out << 'E'; + break; + } + + case Expr::MemberExprClass: { + const MemberExpr *ME = cast<MemberExpr>(E); + mangleMemberExpr(ME->getBase(), ME->isArrow(), + ME->getQualifier(), 0, ME->getMemberDecl()->getDeclName(), + Arity); + break; + } + + case Expr::UnresolvedMemberExprClass: { + const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E); + mangleMemberExpr(ME->getBase(), ME->isArrow(), + ME->getQualifier(), 0, ME->getMemberName(), + Arity); + if (ME->hasExplicitTemplateArgs()) + mangleTemplateArgs(ME->getExplicitTemplateArgs()); + break; + } + + case Expr::CXXDependentScopeMemberExprClass: { + const CXXDependentScopeMemberExpr *ME + = cast<CXXDependentScopeMemberExpr>(E); + mangleMemberExpr(ME->getBase(), ME->isArrow(), + ME->getQualifier(), ME->getFirstQualifierFoundInScope(), + ME->getMember(), Arity); + if (ME->hasExplicitTemplateArgs()) + mangleTemplateArgs(ME->getExplicitTemplateArgs()); + break; + } + + case Expr::UnresolvedLookupExprClass: { + const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E); + mangleUnresolvedName(ULE->getQualifier(), 0, ULE->getName(), Arity); + + // All the <unresolved-name> productions end in a + // base-unresolved-name, where <template-args> are just tacked + // onto the end. 
+ if (ULE->hasExplicitTemplateArgs()) + mangleTemplateArgs(ULE->getExplicitTemplateArgs()); + break; + } + + case Expr::CXXUnresolvedConstructExprClass: { + const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E); + unsigned N = CE->arg_size(); + + Out << "cv"; + mangleType(CE->getType()); + if (N != 1) Out << '_'; + for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I)); + if (N != 1) Out << 'E'; + break; + } + + case Expr::CXXTemporaryObjectExprClass: + case Expr::CXXConstructExprClass: { + const CXXConstructExpr *CE = cast<CXXConstructExpr>(E); + unsigned N = CE->getNumArgs(); + + // Proposal by Jason Merrill, 2012-01-03 + if (CE->isListInitialization()) + Out << "tl"; + else + Out << "cv"; + mangleType(CE->getType()); + if (N != 1) Out << '_'; + for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I)); + if (N != 1) Out << 'E'; + break; + } + + case Expr::CXXScalarValueInitExprClass: + Out <<"cv"; + mangleType(E->getType()); + Out <<"_E"; + break; + + case Expr::CXXNoexceptExprClass: + Out << "nx"; + mangleExpression(cast<CXXNoexceptExpr>(E)->getOperand()); + break; + + case Expr::UnaryExprOrTypeTraitExprClass: { + const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E); + + if (!SAE->isInstantiationDependent()) { + // Itanium C++ ABI: + // If the operand of a sizeof or alignof operator is not + // instantiation-dependent it is encoded as an integer literal + // reflecting the result of the operator. + // + // If the result of the operator is implicitly converted to a known + // integer type, that type is used for the literal; otherwise, the type + // of std::size_t or std::ptrdiff_t is used. + QualType T = (ImplicitlyConvertedToType.isNull() || + !ImplicitlyConvertedToType->isIntegerType())? 
SAE->getType() + : ImplicitlyConvertedToType; + llvm::APSInt V = SAE->EvaluateKnownConstInt(Context.getASTContext()); + mangleIntegerLiteral(T, V); + break; + } + + switch(SAE->getKind()) { + case UETT_SizeOf: + Out << 's'; + break; + case UETT_AlignOf: + Out << 'a'; + break; + case UETT_VecStep: + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot yet mangle vec_step expression"); + Diags.Report(DiagID); + return; + } + if (SAE->isArgumentType()) { + Out << 't'; + mangleType(SAE->getArgumentType()); + } else { + Out << 'z'; + mangleExpression(SAE->getArgumentExpr()); + } + break; + } + + case Expr::CXXThrowExprClass: { + const CXXThrowExpr *TE = cast<CXXThrowExpr>(E); + // <expression> ::= tw <expression> # throw expression + // ::= tr # rethrow + if (TE->getSubExpr()) { + Out << "tw"; + mangleExpression(TE->getSubExpr()); + } else { + Out << "tr"; + } + break; + } + + case Expr::CXXTypeidExprClass: { + const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E); + // <expression> ::= ti <type> # typeid (type) + // ::= te <expression> # typeid (expression) + if (TIE->isTypeOperand()) { + Out << "ti"; + mangleType(TIE->getTypeOperand(Context.getASTContext())); + } else { + Out << "te"; + mangleExpression(TIE->getExprOperand()); + } + break; + } + + case Expr::CXXDeleteExprClass: { + const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E); + // <expression> ::= [gs] dl <expression> # [::] delete expr + // ::= [gs] da <expression> # [::] delete [] expr + if (DE->isGlobalDelete()) Out << "gs"; + Out << (DE->isArrayForm() ? 
"da" : "dl"); + mangleExpression(DE->getArgument()); + break; + } + + case Expr::UnaryOperatorClass: { + const UnaryOperator *UO = cast<UnaryOperator>(E); + mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()), + /*Arity=*/1); + mangleExpression(UO->getSubExpr()); + break; + } + + case Expr::ArraySubscriptExprClass: { + const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E); + + // Array subscript is treated as a syntactically weird form of + // binary operator. + Out << "ix"; + mangleExpression(AE->getLHS()); + mangleExpression(AE->getRHS()); + break; + } + + case Expr::CompoundAssignOperatorClass: // fallthrough + case Expr::BinaryOperatorClass: { + const BinaryOperator *BO = cast<BinaryOperator>(E); + if (BO->getOpcode() == BO_PtrMemD) + Out << "ds"; + else + mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()), + /*Arity=*/2); + mangleExpression(BO->getLHS()); + mangleExpression(BO->getRHS()); + break; + } + + case Expr::ConditionalOperatorClass: { + const ConditionalOperator *CO = cast<ConditionalOperator>(E); + mangleOperatorName(OO_Conditional, /*Arity=*/3); + mangleExpression(CO->getCond()); + mangleExpression(CO->getLHS(), Arity); + mangleExpression(CO->getRHS(), Arity); + break; + } + + case Expr::ImplicitCastExprClass: { + ImplicitlyConvertedToType = E->getType(); + E = cast<ImplicitCastExpr>(E)->getSubExpr(); + goto recurse; + } + + case Expr::ObjCBridgedCastExprClass: { + // Mangle ownership casts as a vendor extended operator __bridge, + // __bridge_transfer, or __bridge_retain. + StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName(); + Out << "v1U" << Kind.size() << Kind; + } + // Fall through to mangle the cast itself. 
+ + case Expr::CStyleCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + case Expr::CXXFunctionalCastExprClass: { + const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E); + Out << "cv"; + mangleType(ECE->getType()); + mangleExpression(ECE->getSubExpr()); + break; + } + + case Expr::CXXOperatorCallExprClass: { + const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E); + unsigned NumArgs = CE->getNumArgs(); + mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs); + // Mangle the arguments. + for (unsigned i = 0; i != NumArgs; ++i) + mangleExpression(CE->getArg(i)); + break; + } + + case Expr::ParenExprClass: + mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity); + break; + + case Expr::DeclRefExprClass: { + const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl(); + + switch (D->getKind()) { + default: + // <expr-primary> ::= L <mangled-name> E # external name + Out << 'L'; + mangle(D, "_Z"); + Out << 'E'; + break; + + case Decl::ParmVar: + mangleFunctionParam(cast<ParmVarDecl>(D)); + break; + + case Decl::EnumConstant: { + const EnumConstantDecl *ED = cast<EnumConstantDecl>(D); + mangleIntegerLiteral(ED->getType(), ED->getInitVal()); + break; + } + + case Decl::NonTypeTemplateParm: { + const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D); + mangleTemplateParameter(PD->getIndex()); + break; + } + + } + + break; + } + + case Expr::SubstNonTypeTemplateParmPackExprClass: + // FIXME: not clear how to mangle this! + // template <unsigned N...> class A { + // template <class U...> void foo(U (&x)[N]...); + // }; + Out << "_SUBSTPACK_"; + break; + + case Expr::FunctionParmPackExprClass: { + // FIXME: not clear how to mangle this! 
+ const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E); + Out << "v110_SUBSTPACK"; + mangleFunctionParam(FPPE->getParameterPack()); + break; + } + + case Expr::DependentScopeDeclRefExprClass: { + const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E); + mangleUnresolvedName(DRE->getQualifier(), 0, DRE->getDeclName(), Arity); + + // All the <unresolved-name> productions end in a + // base-unresolved-name, where <template-args> are just tacked + // onto the end. + if (DRE->hasExplicitTemplateArgs()) + mangleTemplateArgs(DRE->getExplicitTemplateArgs()); + break; + } + + case Expr::CXXBindTemporaryExprClass: + mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr()); + break; + + case Expr::ExprWithCleanupsClass: + mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity); + break; + + case Expr::FloatingLiteralClass: { + const FloatingLiteral *FL = cast<FloatingLiteral>(E); + Out << 'L'; + mangleType(FL->getType()); + mangleFloat(FL->getValue()); + Out << 'E'; + break; + } + + case Expr::CharacterLiteralClass: + Out << 'L'; + mangleType(E->getType()); + Out << cast<CharacterLiteral>(E)->getValue(); + Out << 'E'; + break; + + // FIXME. __objc_yes/__objc_no are mangled same as true/false + case Expr::ObjCBoolLiteralExprClass: + Out << "Lb"; + Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0'); + Out << 'E'; + break; + + case Expr::CXXBoolLiteralExprClass: + Out << "Lb"; + Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0'); + Out << 'E'; + break; + + case Expr::IntegerLiteralClass: { + llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue()); + if (E->getType()->isSignedIntegerType()) + Value.setIsSigned(true); + mangleIntegerLiteral(E->getType(), Value); + break; + } + + case Expr::ImaginaryLiteralClass: { + const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E); + // Mangle as if a complex literal. + // Proposal from David Vandevoorde, 2010.06.30. 
+ Out << 'L'; + mangleType(E->getType()); + if (const FloatingLiteral *Imag = + dyn_cast<FloatingLiteral>(IE->getSubExpr())) { + // Mangle a floating-point zero of the appropriate type. + mangleFloat(llvm::APFloat(Imag->getValue().getSemantics())); + Out << '_'; + mangleFloat(Imag->getValue()); + } else { + Out << "0_"; + llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue()); + if (IE->getSubExpr()->getType()->isSignedIntegerType()) + Value.setIsSigned(true); + mangleNumber(Value); + } + Out << 'E'; + break; + } + + case Expr::StringLiteralClass: { + // Revised proposal from David Vandervoorde, 2010.07.15. + Out << 'L'; + assert(isa<ConstantArrayType>(E->getType())); + mangleType(E->getType()); + Out << 'E'; + break; + } + + case Expr::GNUNullExprClass: + // FIXME: should this really be mangled the same as nullptr? + // fallthrough + + case Expr::CXXNullPtrLiteralExprClass: { + Out << "LDnE"; + break; + } + + case Expr::PackExpansionExprClass: + Out << "sp"; + mangleExpression(cast<PackExpansionExpr>(E)->getPattern()); + break; + + case Expr::SizeOfPackExprClass: { + Out << "sZ"; + const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack(); + if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack)) + mangleTemplateParameter(TTP->getIndex()); + else if (const NonTypeTemplateParmDecl *NTTP + = dyn_cast<NonTypeTemplateParmDecl>(Pack)) + mangleTemplateParameter(NTTP->getIndex()); + else if (const TemplateTemplateParmDecl *TempTP + = dyn_cast<TemplateTemplateParmDecl>(Pack)) + mangleTemplateParameter(TempTP->getIndex()); + else + mangleFunctionParam(cast<ParmVarDecl>(Pack)); + break; + } + + case Expr::MaterializeTemporaryExprClass: { + mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()); + break; + } + + case Expr::CXXThisExprClass: + Out << "fpT"; + break; + } +} + +/// Mangle an expression which refers to a parameter variable. 
///
/// <expression>     ::= <function-param>
/// <function-param> ::= fp <top-level CV-qualifiers> _      # L == 0, I == 0
/// <function-param> ::= fp <top-level CV-qualifiers>
///                      <parameter-2 non-negative number> _ # L == 0, I > 0
/// <function-param> ::= fL <L-1 non-negative number>
///                      p <top-level CV-qualifiers> _       # L > 0, I == 0
/// <function-param> ::= fL <L-1 non-negative number>
///                      p <top-level CV-qualifiers>
///                      <I-1 non-negative number> _         # L > 0, I > 0
///
/// L is the nesting depth of the parameter, defined as 1 if the
/// parameter comes from the innermost function prototype scope
/// enclosing the current context, 2 if from the next enclosing
/// function prototype scope, and so on, with one special case: if
/// we've processed the full parameter clause for the innermost
/// function type, then L is one less.  This definition conveniently
/// makes it irrelevant whether a function's result type was written
/// trailing or leading, but is otherwise overly complicated; the
/// numbering was first designed without considering references to
/// parameter in locations other than return types, and then the
/// mangling had to be generalized without changing the existing
/// manglings.
///
/// I is the zero-based index of the parameter within its parameter
/// declaration clause.  Note that the original ABI document describes
/// this using 1-based ordinals.
void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
  unsigned parmDepth = parm->getFunctionScopeDepth();
  unsigned parmIndex = parm->getFunctionScopeIndex();

  // Compute 'L'.
  // parmDepth does not include the declaring function prototype.
  // FunctionTypeDepth does account for that.
  assert(parmDepth < FunctionTypeDepth.getDepth());
  unsigned nestingDepth = FunctionTypeDepth.getDepth() - parmDepth;
  if (FunctionTypeDepth.isInResultType())
    nestingDepth--;

  if (nestingDepth == 0) {
    Out << "fp";
  } else {
    Out << "fL" << (nestingDepth - 1) << 'p';
  }

  // Top-level qualifiers.  We don't have to worry about arrays here,
  // because parameters declared as arrays should already have been
  // transformed to have pointer type. FIXME: apparently these don't
  // get mangled if used as an rvalue of a known non-class type?
  assert(!parm->getType()->isArrayType()
         && "parameter's type is still an array type?");
  mangleQualifiers(parm->getType().getQualifiers());

  // Parameter index.
  if (parmIndex != 0) {
    Out << (parmIndex - 1);
  }
  Out << '_';
}

void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
  // <ctor-dtor-name> ::= C1  # complete object constructor
  //                  ::= C2  # base object constructor
  //                  ::= C3  # complete object allocating constructor
  //
  switch (T) {
  case Ctor_Complete:
    Out << "C1";
    break;
  case Ctor_Base:
    Out << "C2";
    break;
  case Ctor_CompleteAllocating:
    Out << "C3";
    break;
  }
}

void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
  // <ctor-dtor-name> ::= D0  # deleting destructor
  //                  ::= D1  # complete object destructor
  //                  ::= D2  # base object destructor
  //
  switch (T) {
  case Dtor_Deleting:
    Out << "D0";
    break;
  case Dtor_Complete:
    Out << "D1";
    break;
  case Dtor_Base:
    Out << "D2";
    break;
  }
}

// Mangle explicit template arguments written on a declaration reference.
void CXXNameMangler::mangleTemplateArgs(
                          const ASTTemplateArgumentListInfo &TemplateArgs) {
  // <template-args> ::= I <template-arg>+ E
  Out << 'I';
  for (unsigned i = 0, e = TemplateArgs.NumTemplateArgs; i != e; ++i)
    mangleTemplateArg(TemplateArgs.getTemplateArgs()[i].getArgument());
  Out << 'E';
}

// Mangle the arguments of a template specialization's argument list.
void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &AL) {
  // <template-args> ::= I <template-arg>+ E
  Out << 'I';
  for (unsigned i = 0, e = AL.size(); i != e; ++i)
    mangleTemplateArg(AL[i]);
  Out << 'E';
}

// Mangle a raw array of template arguments.
void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
                                        unsigned NumTemplateArgs) {
  // <template-args> ::= I <template-arg>+ E
  Out << 'I';
  for (unsigned i = 0; i != NumTemplateArgs; ++i)
    mangleTemplateArg(TemplateArgs[i]);
  Out << 'E';
}

void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
  // <template-arg> ::= <type>              # type or template
  //                ::= X <expression> E    # expression
  //                ::= <expr-primary>      # simple expressions
  //                ::= J <template-arg>* E # argument pack
  if (!A.isInstantiationDependent() || A.isDependent())
    A = Context.getASTContext().getCanonicalTemplateArgument(A);

  switch (A.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Cannot mangle NULL template argument");

  case TemplateArgument::Type:
    mangleType(A.getAsType());
    break;
  case TemplateArgument::Template:
    // This is mangled as <type>.
    mangleType(A.getAsTemplate());
    break;
  case TemplateArgument::TemplateExpansion:
    // <type>  ::= Dp <type>          # pack expansion (C++0x)
    Out << "Dp";
    mangleType(A.getAsTemplateOrTemplatePattern());
    break;
  case TemplateArgument::Expression: {
    // It's possible to end up with a DeclRefExpr here in certain
    // dependent cases, in which case we should mangle as a
    // declaration.
    const Expr *E = A.getAsExpr()->IgnoreParens();
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *D = DRE->getDecl();
      if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
        Out << "L";
        mangle(D, "_Z");
        Out << 'E';
        break;
      }
    }

    Out << 'X';
    mangleExpression(E);
    Out << 'E';
    break;
  }
  case TemplateArgument::Integral:
    mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
    break;
  case TemplateArgument::Declaration: {
    //  <expr-primary> ::= L <mangled-name> E # external name
    // Clang produces AST's where pointer-to-member-function expressions
    // and pointer-to-function expressions are represented as a declaration not
    // an expression. We compensate for it here to produce the correct mangling.
    ValueDecl *D = A.getAsDecl();
    bool compensateMangling = !A.isDeclForReferenceParam();
    if (compensateMangling) {
      Out << 'X';
      mangleOperatorName(OO_Amp, 1);
    }

    Out << 'L';
    // References to external entities use the mangled name; if the name would
    // not normally be manged then mangle it as unqualified.
    //
    // FIXME: The ABI specifies that external names here should have _Z, but
    // gcc leaves this off.
+ if (compensateMangling) + mangle(D, "_Z"); + else + mangle(D, "Z"); + Out << 'E'; + + if (compensateMangling) + Out << 'E'; + + break; + } + case TemplateArgument::NullPtr: { + // <expr-primary> ::= L <type> 0 E + Out << 'L'; + mangleType(A.getNullPtrType()); + Out << "0E"; + break; + } + case TemplateArgument::Pack: { + // <template-arg> ::= J <template-arg>* E + Out << 'J'; + for (TemplateArgument::pack_iterator PA = A.pack_begin(), + PAEnd = A.pack_end(); + PA != PAEnd; ++PA) + mangleTemplateArg(*PA); + Out << 'E'; + } + } +} + +void CXXNameMangler::mangleTemplateParameter(unsigned Index) { + // <template-param> ::= T_ # first template parameter + // ::= T <parameter-2 non-negative number> _ + if (Index == 0) + Out << "T_"; + else + Out << 'T' << (Index - 1) << '_'; +} + +void CXXNameMangler::mangleExistingSubstitution(QualType type) { + bool result = mangleSubstitution(type); + assert(result && "no existing substitution for type"); + (void) result; +} + +void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) { + bool result = mangleSubstitution(tname); + assert(result && "no existing substitution for template name"); + (void) result; +} + +// <substitution> ::= S <seq-id> _ +// ::= S_ +bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) { + // Try one of the standard substitutions first. + if (mangleStandardSubstitution(ND)) + return true; + + ND = cast<NamedDecl>(ND->getCanonicalDecl()); + return mangleSubstitution(reinterpret_cast<uintptr_t>(ND)); +} + +/// \brief Determine whether the given type has any qualifiers that are +/// relevant for substitutions. 
+static bool hasMangledSubstitutionQualifiers(QualType T) { + Qualifiers Qs = T.getQualifiers(); + return Qs.getCVRQualifiers() || Qs.hasAddressSpace(); +} + +bool CXXNameMangler::mangleSubstitution(QualType T) { + if (!hasMangledSubstitutionQualifiers(T)) { + if (const RecordType *RT = T->getAs<RecordType>()) + return mangleSubstitution(RT->getDecl()); + } + + uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()); + + return mangleSubstitution(TypePtr); +} + +bool CXXNameMangler::mangleSubstitution(TemplateName Template) { + if (TemplateDecl *TD = Template.getAsTemplateDecl()) + return mangleSubstitution(TD); + + Template = Context.getASTContext().getCanonicalTemplateName(Template); + return mangleSubstitution( + reinterpret_cast<uintptr_t>(Template.getAsVoidPointer())); +} + +bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) { + llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr); + if (I == Substitutions.end()) + return false; + + unsigned SeqID = I->second; + if (SeqID == 0) + Out << "S_"; + else { + SeqID--; + + // <seq-id> is encoded in base-36, using digits and upper case letters. + char Buffer[10]; + char *BufferPtr = llvm::array_endof(Buffer); + + if (SeqID == 0) *--BufferPtr = '0'; + + while (SeqID) { + assert(BufferPtr > Buffer && "Buffer overflow!"); + + char c = static_cast<char>(SeqID % 36); + + *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10); + SeqID /= 36; + } + + Out << 'S' + << StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr) + << '_'; + } + + return true; +} + +static bool isCharType(QualType T) { + if (T.isNull()) + return false; + + return T->isSpecificBuiltinType(BuiltinType::Char_S) || + T->isSpecificBuiltinType(BuiltinType::Char_U); +} + +/// isCharSpecialization - Returns whether a given type is a template +/// specialization of a given name with a single argument of type char. 
+static bool isCharSpecialization(QualType T, const char *Name) { + if (T.isNull()) + return false; + + const RecordType *RT = T->getAs<RecordType>(); + if (!RT) + return false; + + const ClassTemplateSpecializationDecl *SD = + dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()); + if (!SD) + return false; + + if (!isStdNamespace(getEffectiveDeclContext(SD))) + return false; + + const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); + if (TemplateArgs.size() != 1) + return false; + + if (!isCharType(TemplateArgs[0].getAsType())) + return false; + + return SD->getIdentifier()->getName() == Name; +} + +template <std::size_t StrLen> +static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD, + const char (&Str)[StrLen]) { + if (!SD->getIdentifier()->isStr(Str)) + return false; + + const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); + if (TemplateArgs.size() != 2) + return false; + + if (!isCharType(TemplateArgs[0].getAsType())) + return false; + + if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) + return false; + + return true; +} + +bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) { + // <substitution> ::= St # ::std:: + if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) { + if (isStd(NS)) { + Out << "St"; + return true; + } + } + + if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) { + if (!isStdNamespace(getEffectiveDeclContext(TD))) + return false; + + // <substitution> ::= Sa # ::std::allocator + if (TD->getIdentifier()->isStr("allocator")) { + Out << "Sa"; + return true; + } + + // <<substitution> ::= Sb # ::std::basic_string + if (TD->getIdentifier()->isStr("basic_string")) { + Out << "Sb"; + return true; + } + } + + if (const ClassTemplateSpecializationDecl *SD = + dyn_cast<ClassTemplateSpecializationDecl>(ND)) { + if (!isStdNamespace(getEffectiveDeclContext(SD))) + return false; + + // <substitution> ::= Ss # ::std::basic_string<char, + // 
::std::char_traits<char>, + // ::std::allocator<char> > + if (SD->getIdentifier()->isStr("basic_string")) { + const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); + + if (TemplateArgs.size() != 3) + return false; + + if (!isCharType(TemplateArgs[0].getAsType())) + return false; + + if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) + return false; + + if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator")) + return false; + + Out << "Ss"; + return true; + } + + // <substitution> ::= Si # ::std::basic_istream<char, + // ::std::char_traits<char> > + if (isStreamCharSpecialization(SD, "basic_istream")) { + Out << "Si"; + return true; + } + + // <substitution> ::= So # ::std::basic_ostream<char, + // ::std::char_traits<char> > + if (isStreamCharSpecialization(SD, "basic_ostream")) { + Out << "So"; + return true; + } + + // <substitution> ::= Sd # ::std::basic_iostream<char, + // ::std::char_traits<char> > + if (isStreamCharSpecialization(SD, "basic_iostream")) { + Out << "Sd"; + return true; + } + } + return false; +} + +void CXXNameMangler::addSubstitution(QualType T) { + if (!hasMangledSubstitutionQualifiers(T)) { + if (const RecordType *RT = T->getAs<RecordType>()) { + addSubstitution(RT->getDecl()); + return; + } + } + + uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()); + addSubstitution(TypePtr); +} + +void CXXNameMangler::addSubstitution(TemplateName Template) { + if (TemplateDecl *TD = Template.getAsTemplateDecl()) + return addSubstitution(TD); + + Template = Context.getASTContext().getCanonicalTemplateName(Template); + addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer())); +} + +void CXXNameMangler::addSubstitution(uintptr_t Ptr) { + assert(!Substitutions.count(Ptr) && "Substitution already exists!"); + Substitutions[Ptr] = SeqID++; +} + +// + +/// \brief Mangles the name of the declaration D and emits that name to the +/// given output stream. 
+/// +/// If the declaration D requires a mangled name, this routine will emit that +/// mangled name to \p os and return true. Otherwise, \p os will be unchanged +/// and this routine will return false. In this case, the caller should just +/// emit the identifier of the declaration (\c D->getIdentifier()) as its +/// name. +void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D, + raw_ostream &Out) { + assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) && + "Invalid mangleName() call, argument is not a variable or function!"); + assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) && + "Invalid mangleName() call on 'structor decl!"); + + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + getASTContext().getSourceManager(), + "Mangling declaration"); + + CXXNameMangler Mangler(*this, Out, D); + return Mangler.mangle(D); +} + +void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D, + CXXCtorType Type, + raw_ostream &Out) { + CXXNameMangler Mangler(*this, Out, D, Type); + Mangler.mangle(D); +} + +void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D, + CXXDtorType Type, + raw_ostream &Out) { + CXXNameMangler Mangler(*this, Out, D, Type); + Mangler.mangle(D); +} + +void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, + raw_ostream &Out) { + // <special-name> ::= T <call-offset> <base encoding> + // # base is the nominal target function of thunk + // <special-name> ::= Tc <call-offset> <call-offset> <base encoding> + // # base is the nominal target function of thunk + // # first call-offset is 'this' adjustment + // # second call-offset is result adjustment + + assert(!isa<CXXDestructorDecl>(MD) && + "Use mangleCXXDtor for destructor decls!"); + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZT"; + if (!Thunk.Return.isEmpty()) + Mangler.getStream() << 'c'; + + // Mangle the 'this' pointer adjustment. 
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, + Thunk.This.Virtual.Itanium.VCallOffsetOffset); + + // Mangle the return pointer adjustment if there is one. + if (!Thunk.Return.isEmpty()) + Mangler.mangleCallOffset(Thunk.Return.NonVirtual, + Thunk.Return.Virtual.Itanium.VBaseOffsetOffset); + + Mangler.mangleFunctionEncoding(MD); +} + +void ItaniumMangleContextImpl::mangleCXXDtorThunk( + const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &ThisAdjustment, raw_ostream &Out) { + // <special-name> ::= T <call-offset> <base encoding> + // # base is the nominal target function of thunk + CXXNameMangler Mangler(*this, Out, DD, Type); + Mangler.getStream() << "_ZT"; + + // Mangle the 'this' pointer adjustment. + Mangler.mangleCallOffset(ThisAdjustment.NonVirtual, + ThisAdjustment.Virtual.Itanium.VCallOffsetOffset); + + Mangler.mangleFunctionEncoding(DD); +} + +/// mangleGuardVariable - Returns the mangled name for a guard variable +/// for the passed in VarDecl. +void ItaniumMangleContextImpl::mangleStaticGuardVariable(const VarDecl *D, + raw_ostream &Out) { + // <special-name> ::= GV <object name> # Guard variable for one-time + // # initialization + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZGV"; + Mangler.mangleName(D); +} + +void ItaniumMangleContextImpl::mangleDynamicInitializer(const VarDecl *MD, + raw_ostream &Out) { + // These symbols are internal in the Itanium ABI, so the names don't matter. + // Clang has traditionally used this symbol and allowed LLVM to adjust it to + // avoid duplicate symbols. + Out << "__cxx_global_var_init"; +} + +void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D, + raw_ostream &Out) { + // Prefix the mangling of D with __dtor_. 
+ CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "__dtor_"; + if (shouldMangleDeclName(D)) + Mangler.mangle(D); + else + Mangler.getStream() << D->getName(); +} + +void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D, + raw_ostream &Out) { + // <special-name> ::= TH <object name> + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTH"; + Mangler.mangleName(D); +} + +void +ItaniumMangleContextImpl::mangleItaniumThreadLocalWrapper(const VarDecl *D, + raw_ostream &Out) { + // <special-name> ::= TW <object name> + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTW"; + Mangler.mangleName(D); +} + +void ItaniumMangleContextImpl::mangleReferenceTemporary(const VarDecl *D, + raw_ostream &Out) { + // We match the GCC mangling here. + // <special-name> ::= GR <object name> + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZGR"; + Mangler.mangleName(D); +} + +void ItaniumMangleContextImpl::mangleCXXVTable(const CXXRecordDecl *RD, + raw_ostream &Out) { + // <special-name> ::= TV <type> # virtual table + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTV"; + Mangler.mangleNameOrStandardSubstitution(RD); +} + +void ItaniumMangleContextImpl::mangleCXXVTT(const CXXRecordDecl *RD, + raw_ostream &Out) { + // <special-name> ::= TT <type> # VTT structure + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTT"; + Mangler.mangleNameOrStandardSubstitution(RD); +} + +void ItaniumMangleContextImpl::mangleCXXCtorVTable(const CXXRecordDecl *RD, + int64_t Offset, + const CXXRecordDecl *Type, + raw_ostream &Out) { + // <special-name> ::= TC <type> <offset number> _ <base type> + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTC"; + Mangler.mangleNameOrStandardSubstitution(RD); + Mangler.getStream() << Offset; + Mangler.getStream() << '_'; + Mangler.mangleNameOrStandardSubstitution(Type); +} + +void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream 
&Out) { + // <special-name> ::= TI <type> # typeinfo structure + assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers"); + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTI"; + Mangler.mangleType(Ty); +} + +void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty, + raw_ostream &Out) { + // <special-name> ::= TS <type> # typeinfo name (null terminated byte string) + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZTS"; + Mangler.mangleType(Ty); +} + +void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) { + mangleCXXRTTIName(Ty, Out); +} + +ItaniumMangleContext * +ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) { + return new ItaniumMangleContextImpl(Context, Diags); +} diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp new file mode 100644 index 000000000000..231ef036d829 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp @@ -0,0 +1,252 @@ +//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implements generic name mangling support for blocks and Objective-C. 
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/Attr.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/Basic/ABI.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#define MANGLE_CHECKER 0 + +#if MANGLE_CHECKER +#include <cxxabi.h> +#endif + +using namespace clang; + +// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves +// much to be desired. Come up with a better mangling scheme. + +static void mangleFunctionBlock(MangleContext &Context, + StringRef Outer, + const BlockDecl *BD, + raw_ostream &Out) { + unsigned discriminator = Context.getBlockId(BD, true); + if (discriminator == 0) + Out << "__" << Outer << "_block_invoke"; + else + Out << "__" << Outer << "_block_invoke_" << discriminator+1; +} + +void MangleContext::anchor() { } + +enum StdOrFastCC { + SOF_OTHER, + SOF_FAST, + SOF_STD +}; + +static bool isExternC(const NamedDecl *ND) { + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) + return FD->isExternC(); + return cast<VarDecl>(ND)->isExternC(); +} + +static StdOrFastCC getStdOrFastCallMangling(const ASTContext &Context, + const NamedDecl *ND) { + const TargetInfo &TI = Context.getTargetInfo(); + llvm::Triple Triple = TI.getTriple(); + if (!Triple.isOSWindows() || Triple.getArch() != llvm::Triple::x86) + return SOF_OTHER; + + if (Context.getLangOpts().CPlusPlus && !isExternC(ND) && + TI.getCXXABI() == TargetCXXABI::Microsoft) + return SOF_OTHER; + + const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND); + if (!FD) + return SOF_OTHER; + QualType T = FD->getType(); + + const FunctionType *FT = T->castAs<FunctionType>(); + + 
CallingConv CC = FT->getCallConv(); + switch (CC) { + default: + return SOF_OTHER; + case CC_X86FastCall: + return SOF_FAST; + case CC_X86StdCall: + return SOF_STD; + } +} + +bool MangleContext::shouldMangleDeclName(const NamedDecl *D) { + const ASTContext &ASTContext = getASTContext(); + + StdOrFastCC CC = getStdOrFastCallMangling(ASTContext, D); + if (CC != SOF_OTHER) + return true; + + // In C, functions with no attributes never need to be mangled. Fastpath them. + if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs()) + return false; + + // Any decl can be declared with __asm("foo") on it, and this takes precedence + // over all other naming in the .o file. + if (D->hasAttr<AsmLabelAttr>()) + return true; + + return shouldMangleCXXName(D); +} + +void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) { + // Any decl can be declared with __asm("foo") on it, and this takes precedence + // over all other naming in the .o file. + if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) { + // If we have an asm name, then we use it as the mangling. + + // Adding the prefix can cause problems when one file has a "foo" and + // another has a "\01foo". That is known to happen on ELF with the + // tricks normally used for producing aliases (PR9177). Fortunately the + // llvm mangler on ELF is a nop, so we can just avoid adding the \01 + // marker. We also avoid adding the marker if this is an alias for an + // LLVM intrinsic. 
+ StringRef UserLabelPrefix = + getASTContext().getTargetInfo().getUserLabelPrefix(); + if (!UserLabelPrefix.empty() && !ALA->getLabel().startswith("llvm.")) + Out << '\01'; // LLVM IR Marker for __asm("foo") + + Out << ALA->getLabel(); + return; + } + + const ASTContext &ASTContext = getASTContext(); + StdOrFastCC CC = getStdOrFastCallMangling(ASTContext, D); + bool MCXX = shouldMangleCXXName(D); + const TargetInfo &TI = Context.getTargetInfo(); + if (CC == SOF_OTHER || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) { + mangleCXXName(D, Out); + return; + } + + Out << '\01'; + if (CC == SOF_STD) + Out << '_'; + else + Out << '@'; + + if (!MCXX) + Out << D->getIdentifier()->getName(); + else + mangleCXXName(D, Out); + + const FunctionDecl *FD = cast<FunctionDecl>(D); + const FunctionType *FT = FD->getType()->castAs<FunctionType>(); + const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT); + Out << '@'; + if (!Proto) { + Out << '0'; + return; + } + assert(!Proto->isVariadic()); + unsigned ArgWords = 0; + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) + if (!MD->isStatic()) + ++ArgWords; + for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), + ArgEnd = Proto->arg_type_end(); + Arg != ArgEnd; ++Arg) { + QualType AT = *Arg; + // Size should be aligned to DWORD boundary + ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT), 32) / 32; + } + Out << 4 * ArgWords; +} + +void MangleContext::mangleGlobalBlock(const BlockDecl *BD, + const NamedDecl *ID, + raw_ostream &Out) { + unsigned discriminator = getBlockId(BD, false); + if (ID) { + if (shouldMangleDeclName(ID)) + mangleName(ID, Out); + else { + Out << ID->getIdentifier()->getName(); + } + } + if (discriminator == 0) + Out << "_block_invoke"; + else + Out << "_block_invoke_" << discriminator+1; +} + +void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD, + CXXCtorType CT, const BlockDecl *BD, + raw_ostream &ResStream) { + SmallString<64> Buffer; + 
llvm::raw_svector_ostream Out(Buffer); + mangleCXXCtor(CD, CT, Out); + Out.flush(); + mangleFunctionBlock(*this, Buffer, BD, ResStream); +} + +void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD, + CXXDtorType DT, const BlockDecl *BD, + raw_ostream &ResStream) { + SmallString<64> Buffer; + llvm::raw_svector_ostream Out(Buffer); + mangleCXXDtor(DD, DT, Out); + Out.flush(); + mangleFunctionBlock(*this, Buffer, BD, ResStream); +} + +void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD, + raw_ostream &Out) { + assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC)); + + SmallString<64> Buffer; + llvm::raw_svector_ostream Stream(Buffer); + if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) { + mangleObjCMethodName(Method, Stream); + } else { + const NamedDecl *ND = cast<NamedDecl>(DC); + if (!shouldMangleDeclName(ND) && ND->getIdentifier()) + Stream << ND->getIdentifier()->getName(); + else { + // FIXME: We were doing a mangleUnqualifiedName() before, but that's + // a private member of a class that will soon itself be private to the + // Itanium C++ ABI object. What should we do now? Right now, I'm just + // calling the mangleName() method on the MangleContext; is there a + // better way? + mangleName(ND, Stream); + } + } + Stream.flush(); + mangleFunctionBlock(*this, Buffer, BD, Out); +} + +void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD, + raw_ostream &Out) { + SmallString<64> Name; + llvm::raw_svector_ostream OS(Name); + + const ObjCContainerDecl *CD = + dyn_cast<ObjCContainerDecl>(MD->getDeclContext()); + assert (CD && "Missing container decl in GetNameForMethod"); + OS << (MD->isInstanceMethod() ? 
'-' : '+') << '[' << CD->getName(); + if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD)) + OS << '(' << *CID << ')'; + OS << ' ' << MD->getSelector().getAsString() << ']'; + + Out << OS.str().size() << OS.str(); +} diff --git a/contrib/llvm/tools/clang/lib/AST/MangleNumberingContext.cpp b/contrib/llvm/tools/clang/lib/AST/MangleNumberingContext.cpp new file mode 100644 index 000000000000..91ef0e2240d8 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/MangleNumberingContext.cpp @@ -0,0 +1,43 @@ +//===--- MangleNumberingContext.cpp - Context for mangling numbers --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the LambdaMangleContext class, which keeps track of +// the Itanium C++ ABI mangling numbers for lambda expressions. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/MangleNumberingContext.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" + +using namespace clang; + +unsigned +MangleNumberingContext::getManglingNumber(const CXXMethodDecl *CallOperator) { + const FunctionProtoType *Proto + = CallOperator->getType()->getAs<FunctionProtoType>(); + ASTContext &Context = CallOperator->getASTContext(); + + QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getArgTypes(), + FunctionProtoType::ExtProtoInfo()); + Key = Context.getCanonicalType(Key); + return ++ManglingNumbers[Key->castAs<FunctionProtoType>()]; +} + +unsigned +MangleNumberingContext::getManglingNumber(const BlockDecl *BD) { + // FIXME: Compute a BlockPointerType? Not obvious how. 
+ const Type *Ty = 0; + return ++ManglingNumbers[Ty]; +} + +unsigned +MangleNumberingContext::getManglingNumber(const TagDecl *TD) { + return ++TagManglingNumbers[TD->getIdentifier()]; +} diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp new file mode 100644 index 000000000000..4a93ea1f417f --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp @@ -0,0 +1,202 @@ +//===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides C++ AST support targeting the Microsoft Visual C++ +// ABI. +// +//===----------------------------------------------------------------------===// + +#include "CXXABI.h" +#include "clang/AST/Attr.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/MangleNumberingContext.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/Type.h" +#include "clang/Basic/TargetInfo.h" + +using namespace clang; + +namespace { + +/// \brief Numbers things which need to correspond across multiple TUs. +/// Typically these are things like static locals, lambdas, or blocks. +class MicrosoftNumberingContext : public MangleNumberingContext { + unsigned NumStaticLocals; + +public: + MicrosoftNumberingContext() : NumStaticLocals(0) { } + + /// Static locals are numbered by source order. 
+ virtual unsigned getManglingNumber(const VarDecl *VD) { + assert(VD->isStaticLocal()); + return ++NumStaticLocals; + } +}; + +class MicrosoftCXXABI : public CXXABI { + ASTContext &Context; +public: + MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { } + + std::pair<uint64_t, unsigned> + getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const; + + CallingConv getDefaultMethodCallConv(bool isVariadic) const { + if (!isVariadic && + Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86) + return CC_X86ThisCall; + return CC_C; + } + + bool isNearlyEmpty(const CXXRecordDecl *RD) const { + // FIXME: Audit the corners + if (!RD->isDynamicClass()) + return false; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // In the Microsoft ABI, classes can have one or two vtable pointers. + CharUnits PointerSize = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + return Layout.getNonVirtualSize() == PointerSize || + Layout.getNonVirtualSize() == PointerSize * 2; + } + + MangleNumberingContext *createMangleNumberingContext() const { + return new MicrosoftNumberingContext(); + } +}; +} + +// getNumBases() seems to only give us the number of direct bases, and not the +// total. This function tells us if we inherit from anybody that uses MI, or if +// we have a non-primary base class, which uses the multiple inheritance model. 
+static bool usesMultipleInheritanceModel(const CXXRecordDecl *RD) { + while (RD->getNumBases() > 0) { + if (RD->getNumBases() > 1) + return true; + assert(RD->getNumBases() == 1); + const CXXRecordDecl *Base = + RD->bases_begin()->getType()->getAsCXXRecordDecl(); + if (RD->isPolymorphic() && !Base->isPolymorphic()) + return true; + RD = Base; + } + return false; +} + +static MSInheritanceModel MSInheritanceAttrToModel(attr::Kind Kind) { + switch (Kind) { + default: llvm_unreachable("expected MS inheritance attribute"); + case attr::SingleInheritance: return MSIM_Single; + case attr::MultipleInheritance: return MSIM_Multiple; + case attr::VirtualInheritance: return MSIM_Virtual; + case attr::UnspecifiedInheritance: return MSIM_Unspecified; + } +} + +MSInheritanceModel CXXRecordDecl::getMSInheritanceModel() const { + if (Attr *IA = this->getAttr<MSInheritanceAttr>()) + return MSInheritanceAttrToModel(IA->getKind()); + // If there was no explicit attribute, the record must be defined already, and + // we can figure out the inheritance model from its other properties. + if (this->getNumVBases() > 0) + return MSIM_Virtual; + if (usesMultipleInheritanceModel(this)) + return this->isPolymorphic() ? MSIM_MultiplePolymorphic : MSIM_Multiple; + return this->isPolymorphic() ? MSIM_SinglePolymorphic : MSIM_Single; +} + +// Returns the number of pointer and integer slots used to represent a member +// pointer in the MS C++ ABI. +// +// Member function pointers have the following general form; however, fields +// are dropped as permitted (under the MSVC interpretation) by the inheritance +// model of the actual class. +// +// struct { +// // A pointer to the member function to call. If the member function is +// // virtual, this will be a thunk that forwards to the appropriate vftable +// // slot. 
+// void *FunctionPointerOrVirtualThunk; +// +// // An offset to add to the address of the vbtable pointer after (possibly) +// // selecting the virtual base but before resolving and calling the function. +// // Only needed if the class has any virtual bases or bases at a non-zero +// // offset. +// int NonVirtualBaseAdjustment; +// +// // An offset within the vb-table that selects the virtual base containing +// // the member. Loading from this offset produces a new offset that is +// // added to the address of the vb-table pointer to produce the base. +// int VirtualBaseAdjustmentOffset; +// +// // The offset of the vb-table pointer within the object. Only needed for +// // incomplete types. +// int VBPtrOffset; +// }; +static std::pair<unsigned, unsigned> +getMSMemberPointerSlots(const MemberPointerType *MPT) { + const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl(); + MSInheritanceModel Inheritance = RD->getMSInheritanceModel(); + unsigned Ptrs; + unsigned Ints = 0; + if (MPT->isMemberFunctionPointer()) { + // Member function pointers are a struct of a function pointer followed by a + // variable number of ints depending on the inheritance model used. The + // function pointer is a real function if it is non-virtual and a vftable + // slot thunk if it is virtual. The ints select the object base passed for + // the 'this' pointer. + Ptrs = 1; // First slot is always a function pointer. + switch (Inheritance) { + case MSIM_Unspecified: ++Ints; // VBTableOffset + case MSIM_Virtual: ++Ints; // VirtualBaseAdjustmentOffset + case MSIM_MultiplePolymorphic: + case MSIM_Multiple: ++Ints; // NonVirtualBaseAdjustment + case MSIM_SinglePolymorphic: + case MSIM_Single: break; // Nothing + } + } else { + // Data pointers are an aggregate of ints. The first int is an offset + // followed by vbtable-related offsets. 
+ Ptrs = 0; + switch (Inheritance) { + case MSIM_Unspecified: ++Ints; // VBTableOffset + case MSIM_Virtual: ++Ints; // VirtualBaseAdjustmentOffset + case MSIM_MultiplePolymorphic: + case MSIM_Multiple: // Nothing + case MSIM_SinglePolymorphic: + case MSIM_Single: ++Ints; // Field offset + } + } + return std::make_pair(Ptrs, Ints); +} + +std::pair<uint64_t, unsigned> MicrosoftCXXABI::getMemberPointerWidthAndAlign( + const MemberPointerType *MPT) const { + const TargetInfo &Target = Context.getTargetInfo(); + assert(Target.getTriple().getArch() == llvm::Triple::x86 || + Target.getTriple().getArch() == llvm::Triple::x86_64); + unsigned Ptrs, Ints; + llvm::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT); + // The nominal struct is laid out with pointers followed by ints and aligned + // to a pointer width if any are present and an int width otherwise. + unsigned PtrSize = Target.getPointerWidth(0); + unsigned IntSize = Target.getIntWidth(); + uint64_t Width = Ptrs * PtrSize + Ints * IntSize; + unsigned Align = Ptrs > 0 ? Target.getPointerAlign(0) : Target.getIntAlign(); + Width = llvm::RoundUpToAlignment(Width, Align); + return std::make_pair(Width, Align); +} + +CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) { + return new MicrosoftCXXABI(Ctx); +} + diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp new file mode 100644 index 000000000000..52565019d41d --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp @@ -0,0 +1,2098 @@ +//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides C++ name mangling targeting the Microsoft Visual C++ ABI. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Mangle.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/CXXInheritance.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "clang/Basic/ABI.h" +#include "clang/Basic/DiagnosticOptions.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/StringMap.h" + +using namespace clang; + +namespace { + +/// \brief Retrieve the declaration context that should be used when mangling +/// the given declaration. +static const DeclContext *getEffectiveDeclContext(const Decl *D) { + // The ABI assumes that lambda closure types that occur within + // default arguments live in the context of the function. However, due to + // the way in which Clang parses and creates function declarations, this is + // not the case: the lambda closure type ends up living in the context + // where the function itself resides, because the function declaration itself + // had not yet been created. Fix the context here. + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + if (RD->isLambda()) + if (ParmVarDecl *ContextParam = + dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) + return ContextParam->getDeclContext(); + } + + // Perform the same check for block literals. 
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { + if (ParmVarDecl *ContextParam = + dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) + return ContextParam->getDeclContext(); + } + + const DeclContext *DC = D->getDeclContext(); + if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC)) + return getEffectiveDeclContext(CD); + + return DC; +} + +static const DeclContext *getEffectiveParentContext(const DeclContext *DC) { + return getEffectiveDeclContext(cast<Decl>(DC)); +} + +static const FunctionDecl *getStructor(const FunctionDecl *fn) { + if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate()) + return ftd->getTemplatedDecl(); + + return fn; +} + +/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the +/// Microsoft Visual C++ ABI. +class MicrosoftCXXNameMangler { + MangleContext &Context; + raw_ostream &Out; + + /// The "structor" is the top-level declaration being mangled, if + /// that's not a template specialization; otherwise it's the pattern + /// for that specialization. + const NamedDecl *Structor; + unsigned StructorType; + + typedef llvm::StringMap<unsigned> BackRefMap; + BackRefMap NameBackReferences; + bool UseNameBackReferences; + + typedef llvm::DenseMap<void*, unsigned> ArgBackRefMap; + ArgBackRefMap TypeBackReferences; + + ASTContext &getASTContext() const { return Context.getASTContext(); } + + // FIXME: If we add support for __ptr32/64 qualifiers, then we should push + // this check into mangleQualifiers(). 
+ const bool PointersAre64Bit; + +public: + enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result }; + + MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_) + : Context(C), Out(Out_), + Structor(0), StructorType(-1), + UseNameBackReferences(true), + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == + 64) { } + + MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_, + const CXXDestructorDecl *D, CXXDtorType Type) + : Context(C), Out(Out_), + Structor(getStructor(D)), StructorType(Type), + UseNameBackReferences(true), + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == + 64) { } + + raw_ostream &getStream() const { return Out; } + + void mangle(const NamedDecl *D, StringRef Prefix = "\01?"); + void mangleName(const NamedDecl *ND); + void mangleDeclaration(const NamedDecl *ND); + void mangleFunctionEncoding(const FunctionDecl *FD); + void mangleVariableEncoding(const VarDecl *VD); + void mangleNumber(int64_t Number); + void mangleType(QualType T, SourceRange Range, + QualifierMangleMode QMM = QMM_Mangle); + void mangleFunctionType(const FunctionType *T, const FunctionDecl *D = 0, + bool ForceInstMethod = false); + void manglePostfix(const DeclContext *DC, bool NoFunction = false); + +private: + void disableBackReferences() { UseNameBackReferences = false; } + void mangleUnqualifiedName(const NamedDecl *ND) { + mangleUnqualifiedName(ND, ND->getDeclName()); + } + void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name); + void mangleSourceName(StringRef Name); + void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc); + void mangleCXXDtorType(CXXDtorType T); + void mangleQualifiers(Qualifiers Quals, bool IsMember); + void manglePointerQualifiers(Qualifiers Quals); + + void mangleUnscopedTemplateName(const TemplateDecl *ND); + void mangleTemplateInstantiationName(const TemplateDecl *TD, + const TemplateArgumentList &TemplateArgs); + void mangleObjCMethodName(const 
ObjCMethodDecl *MD); + void mangleLocalName(const FunctionDecl *FD); + + void mangleArgumentType(QualType T, SourceRange Range); + + // Declare manglers for every type class. +#define ABSTRACT_TYPE(CLASS, PARENT) +#define NON_CANONICAL_TYPE(CLASS, PARENT) +#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T, \ + SourceRange Range); +#include "clang/AST/TypeNodes.def" +#undef ABSTRACT_TYPE +#undef NON_CANONICAL_TYPE +#undef TYPE + + void mangleType(const TagDecl *TD); + void mangleDecayedArrayType(const ArrayType *T); + void mangleArrayType(const ArrayType *T); + void mangleFunctionClass(const FunctionDecl *FD); + void mangleCallingConvention(const FunctionType *T); + void mangleIntegerLiteral(const llvm::APSInt &Number, bool IsBoolean); + void mangleExpression(const Expr *E); + void mangleThrowSpecification(const FunctionProtoType *T); + + void mangleTemplateArgs(const TemplateDecl *TD, + const TemplateArgumentList &TemplateArgs); + void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA); +}; + +/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the +/// Microsoft Visual C++ ABI. 
+class MicrosoftMangleContextImpl : public MicrosoftMangleContext { +public: + MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags) + : MicrosoftMangleContext(Context, Diags) {} + virtual bool shouldMangleCXXName(const NamedDecl *D); + virtual void mangleCXXName(const NamedDecl *D, raw_ostream &Out); + virtual void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD, + uint64_t OffsetInVFTable, + raw_ostream &); + virtual void mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, + raw_ostream &); + virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &ThisAdjustment, + raw_ostream &); + virtual void mangleCXXVFTable(const CXXRecordDecl *Derived, + ArrayRef<const CXXRecordDecl *> BasePath, + raw_ostream &Out); + virtual void mangleCXXVBTable(const CXXRecordDecl *Derived, + ArrayRef<const CXXRecordDecl *> BasePath, + raw_ostream &Out); + virtual void mangleCXXRTTI(QualType T, raw_ostream &); + virtual void mangleCXXRTTIName(QualType T, raw_ostream &); + virtual void mangleTypeName(QualType T, raw_ostream &); + virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type, + raw_ostream &); + virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, + raw_ostream &); + virtual void mangleReferenceTemporary(const VarDecl *, raw_ostream &); + virtual void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out); + virtual void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out); + virtual void mangleDynamicAtExitDestructor(const VarDecl *D, + raw_ostream &Out); + +private: + void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode); +}; + +} + +bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) { + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + LanguageLinkage L = FD->getLanguageLinkage(); + // Overloadable functions need mangling. 
+ if (FD->hasAttr<OverloadableAttr>()) + return true; + + // The ABI expects that we would never mangle "typical" user-defined entry + // points regardless of visibility or freestanding-ness. + // + // N.B. This is distinct from asking about "main". "main" has a lot of + // special rules associated with it in the standard while these + // user-defined entry points are outside of the purview of the standard. + // For example, there can be only one definition for "main" in a standards + // compliant program; however nothing forbids the existence of wmain and + // WinMain in the same translation unit. + if (FD->isMSVCRTEntryPoint()) + return false; + + // C++ functions and those whose names are not a simple identifier need + // mangling. + if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage) + return true; + + // C functions are not mangled. + if (L == CLanguageLinkage) + return false; + } + + // Otherwise, no mangling is done outside C++ mode. + if (!getASTContext().getLangOpts().CPlusPlus) + return false; + + if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { + // C variables are not mangled. + if (VD->isExternC()) + return false; + + // Variables at global scope with non-internal linkage are not mangled. + const DeclContext *DC = getEffectiveDeclContext(D); + // Check for extern variable declared locally. + if (DC->isFunctionOrMethod() && D->hasLinkage()) + while (!DC->isNamespace() && !DC->isTranslationUnit()) + DC = getEffectiveParentContext(DC); + + if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage && + !isa<VarTemplateSpecializationDecl>(D)) + return false; + } + + return true; +} + +void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, + StringRef Prefix) { + // MSVC doesn't mangle C++ names the same way it mangles extern "C" names. + // Therefore it's really important that we don't decorate the + // name with leading underscores or leading/trailing at signs. 
So, by + // default, we emit an asm marker at the start so we get the name right. + // Callers can override this with a custom prefix. + + // <mangled-name> ::= ? <name> <type-encoding> + Out << Prefix; + mangleName(D); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) + mangleFunctionEncoding(FD); + else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) + mangleVariableEncoding(VD); + else { + // TODO: Fields? Can MSVC even mangle them? + // Issue a diagnostic for now. + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this declaration yet"); + Diags.Report(D->getLocation(), DiagID) + << D->getSourceRange(); + } +} + +void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { + // <type-encoding> ::= <function-class> <function-type> + + // Since MSVC operates on the type as written and not the canonical type, it + // actually matters which decl we have here. MSVC appears to choose the + // first, since it is most likely to be the declaration in a header file. + FD = FD->getFirstDecl(); + + // We should never ever see a FunctionNoProtoType at this point. + // We don't even know how to mangle their types anyway :). + const FunctionProtoType *FT = FD->getType()->castAs<FunctionProtoType>(); + + // extern "C" functions can hold entities that must be mangled. + // As it stands, these functions still need to get expressed in the full + // external name. They have their class and type omitted, replaced with '9'. + if (Context.shouldMangleDeclName(FD)) { + // First, the function class. 
+ mangleFunctionClass(FD); + + mangleFunctionType(FT, FD); + } else + Out << '9'; +} + +void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) { + // <type-encoding> ::= <storage-class> <variable-type> + // <storage-class> ::= 0 # private static member + // ::= 1 # protected static member + // ::= 2 # public static member + // ::= 3 # global + // ::= 4 # static local + + // The first character in the encoding (after the name) is the storage class. + if (VD->isStaticDataMember()) { + // If it's a static member, it also encodes the access level. + switch (VD->getAccess()) { + default: + case AS_private: Out << '0'; break; + case AS_protected: Out << '1'; break; + case AS_public: Out << '2'; break; + } + } + else if (!VD->isStaticLocal()) + Out << '3'; + else + Out << '4'; + // Now mangle the type. + // <variable-type> ::= <type> <cvr-qualifiers> + // ::= <type> <pointee-cvr-qualifiers> # pointers, references + // Pointers and references are odd. The type of 'int * const foo;' gets + // mangled as 'QAHA' instead of 'PAHB', for example. + TypeLoc TL = VD->getTypeSourceInfo()->getTypeLoc(); + QualType Ty = TL.getType(); + if (Ty->isPointerType() || Ty->isReferenceType() || + Ty->isMemberPointerType()) { + mangleType(Ty, TL.getSourceRange(), QMM_Drop); + if (PointersAre64Bit) + Out << 'E'; + if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) { + mangleQualifiers(MPT->getPointeeType().getQualifiers(), true); + // Member pointers are suffixed with a back reference to the member + // pointer's class name. + mangleName(MPT->getClass()->getAsCXXRecordDecl()); + } else + mangleQualifiers(Ty->getPointeeType().getQualifiers(), false); + } else if (const ArrayType *AT = getASTContext().getAsArrayType(Ty)) { + // Global arrays are funny, too. 
+ mangleDecayedArrayType(AT); + if (AT->getElementType()->isArrayType()) + Out << 'A'; + else + mangleQualifiers(Ty.getQualifiers(), false); + } else { + mangleType(Ty, TL.getSourceRange(), QMM_Drop); + mangleQualifiers(Ty.getLocalQualifiers(), false); + } +} + +void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) { + // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @ + const DeclContext *DC = ND->getDeclContext(); + + // Always start with the unqualified name. + mangleUnqualifiedName(ND); + + // If this is an extern variable declared locally, the relevant DeclContext + // is that of the containing namespace, or the translation unit. + if (isa<FunctionDecl>(DC) && ND->hasLinkage()) + while (!DC->isNamespace() && !DC->isTranslationUnit()) + DC = DC->getParent(); + + manglePostfix(DC); + + // Terminate the whole name with an '@'. + Out << '@'; +} + +void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) { + // <non-negative integer> ::= A@ # when Number == 0 + // ::= <decimal digit> # when 1 <= Number <= 10 + // ::= <hex digit>+ @ # when Number >= 10 + // + // <number> ::= [?] <non-negative integer> + + uint64_t Value = static_cast<uint64_t>(Number); + if (Number < 0) { + Value = -Value; + Out << '?'; + } + + if (Value == 0) + Out << "A@"; + else if (Value >= 1 && Value <= 10) + Out << (Value - 1); + else { + // Numbers that are not encoded as decimal digits are represented as nibbles + // in the range of ASCII characters 'A' to 'P'. 
+ // The number 0x123450 would be encoded as 'BCDEFA' + char EncodedNumberBuffer[sizeof(uint64_t) * 2]; + llvm::MutableArrayRef<char> BufferRef(EncodedNumberBuffer); + llvm::MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin(); + for (; Value != 0; Value >>= 4) + *I++ = 'A' + (Value & 0xf); + Out.write(I.base(), I - BufferRef.rbegin()); + Out << '@'; + } +} + +static const TemplateDecl * +isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) { + // Check if we have a function template. + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){ + if (const TemplateDecl *TD = FD->getPrimaryTemplate()) { + TemplateArgs = FD->getTemplateSpecializationArgs(); + return TD; + } + } + + // Check if we have a class template. + if (const ClassTemplateSpecializationDecl *Spec = + dyn_cast<ClassTemplateSpecializationDecl>(ND)) { + TemplateArgs = &Spec->getTemplateArgs(); + return Spec->getSpecializedTemplate(); + } + + return 0; +} + +void +MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, + DeclarationName Name) { + // <unqualified-name> ::= <operator-name> + // ::= <ctor-dtor-name> + // ::= <source-name> + // ::= <template-name> + + // Check if we have a template. + const TemplateArgumentList *TemplateArgs = 0; + if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { + // Function templates aren't considered for name back referencing. This + // makes sense since function templates aren't likely to occur multiple + // times in a symbol. + // FIXME: Test alias template mangling with MSVC 2013. + if (!isa<ClassTemplateDecl>(TD)) { + mangleTemplateInstantiationName(TD, *TemplateArgs); + return; + } + + // We have a class template. + // Here comes the tricky thing: if we need to mangle something like + // void foo(A::X<Y>, B::X<Y>), + // the X<Y> part is aliased. However, if you need to mangle + // void foo(A::X<A::Y>, A::X<B::Y>), + // the A::X<> part is not aliased. 
+ // That said, from the mangler's perspective we have a structure like this: + // namespace[s] -> type[ -> template-parameters] + // but from the Clang perspective we have + // type [ -> template-parameters] + // \-> namespace[s] + // What we do is we create a new mangler, mangle the same type (without + // a namespace suffix) using the extra mangler with back references + // disabled (to avoid infinite recursion) and then use the mangled type + // name as a key to check the mangling of different types for aliasing. + + std::string BackReferenceKey; + BackRefMap::iterator Found; + if (UseNameBackReferences) { + llvm::raw_string_ostream Stream(BackReferenceKey); + MicrosoftCXXNameMangler Extra(Context, Stream); + Extra.disableBackReferences(); + Extra.mangleUnqualifiedName(ND, Name); + Stream.flush(); + + Found = NameBackReferences.find(BackReferenceKey); + } + if (!UseNameBackReferences || Found == NameBackReferences.end()) { + mangleTemplateInstantiationName(TD, *TemplateArgs); + if (UseNameBackReferences && NameBackReferences.size() < 10) { + size_t Size = NameBackReferences.size(); + NameBackReferences[BackReferenceKey] = Size; + } + } else { + Out << Found->second; + } + return; + } + + switch (Name.getNameKind()) { + case DeclarationName::Identifier: { + if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) { + mangleSourceName(II->getName()); + break; + } + + // Otherwise, an anonymous entity. We must have a declaration. + assert(ND && "mangling empty name without declaration"); + + if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) { + if (NS->isAnonymousNamespace()) { + Out << "?A@"; + break; + } + } + + // We must have an anonymous struct. 
+ const TagDecl *TD = cast<TagDecl>(ND); + if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) { + assert(TD->getDeclContext() == D->getDeclContext() && + "Typedef should not be in another decl context!"); + assert(D->getDeclName().getAsIdentifierInfo() && + "Typedef was not named!"); + mangleSourceName(D->getDeclName().getAsIdentifierInfo()->getName()); + break; + } + + if (TD->hasDeclaratorForAnonDecl()) { + // Anonymous types with no tag or typedef get the name of their + // declarator mangled in. + llvm::SmallString<64> Name("<unnamed-type-"); + Name += TD->getDeclaratorForAnonDecl()->getName(); + Name += ">"; + mangleSourceName(Name.str()); + } else { + // Anonymous types with no tag, no typedef, or declarator get + // '<unnamed-tag>'. + mangleSourceName("<unnamed-tag>"); + } + break; + } + + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + llvm_unreachable("Can't mangle Objective-C selector names here!"); + + case DeclarationName::CXXConstructorName: + if (ND == Structor) { + assert(StructorType == Ctor_Complete && + "Should never be asked to mangle a ctor other than complete"); + } + Out << "?0"; + break; + + case DeclarationName::CXXDestructorName: + if (ND == Structor) + // If the named decl is the C++ destructor we're mangling, + // use the type we were given. + mangleCXXDtorType(static_cast<CXXDtorType>(StructorType)); + else + // Otherwise, use the base destructor name. This is relevant if a + // class with a destructor is declared within a destructor. + mangleCXXDtorType(Dtor_Base); + break; + + case DeclarationName::CXXConversionFunctionName: + // <operator-name> ::= ?B # (cast) + // The target type is encoded as the return type. 
+ Out << "?B"; + break; + + case DeclarationName::CXXOperatorName: + mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation()); + break; + + case DeclarationName::CXXLiteralOperatorName: { + // FIXME: Was this added in VS2010? Does MS even know how to mangle this? + DiagnosticsEngine Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this literal operator yet"); + Diags.Report(ND->getLocation(), DiagID); + break; + } + + case DeclarationName::CXXUsingDirective: + llvm_unreachable("Can't mangle a using directive name!"); + } +} + +void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC, + bool NoFunction) { + // <postfix> ::= <unqualified-name> [<postfix>] + // ::= <substitution> [<postfix>] + + if (!DC) return; + + while (isa<LinkageSpecDecl>(DC)) + DC = DC->getParent(); + + if (DC->isTranslationUnit()) + return; + + if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) { + DiagnosticsEngine Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle a local inside this block yet"); + Diags.Report(BD->getLocation(), DiagID); + + // FIXME: This is completely, utterly, wrong; see ItaniumMangle + // for how this should be done. + Out << "__block_invoke" << Context.getBlockId(BD, false); + Out << '@'; + return manglePostfix(DC->getParent(), NoFunction); + } else if (isa<CapturedDecl>(DC)) { + // Skip CapturedDecl context. 
+ manglePostfix(DC->getParent(), NoFunction); + return; + } + + if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC))) + return; + else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) + mangleObjCMethodName(Method); + else if (const FunctionDecl *Func = dyn_cast<FunctionDecl>(DC)) + mangleLocalName(Func); + else { + mangleUnqualifiedName(cast<NamedDecl>(DC)); + manglePostfix(DC->getParent(), NoFunction); + } +} + +void MicrosoftCXXNameMangler::mangleCXXDtorType(CXXDtorType T) { + // Microsoft uses the names on the case labels for these dtor variants. Clang + // uses the Itanium terminology internally. Everything in this ABI delegates + // towards the base dtor. + switch (T) { + // <operator-name> ::= ?1 # destructor + case Dtor_Base: Out << "?1"; return; + // <operator-name> ::= ?_D # vbase destructor + case Dtor_Complete: Out << "?_D"; return; + // <operator-name> ::= ?_G # scalar deleting destructor + case Dtor_Deleting: Out << "?_G"; return; + // <operator-name> ::= ?_E # vector deleting destructor + // FIXME: Add a vector deleting dtor type. It goes in the vtable, so we need + // it. + } + llvm_unreachable("Unsupported dtor type?"); +} + +void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, + SourceLocation Loc) { + switch (OO) { + // ?0 # constructor + // ?1 # destructor + // <operator-name> ::= ?2 # new + case OO_New: Out << "?2"; break; + // <operator-name> ::= ?3 # delete + case OO_Delete: Out << "?3"; break; + // <operator-name> ::= ?4 # = + case OO_Equal: Out << "?4"; break; + // <operator-name> ::= ?5 # >> + case OO_GreaterGreater: Out << "?5"; break; + // <operator-name> ::= ?6 # << + case OO_LessLess: Out << "?6"; break; + // <operator-name> ::= ?7 # ! 
+ case OO_Exclaim: Out << "?7"; break; + // <operator-name> ::= ?8 # == + case OO_EqualEqual: Out << "?8"; break; + // <operator-name> ::= ?9 # != + case OO_ExclaimEqual: Out << "?9"; break; + // <operator-name> ::= ?A # [] + case OO_Subscript: Out << "?A"; break; + // ?B # conversion + // <operator-name> ::= ?C # -> + case OO_Arrow: Out << "?C"; break; + // <operator-name> ::= ?D # * + case OO_Star: Out << "?D"; break; + // <operator-name> ::= ?E # ++ + case OO_PlusPlus: Out << "?E"; break; + // <operator-name> ::= ?F # -- + case OO_MinusMinus: Out << "?F"; break; + // <operator-name> ::= ?G # - + case OO_Minus: Out << "?G"; break; + // <operator-name> ::= ?H # + + case OO_Plus: Out << "?H"; break; + // <operator-name> ::= ?I # & + case OO_Amp: Out << "?I"; break; + // <operator-name> ::= ?J # ->* + case OO_ArrowStar: Out << "?J"; break; + // <operator-name> ::= ?K # / + case OO_Slash: Out << "?K"; break; + // <operator-name> ::= ?L # % + case OO_Percent: Out << "?L"; break; + // <operator-name> ::= ?M # < + case OO_Less: Out << "?M"; break; + // <operator-name> ::= ?N # <= + case OO_LessEqual: Out << "?N"; break; + // <operator-name> ::= ?O # > + case OO_Greater: Out << "?O"; break; + // <operator-name> ::= ?P # >= + case OO_GreaterEqual: Out << "?P"; break; + // <operator-name> ::= ?Q # , + case OO_Comma: Out << "?Q"; break; + // <operator-name> ::= ?R # () + case OO_Call: Out << "?R"; break; + // <operator-name> ::= ?S # ~ + case OO_Tilde: Out << "?S"; break; + // <operator-name> ::= ?T # ^ + case OO_Caret: Out << "?T"; break; + // <operator-name> ::= ?U # | + case OO_Pipe: Out << "?U"; break; + // <operator-name> ::= ?V # && + case OO_AmpAmp: Out << "?V"; break; + // <operator-name> ::= ?W # || + case OO_PipePipe: Out << "?W"; break; + // <operator-name> ::= ?X # *= + case OO_StarEqual: Out << "?X"; break; + // <operator-name> ::= ?Y # += + case OO_PlusEqual: Out << "?Y"; break; + // <operator-name> ::= ?Z # -= + case OO_MinusEqual: Out << "?Z"; break; + // 
<operator-name> ::= ?_0 # /= + case OO_SlashEqual: Out << "?_0"; break; + // <operator-name> ::= ?_1 # %= + case OO_PercentEqual: Out << "?_1"; break; + // <operator-name> ::= ?_2 # >>= + case OO_GreaterGreaterEqual: Out << "?_2"; break; + // <operator-name> ::= ?_3 # <<= + case OO_LessLessEqual: Out << "?_3"; break; + // <operator-name> ::= ?_4 # &= + case OO_AmpEqual: Out << "?_4"; break; + // <operator-name> ::= ?_5 # |= + case OO_PipeEqual: Out << "?_5"; break; + // <operator-name> ::= ?_6 # ^= + case OO_CaretEqual: Out << "?_6"; break; + // ?_7 # vftable + // ?_8 # vbtable + // ?_9 # vcall + // ?_A # typeof + // ?_B # local static guard + // ?_C # string + // ?_D # vbase destructor + // ?_E # vector deleting destructor + // ?_F # default constructor closure + // ?_G # scalar deleting destructor + // ?_H # vector constructor iterator + // ?_I # vector destructor iterator + // ?_J # vector vbase constructor iterator + // ?_K # virtual displacement map + // ?_L # eh vector constructor iterator + // ?_M # eh vector destructor iterator + // ?_N # eh vector vbase constructor iterator + // ?_O # copy constructor closure + // ?_P<name> # udt returning <name> + // ?_Q # <unknown> + // ?_R0 # RTTI Type Descriptor + // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d) + // ?_R2 # RTTI Base Class Array + // ?_R3 # RTTI Class Hierarchy Descriptor + // ?_R4 # RTTI Complete Object Locator + // ?_S # local vftable + // ?_T # local vftable constructor closure + // <operator-name> ::= ?_U # new[] + case OO_Array_New: Out << "?_U"; break; + // <operator-name> ::= ?_V # delete[] + case OO_Array_Delete: Out << "?_V"; break; + + case OO_Conditional: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this conditional operator yet"); + Diags.Report(Loc, DiagID); + break; + } + + case OO_None: + case NUM_OVERLOADED_OPERATORS: + llvm_unreachable("Not an overloaded operator"); + } +} + +void 
MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) { + // <source name> ::= <identifier> @ + BackRefMap::iterator Found; + if (UseNameBackReferences) + Found = NameBackReferences.find(Name); + if (!UseNameBackReferences || Found == NameBackReferences.end()) { + Out << Name << '@'; + if (UseNameBackReferences && NameBackReferences.size() < 10) { + size_t Size = NameBackReferences.size(); + NameBackReferences[Name] = Size; + } + } else { + Out << Found->second; + } +} + +void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) { + Context.mangleObjCMethodName(MD, Out); +} + +// Find out how many function decls live above this one and return an integer +// suitable for use as the number in a numbered anonymous scope. +// TODO: Memoize. +static unsigned getLocalNestingLevel(const FunctionDecl *FD) { + const DeclContext *DC = FD->getParent(); + int level = 1; + + while (DC && !DC->isTranslationUnit()) { + if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) level++; + DC = DC->getParent(); + } + + return 2*level; +} + +void MicrosoftCXXNameMangler::mangleLocalName(const FunctionDecl *FD) { + // <nested-name> ::= <numbered-anonymous-scope> ? <mangled-name> + // <numbered-anonymous-scope> ::= ? <number> + // Even though the name is rendered in reverse order (e.g. + // A::B::C is rendered as C@B@A), VC numbers the scopes from outermost to + // innermost. So a method bar in class C local to function foo gets mangled + // as something like: + // ?bar@C@?1??foo@@YAXXZ@QAEXXZ + // This is more apparent when you have a type nested inside a method of a + // type nested inside a function. A method baz in class D local to method + // bar of class C local to function foo gets mangled as: + // ?baz@D@?3??bar@C@?1??foo@@YAXXZ@QAEXXZ@QAEXXZ + // This scheme is general enough to support GCC-style nested + // functions. 
You could have a method baz of class C inside a function bar + // inside a function foo, like so: + // ?baz@C@?3??bar@?1??foo@@YAXXZ@YAXXZ@QAEXXZ + unsigned NestLevel = getLocalNestingLevel(FD); + Out << '?'; + mangleNumber(NestLevel); + Out << '?'; + mangle(FD, "?"); +} + +void MicrosoftCXXNameMangler::mangleTemplateInstantiationName( + const TemplateDecl *TD, + const TemplateArgumentList &TemplateArgs) { + // <template-name> ::= <unscoped-template-name> <template-args> + // ::= <substitution> + // Always start with the unqualified name. + + // Templates have their own context for back references. + ArgBackRefMap OuterArgsContext; + BackRefMap OuterTemplateContext; + NameBackReferences.swap(OuterTemplateContext); + TypeBackReferences.swap(OuterArgsContext); + + mangleUnscopedTemplateName(TD); + mangleTemplateArgs(TD, TemplateArgs); + + // Restore the previous back reference contexts. + NameBackReferences.swap(OuterTemplateContext); + TypeBackReferences.swap(OuterArgsContext); +} + +void +MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) { + // <unscoped-template-name> ::= ?$ <unqualified-name> + Out << "?$"; + mangleUnqualifiedName(TD); +} + +void +MicrosoftCXXNameMangler::mangleIntegerLiteral(const llvm::APSInt &Value, + bool IsBoolean) { + // <integer-literal> ::= $0 <number> + Out << "$0"; + // Make sure booleans are encoded as 0/1. + if (IsBoolean && Value.getBoolValue()) + mangleNumber(1); + else + mangleNumber(Value.getSExtValue()); +} + +void +MicrosoftCXXNameMangler::mangleExpression(const Expr *E) { + // See if this is a constant expression. 
+ llvm::APSInt Value; + if (E->isIntegerConstantExpr(Value, Context.getASTContext())) { + mangleIntegerLiteral(Value, E->getType()->isBooleanType()); + return; + } + + const CXXUuidofExpr *UE = 0; + if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { + if (UO->getOpcode() == UO_AddrOf) + UE = dyn_cast<CXXUuidofExpr>(UO->getSubExpr()); + } else + UE = dyn_cast<CXXUuidofExpr>(E); + + if (UE) { + // This CXXUuidofExpr is mangled as-if it were actually a VarDecl from + // const __s_GUID _GUID_{lower case UUID with underscores} + StringRef Uuid = UE->getUuidAsStringRef(Context.getASTContext()); + std::string Name = "_GUID_" + Uuid.lower(); + std::replace(Name.begin(), Name.end(), '-', '_'); + + // If we had to peek through an address-of operator, treat this like we are + // dealing with a pointer type. Otherwise, treat it like a const reference. + // + // N.B. This matches up with the handling of TemplateArgument::Declaration + // in mangleTemplateArg + if (UE == E) + Out << "$E?"; + else + Out << "$1?"; + Out << Name << "@@3U__s_GUID@@B"; + return; + } + + // As bad as this diagnostic is, it's better than crashing. 
+ DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot yet mangle expression type %0"); + Diags.Report(E->getExprLoc(), DiagID) + << E->getStmtClassName() << E->getSourceRange(); +} + +void +MicrosoftCXXNameMangler::mangleTemplateArgs(const TemplateDecl *TD, + const TemplateArgumentList &TemplateArgs) { + // <template-args> ::= {<type> | <integer-literal>}+ @ + unsigned NumTemplateArgs = TemplateArgs.size(); + for (unsigned i = 0; i < NumTemplateArgs; ++i) { + const TemplateArgument &TA = TemplateArgs[i]; + mangleTemplateArg(TD, TA); + } + Out << '@'; +} + +void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, + const TemplateArgument &TA) { + switch (TA.getKind()) { + case TemplateArgument::Null: + llvm_unreachable("Can't mangle null template arguments!"); + case TemplateArgument::TemplateExpansion: + llvm_unreachable("Can't mangle template expansion arguments!"); + case TemplateArgument::Type: { + QualType T = TA.getAsType(); + mangleType(T, SourceRange(), QMM_Escape); + break; + } + case TemplateArgument::Declaration: { + const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl()); + mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?"); + break; + } + case TemplateArgument::Integral: + mangleIntegerLiteral(TA.getAsIntegral(), + TA.getIntegralType()->isBooleanType()); + break; + case TemplateArgument::NullPtr: + Out << "$0A@"; + break; + case TemplateArgument::Expression: + mangleExpression(TA.getAsExpr()); + break; + case TemplateArgument::Pack: + // Unlike Itanium, there is no character code to indicate an argument pack. 
+ for (TemplateArgument::pack_iterator I = TA.pack_begin(), E = TA.pack_end(); + I != E; ++I) + mangleTemplateArg(TD, *I); + break; + case TemplateArgument::Template: + mangleType(cast<TagDecl>( + TA.getAsTemplate().getAsTemplateDecl()->getTemplatedDecl())); + break; + } +} + +void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals, + bool IsMember) { + // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers> + // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only); + // 'I' means __restrict (32/64-bit). + // Note that the MSVC __restrict keyword isn't the same as the C99 restrict + // keyword! + // <base-cvr-qualifiers> ::= A # near + // ::= B # near const + // ::= C # near volatile + // ::= D # near const volatile + // ::= E # far (16-bit) + // ::= F # far const (16-bit) + // ::= G # far volatile (16-bit) + // ::= H # far const volatile (16-bit) + // ::= I # huge (16-bit) + // ::= J # huge const (16-bit) + // ::= K # huge volatile (16-bit) + // ::= L # huge const volatile (16-bit) + // ::= M <basis> # based + // ::= N <basis> # based const + // ::= O <basis> # based volatile + // ::= P <basis> # based const volatile + // ::= Q # near member + // ::= R # near const member + // ::= S # near volatile member + // ::= T # near const volatile member + // ::= U # far member (16-bit) + // ::= V # far const member (16-bit) + // ::= W # far volatile member (16-bit) + // ::= X # far const volatile member (16-bit) + // ::= Y # huge member (16-bit) + // ::= Z # huge const member (16-bit) + // ::= 0 # huge volatile member (16-bit) + // ::= 1 # huge const volatile member (16-bit) + // ::= 2 <basis> # based member + // ::= 3 <basis> # based const member + // ::= 4 <basis> # based volatile member + // ::= 5 <basis> # based const volatile member + // ::= 6 # near function (pointers only) + // ::= 7 # far function (pointers only) + // ::= 8 # near method (pointers only) + // ::= 9 # far method (pointers only) + // ::= _A <basis> # based function 
(pointers only) + // ::= _B <basis> # based function (far?) (pointers only) + // ::= _C <basis> # based method (pointers only) + // ::= _D <basis> # based method (far?) (pointers only) + // ::= _E # block (Clang) + // <basis> ::= 0 # __based(void) + // ::= 1 # __based(segment)? + // ::= 2 <name> # __based(name) + // ::= 3 # ? + // ::= 4 # ? + // ::= 5 # not really based + bool HasConst = Quals.hasConst(), + HasVolatile = Quals.hasVolatile(); + + if (!IsMember) { + if (HasConst && HasVolatile) { + Out << 'D'; + } else if (HasVolatile) { + Out << 'C'; + } else if (HasConst) { + Out << 'B'; + } else { + Out << 'A'; + } + } else { + if (HasConst && HasVolatile) { + Out << 'T'; + } else if (HasVolatile) { + Out << 'S'; + } else if (HasConst) { + Out << 'R'; + } else { + Out << 'Q'; + } + } + + // FIXME: For now, just drop all extension qualifiers on the floor. +} + +void MicrosoftCXXNameMangler::manglePointerQualifiers(Qualifiers Quals) { + // <pointer-cvr-qualifiers> ::= P # no qualifiers + // ::= Q # const + // ::= R # volatile + // ::= S # const volatile + bool HasConst = Quals.hasConst(), + HasVolatile = Quals.hasVolatile(); + if (HasConst && HasVolatile) { + Out << 'S'; + } else if (HasVolatile) { + Out << 'R'; + } else if (HasConst) { + Out << 'Q'; + } else { + Out << 'P'; + } +} + +void MicrosoftCXXNameMangler::mangleArgumentType(QualType T, + SourceRange Range) { + // MSVC will backreference two canonically equivalent types that have slightly + // different manglings when mangled alone. + + // Decayed types do not match up with non-decayed versions of the same type. + // + // e.g. + // void (*x)(void) will not form a backreference with void x(void) + void *TypePtr; + if (const DecayedType *DT = T->getAs<DecayedType>()) { + TypePtr = DT->getOriginalType().getCanonicalType().getAsOpaquePtr(); + // If the original parameter was textually written as an array, + // instead treat the decayed parameter like it's const. + // + // e.g. 
+ // int [] -> int * const + if (DT->getOriginalType()->isArrayType()) + T = T.withConst(); + } else + TypePtr = T.getCanonicalType().getAsOpaquePtr(); + + ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr); + + if (Found == TypeBackReferences.end()) { + size_t OutSizeBefore = Out.GetNumBytesInBuffer(); + + mangleType(T, Range, QMM_Drop); + + // See if it's worth creating a back reference. + // Only types longer than 1 character are considered + // and only 10 back references slots are available: + bool LongerThanOneChar = (Out.GetNumBytesInBuffer() - OutSizeBefore > 1); + if (LongerThanOneChar && TypeBackReferences.size() < 10) { + size_t Size = TypeBackReferences.size(); + TypeBackReferences[TypePtr] = Size; + } + } else { + Out << Found->second; + } +} + +void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, + QualifierMangleMode QMM) { + // Don't use the canonical types. MSVC includes things like 'const' on + // pointer arguments to function pointers that canonicalization strips away. + T = T.getDesugaredType(getASTContext()); + Qualifiers Quals = T.getLocalQualifiers(); + if (const ArrayType *AT = getASTContext().getAsArrayType(T)) { + // If there were any Quals, getAsArrayType() pushed them onto the array + // element type. 
+ if (QMM == QMM_Mangle) + Out << 'A'; + else if (QMM == QMM_Escape || QMM == QMM_Result) + Out << "$$B"; + mangleArrayType(AT); + return; + } + + bool IsPointer = T->isAnyPointerType() || T->isMemberPointerType() || + T->isBlockPointerType(); + + switch (QMM) { + case QMM_Drop: + break; + case QMM_Mangle: + if (const FunctionType *FT = dyn_cast<FunctionType>(T)) { + Out << '6'; + mangleFunctionType(FT); + return; + } + mangleQualifiers(Quals, false); + break; + case QMM_Escape: + if (!IsPointer && Quals) { + Out << "$$C"; + mangleQualifiers(Quals, false); + } + break; + case QMM_Result: + if ((!IsPointer && Quals) || isa<TagType>(T)) { + Out << '?'; + mangleQualifiers(Quals, false); + } + break; + } + + // We have to mangle these now, while we still have enough information. + if (IsPointer) + manglePointerQualifiers(Quals); + const Type *ty = T.getTypePtr(); + + switch (ty->getTypeClass()) { +#define ABSTRACT_TYPE(CLASS, PARENT) +#define NON_CANONICAL_TYPE(CLASS, PARENT) \ + case Type::CLASS: \ + llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \ + return; +#define TYPE(CLASS, PARENT) \ + case Type::CLASS: \ + mangleType(cast<CLASS##Type>(ty), Range); \ + break; +#include "clang/AST/TypeNodes.def" +#undef ABSTRACT_TYPE +#undef NON_CANONICAL_TYPE +#undef TYPE + } +} + +void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, + SourceRange Range) { + // <type> ::= <builtin-type> + // <builtin-type> ::= X # void + // ::= C # signed char + // ::= D # char + // ::= E # unsigned char + // ::= F # short + // ::= G # unsigned short (or wchar_t if it's not a builtin) + // ::= H # int + // ::= I # unsigned int + // ::= J # long + // ::= K # unsigned long + // L # <none> + // ::= M # float + // ::= N # double + // ::= O # long double (__float80 is mangled differently) + // ::= _J # long long, __int64 + // ::= _K # unsigned long long, __int64 + // ::= _L # __int128 + // ::= _M # unsigned __int128 + // ::= _N # bool + // _O # <array in parameter> + // 
::= _T # __float80 (Intel) + // ::= _W # wchar_t + // ::= _Z # __float80 (Digital Mars) + switch (T->getKind()) { + case BuiltinType::Void: Out << 'X'; break; + case BuiltinType::SChar: Out << 'C'; break; + case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break; + case BuiltinType::UChar: Out << 'E'; break; + case BuiltinType::Short: Out << 'F'; break; + case BuiltinType::UShort: Out << 'G'; break; + case BuiltinType::Int: Out << 'H'; break; + case BuiltinType::UInt: Out << 'I'; break; + case BuiltinType::Long: Out << 'J'; break; + case BuiltinType::ULong: Out << 'K'; break; + case BuiltinType::Float: Out << 'M'; break; + case BuiltinType::Double: Out << 'N'; break; + // TODO: Determine size and mangle accordingly + case BuiltinType::LongDouble: Out << 'O'; break; + case BuiltinType::LongLong: Out << "_J"; break; + case BuiltinType::ULongLong: Out << "_K"; break; + case BuiltinType::Int128: Out << "_L"; break; + case BuiltinType::UInt128: Out << "_M"; break; + case BuiltinType::Bool: Out << "_N"; break; + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: Out << "_W"; break; + +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) \ + case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + case BuiltinType::Dependent: + llvm_unreachable("placeholder types shouldn't get to name mangling"); + + case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break; + case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break; + case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break; + + case BuiltinType::OCLImage1d: Out << "PAUocl_image1d@@"; break; + case BuiltinType::OCLImage1dArray: Out << "PAUocl_image1darray@@"; break; + case BuiltinType::OCLImage1dBuffer: Out << "PAUocl_image1dbuffer@@"; break; + case BuiltinType::OCLImage2d: Out << "PAUocl_image2d@@"; break; + case BuiltinType::OCLImage2dArray: Out << "PAUocl_image2darray@@"; break; + case BuiltinType::OCLImage3d: Out << "PAUocl_image3d@@"; break; + case 
BuiltinType::OCLSampler: Out << "PAUocl_sampler@@"; break; + case BuiltinType::OCLEvent: Out << "PAUocl_event@@"; break; + + case BuiltinType::NullPtr: Out << "$$T"; break; + + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Half: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this built-in %0 type yet"); + Diags.Report(Range.getBegin(), DiagID) + << T->getName(Context.getASTContext().getPrintingPolicy()) + << Range; + break; + } + } +} + +// <type> ::= <function-type> +void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, + SourceRange) { + // Structors only appear in decls, so at this point we know it's not a + // structor type. + // FIXME: This may not be lambda-friendly. + Out << "$$A6"; + mangleFunctionType(T); +} +void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T, + SourceRange) { + llvm_unreachable("Can't mangle K&R function prototypes"); +} + +void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, + const FunctionDecl *D, + bool ForceInstMethod) { + // <function-type> ::= <this-cvr-qualifiers> <calling-convention> + // <return-type> <argument-list> <throw-spec> + const FunctionProtoType *Proto = cast<FunctionProtoType>(T); + + SourceRange Range; + if (D) Range = D->getSourceRange(); + + bool IsStructor = false, IsInstMethod = ForceInstMethod; + if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) { + if (MD->isInstance()) + IsInstMethod = true; + if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) + IsStructor = true; + } + + // If this is a C++ instance method, mangle the CVR qualifiers for the + // this pointer. 
+ if (IsInstMethod) { + if (PointersAre64Bit) + Out << 'E'; + mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false); + } + + mangleCallingConvention(T); + + // <return-type> ::= <type> + // ::= @ # structors (they have no declared return type) + if (IsStructor) { + if (isa<CXXDestructorDecl>(D) && D == Structor && + StructorType == Dtor_Deleting) { + // The scalar deleting destructor takes an extra int argument. + // However, the FunctionType generated has 0 arguments. + // FIXME: This is a temporary hack. + // Maybe should fix the FunctionType creation instead? + Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z"); + return; + } + Out << '@'; + } else { + QualType ResultType = Proto->getResultType(); + if (ResultType->isVoidType()) + ResultType = ResultType.getUnqualifiedType(); + mangleType(ResultType, Range, QMM_Result); + } + + // <argument-list> ::= X # void + // ::= <type>+ @ + // ::= <type>* Z # varargs + if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) { + Out << 'X'; + } else { + // Happens for function pointer type arguments for example. + for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), + ArgEnd = Proto->arg_type_end(); + Arg != ArgEnd; ++Arg) + mangleArgumentType(*Arg, Range); + // <builtin-type> ::= Z # ellipsis + if (Proto->isVariadic()) + Out << 'Z'; + else + Out << '@'; + } + + mangleThrowSpecification(Proto); +} + +void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { + // <function-class> ::= <member-function> E? # E designates a 64-bit 'this' + // # pointer. in 64-bit mode *all* + // # 'this' pointers are 64-bit. 
+ // ::= <global-function> + // <member-function> ::= A # private: near + // ::= B # private: far + // ::= C # private: static near + // ::= D # private: static far + // ::= E # private: virtual near + // ::= F # private: virtual far + // ::= I # protected: near + // ::= J # protected: far + // ::= K # protected: static near + // ::= L # protected: static far + // ::= M # protected: virtual near + // ::= N # protected: virtual far + // ::= Q # public: near + // ::= R # public: far + // ::= S # public: static near + // ::= T # public: static far + // ::= U # public: virtual near + // ::= V # public: virtual far + // <global-function> ::= Y # global near + // ::= Z # global far + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { + switch (MD->getAccess()) { + case AS_none: + llvm_unreachable("Unsupported access specifier"); + case AS_private: + if (MD->isStatic()) + Out << 'C'; + else if (MD->isVirtual()) + Out << 'E'; + else + Out << 'A'; + break; + case AS_protected: + if (MD->isStatic()) + Out << 'K'; + else if (MD->isVirtual()) + Out << 'M'; + else + Out << 'I'; + break; + case AS_public: + if (MD->isStatic()) + Out << 'S'; + else if (MD->isVirtual()) + Out << 'U'; + else + Out << 'Q'; + } + } else + Out << 'Y'; +} +void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { + // <calling-convention> ::= A # __cdecl + // ::= B # __export __cdecl + // ::= C # __pascal + // ::= D # __export __pascal + // ::= E # __thiscall + // ::= F # __export __thiscall + // ::= G # __stdcall + // ::= H # __export __stdcall + // ::= I # __fastcall + // ::= J # __export __fastcall + // The 'export' calling conventions are from a bygone era + // (*cough*Win16*cough*) when functions were declared for export with + // that keyword. (It didn't actually export them, it just made them so + // that they could be in a DLL and somebody from another module could call + // them.) 
+ CallingConv CC = T->getCallConv(); + switch (CC) { + default: + llvm_unreachable("Unsupported CC for mangling"); + case CC_X86_64Win64: + case CC_X86_64SysV: + case CC_C: Out << 'A'; break; + case CC_X86Pascal: Out << 'C'; break; + case CC_X86ThisCall: Out << 'E'; break; + case CC_X86StdCall: Out << 'G'; break; + case CC_X86FastCall: Out << 'I'; break; + } +} +void MicrosoftCXXNameMangler::mangleThrowSpecification( + const FunctionProtoType *FT) { + // <throw-spec> ::= Z # throw(...) (default) + // ::= @ # throw() or __declspec/__attribute__((nothrow)) + // ::= <type>+ + // NOTE: Since the Microsoft compiler ignores throw specifications, they are + // all actually mangled as 'Z'. (They're ignored because their associated + // functionality isn't implemented, and probably never will be.) + Out << 'Z'; +} + +void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T, + SourceRange Range) { + // Probably should be mangled as a template instantiation; need to see what + // VC does first. 
+ DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this unresolved dependent type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type> +// <union-type> ::= T <name> +// <struct-type> ::= U <name> +// <class-type> ::= V <name> +// <enum-type> ::= W <size> <name> +void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) { + mangleType(cast<TagType>(T)->getDecl()); +} +void MicrosoftCXXNameMangler::mangleType(const RecordType *T, SourceRange) { + mangleType(cast<TagType>(T)->getDecl()); +} +void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) { + switch (TD->getTagKind()) { + case TTK_Union: + Out << 'T'; + break; + case TTK_Struct: + case TTK_Interface: + Out << 'U'; + break; + case TTK_Class: + Out << 'V'; + break; + case TTK_Enum: + Out << 'W'; + Out << getASTContext().getTypeSizeInChars( + cast<EnumDecl>(TD)->getIntegerType()).getQuantity(); + break; + } + mangleName(TD); +} + +// <type> ::= <array-type> +// <array-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> +// [Y <dimension-count> <dimension>+] +// <element-type> # as global, E is never required +// It's supposed to be the other way around, but for some strange reason, it +// isn't. Today this behavior is retained for the sole purpose of backwards +// compatibility. +void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) { + // This isn't a recursive mangling, so now we have to do it all in this + // one call. 
+ manglePointerQualifiers(T->getElementType().getQualifiers()); + mangleType(T->getElementType(), SourceRange()); +} +void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T, + SourceRange) { + llvm_unreachable("Should have been special cased"); +} +void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T, + SourceRange) { + llvm_unreachable("Should have been special cased"); +} +void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T, + SourceRange) { + llvm_unreachable("Should have been special cased"); +} +void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T, + SourceRange) { + llvm_unreachable("Should have been special cased"); +} +void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) { + QualType ElementTy(T, 0); + SmallVector<llvm::APInt, 3> Dimensions; + for (;;) { + if (const ConstantArrayType *CAT = + getASTContext().getAsConstantArrayType(ElementTy)) { + Dimensions.push_back(CAT->getSize()); + ElementTy = CAT->getElementType(); + } else if (ElementTy->isVariableArrayType()) { + const VariableArrayType *VAT = + getASTContext().getAsVariableArrayType(ElementTy); + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this variable-length array yet"); + Diags.Report(VAT->getSizeExpr()->getExprLoc(), DiagID) + << VAT->getBracketsRange(); + return; + } else if (ElementTy->isDependentSizedArrayType()) { + // The dependent expression has to be folded into a constant (TODO). 
+ const DependentSizedArrayType *DSAT = + getASTContext().getAsDependentSizedArrayType(ElementTy); + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this dependent-length array yet"); + Diags.Report(DSAT->getSizeExpr()->getExprLoc(), DiagID) + << DSAT->getBracketsRange(); + return; + } else if (const IncompleteArrayType *IAT = + getASTContext().getAsIncompleteArrayType(ElementTy)) { + Dimensions.push_back(llvm::APInt(32, 0)); + ElementTy = IAT->getElementType(); + } + else break; + } + Out << 'Y'; + // <dimension-count> ::= <number> # number of extra dimensions + mangleNumber(Dimensions.size()); + for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) + mangleNumber(Dimensions[Dim].getLimitedValue()); + mangleType(ElementTy, SourceRange(), QMM_Escape); +} + +// <type> ::= <pointer-to-member-type> +// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> +// <class name> <type> +void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, + SourceRange Range) { + QualType PointeeType = T->getPointeeType(); + if (const FunctionProtoType *FPT = PointeeType->getAs<FunctionProtoType>()) { + Out << '8'; + mangleName(T->getClass()->castAs<RecordType>()->getDecl()); + mangleFunctionType(FPT, 0, true); + } else { + if (PointersAre64Bit && !T->getPointeeType()->isFunctionType()) + Out << 'E'; + mangleQualifiers(PointeeType.getQualifiers(), true); + mangleName(T->getClass()->castAs<RecordType>()->getDecl()); + mangleType(PointeeType, Range, QMM_Drop); + } +} + +void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this template type parameter type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType( + const SubstTemplateTypeParmPackType *T, + 
SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this substituted parameter pack yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +// <type> ::= <pointer-type> +// <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type> +// # the E is required for 64-bit non static pointers +void MicrosoftCXXNameMangler::mangleType(const PointerType *T, + SourceRange Range) { + QualType PointeeTy = T->getPointeeType(); + if (PointersAre64Bit && !T->getPointeeType()->isFunctionType()) + Out << 'E'; + mangleType(PointeeTy, Range); +} +void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T, + SourceRange Range) { + // Object pointers never have qualifiers. + Out << 'A'; + if (PointersAre64Bit && !T->getPointeeType()->isFunctionType()) + Out << 'E'; + mangleType(T->getPointeeType(), Range); +} + +// <type> ::= <reference-type> +// <reference-type> ::= A E? <cvr-qualifiers> <type> +// # the E is required for 64-bit non static lvalue references +void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T, + SourceRange Range) { + Out << 'A'; + if (PointersAre64Bit && !T->getPointeeType()->isFunctionType()) + Out << 'E'; + mangleType(T->getPointeeType(), Range); +} + +// <type> ::= <r-value-reference-type> +// <r-value-reference-type> ::= $$Q E? 
<cvr-qualifiers> <type> +// # the E is required for 64-bit non static rvalue references +void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T, + SourceRange Range) { + Out << "$$Q"; + if (PointersAre64Bit && !T->getPointeeType()->isFunctionType()) + Out << 'E'; + mangleType(T->getPointeeType(), Range); +} + +void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this complex number type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const VectorType *T, + SourceRange Range) { + const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>(); + assert(ET && "vectors with non-builtin elements are unsupported"); + uint64_t Width = getASTContext().getTypeSize(T); + // Pattern match exactly the typedefs in our intrinsic headers. Anything that + // doesn't match the Intel types uses a custom mangling below. + bool IntelVector = true; + if (Width == 64 && ET->getKind() == BuiltinType::LongLong) { + Out << "T__m64"; + } else if (Width == 128 || Width == 256) { + if (ET->getKind() == BuiltinType::Float) + Out << "T__m" << Width; + else if (ET->getKind() == BuiltinType::LongLong) + Out << "T__m" << Width << 'i'; + else if (ET->getKind() == BuiltinType::Double) + Out << "U__m" << Width << 'd'; + else + IntelVector = false; + } else { + IntelVector = false; + } + + if (!IntelVector) { + // The MS ABI doesn't have a special mangling for vector types, so we define + // our own mangling to handle uses of __vector_size__ on user-specified + // types, and for extensions like __v4sf. 
+ Out << "T__clang_vec" << T->getNumElements() << '_'; + mangleType(ET, Range); + } + + Out << "@@"; +} + +void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this extended vector type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} +void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this dependent-sized extended vector type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, + SourceRange) { + // ObjC interfaces have structs underlying them. + Out << 'U'; + mangleName(T->getDecl()); +} + +void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, + SourceRange Range) { + // We don't allow overloading by different protocol qualification, + // so mangling them isn't necessary. 
+ mangleType(T->getBaseType(), Range); +} + +void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T, + SourceRange Range) { + Out << "_E"; + + QualType pointee = T->getPointeeType(); + mangleFunctionType(pointee->castAs<FunctionProtoType>()); +} + +void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *, + SourceRange) { + llvm_unreachable("Cannot mangle injected class name type."); +} + +void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this template specialization type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this dependent name type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType( + const DependentTemplateSpecializationType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this dependent template specialization type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this pack expansion yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this typeof(type) yet"); + 
Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this typeof(expression) yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this decltype() yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this unary transform type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this 'auto' type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, + SourceRange Range) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this C11 atomic type yet"); + Diags.Report(Range.getBegin(), DiagID) + << Range; +} + +void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D, + raw_ostream &Out) { + assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) && + "Invalid mangleName() call, argument is not a variable or function!"); + assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) && + "Invalid mangleName() call on 'structor decl!"); + + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + 
getASTContext().getSourceManager(), + "Mangling declaration"); + + MicrosoftCXXNameMangler Mangler(*this, Out); + return Mangler.mangle(D); +} + +// <this-adjustment> ::= <no-adjustment> | <static-adjustment> | +// <virtual-adjustment> +// <no-adjustment> ::= A # private near +// ::= B # private far +// ::= I # protected near +// ::= J # protected far +// ::= Q # public near +// ::= R # public far +// <static-adjustment> ::= G <static-offset> # private near +// ::= H <static-offset> # private far +// ::= O <static-offset> # protected near +// ::= P <static-offset> # protected far +// ::= W <static-offset> # public near +// ::= X <static-offset> # public far +// <virtual-adjustment> ::= $0 <virtual-shift> <static-offset> # private near +// ::= $1 <virtual-shift> <static-offset> # private far +// ::= $2 <virtual-shift> <static-offset> # protected near +// ::= $3 <virtual-shift> <static-offset> # protected far +// ::= $4 <virtual-shift> <static-offset> # public near +// ::= $5 <virtual-shift> <static-offset> # public far +// <virtual-shift> ::= <vtordisp-shift> | <vtordispex-shift> +// <vtordisp-shift> ::= <offset-to-vtordisp> +// <vtordispex-shift> ::= <offset-to-vbptr> <vbase-offset-offset> +// <offset-to-vtordisp> +static void mangleThunkThisAdjustment(const CXXMethodDecl *MD, + const ThisAdjustment &Adjustment, + MicrosoftCXXNameMangler &Mangler, + raw_ostream &Out) { + if (!Adjustment.Virtual.isEmpty()) { + Out << '$'; + char AccessSpec; + switch (MD->getAccess()) { + case AS_none: + llvm_unreachable("Unsupported access specifier"); + case AS_private: + AccessSpec = '0'; + break; + case AS_protected: + AccessSpec = '2'; + break; + case AS_public: + AccessSpec = '4'; + } + if (Adjustment.Virtual.Microsoft.VBPtrOffset) { + Out << 'R' << AccessSpec; + Mangler.mangleNumber( + static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBPtrOffset)); + Mangler.mangleNumber( + static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBOffsetOffset)); + Mangler.mangleNumber( + 
static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset)); + Mangler.mangleNumber(static_cast<uint32_t>(Adjustment.NonVirtual)); + } else { + Out << AccessSpec; + Mangler.mangleNumber( + static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset)); + Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual)); + } + } else if (Adjustment.NonVirtual != 0) { + switch (MD->getAccess()) { + case AS_none: + llvm_unreachable("Unsupported access specifier"); + case AS_private: + Out << 'G'; + break; + case AS_protected: + Out << 'O'; + break; + case AS_public: + Out << 'W'; + } + Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual)); + } else { + switch (MD->getAccess()) { + case AS_none: + llvm_unreachable("Unsupported access specifier"); + case AS_private: + Out << 'A'; + break; + case AS_protected: + Out << 'I'; + break; + case AS_public: + Out << 'Q'; + } + } +} + +void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk( + const CXXMethodDecl *MD, uint64_t OffsetInVFTable, raw_ostream &Out) { + bool Is64Bit = getASTContext().getTargetInfo().getPointerWidth(0) == 64; + + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "\01??_9"; + Mangler.mangleName(MD->getParent()); + Mangler.getStream() << "$B"; + Mangler.mangleNumber(OffsetInVFTable); + Mangler.getStream() << "A"; + Mangler.getStream() << (Is64Bit ? "A" : "E"); +} + +void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, + raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Out << "\01?"; + Mangler.mangleName(MD); + mangleThunkThisAdjustment(MD, Thunk.This, Mangler, Out); + if (!Thunk.Return.isEmpty()) + assert(Thunk.Method != 0 && "Thunk info should hold the overridee decl"); + + const CXXMethodDecl *DeclForFPT = Thunk.Method ? 
Thunk.Method : MD; + Mangler.mangleFunctionType( + DeclForFPT->getType()->castAs<FunctionProtoType>(), MD); +} + +void MicrosoftMangleContextImpl::mangleCXXDtorThunk( + const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &Adjustment, raw_ostream &Out) { + // FIXME: Actually, the dtor thunk should be emitted for vector deleting + // dtors rather than scalar deleting dtors. Just use the vector deleting dtor + // mangling manually until we support both deleting dtor types. + assert(Type == Dtor_Deleting); + MicrosoftCXXNameMangler Mangler(*this, Out, DD, Type); + Out << "\01??_E"; + Mangler.mangleName(DD->getParent()); + mangleThunkThisAdjustment(DD, Adjustment, Mangler, Out); + Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD); +} + +void MicrosoftMangleContextImpl::mangleCXXVFTable( + const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, + raw_ostream &Out) { + // <mangled-name> ::= ?_7 <class-name> <storage-class> + // <cvr-qualifiers> [<name>] @ + // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class> + // is always '6' for vftables. + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "\01??_7"; + Mangler.mangleName(Derived); + Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const. + for (ArrayRef<const CXXRecordDecl *>::iterator I = BasePath.begin(), + E = BasePath.end(); + I != E; ++I) { + Mangler.mangleName(*I); + } + Mangler.getStream() << '@'; +} + +void MicrosoftMangleContextImpl::mangleCXXVBTable( + const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, + raw_ostream &Out) { + // <mangled-name> ::= ?_8 <class-name> <storage-class> + // <cvr-qualifiers> [<name>] @ + // NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class> + // is always '7' for vbtables. 
+ MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "\01??_8"; + Mangler.mangleName(Derived); + Mangler.getStream() << "7B"; // '7' for vbtable, 'B' for const. + for (ArrayRef<const CXXRecordDecl *>::iterator I = BasePath.begin(), + E = BasePath.end(); + I != E; ++I) { + Mangler.mangleName(*I); + } + Mangler.getStream() << '@'; +} + +void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &) { + // FIXME: Give a location... + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle RTTI descriptors for type %0 yet"); + getDiags().Report(DiagID) + << T.getBaseTypeIdentifier(); +} + +void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T, raw_ostream &) { + // FIXME: Give a location... + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle the name of type %0 into RTTI descriptors yet"); + getDiags().Report(DiagID) + << T.getBaseTypeIdentifier(); +} + +void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) { + // This is just a made up unique string for the purposes of tbaa. undname + // does *not* know how to demangle it. 
+ MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << '?'; + Mangler.mangleType(T, SourceRange()); +} + +void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D, + CXXCtorType Type, + raw_ostream &Out) { + MicrosoftCXXNameMangler mangler(*this, Out); + mangler.mangle(D); +} + +void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D, + CXXDtorType Type, + raw_ostream &Out) { + MicrosoftCXXNameMangler mangler(*this, Out, D, Type); + mangler.mangle(D); +} + +void MicrosoftMangleContextImpl::mangleReferenceTemporary(const VarDecl *VD, + raw_ostream &) { + unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, + "cannot mangle this reference temporary yet"); + getDiags().Report(VD->getLocation(), DiagID); +} + +void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD, + raw_ostream &Out) { + // <guard-name> ::= ?_B <postfix> @51 + // ::= ?$S <guard-num> @ <postfix> @4IA + + // The first mangling is what MSVC uses to guard static locals in inline + // functions. It uses a different mangling in external functions to support + // guarding more than 32 variables. MSVC rejects inline functions with more + // than 32 static locals. We don't fully implement the second mangling + // because those guards are not externally visible, and instead use LLVM's + // default renaming when creating a new guard variable. + MicrosoftCXXNameMangler Mangler(*this, Out); + + bool Visible = VD->isExternallyVisible(); + // <operator-name> ::= ?_B # local static guard + Mangler.getStream() << (Visible ? "\01??_B" : "\01?$S1@"); + Mangler.manglePostfix(VD->getDeclContext()); + Mangler.getStream() << (Visible ? "@51" : "@4IA"); +} + +void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D, + raw_ostream &Out, + char CharCode) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "\01??__" << CharCode; + Mangler.mangleName(D); + // This is the function class mangling. 
These stubs are global, non-variadic, + // cdecl functions that return void and take no args. + Mangler.getStream() << "YAXXZ"; +} + +void MicrosoftMangleContextImpl::mangleDynamicInitializer(const VarDecl *D, + raw_ostream &Out) { + // <initializer-name> ::= ?__E <name> YAXXZ + mangleInitFiniStub(D, Out, 'E'); +} + +void +MicrosoftMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D, + raw_ostream &Out) { + // <destructor-name> ::= ?__F <name> YAXXZ + mangleInitFiniStub(D, Out, 'F'); +} + +MicrosoftMangleContext * +MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) { + return new MicrosoftMangleContextImpl(Context, Diags); +} diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp new file mode 100644 index 000000000000..a862630bbf8d --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp @@ -0,0 +1,429 @@ +//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/NSAPI.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Expr.h" + +using namespace clang; + +NSAPI::NSAPI(ASTContext &ctx) + : Ctx(ctx), ClassIds(), BOOLId(0), NSIntegerId(0), NSUIntegerId(0), + NSASCIIStringEncodingId(0), NSUTF8StringEncodingId(0) { +} + +IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const { + static const char *ClassName[NumClassIds] = { + "NSObject", + "NSString", + "NSArray", + "NSMutableArray", + "NSDictionary", + "NSMutableDictionary", + "NSNumber" + }; + + if (!ClassIds[K]) + return (ClassIds[K] = &Ctx.Idents.get(ClassName[K])); + + return ClassIds[K]; +} + +Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const { + if (NSStringSelectors[MK].isNull()) { + Selector Sel; + switch (MK) { + case NSStr_stringWithString: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString")); + break; + case NSStr_stringWithUTF8String: + Sel = Ctx.Selectors.getUnarySelector( + &Ctx.Idents.get("stringWithUTF8String")); + break; + case NSStr_stringWithCStringEncoding: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("stringWithCString"), + &Ctx.Idents.get("encoding") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSStr_stringWithCString: + Sel= Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithCString")); + break; + case NSStr_initWithString: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString")); + break; + } + return (NSStringSelectors[MK] = Sel); + } + + return NSStringSelectors[MK]; +} + +Optional<NSAPI::NSStringMethodKind> +NSAPI::getNSStringMethodKind(Selector Sel) const { + for (unsigned i = 0; i != NumNSStringMethods; ++i) { + NSStringMethodKind MK = NSStringMethodKind(i); + if (Sel == getNSStringSelector(MK)) + return MK; + } + + return None; +} + +Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { + if 
(NSArraySelectors[MK].isNull()) { + Selector Sel; + switch (MK) { + case NSArr_array: + Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array")); + break; + case NSArr_arrayWithArray: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray")); + break; + case NSArr_arrayWithObject: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject")); + break; + case NSArr_arrayWithObjects: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects")); + break; + case NSArr_arrayWithObjectsCount: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("arrayWithObjects"), + &Ctx.Idents.get("count") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSArr_initWithArray: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray")); + break; + case NSArr_initWithObjects: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects")); + break; + case NSArr_objectAtIndex: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex")); + break; + case NSMutableArr_replaceObjectAtIndex: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + } + return (NSArraySelectors[MK] = Sel); + } + + return NSArraySelectors[MK]; +} + +Optional<NSAPI::NSArrayMethodKind> NSAPI::getNSArrayMethodKind(Selector Sel) { + for (unsigned i = 0; i != NumNSArrayMethods; ++i) { + NSArrayMethodKind MK = NSArrayMethodKind(i); + if (Sel == getNSArraySelector(MK)) + return MK; + } + + return None; +} + +Selector NSAPI::getNSDictionarySelector( + NSDictionaryMethodKind MK) const { + if (NSDictionarySelectors[MK].isNull()) { + Selector Sel; + switch (MK) { + case NSDict_dictionary: + Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary")); + break; + case NSDict_dictionaryWithDictionary: + Sel = Ctx.Selectors.getUnarySelector( + 
&Ctx.Idents.get("dictionaryWithDictionary")); + break; + case NSDict_dictionaryWithObjectForKey: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObject"), + &Ctx.Idents.get("forKey") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSDict_dictionaryWithObjectsForKeys: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), + &Ctx.Idents.get("forKeys") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSDict_dictionaryWithObjectsForKeysCount: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), + &Ctx.Idents.get("forKeys"), + &Ctx.Idents.get("count") + }; + Sel = Ctx.Selectors.getSelector(3, KeyIdents); + break; + } + case NSDict_dictionaryWithObjectsAndKeys: + Sel = Ctx.Selectors.getUnarySelector( + &Ctx.Idents.get("dictionaryWithObjectsAndKeys")); + break; + case NSDict_initWithDictionary: + Sel = Ctx.Selectors.getUnarySelector( + &Ctx.Idents.get("initWithDictionary")); + break; + case NSDict_initWithObjectsAndKeys: + Sel = Ctx.Selectors.getUnarySelector( + &Ctx.Idents.get("initWithObjectsAndKeys")); + break; + case NSDict_initWithObjectsForKeys: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("initWithObjects"), + &Ctx.Idents.get("forKeys") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSDict_objectForKey: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey")); + break; + case NSMutableDict_setObjectForKey: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), + &Ctx.Idents.get("forKey") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + } + return (NSDictionarySelectors[MK] = Sel); + } + + return NSDictionarySelectors[MK]; +} + +Optional<NSAPI::NSDictionaryMethodKind> +NSAPI::getNSDictionaryMethodKind(Selector Sel) { + for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) { + NSDictionaryMethodKind MK = NSDictionaryMethodKind(i); + if (Sel == 
getNSDictionarySelector(MK)) + return MK; + } + + return None; +} + +Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK, + bool Instance) const { + static const char *ClassSelectorName[NumNSNumberLiteralMethods] = { + "numberWithChar", + "numberWithUnsignedChar", + "numberWithShort", + "numberWithUnsignedShort", + "numberWithInt", + "numberWithUnsignedInt", + "numberWithLong", + "numberWithUnsignedLong", + "numberWithLongLong", + "numberWithUnsignedLongLong", + "numberWithFloat", + "numberWithDouble", + "numberWithBool", + "numberWithInteger", + "numberWithUnsignedInteger" + }; + static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = { + "initWithChar", + "initWithUnsignedChar", + "initWithShort", + "initWithUnsignedShort", + "initWithInt", + "initWithUnsignedInt", + "initWithLong", + "initWithUnsignedLong", + "initWithLongLong", + "initWithUnsignedLongLong", + "initWithFloat", + "initWithDouble", + "initWithBool", + "initWithInteger", + "initWithUnsignedInteger" + }; + + Selector *Sels; + const char **Names; + if (Instance) { + Sels = NSNumberInstanceSelectors; + Names = InstanceSelectorName; + } else { + Sels = NSNumberClassSelectors; + Names = ClassSelectorName; + } + + if (Sels[MK].isNull()) + Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK])); + return Sels[MK]; +} + +Optional<NSAPI::NSNumberLiteralMethodKind> +NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const { + for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) { + NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i); + if (isNSNumberLiteralSelector(MK, Sel)) + return MK; + } + + return None; +} + +Optional<NSAPI::NSNumberLiteralMethodKind> +NSAPI::getNSNumberFactoryMethodKind(QualType T) const { + const BuiltinType *BT = T->getAs<BuiltinType>(); + if (!BT) + return None; + + const TypedefType *TDT = T->getAs<TypedefType>(); + if (TDT) { + QualType TDTTy = QualType(TDT, 0); + if (isObjCBOOLType(TDTTy)) + return 
NSAPI::NSNumberWithBool; + if (isObjCNSIntegerType(TDTTy)) + return NSAPI::NSNumberWithInteger; + if (isObjCNSUIntegerType(TDTTy)) + return NSAPI::NSNumberWithUnsignedInteger; + } + + switch (BT->getKind()) { + case BuiltinType::Char_S: + case BuiltinType::SChar: + return NSAPI::NSNumberWithChar; + case BuiltinType::Char_U: + case BuiltinType::UChar: + return NSAPI::NSNumberWithUnsignedChar; + case BuiltinType::Short: + return NSAPI::NSNumberWithShort; + case BuiltinType::UShort: + return NSAPI::NSNumberWithUnsignedShort; + case BuiltinType::Int: + return NSAPI::NSNumberWithInt; + case BuiltinType::UInt: + return NSAPI::NSNumberWithUnsignedInt; + case BuiltinType::Long: + return NSAPI::NSNumberWithLong; + case BuiltinType::ULong: + return NSAPI::NSNumberWithUnsignedLong; + case BuiltinType::LongLong: + return NSAPI::NSNumberWithLongLong; + case BuiltinType::ULongLong: + return NSAPI::NSNumberWithUnsignedLongLong; + case BuiltinType::Float: + return NSAPI::NSNumberWithFloat; + case BuiltinType::Double: + return NSAPI::NSNumberWithDouble; + case BuiltinType::Bool: + return NSAPI::NSNumberWithBool; + + case BuiltinType::Void: + case BuiltinType::WChar_U: + case BuiltinType::WChar_S: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Int128: + case BuiltinType::LongDouble: + case BuiltinType::UInt128: + case BuiltinType::NullPtr: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCId: + case BuiltinType::ObjCSel: + case BuiltinType::OCLImage1d: + case BuiltinType::OCLImage1dArray: + case BuiltinType::OCLImage1dBuffer: + case BuiltinType::OCLImage2d: + case BuiltinType::OCLImage2dArray: + case BuiltinType::OCLImage3d: + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::BoundMember: + case BuiltinType::Dependent: + case BuiltinType::Overload: + case BuiltinType::UnknownAny: + case BuiltinType::ARCUnbridgedCast: + case BuiltinType::Half: + case BuiltinType::PseudoObject: + case BuiltinType::BuiltinFn: + break; + } 
+ + return None; +} + +/// \brief Returns true if \param T is a typedef of "BOOL" in objective-c. +bool NSAPI::isObjCBOOLType(QualType T) const { + return isObjCTypedef(T, "BOOL", BOOLId); +} +/// \brief Returns true if \param T is a typedef of "NSInteger" in objective-c. +bool NSAPI::isObjCNSIntegerType(QualType T) const { + return isObjCTypedef(T, "NSInteger", NSIntegerId); +} +/// \brief Returns true if \param T is a typedef of "NSUInteger" in objective-c. +bool NSAPI::isObjCNSUIntegerType(QualType T) const { + return isObjCTypedef(T, "NSUInteger", NSUIntegerId); +} + +bool NSAPI::isObjCTypedef(QualType T, + StringRef name, IdentifierInfo *&II) const { + if (!Ctx.getLangOpts().ObjC1) + return false; + if (T.isNull()) + return false; + + if (!II) + II = &Ctx.Idents.get(name); + + while (const TypedefType *TDT = T->getAs<TypedefType>()) { + if (TDT->getDecl()->getDeclName().getAsIdentifierInfo() == II) + return true; + T = TDT->desugar(); + } + + return false; +} + +bool NSAPI::isObjCEnumerator(const Expr *E, + StringRef name, IdentifierInfo *&II) const { + if (!Ctx.getLangOpts().ObjC1) + return false; + if (!E) + return false; + + if (!II) + II = &Ctx.Idents.get(name); + + if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) + if (const EnumConstantDecl * + EnumD = dyn_cast_or_null<EnumConstantDecl>(DRE->getDecl())) + return EnumD->getIdentifier() == II; + + return false; +} + +Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids, + Selector &Sel) const { + if (Sel.isNull()) { + SmallVector<IdentifierInfo *, 4> Idents; + for (ArrayRef<StringRef>::const_iterator + I = Ids.begin(), E = Ids.end(); I != E; ++I) + Idents.push_back(&Ctx.Idents.get(*I)); + Sel = Ctx.Selectors.getSelector(Idents.size(), Idents.data()); + } + return Sel; +} diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp new file mode 100644 index 000000000000..b03c4e09fa46 --- /dev/null +++ 
b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp @@ -0,0 +1,632 @@ +//===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the NestedNameSpecifier class, which represents +// a C++ nested-name-specifier. +// +//===----------------------------------------------------------------------===// +#include "clang/AST/NestedNameSpecifier.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Type.h" +#include "clang/AST/TypeLoc.h" +#include "llvm/Support/AlignOf.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> + +using namespace clang; + +NestedNameSpecifier * +NestedNameSpecifier::FindOrInsert(const ASTContext &Context, + const NestedNameSpecifier &Mockup) { + llvm::FoldingSetNodeID ID; + Mockup.Profile(ID); + + void *InsertPos = 0; + NestedNameSpecifier *NNS + = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos); + if (!NNS) { + NNS = new (Context, llvm::alignOf<NestedNameSpecifier>()) + NestedNameSpecifier(Mockup); + Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos); + } + + return NNS; +} + +NestedNameSpecifier * +NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, IdentifierInfo *II) { + assert(II && "Identifier cannot be NULL"); + assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent"); + + NestedNameSpecifier Mockup; + Mockup.Prefix.setPointer(Prefix); + Mockup.Prefix.setInt(StoredIdentifier); + Mockup.Specifier = II; + return FindOrInsert(Context, Mockup); +} + +NestedNameSpecifier * +NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, + const 
NamespaceDecl *NS) { + assert(NS && "Namespace cannot be NULL"); + assert((!Prefix || + (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) && + "Broken nested name specifier"); + NestedNameSpecifier Mockup; + Mockup.Prefix.setPointer(Prefix); + Mockup.Prefix.setInt(StoredNamespaceOrAlias); + Mockup.Specifier = const_cast<NamespaceDecl *>(NS); + return FindOrInsert(Context, Mockup); +} + +NestedNameSpecifier * +NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, + NamespaceAliasDecl *Alias) { + assert(Alias && "Namespace alias cannot be NULL"); + assert((!Prefix || + (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) && + "Broken nested name specifier"); + NestedNameSpecifier Mockup; + Mockup.Prefix.setPointer(Prefix); + Mockup.Prefix.setInt(StoredNamespaceOrAlias); + Mockup.Specifier = Alias; + return FindOrInsert(Context, Mockup); +} + +NestedNameSpecifier * +NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, + bool Template, const Type *T) { + assert(T && "Type cannot be NULL"); + NestedNameSpecifier Mockup; + Mockup.Prefix.setPointer(Prefix); + Mockup.Prefix.setInt(Template? 
StoredTypeSpecWithTemplate : StoredTypeSpec); + Mockup.Specifier = const_cast<Type*>(T); + return FindOrInsert(Context, Mockup); +} + +NestedNameSpecifier * +NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) { + assert(II && "Identifier cannot be NULL"); + NestedNameSpecifier Mockup; + Mockup.Prefix.setPointer(0); + Mockup.Prefix.setInt(StoredIdentifier); + Mockup.Specifier = II; + return FindOrInsert(Context, Mockup); +} + +NestedNameSpecifier * +NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) { + if (!Context.GlobalNestedNameSpecifier) + Context.GlobalNestedNameSpecifier = + new (Context, llvm::alignOf<NestedNameSpecifier>()) + NestedNameSpecifier(); + return Context.GlobalNestedNameSpecifier; +} + +NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const { + if (Specifier == 0) + return Global; + + switch (Prefix.getInt()) { + case StoredIdentifier: + return Identifier; + + case StoredNamespaceOrAlias: + return isa<NamespaceDecl>(static_cast<NamedDecl *>(Specifier))? Namespace + : NamespaceAlias; + + case StoredTypeSpec: + return TypeSpec; + + case StoredTypeSpecWithTemplate: + return TypeSpecWithTemplate; + } + + llvm_unreachable("Invalid NNS Kind!"); +} + +/// \brief Retrieve the namespace stored in this nested name +/// specifier. +NamespaceDecl *NestedNameSpecifier::getAsNamespace() const { + if (Prefix.getInt() == StoredNamespaceOrAlias) + return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier)); + + return 0; +} + +/// \brief Retrieve the namespace alias stored in this nested name +/// specifier. +NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const { + if (Prefix.getInt() == StoredNamespaceOrAlias) + return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier)); + + return 0; +} + + +/// \brief Whether this nested name specifier refers to a dependent +/// type or not. 
+bool NestedNameSpecifier::isDependent() const { + switch (getKind()) { + case Identifier: + // Identifier specifiers always represent dependent types + return true; + + case Namespace: + case NamespaceAlias: + case Global: + return false; + + case TypeSpec: + case TypeSpecWithTemplate: + return getAsType()->isDependentType(); + } + + llvm_unreachable("Invalid NNS Kind!"); +} + +/// \brief Whether this nested name specifier refers to a dependent +/// type or not. +bool NestedNameSpecifier::isInstantiationDependent() const { + switch (getKind()) { + case Identifier: + // Identifier specifiers always represent dependent types + return true; + + case Namespace: + case NamespaceAlias: + case Global: + return false; + + case TypeSpec: + case TypeSpecWithTemplate: + return getAsType()->isInstantiationDependentType(); + } + + llvm_unreachable("Invalid NNS Kind!"); +} + +bool NestedNameSpecifier::containsUnexpandedParameterPack() const { + switch (getKind()) { + case Identifier: + return getPrefix() && getPrefix()->containsUnexpandedParameterPack(); + + case Namespace: + case NamespaceAlias: + case Global: + return false; + + case TypeSpec: + case TypeSpecWithTemplate: + return getAsType()->containsUnexpandedParameterPack(); + } + + llvm_unreachable("Invalid NNS Kind!"); +} + +/// \brief Print this nested name specifier to the given output +/// stream. +void +NestedNameSpecifier::print(raw_ostream &OS, + const PrintingPolicy &Policy) const { + if (getPrefix()) + getPrefix()->print(OS, Policy); + + switch (getKind()) { + case Identifier: + OS << getAsIdentifier()->getName(); + break; + + case Namespace: + if (getAsNamespace()->isAnonymousNamespace()) + return; + + OS << getAsNamespace()->getName(); + break; + + case NamespaceAlias: + OS << getAsNamespaceAlias()->getName(); + break; + + case Global: + break; + + case TypeSpecWithTemplate: + OS << "template "; + // Fall through to print the type. 
+ + case TypeSpec: { + const Type *T = getAsType(); + + PrintingPolicy InnerPolicy(Policy); + InnerPolicy.SuppressScope = true; + + // Nested-name-specifiers are intended to contain minimally-qualified + // types. An actual ElaboratedType will not occur, since we'll store + // just the type that is referred to in the nested-name-specifier (e.g., + // a TypedefType, TagType, etc.). However, when we are dealing with + // dependent template-id types (e.g., Outer<T>::template Inner<U>), + // the type requires its own nested-name-specifier for uniqueness, so we + // suppress that nested-name-specifier during printing. + assert(!isa<ElaboratedType>(T) && + "Elaborated type in nested-name-specifier"); + if (const TemplateSpecializationType *SpecType + = dyn_cast<TemplateSpecializationType>(T)) { + // Print the template name without its corresponding + // nested-name-specifier. + SpecType->getTemplateName().print(OS, InnerPolicy, true); + + // Print the template argument list. + TemplateSpecializationType::PrintTemplateArgumentList( + OS, SpecType->getArgs(), SpecType->getNumArgs(), InnerPolicy); + } else { + // Print the type normally + QualType(T, 0).print(OS, InnerPolicy); + } + break; + } + } + + OS << "::"; +} + +void NestedNameSpecifier::dump(const LangOptions &LO) { + print(llvm::errs(), PrintingPolicy(LO)); +} + +unsigned +NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) { + assert(Qualifier && "Expected a non-NULL qualifier"); + + // Location of the trailing '::'. + unsigned Length = sizeof(unsigned); + + switch (Qualifier->getKind()) { + case NestedNameSpecifier::Global: + // Nothing more to add. + break; + + case NestedNameSpecifier::Identifier: + case NestedNameSpecifier::Namespace: + case NestedNameSpecifier::NamespaceAlias: + // The location of the identifier or namespace name. 
+ Length += sizeof(unsigned); + break; + + case NestedNameSpecifier::TypeSpecWithTemplate: + case NestedNameSpecifier::TypeSpec: + // The "void*" that points at the TypeLoc data. + // Note: the 'template' keyword is part of the TypeLoc. + Length += sizeof(void *); + break; + } + + return Length; +} + +unsigned +NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) { + unsigned Length = 0; + for (; Qualifier; Qualifier = Qualifier->getPrefix()) + Length += getLocalDataLength(Qualifier); + return Length; +} + +namespace { + /// \brief Load a (possibly unaligned) source location from a given address + /// and offset. + SourceLocation LoadSourceLocation(void *Data, unsigned Offset) { + unsigned Raw; + memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned)); + return SourceLocation::getFromRawEncoding(Raw); + } + + /// \brief Load a (possibly unaligned) pointer from a given address and + /// offset. + void *LoadPointer(void *Data, unsigned Offset) { + void *Result; + memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*)); + return Result; + } +} + +SourceRange NestedNameSpecifierLoc::getSourceRange() const { + if (!Qualifier) + return SourceRange(); + + NestedNameSpecifierLoc First = *this; + while (NestedNameSpecifierLoc Prefix = First.getPrefix()) + First = Prefix; + + return SourceRange(First.getLocalSourceRange().getBegin(), + getLocalSourceRange().getEnd()); +} + +SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const { + if (!Qualifier) + return SourceRange(); + + unsigned Offset = getDataLength(Qualifier->getPrefix()); + switch (Qualifier->getKind()) { + case NestedNameSpecifier::Global: + return LoadSourceLocation(Data, Offset); + + case NestedNameSpecifier::Identifier: + case NestedNameSpecifier::Namespace: + case NestedNameSpecifier::NamespaceAlias: + return SourceRange(LoadSourceLocation(Data, Offset), + LoadSourceLocation(Data, Offset + sizeof(unsigned))); + + case NestedNameSpecifier::TypeSpecWithTemplate: + 
case NestedNameSpecifier::TypeSpec: { + // The "void*" that points at the TypeLoc data. + // Note: the 'template' keyword is part of the TypeLoc. + void *TypeData = LoadPointer(Data, Offset); + TypeLoc TL(Qualifier->getAsType(), TypeData); + return SourceRange(TL.getBeginLoc(), + LoadSourceLocation(Data, Offset + sizeof(void*))); + } + } + + llvm_unreachable("Invalid NNS Kind!"); +} + +TypeLoc NestedNameSpecifierLoc::getTypeLoc() const { + assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec || + Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) && + "Nested-name-specifier location is not a type"); + + // The "void*" that points at the TypeLoc data. + unsigned Offset = getDataLength(Qualifier->getPrefix()); + void *TypeData = LoadPointer(Data, Offset); + return TypeLoc(Qualifier->getAsType(), TypeData); +} + +namespace { + void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize, + unsigned &BufferCapacity) { + if (BufferSize + (End - Start) > BufferCapacity) { + // Reallocate the buffer. + unsigned NewCapacity + = std::max((unsigned)(BufferCapacity? BufferCapacity * 2 + : sizeof(void*) * 2), + (unsigned)(BufferSize + (End - Start))); + char *NewBuffer = static_cast<char *>(malloc(NewCapacity)); + memcpy(NewBuffer, Buffer, BufferSize); + + if (BufferCapacity) + free(Buffer); + Buffer = NewBuffer; + BufferCapacity = NewCapacity; + } + + memcpy(Buffer + BufferSize, Start, End - Start); + BufferSize += End-Start; + } + + /// \brief Save a source location to the given buffer. + void SaveSourceLocation(SourceLocation Loc, char *&Buffer, + unsigned &BufferSize, unsigned &BufferCapacity) { + unsigned Raw = Loc.getRawEncoding(); + Append(reinterpret_cast<char *>(&Raw), + reinterpret_cast<char *>(&Raw) + sizeof(unsigned), + Buffer, BufferSize, BufferCapacity); + } + + /// \brief Save a pointer to the given buffer. 
+ void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize, + unsigned &BufferCapacity) { + Append(reinterpret_cast<char *>(&Ptr), + reinterpret_cast<char *>(&Ptr) + sizeof(void *), + Buffer, BufferSize, BufferCapacity); + } +} + +NestedNameSpecifierLocBuilder:: +NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other) + : Representation(Other.Representation), Buffer(0), + BufferSize(0), BufferCapacity(0) +{ + if (!Other.Buffer) + return; + + if (Other.BufferCapacity == 0) { + // Shallow copy is okay. + Buffer = Other.Buffer; + BufferSize = Other.BufferSize; + return; + } + + // Deep copy + BufferSize = Other.BufferSize; + BufferCapacity = Other.BufferSize; + Buffer = static_cast<char *>(malloc(BufferCapacity)); + memcpy(Buffer, Other.Buffer, BufferSize); +} + +NestedNameSpecifierLocBuilder & +NestedNameSpecifierLocBuilder:: +operator=(const NestedNameSpecifierLocBuilder &Other) { + Representation = Other.Representation; + + if (Buffer && Other.Buffer && BufferCapacity >= Other.BufferSize) { + // Re-use our storage. + BufferSize = Other.BufferSize; + memcpy(Buffer, Other.Buffer, BufferSize); + return *this; + } + + // Free our storage, if we have any. + if (BufferCapacity) { + free(Buffer); + BufferCapacity = 0; + } + + if (!Other.Buffer) { + // Empty. + Buffer = 0; + BufferSize = 0; + return *this; + } + + if (Other.BufferCapacity == 0) { + // Shallow copy is okay. + Buffer = Other.Buffer; + BufferSize = Other.BufferSize; + return *this; + } + + // Deep copy. 
+ BufferSize = Other.BufferSize; + BufferCapacity = BufferSize; + Buffer = static_cast<char *>(malloc(BufferSize)); + memcpy(Buffer, Other.Buffer, BufferSize); + return *this; +} + +void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, + SourceLocation TemplateKWLoc, + TypeLoc TL, + SourceLocation ColonColonLoc) { + Representation = NestedNameSpecifier::Create(Context, Representation, + TemplateKWLoc.isValid(), + TL.getTypePtr()); + + // Push source-location info into the buffer. + SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity); + SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity); +} + +void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, + IdentifierInfo *Identifier, + SourceLocation IdentifierLoc, + SourceLocation ColonColonLoc) { + Representation = NestedNameSpecifier::Create(Context, Representation, + Identifier); + + // Push source-location info into the buffer. + SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity); + SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity); +} + +void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, + NamespaceDecl *Namespace, + SourceLocation NamespaceLoc, + SourceLocation ColonColonLoc) { + Representation = NestedNameSpecifier::Create(Context, Representation, + Namespace); + + // Push source-location info into the buffer. + SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity); + SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity); +} + +void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context, + NamespaceAliasDecl *Alias, + SourceLocation AliasLoc, + SourceLocation ColonColonLoc) { + Representation = NestedNameSpecifier::Create(Context, Representation, Alias); + + // Push source-location info into the buffer. 
+ SaveSourceLocation(AliasLoc, Buffer, BufferSize, BufferCapacity); + SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity); +} + +void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context, + SourceLocation ColonColonLoc) { + assert(!Representation && "Already have a nested-name-specifier!?"); + Representation = NestedNameSpecifier::GlobalSpecifier(Context); + + // Push source-location info into the buffer. + SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity); +} + +void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context, + NestedNameSpecifier *Qualifier, + SourceRange R) { + Representation = Qualifier; + + // Construct bogus (but well-formed) source information for the + // nested-name-specifier. + BufferSize = 0; + SmallVector<NestedNameSpecifier *, 4> Stack; + for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix()) + Stack.push_back(NNS); + while (!Stack.empty()) { + NestedNameSpecifier *NNS = Stack.pop_back_val(); + switch (NNS->getKind()) { + case NestedNameSpecifier::Identifier: + case NestedNameSpecifier::Namespace: + case NestedNameSpecifier::NamespaceAlias: + SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity); + break; + + case NestedNameSpecifier::TypeSpec: + case NestedNameSpecifier::TypeSpecWithTemplate: { + TypeSourceInfo *TSInfo + = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0), + R.getBegin()); + SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize, + BufferCapacity); + break; + } + + case NestedNameSpecifier::Global: + break; + } + + // Save the location of the '::'. + SaveSourceLocation(Stack.empty()? 
R.getEnd() : R.getBegin(), + Buffer, BufferSize, BufferCapacity); + } +} + +void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) { + if (BufferCapacity) + free(Buffer); + + if (!Other) { + Representation = 0; + BufferSize = 0; + return; + } + + // Rather than copying the data (which is wasteful), "adopt" the + // pointer (which points into the ASTContext) but set the capacity to zero to + // indicate that we don't own it. + Representation = Other.getNestedNameSpecifier(); + Buffer = static_cast<char *>(Other.getOpaqueData()); + BufferSize = Other.getDataLength(); + BufferCapacity = 0; +} + +NestedNameSpecifierLoc +NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const { + if (!Representation) + return NestedNameSpecifierLoc(); + + // If we adopted our data pointer from elsewhere in the AST context, there's + // no need to copy the memory. + if (BufferCapacity == 0) + return NestedNameSpecifierLoc(Representation, Buffer); + + // FIXME: After copying the source-location information, should we free + // our (temporary) buffer and adopt the ASTContext-allocated memory? + // Doing so would optimize repeated calls to getWithLocInContext(). + void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>()); + memcpy(Mem, Buffer, BufferSize); + return NestedNameSpecifierLoc(Representation, Mem); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp new file mode 100644 index 000000000000..ff44d938d3ae --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp @@ -0,0 +1,198 @@ +//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the ParentMap class. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ParentMap.h" +#include "clang/AST/Decl.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "llvm/ADT/DenseMap.h" + +using namespace clang; + +typedef llvm::DenseMap<Stmt*, Stmt*> MapTy; + +enum OpaqueValueMode { + OV_Transparent, + OV_Opaque +}; + +static void BuildParentMap(MapTy& M, Stmt* S, + OpaqueValueMode OVMode = OV_Transparent) { + + switch (S->getStmtClass()) { + case Stmt::PseudoObjectExprClass: { + assert(OVMode == OV_Transparent && "Should not appear alongside OVEs"); + PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S); + + // If we are rebuilding the map, clear out any existing state. + if (M[POE->getSyntacticForm()]) + for (Stmt::child_range I = S->children(); I; ++I) + M[*I] = 0; + + M[POE->getSyntacticForm()] = S; + BuildParentMap(M, POE->getSyntacticForm(), OV_Transparent); + + for (PseudoObjectExpr::semantics_iterator I = POE->semantics_begin(), + E = POE->semantics_end(); + I != E; ++I) { + M[*I] = S; + BuildParentMap(M, *I, OV_Opaque); + } + break; + } + case Stmt::BinaryConditionalOperatorClass: { + assert(OVMode == OV_Transparent && "Should not appear alongside OVEs"); + BinaryConditionalOperator *BCO = cast<BinaryConditionalOperator>(S); + + M[BCO->getCommon()] = S; + BuildParentMap(M, BCO->getCommon(), OV_Transparent); + + M[BCO->getCond()] = S; + BuildParentMap(M, BCO->getCond(), OV_Opaque); + + M[BCO->getTrueExpr()] = S; + BuildParentMap(M, BCO->getTrueExpr(), OV_Opaque); + + M[BCO->getFalseExpr()] = S; + BuildParentMap(M, BCO->getFalseExpr(), OV_Transparent); + + break; + } + case Stmt::OpaqueValueExprClass: { + // FIXME: This isn't correct; it assumes that multiple OpaqueValueExprs + // share a single source expression, but in the AST a single + // OpaqueValueExpr is shared among multiple parent expressions. 
+ // The right thing to do is to give the OpaqueValueExpr its syntactic + // parent, then not reassign that when traversing the semantic expressions. + OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(S); + if (OVMode == OV_Transparent || !M[OVE->getSourceExpr()]) { + M[OVE->getSourceExpr()] = S; + BuildParentMap(M, OVE->getSourceExpr(), OV_Transparent); + } + break; + } + default: + for (Stmt::child_range I = S->children(); I; ++I) { + if (*I) { + M[*I] = S; + BuildParentMap(M, *I, OVMode); + } + } + break; + } +} + +ParentMap::ParentMap(Stmt* S) : Impl(0) { + if (S) { + MapTy *M = new MapTy(); + BuildParentMap(*M, S); + Impl = M; + } +} + +ParentMap::~ParentMap() { + delete (MapTy*) Impl; +} + +void ParentMap::addStmt(Stmt* S) { + if (S) { + BuildParentMap(*(MapTy*) Impl, S); + } +} + +void ParentMap::setParent(const Stmt *S, const Stmt *Parent) { + assert(S); + assert(Parent); + MapTy *M = reinterpret_cast<MapTy *>(Impl); + M->insert(std::make_pair(const_cast<Stmt *>(S), const_cast<Stmt *>(Parent))); +} + +Stmt* ParentMap::getParent(Stmt* S) const { + MapTy* M = (MapTy*) Impl; + MapTy::iterator I = M->find(S); + return I == M->end() ? 0 : I->second; +} + +Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const { + do { S = getParent(S); } while (S && isa<ParenExpr>(S)); + return S; +} + +Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const { + do { + S = getParent(S); + } + while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S))); + + return S; +} + +Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const { + do { + S = getParent(S); + } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S); + + return S; +} + +Stmt *ParentMap::getOuterParenParent(Stmt *S) const { + Stmt *Paren = 0; + while (isa<ParenExpr>(S)) { + Paren = S; + S = getParent(S); + }; + return Paren; +} + +bool ParentMap::isConsumedExpr(Expr* E) const { + Stmt *P = getParent(E); + Stmt *DirectChild = E; + + // Ignore parents that don't guarantee consumption. 
+ while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P) || + isa<ExprWithCleanups>(P))) { + DirectChild = P; + P = getParent(P); + } + + if (!P) + return false; + + switch (P->getStmtClass()) { + default: + return isa<Expr>(P); + case Stmt::DeclStmtClass: + return true; + case Stmt::BinaryOperatorClass: { + BinaryOperator *BE = cast<BinaryOperator>(P); + // If it is a comma, only the right side is consumed. + // If it isn't a comma, both sides are consumed. + return BE->getOpcode()!=BO_Comma ||DirectChild==BE->getRHS(); + } + case Stmt::ForStmtClass: + return DirectChild == cast<ForStmt>(P)->getCond(); + case Stmt::WhileStmtClass: + return DirectChild == cast<WhileStmt>(P)->getCond(); + case Stmt::DoStmtClass: + return DirectChild == cast<DoStmt>(P)->getCond(); + case Stmt::IfStmtClass: + return DirectChild == cast<IfStmt>(P)->getCond(); + case Stmt::IndirectGotoStmtClass: + return DirectChild == cast<IndirectGotoStmt>(P)->getTarget(); + case Stmt::SwitchStmtClass: + return DirectChild == cast<SwitchStmt>(P)->getCond(); + case Stmt::ReturnStmtClass: + return true; + } +} + diff --git a/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp new file mode 100644 index 000000000000..1fa7cea1d498 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/RawCommentList.cpp @@ -0,0 +1,254 @@ +//===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/RawCommentList.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Comment.h" +#include "clang/AST/CommentBriefParser.h" +#include "clang/AST/CommentCommandTraits.h" +#include "clang/AST/CommentLexer.h" +#include "clang/AST/CommentParser.h" +#include "clang/AST/CommentSema.h" +#include "llvm/ADT/STLExtras.h" + +using namespace clang; + +namespace { +/// Get comment kind and bool describing if it is a trailing comment. +std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment, + bool ParseAllComments) { + const size_t MinCommentLength = ParseAllComments ? 2 : 3; + if ((Comment.size() < MinCommentLength) || Comment[0] != '/') + return std::make_pair(RawComment::RCK_Invalid, false); + + RawComment::CommentKind K; + if (Comment[1] == '/') { + if (Comment.size() < 3) + return std::make_pair(RawComment::RCK_OrdinaryBCPL, false); + + if (Comment[2] == '/') + K = RawComment::RCK_BCPLSlash; + else if (Comment[2] == '!') + K = RawComment::RCK_BCPLExcl; + else + return std::make_pair(RawComment::RCK_OrdinaryBCPL, false); + } else { + assert(Comment.size() >= 4); + + // Comment lexer does not understand escapes in comment markers, so pretend + // that this is not a comment. 
+ if (Comment[1] != '*' || + Comment[Comment.size() - 2] != '*' || + Comment[Comment.size() - 1] != '/') + return std::make_pair(RawComment::RCK_Invalid, false); + + if (Comment[2] == '*') + K = RawComment::RCK_JavaDoc; + else if (Comment[2] == '!') + K = RawComment::RCK_Qt; + else + return std::make_pair(RawComment::RCK_OrdinaryC, false); + } + const bool TrailingComment = (Comment.size() > 3) && (Comment[3] == '<'); + return std::make_pair(K, TrailingComment); +} + +bool mergedCommentIsTrailingComment(StringRef Comment) { + return (Comment.size() > 3) && (Comment[3] == '<'); +} +} // unnamed namespace + +RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR, + bool Merged, bool ParseAllComments) : + Range(SR), RawTextValid(false), BriefTextValid(false), + IsAttached(false), IsAlmostTrailingComment(false), + ParseAllComments(ParseAllComments) { + // Extract raw comment text, if possible. + if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) { + Kind = RCK_Invalid; + return; + } + + if (!Merged) { + // Guess comment kind. + std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments); + Kind = K.first; + IsTrailingComment = K.second; + + IsAlmostTrailingComment = RawText.startswith("//<") || + RawText.startswith("/*<"); + } else { + Kind = RCK_Merged; + IsTrailingComment = mergedCommentIsTrailingComment(RawText); + } +} + +StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const { + FileID BeginFileID; + FileID EndFileID; + unsigned BeginOffset; + unsigned EndOffset; + + llvm::tie(BeginFileID, BeginOffset) = + SourceMgr.getDecomposedLoc(Range.getBegin()); + llvm::tie(EndFileID, EndOffset) = + SourceMgr.getDecomposedLoc(Range.getEnd()); + + const unsigned Length = EndOffset - BeginOffset; + if (Length < 2) + return StringRef(); + + // The comment can't begin in one file and end in another. 
+ assert(BeginFileID == EndFileID); + + bool Invalid = false; + const char *BufferStart = SourceMgr.getBufferData(BeginFileID, + &Invalid).data(); + if (Invalid) + return StringRef(); + + return StringRef(BufferStart + BeginOffset, Length); +} + +const char *RawComment::extractBriefText(const ASTContext &Context) const { + // Make sure that RawText is valid. + getRawText(Context.getSourceManager()); + + // Since we will be copying the resulting text, all allocations made during + // parsing are garbage after resulting string is formed. Thus we can use + // a separate allocator for all temporary stuff. + llvm::BumpPtrAllocator Allocator; + + comments::Lexer L(Allocator, Context.getDiagnostics(), + Context.getCommentCommandTraits(), + Range.getBegin(), + RawText.begin(), RawText.end()); + comments::BriefParser P(L, Context.getCommentCommandTraits()); + + const std::string Result = P.Parse(); + const unsigned BriefTextLength = Result.size(); + char *BriefTextPtr = new (Context) char[BriefTextLength + 1]; + memcpy(BriefTextPtr, Result.c_str(), BriefTextLength + 1); + BriefText = BriefTextPtr; + BriefTextValid = true; + + return BriefTextPtr; +} + +comments::FullComment *RawComment::parse(const ASTContext &Context, + const Preprocessor *PP, + const Decl *D) const { + // Make sure that RawText is valid. 
+ getRawText(Context.getSourceManager()); + + comments::Lexer L(Context.getAllocator(), Context.getDiagnostics(), + Context.getCommentCommandTraits(), + getSourceRange().getBegin(), + RawText.begin(), RawText.end()); + comments::Sema S(Context.getAllocator(), Context.getSourceManager(), + Context.getDiagnostics(), + Context.getCommentCommandTraits(), + PP); + S.setDecl(D); + comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(), + Context.getDiagnostics(), + Context.getCommentCommandTraits()); + + return P.parseFullComment(); +} + +static bool onlyWhitespaceBetween(SourceManager &SM, + SourceLocation Loc1, SourceLocation Loc2, + unsigned MaxNewlinesAllowed) { + std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1); + std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2); + + // Question does not make sense if locations are in different files. + if (Loc1Info.first != Loc2Info.first) + return false; + + bool Invalid = false; + const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data(); + if (Invalid) + return false; + + unsigned NumNewlines = 0; + assert(Loc1Info.second <= Loc2Info.second && "Loc1 after Loc2!"); + // Look for non-whitespace characters and remember any newlines seen. + for (unsigned I = Loc1Info.second; I != Loc2Info.second; ++I) { + switch (Buffer[I]) { + default: + return false; + case ' ': + case '\t': + case '\f': + case '\v': + break; + case '\r': + case '\n': + ++NumNewlines; + + // Check if we have found more than the maximum allowed number of + // newlines. + if (NumNewlines > MaxNewlinesAllowed) + return false; + + // Collapse \r\n and \n\r into a single newline. 
+ if (I + 1 != Loc2Info.second && + (Buffer[I + 1] == '\n' || Buffer[I + 1] == '\r') && + Buffer[I] != Buffer[I + 1]) + ++I; + break; + } + } + + return true; +} + +void RawCommentList::addComment(const RawComment &RC, + llvm::BumpPtrAllocator &Allocator) { + if (RC.isInvalid()) + return; + + // Check if the comments are not in source order. + while (!Comments.empty() && + !SourceMgr.isBeforeInTranslationUnit(Comments.back()->getLocStart(), + RC.getLocStart())) { + // If they are, just pop a few last comments that don't fit. + // This happens if an \#include directive contains comments. + Comments.pop_back(); + } + + // Ordinary comments are not interesting for us. + if (RC.isOrdinary()) + return; + + // If this is the first Doxygen comment, save it (because there isn't + // anything to merge it with). + if (Comments.empty()) { + Comments.push_back(new (Allocator) RawComment(RC)); + return; + } + + const RawComment &C1 = *Comments.back(); + const RawComment &C2 = RC; + + // Merge comments only if there is only whitespace between them. + // Can't merge trailing and non-trailing comments. + // Merge comments if they are on same or consecutive lines. 
+ if (C1.isTrailingComment() == C2.isTrailingComment() && + onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(), + /*MaxNewlinesAllowed=*/1)) { + SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd()); + *Comments.back() = RawComment(SourceMgr, MergedRange, true, + RC.isParseAllComments()); + } else { + Comments.push_back(new (Allocator) RawComment(RC)); + } +} diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp new file mode 100644 index 000000000000..71e44ecf9219 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp @@ -0,0 +1,95 @@ +//===-- RecordLayout.cpp - Layout information for a struct/union -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the RecordLayout interface. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/RecordLayout.h" +#include "clang/Basic/TargetInfo.h" + +using namespace clang; + +void ASTRecordLayout::Destroy(ASTContext &Ctx) { + if (FieldOffsets) + Ctx.Deallocate(FieldOffsets); + if (CXXInfo) { + Ctx.Deallocate(CXXInfo); + CXXInfo->~CXXRecordLayoutInfo(); + } + this->~ASTRecordLayout(); + Ctx.Deallocate(this); +} + +ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size, + CharUnits alignment, CharUnits datasize, + const uint64_t *fieldoffsets, + unsigned fieldcount) + : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0), + FieldCount(fieldcount), CXXInfo(0) { + if (FieldCount > 0) { + FieldOffsets = new (Ctx) uint64_t[FieldCount]; + memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets)); + } +} + +// Constructor for C++ records. 
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, + CharUnits size, CharUnits alignment, + bool hasOwnVFPtr, bool hasExtendableVFPtr, + CharUnits vbptroffset, + CharUnits datasize, + const uint64_t *fieldoffsets, + unsigned fieldcount, + CharUnits nonvirtualsize, + CharUnits nonvirtualalign, + CharUnits SizeOfLargestEmptySubobject, + const CXXRecordDecl *PrimaryBase, + bool IsPrimaryBaseVirtual, + const CXXRecordDecl *BaseSharingVBPtr, + bool AlignAfterVBases, + const BaseOffsetsMapTy& BaseOffsets, + const VBaseOffsetsMapTy& VBaseOffsets) + : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0), + FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo) +{ + if (FieldCount > 0) { + FieldOffsets = new (Ctx) uint64_t[FieldCount]; + memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets)); + } + + CXXInfo->PrimaryBase.setPointer(PrimaryBase); + CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual); + CXXInfo->NonVirtualSize = nonvirtualsize; + CXXInfo->NonVirtualAlign = nonvirtualalign; + CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject; + CXXInfo->BaseOffsets = BaseOffsets; + CXXInfo->VBaseOffsets = VBaseOffsets; + CXXInfo->HasOwnVFPtr = hasOwnVFPtr; + CXXInfo->VBPtrOffset = vbptroffset; + CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr; + CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr; + CXXInfo->AlignAfterVBases = AlignAfterVBases; + + +#ifndef NDEBUG + if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) { + if (isPrimaryBaseVirtual()) { + if (Ctx.getTargetInfo().getCXXABI().hasPrimaryVBases()) { + assert(getVBaseClassOffset(PrimaryBase).isZero() && + "Primary virtual base must be at offset 0!"); + } + } else { + assert(getBaseClassOffset(PrimaryBase).isZero() && + "Primary base must be at offset 0!"); + } + } +#endif +} diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp new file mode 100644 index 000000000000..4390e66c8b1c --- 
/dev/null +++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp @@ -0,0 +1,3040 @@ +//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/RecordLayout.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CXXInheritance.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/Expr.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/Sema/SemaDiagnostic.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/Support/CrashRecoveryContext.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/MathExtras.h" + +using namespace clang; + +namespace { + +/// BaseSubobjectInfo - Represents a single base subobject in a complete class. +/// For a class hierarchy like +/// +/// class A { }; +/// class B : A { }; +/// class C : A, B { }; +/// +/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo +/// instances, one for B and two for A. +/// +/// If a base is virtual, it will only have one BaseSubobjectInfo allocated. +struct BaseSubobjectInfo { + /// Class - The class for this base info. + const CXXRecordDecl *Class; + + /// IsVirtual - Whether the BaseInfo represents a virtual base or not. + bool IsVirtual; + + /// Bases - Information about the base subobjects. + SmallVector<BaseSubobjectInfo*, 4> Bases; + + /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base + /// of this base info (if one exists). + BaseSubobjectInfo *PrimaryVirtualBaseInfo; + + // FIXME: Document. + const BaseSubobjectInfo *Derived; +}; + +/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different +/// offsets while laying out a C++ class. 
+class EmptySubobjectMap { + const ASTContext &Context; + uint64_t CharWidth; + + /// Class - The class whose empty entries we're keeping track of. + const CXXRecordDecl *Class; + + /// EmptyClassOffsets - A map from offsets to empty record decls. + typedef SmallVector<const CXXRecordDecl *, 1> ClassVectorTy; + typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy; + EmptyClassOffsetsMapTy EmptyClassOffsets; + + /// MaxEmptyClassOffset - The highest offset known to contain an empty + /// base subobject. + CharUnits MaxEmptyClassOffset; + + /// ComputeEmptySubobjectSizes - Compute the size of the largest base or + /// member subobject that is empty. + void ComputeEmptySubobjectSizes(); + + void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset); + + void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info, + CharUnits Offset, bool PlacingEmptyBase); + + void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + CharUnits Offset); + void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset); + + /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty + /// subobjects beyond the given offset. 
+ bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const { + return Offset <= MaxEmptyClassOffset; + } + + CharUnits + getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const { + uint64_t FieldOffset = Layout.getFieldOffset(FieldNo); + assert(FieldOffset % CharWidth == 0 && + "Field offset not at char boundary!"); + + return Context.toCharUnitsFromBits(FieldOffset); + } + +protected: + bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, + CharUnits Offset) const; + + bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, + CharUnits Offset); + + bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + CharUnits Offset) const; + bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, + CharUnits Offset) const; + +public: + /// This holds the size of the largest empty subobject (either a base + /// or a member). Will be zero if the record being built doesn't contain + /// any empty classes. + CharUnits SizeOfLargestEmptySubobject; + + EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class) + : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) { + ComputeEmptySubobjectSizes(); + } + + /// CanPlaceBaseAtOffset - Return whether the given base class can be placed + /// at the given offset. + /// Returns false if placing the record will result in two components + /// (direct or indirect) of the same type having the same offset. + bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, + CharUnits Offset); + + /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given + /// offset. + bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset); +}; + +void EmptySubobjectMap::ComputeEmptySubobjectSizes() { + // Check the bases. 
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(), + E = Class->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + CharUnits EmptySize; + const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl); + if (BaseDecl->isEmpty()) { + // If the class decl is empty, get its size. + EmptySize = Layout.getSize(); + } else { + // Otherwise, we get the largest empty subobject for the decl. + EmptySize = Layout.getSizeOfLargestEmptySubobject(); + } + + if (EmptySize > SizeOfLargestEmptySubobject) + SizeOfLargestEmptySubobject = EmptySize; + } + + // Check the fields. + for (CXXRecordDecl::field_iterator I = Class->field_begin(), + E = Class->field_end(); I != E; ++I) { + + const RecordType *RT = + Context.getBaseElementType(I->getType())->getAs<RecordType>(); + + // We only care about record types. + if (!RT) + continue; + + CharUnits EmptySize; + const CXXRecordDecl *MemberDecl = cast<CXXRecordDecl>(RT->getDecl()); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl); + if (MemberDecl->isEmpty()) { + // If the class decl is empty, get its size. + EmptySize = Layout.getSize(); + } else { + // Otherwise, we get the largest empty subobject for the decl. + EmptySize = Layout.getSizeOfLargestEmptySubobject(); + } + + if (EmptySize > SizeOfLargestEmptySubobject) + SizeOfLargestEmptySubobject = EmptySize; + } +} + +bool +EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, + CharUnits Offset) const { + // We only need to check empty bases. + if (!RD->isEmpty()) + return true; + + EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset); + if (I == EmptyClassOffsets.end()) + return true; + + const ClassVectorTy& Classes = I->second; + if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end()) + return true; + + // There is already an empty class of the same type at this offset. 
+ return false; +} + +void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD, + CharUnits Offset) { + // We only care about empty bases. + if (!RD->isEmpty()) + return; + + // If we have empty structures inside a union, we can assign both + // the same offset. Just avoid pushing them twice in the list. + ClassVectorTy& Classes = EmptyClassOffsets[Offset]; + if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end()) + return; + + Classes.push_back(RD); + + // Update the empty class offset. + if (Offset > MaxEmptyClassOffset) + MaxEmptyClassOffset = Offset; +} + +bool +EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, + CharUnits Offset) { + // We don't have to keep looking past the maximum offset that's known to + // contain an empty class. + if (!AnyEmptySubobjectsBeyondOffset(Offset)) + return true; + + if (!CanPlaceSubobjectAtOffset(Info->Class, Offset)) + return false; + + // Traverse all non-virtual bases. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); + for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) { + BaseSubobjectInfo* Base = Info->Bases[I]; + if (Base->IsVirtual) + continue; + + CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class); + + if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset)) + return false; + } + + if (Info->PrimaryVirtualBaseInfo) { + BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo; + + if (Info == PrimaryVirtualBaseInfo->Derived) { + if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset)) + return false; + } + } + + // Traverse all member variables. 
  // (continued) Check every non-bitfield member of the base being placed.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
       E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
    if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
      return false;
  }

  return true;
}

/// UpdateEmptyBaseSubobjects - Record all empty subobjects of the base
/// described by Info, now placed at Offset, mirroring the traversal done by
/// CanPlaceBaseSubobjectAtOffset. PlacingEmptyBase disables the
/// SizeOfLargestEmptySubobject cutoff, since an empty base itself can land
/// beyond it.
void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
                                                  CharUnits Offset,
                                                  bool PlacingEmptyBase) {
  if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
    // We know that the only empty subobjects that can conflict with empty
    // subobject of non-empty bases, are empty bases that can be placed at
    // offset zero. Because of this, we only need to keep track of empty base
    // subobjects with offsets less than the size of the largest empty
    // subobject for our class.
    return;
  }

  AddSubobjectAtOffset(Info->Class, Offset);

  // Traverse all non-virtual bases.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
  for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
    BaseSubobjectInfo* Base = Info->Bases[I];
    if (Base->IsVirtual)
      continue;

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
    UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
  }

  if (Info->PrimaryVirtualBaseInfo) {
    BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;

    // Only record the primary virtual base if it was claimed by this base.
    if (Info == PrimaryVirtualBaseInfo->Derived)
      UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
                                PlacingEmptyBase);
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
       E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
    UpdateEmptyFieldSubobjects(*I, FieldOffset);
  }
}

/// CanPlaceBaseAtOffset - Returns whether the base described by Info can be
/// placed at Offset; on success the empty-subobject map is updated to record
/// the placement.
bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
                                             CharUnits Offset) {
  // If we know this class doesn't have any empty subobjects we don't need to
  // bother checking.
  if (SizeOfLargestEmptySubobject.isZero())
    return true;

  if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
    return false;

  // We are able to place the base at this offset. Make sure to update the
  // empty base subobject map.
  UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
  return true;
}

/// CanPlaceFieldSubobjectAtOffset - Returns whether a field subobject of type
/// RD, belonging to most-derived class Class, can be placed at Offset without
/// an empty-subobject collision. Virtual bases are only walked when RD is the
/// most derived class (RD == Class).
bool
EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
                                                  const CXXRecordDecl *Class,
                                                  CharUnits Offset) const {
  // We don't have to keep looking past the maximum offset that's known to
  // contain an empty class.
  if (!AnyEmptySubobjectsBeyondOffset(Offset))
    return true;

  if (!CanPlaceSubobjectAtOffset(RD, Offset))
    return false;

  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // Traverse all non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
    if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
      return false;
  }

  if (RD == Class) {
    // This is the most derived class, traverse virtual bases as well.
    // (continued) Virtual bases of the most derived class.
    for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
         E = RD->vbases_end(); I != E; ++I) {
      const CXXRecordDecl *VBaseDecl =
        cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
      if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
        return false;
    }
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I, ++FieldNo) {
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);

    if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
      return false;
  }

  return true;
}

/// CanPlaceFieldSubobjectAtOffset - Returns whether the field FD can be
/// placed at Offset without an empty-subobject collision. Record fields are
/// checked recursively; for constant-size arrays of records every element is
/// checked. Non-record, non-array types always succeed.
bool
EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
                                                  CharUnits Offset) const {
  // We don't have to keep looking past the maximum offset that's known to
  // contain an empty class.
  if (!AnyEmptySubobjectsBeyondOffset(Offset))
    return true;

  QualType T = FD->getType();
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
  }

  // If we have an array type we need to look at every element.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
    QualType ElemTy = Context.getBaseElementType(AT);
    const RecordType *RT = ElemTy->getAs<RecordType>();
    if (!RT)
      return true;

    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    uint64_t NumElements = Context.getConstantArrayElementCount(AT);
    CharUnits ElementOffset = Offset;
    for (uint64_t I = 0; I != NumElements; ++I) {
      // We don't have to keep looking past the maximum offset that's known to
      // contain an empty class.
      if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
        return true;

      if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
        return false;

      ElementOffset += Layout.getSize();
    }
  }

  return true;
}

/// CanPlaceFieldAtOffset - Returns whether the field FD can be placed at
/// Offset; on success the empty-subobject map is updated to record the
/// placement.
bool
EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
                                         CharUnits Offset) {
  if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
    return false;

  // We are able to place the member variable at this offset.
  // Make sure to update the empty base subobject map.
  UpdateEmptyFieldSubobjects(FD, Offset);
  return true;
}

/// UpdateEmptyFieldSubobjects - Record all empty subobjects of a field of
/// record type RD (with most-derived class Class) placed at Offset, mirroring
/// the traversal of the corresponding CanPlace check.
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
                                                   const CXXRecordDecl *Class,
                                                   CharUnits Offset) {
  // We know that the only empty subobjects that can conflict with empty
  // field subobjects are subobjects of empty bases that can be placed at offset
  // zero. Because of this, we only need to keep track of empty field
  // subobjects with offsets less than the size of the largest empty
  // subobject for our class.
  if (Offset >= SizeOfLargestEmptySubobject)
    return;

  AddSubobjectAtOffset(RD, Offset);

  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // Traverse all non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
    UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
  }

  if (RD == Class) {
    // This is the most derived class, traverse virtual bases as well.
    // (continued) Virtual bases of the most derived class.
    for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
         E = RD->vbases_end(); I != E; ++I) {
      const CXXRecordDecl *VBaseDecl =
        cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
      UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
    }
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I, ++FieldNo) {
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);

    UpdateEmptyFieldSubobjects(*I, FieldOffset);
  }
}

/// UpdateEmptyFieldSubobjects - Record all empty subobjects of the field FD
/// placed at Offset. Record fields recurse directly; constant-size arrays of
/// records record every element. Other field types contribute nothing.
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
                                                   CharUnits Offset) {
  QualType T = FD->getType();
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    UpdateEmptyFieldSubobjects(RD, RD, Offset);
    return;
  }

  // If we have an array type we need to update every element.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
    QualType ElemTy = Context.getBaseElementType(AT);
    const RecordType *RT = ElemTy->getAs<RecordType>();
    if (!RT)
      return;

    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    uint64_t NumElements = Context.getConstantArrayElementCount(AT);
    CharUnits ElementOffset = Offset;

    for (uint64_t I = 0; I != NumElements; ++I) {
      // We know that the only empty subobjects that can conflict with empty
      // field subobjects are subobjects of empty bases that can be placed at
      // offset zero. Because of this, we only need to keep track of empty field
      // subobjects with offsets less than the size of the largest empty
      // subobject for our class.
      if (ElementOffset >= SizeOfLargestEmptySubobject)
        return;

      UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
      ElementOffset += Layout.getSize();
    }
  }
}

typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> ClassSetTy;

/// RecordLayoutBuilder - Computes the layout (size, alignment, field/base
/// offsets) of a single record. (Class declaration continues below.)
class RecordLayoutBuilder {
protected:
  // FIXME: Remove this and make the appropriate fields public.
  friend class clang::ASTContext;

  const ASTContext &Context;

  /// EmptySubobjects - Empty-subobject tracking for the record being laid
  /// out; consulted when placing bases and fields.
  EmptySubobjectMap *EmptySubobjects;

  /// Size - The current size of the record layout, in bits.
  uint64_t Size;

  /// Alignment - The current alignment of the record layout.
  CharUnits Alignment;

  /// \brief The alignment if attribute packed is not used.
  CharUnits UnpackedAlignment;

  /// FieldOffsets - Offsets (in bits) of the fields laid out so far.
  SmallVector<uint64_t, 16> FieldOffsets;

  /// \brief Whether the external AST source has provided a layout for this
  /// record.
  unsigned ExternalLayout : 1;

  /// \brief Whether we need to infer alignment, even when we have an
  /// externally-provided layout.
  unsigned InferAlignment : 1;

  /// Packed - Whether the record is packed or not.
  unsigned Packed : 1;

  unsigned IsUnion : 1;

  unsigned IsMac68kAlign : 1;

  unsigned IsMsStruct : 1;

  /// UnfilledBitsInLastUnit - If the last field laid out was a bitfield,
  /// this contains the number of bits in the last unit that can be used for
  /// an adjacent bitfield if necessary. The unit in question is usually
  /// a byte, but larger units are used if IsMsStruct.
  unsigned char UnfilledBitsInLastUnit;
  /// LastBitfieldTypeSize - If IsMsStruct, represents the size of the type
  /// of the previous field if it was a bitfield.
  unsigned char LastBitfieldTypeSize;

  /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
  /// #pragma pack.
  CharUnits MaxFieldAlignment;

  /// DataSize - The data size of the record being laid out.
  // DataSize - (in bits; see getDataSize/getDataSizeInBits below.)
  uint64_t DataSize;

  CharUnits NonVirtualSize;
  CharUnits NonVirtualAlignment;

  /// PrimaryBase - the primary base class (if one exists) of the class
  /// we're laying out.
  const CXXRecordDecl *PrimaryBase;

  /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
  /// out is virtual.
  bool PrimaryBaseIsVirtual;

  /// HasOwnVFPtr - Whether the class provides its own vtable/vftbl
  /// pointer, as opposed to inheriting one from a primary base class.
  bool HasOwnVFPtr;

  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;

  /// Bases - base classes and their offsets in the record.
  BaseOffsetsMapTy Bases;

  // VBases - virtual base classes and their offsets in the record.
  ASTRecordLayout::VBaseOffsetsMapTy VBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
  /// inheritance graph order. Used for determining the primary base class.
  const CXXRecordDecl *FirstNearlyEmptyVBase;

  /// VisitedVirtualBases - A set of all the visited virtual bases, used to
  /// avoid visiting virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;

  /// \brief Externally-provided size.
  uint64_t ExternalSize;

  /// \brief Externally-provided alignment.
  uint64_t ExternalAlign;

  /// \brief Externally-provided field offsets.
  llvm::DenseMap<const FieldDecl *, uint64_t> ExternalFieldOffsets;

  /// \brief Externally-provided direct, non-virtual base offsets.
  llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalBaseOffsets;

  /// \brief Externally-provided virtual base offsets.
  llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalVirtualBaseOffsets;

  RecordLayoutBuilder(const ASTContext &Context,
                      EmptySubobjectMap *EmptySubobjects)
    : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
      Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
      ExternalLayout(false), InferAlignment(false),
      Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
      UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
      MaxFieldAlignment(CharUnits::Zero()),
      DataSize(0), NonVirtualSize(CharUnits::Zero()),
      NonVirtualAlignment(CharUnits::One()),
      PrimaryBase(0), PrimaryBaseIsVirtual(false),
      HasOwnVFPtr(false),
      FirstNearlyEmptyVBase(0) { }

  /// Reset this RecordLayoutBuilder to a fresh state, using the given
  /// alignment as the initial alignment.  This is used for the
  /// correct layout of vb-table pointers in MSVC.
  void resetWithTargetAlignment(CharUnits TargetAlignment) {
    // Destroy and placement-new this object in place, preserving only the
    // context and empty-subobject map, then seed the alignment.
    const ASTContext &Context = this->Context;
    EmptySubobjectMap *EmptySubobjects = this->EmptySubobjects;
    this->~RecordLayoutBuilder();
    new (this) RecordLayoutBuilder(Context, EmptySubobjects);
    Alignment = UnpackedAlignment = TargetAlignment;
  }

  void Layout(const RecordDecl *D);
  void Layout(const CXXRecordDecl *D);
  void Layout(const ObjCInterfaceDecl *D);

  void LayoutFields(const RecordDecl *D);
  void LayoutField(const FieldDecl *D);
  void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
                          bool FieldPacked, const FieldDecl *D);
  void LayoutBitField(const FieldDecl *D);

  TargetCXXABI getCXXABI() const {
    return Context.getTargetInfo().getCXXABI();
  }

  /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
  llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;

  typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
    BaseSubobjectInfoMapTy;

  /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
  /// of the class we're laying out to their base subobject info.
  BaseSubobjectInfoMapTy VirtualBaseInfo;

  /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
  /// class we're laying out to their base subobject info.
  BaseSubobjectInfoMapTy NonVirtualBaseInfo;

  /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
  /// bases of the given class.
  void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);

  /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
  /// single class and all of its base classes.
  BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
                                              bool IsVirtual,
                                              BaseSubobjectInfo *Derived);

  /// DeterminePrimaryBase - Determine the primary base of the given class.
  void DeterminePrimaryBase(const CXXRecordDecl *RD);

  void SelectPrimaryVBase(const CXXRecordDecl *RD);

  void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);

  /// LayoutNonVirtualBases - Determines the primary base class (if any) and
  /// lays it out. Will then proceed to lay out all non-virtual base classes.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD);

  /// LayoutNonVirtualBase - Lays out a single non-virtual base.
  void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);

  void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
                                    CharUnits Offset);

  /// LayoutVirtualBases - Lays out all the virtual bases.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const CXXRecordDecl *MostDerivedClass);

  /// LayoutVirtualBase - Lays out a single virtual base.
  void LayoutVirtualBase(const BaseSubobjectInfo *Base);

  /// LayoutBase - Will lay out a base and return the offset where it was
  /// placed, in chars.
  CharUnits LayoutBase(const BaseSubobjectInfo *Base);

  /// InitializeLayout - Initialize record layout for the given record decl.
  void InitializeLayout(const Decl *D);

  /// FinishLayout - Finalize record layout. Adjust record size based on the
  /// alignment.
  void FinishLayout(const NamedDecl *D);

  void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
  void UpdateAlignment(CharUnits NewAlignment) {
    UpdateAlignment(NewAlignment, NewAlignment);
  }

  /// \brief Retrieve the externally-supplied field offset for the given
  /// field.
  ///
  /// \param Field The field whose offset is being queried.
  /// \param ComputedOffset The offset that we've computed for this field.
  uint64_t updateExternalFieldOffset(const FieldDecl *Field,
                                     uint64_t ComputedOffset);

  void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
                         uint64_t UnpackedOffset, unsigned UnpackedAlign,
                         bool isPacked, const FieldDecl *D);

  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);

  // Size is kept in bits; these accessors convert at the boundary.
  CharUnits getSize() const {
    assert(Size % Context.getCharWidth() == 0);
    return Context.toCharUnitsFromBits(Size);
  }
  uint64_t getSizeInBits() const { return Size; }

  void setSize(CharUnits NewSize) { Size = Context.toBits(NewSize); }
  void setSize(uint64_t NewSize) { Size = NewSize; }

  CharUnits getAligment() const { return Alignment; }

  CharUnits getDataSize() const {
    assert(DataSize % Context.getCharWidth() == 0);
    return Context.toCharUnitsFromBits(DataSize);
  }
  uint64_t getDataSizeInBits() const { return DataSize; }

  void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
  void setDataSize(uint64_t NewSize) { DataSize = NewSize; }

  RecordLayoutBuilder(const RecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
  void operator=(const
RecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
};
} // end anonymous namespace

/// SelectPrimaryVBase - Recursively search RD's bases (depth-first, in
/// declaration order) for a nearly empty virtual base that is not an indirect
/// primary base; the first such base becomes PrimaryBase. Also records the
/// first nearly empty virtual base seen in FirstNearlyEmptyVBase.
void
RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    assert(!I->getType()->isDependentType() &&
           "Cannot layout class with dependent bases.");

    const CXXRecordDecl *Base =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Check if this is a nearly empty virtual base.
    if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
      // If it's not an indirect primary base, then we've found our primary
      // base.
      if (!IndirectPrimaryBases.count(Base)) {
        PrimaryBase = Base;
        PrimaryBaseIsVirtual = true;
        return;
      }

      // Is this the first nearly empty virtual base?
      if (!FirstNearlyEmptyVBase)
        FirstNearlyEmptyVBase = Base;
    }

    SelectPrimaryVBase(Base);
    if (PrimaryBase)
      return;
  }
}

/// DeterminePrimaryBase - Determine the primary base of the given class.
void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
  // If the class isn't dynamic, it won't have a primary base.
  if (!RD->isDynamicClass())
    return;

  // Compute all the primary virtual bases for all of our direct and
  // indirect bases, and record all their primary virtual base classes.
  RD->getIndirectPrimaryBases(IndirectPrimaryBases);

  // If the record has a dynamic base class, attempt to choose a primary base
  // class. It is the first (in direct base class order) non-virtual dynamic
  // base class, if one exists.
  for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
       e = RD->bases_end(); i != e; ++i) {
    // Ignore virtual bases.
    if (i->isVirtual())
      continue;

    const CXXRecordDecl *Base =
      cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

    if (Base->isDynamicClass()) {
      // We found it.
      PrimaryBase = Base;
      PrimaryBaseIsVirtual = false;
      return;
    }
  }

  // Under the Itanium ABI, if there is no non-virtual primary base class,
  // try to compute the primary virtual base.  The primary virtual base is
  // the first nearly empty virtual base that is not an indirect primary
  // virtual base class, if one exists.
  if (RD->getNumVBases() != 0) {
    SelectPrimaryVBase(RD);
    if (PrimaryBase)
      return;
  }

  // Otherwise, it is the first indirect primary base class, if one exists.
  if (FirstNearlyEmptyVBase) {
    PrimaryBase = FirstNearlyEmptyVBase;
    PrimaryBaseIsVirtual = true;
    return;
  }

  assert(!PrimaryBase && "Should not get here with a primary base!");
}

/// ComputeBaseSubobjectInfo - Build (or return the cached) BaseSubobjectInfo
/// node for RD, recursively building nodes for all of its bases. Virtual
/// bases are memoized in VirtualBaseInfo; a primary virtual base is claimed
/// by setting its Derived pointer to the claiming base's info.
BaseSubobjectInfo *
RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
                                              bool IsVirtual,
                                              BaseSubobjectInfo *Derived) {
  BaseSubobjectInfo *Info;

  if (IsVirtual) {
    // Check if we already have info about this virtual base.
    BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
    if (InfoSlot) {
      assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
      return InfoSlot;
    }

    // We don't, create it.
    InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
    Info = InfoSlot;
  } else {
    Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
  }

  Info->Class = RD;
  Info->IsVirtual = IsVirtual;
  Info->Derived = 0;
  Info->PrimaryVirtualBaseInfo = 0;

  const CXXRecordDecl *PrimaryVirtualBase = 0;
  BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0;

  // Check if this base has a primary virtual base.
  if (RD->getNumVBases()) {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    if (Layout.isPrimaryBaseVirtual()) {
      // This base does have a primary virtual base.
      PrimaryVirtualBase = Layout.getPrimaryBase();
      assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");

      // Now check if we have base subobject info about this primary base.
      // (continued) Try to claim the primary virtual base for this node.
      PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);

      if (PrimaryVirtualBaseInfo) {
        if (PrimaryVirtualBaseInfo->Derived) {
          // We did have info about this primary base, and it turns out that it
          // has already been claimed as a primary virtual base for another
          // base.
          PrimaryVirtualBase = 0;
        } else {
          // We can claim this base as our primary base.
          Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
          PrimaryVirtualBaseInfo->Derived = Info;
        }
      }
    }
  }

  // Now go through all direct bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    bool IsVirtual = I->isVirtual();

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
  }

  if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
    // Traversing the bases must have created the base info for our primary
    // virtual base.
    PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
    assert(PrimaryVirtualBaseInfo &&
           "Did not create a primary virtual base!");

    // Claim the primary virtual base as our primary virtual base.
    Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
    PrimaryVirtualBaseInfo->Derived = Info;
  }

  return Info;
}

/// ComputeBaseSubobjectInfo - Build base subobject info for every direct base
/// of RD, populating NonVirtualBaseInfo (virtual bases are registered in
/// VirtualBaseInfo by the recursive overload).
void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    bool IsVirtual = I->isVirtual();

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Compute the base subobject info for this base.
    BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);

    if (IsVirtual) {
      // ComputeBaseInfo has already added this base for us.
      assert(VirtualBaseInfo.count(BaseDecl) &&
             "Did not add virtual base!");
    } else {
      // Add the base info to the map of non-virtual bases.
      assert(!NonVirtualBaseInfo.count(BaseDecl) &&
             "Non-virtual base already exists!");
      NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
    }
  }
}

/// EnsureVTablePointerAlignment - Round the current size up so a vtable
/// pointer can be placed, honoring Packed and any #pragma pack maximum, and
/// fold the pointer alignment into the record alignment.
void
RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
  CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;

  // The maximum field alignment overrides base align.
  if (!MaxFieldAlignment.isZero()) {
    BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
    UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
  }

  // Round up the current record size to pointer alignment.
  setSize(getSize().RoundUpToAlignment(BaseAlign));
  setDataSize(getSize());

  // Update the alignment.
  UpdateAlignment(BaseAlign, UnpackedBaseAlign);
}

void
RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
  // Then, determine the primary base class.
  DeterminePrimaryBase(RD);

  // Compute base subobject info.
  ComputeBaseSubobjectInfo(RD);

  // If we have a primary base class, lay it out.
  if (PrimaryBase) {
    if (PrimaryBaseIsVirtual) {
      // If the primary virtual base was a primary virtual base of some other
      // base class we'll have to steal it.
      BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
      PrimaryBaseInfo->Derived = 0;

      // We have a virtual primary base, insert it as an indirect primary base.
      // (continued) Mark the virtual primary base visited and lay it out.
      IndirectPrimaryBases.insert(PrimaryBase);

      assert(!VisitedVirtualBases.count(PrimaryBase) &&
             "vbase already visited!");
      VisitedVirtualBases.insert(PrimaryBase);

      LayoutVirtualBase(PrimaryBaseInfo);
    } else {
      BaseSubobjectInfo *PrimaryBaseInfo =
        NonVirtualBaseInfo.lookup(PrimaryBase);
      assert(PrimaryBaseInfo &&
             "Did not find base info for non-virtual primary base!");

      LayoutNonVirtualBase(PrimaryBaseInfo);
    }

  // If this class needs a vtable/vf-table and didn't get one from a
  // primary base, add it in now.
  } else if (RD->isDynamicClass()) {
    assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
    CharUnits PtrWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    CharUnits PtrAlign =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
    EnsureVTablePointerAlignment(PtrAlign);
    HasOwnVFPtr = true;
    setSize(getSize() + PtrWidth);
    setDataSize(getSize());
  }

  // Now lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {

    // Ignore virtual bases.
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    // Skip the primary base, because we've already laid it out.  The
    // !PrimaryBaseIsVirtual check is required because we might have a
    // non-virtual base of the same type as a primary virtual base.
    if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
      continue;

    // Lay out the base.
    BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
    assert(BaseInfo && "Did not find base info for non-virtual base!");

    LayoutNonVirtualBase(BaseInfo);
  }
}

/// LayoutNonVirtualBase - Lay out a single non-virtual base, record its
/// offset in Bases, and record the offsets of any primary virtual bases it
/// carries.
void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
  // Layout the base.
  CharUnits Offset = LayoutBase(Base);

  // Add its base class offset.
  assert(!Bases.count(Base->Class) && "base offset already exists!");
  Bases.insert(std::make_pair(Base->Class, Offset));

  AddPrimaryVirtualBaseOffsets(Base, Offset);
}

/// AddPrimaryVirtualBaseOffsets - Record, in VBases, the offsets of all
/// primary virtual bases reachable through Info (placed at Offset). A primary
/// virtual base shares its claiming base's offset rather than being laid out
/// separately.
void
RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
                                                  CharUnits Offset) {
  // This base isn't interesting, it has no virtual bases.
  if (!Info->Class->getNumVBases())
    return;

  // First, check if we have a virtual primary base to add offsets for.
  if (Info->PrimaryVirtualBaseInfo) {
    assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
           "Primary virtual base is not virtual!");
    if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
      // Add the offset.
      assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
             "primary vbase offset already exists!");
      VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
                                   ASTRecordLayout::VBaseInfo(Offset, false)));

      // Traverse the primary virtual base.
      AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
    }
  }

  // Now go through all direct non-virtual bases.
  // (continued) Recurse into the direct non-virtual bases.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
  for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
    const BaseSubobjectInfo *Base = Info->Bases[I];
    if (Base->IsVirtual)
      continue;

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
    AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
  }
}

/// LayoutVirtualBases - Walk the inheritance graph of RD and lay out every
/// virtual base that is neither the primary virtual base at this level nor an
/// indirect primary base, visiting each virtual base at most once.
void
RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                        const CXXRecordDecl *MostDerivedClass) {
  const CXXRecordDecl *PrimaryBase;
  bool PrimaryBaseIsVirtual;

  // At the top of the recursion use this builder's primary base; deeper
  // levels consult the precomputed layout of the intermediate base.
  if (MostDerivedClass == RD) {
    PrimaryBase = this->PrimaryBase;
    PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
  } else {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    PrimaryBase = Layout.getPrimaryBase();
    PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
  }

  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    assert(!I->getType()->isDependentType() &&
           "Cannot layout class with dependent bases.");

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    if (I->isVirtual()) {
      if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
        bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);

        // Only lay out the virtual base if it's not an indirect primary base.
        if (!IndirectPrimaryBase) {
          // Only visit virtual bases once.
          if (!VisitedVirtualBases.insert(BaseDecl))
            continue;

          const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
          assert(BaseInfo && "Did not find virtual base info!");
          LayoutVirtualBase(BaseInfo);
        }
      }
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, MostDerivedClass);
  }
}

/// LayoutVirtualBase - Lay out a single virtual base, record its offset in
/// VBases, and record the offsets of any primary virtual bases it carries.
void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
  assert(!Base->Derived && "Trying to lay out a primary virtual base!");

  // Layout the base.
  CharUnits Offset = LayoutBase(Base);

  // Add its base class offset.
  assert(!VBases.count(Base->Class) && "vbase offset already exists!");
  VBases.insert(std::make_pair(Base->Class,
                               ASTRecordLayout::VBaseInfo(Offset, false)));

  AddPrimaryVirtualBaseOffsets(Base, Offset);
}

CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);

  CharUnits Offset;

  // Query the external layout to see if it provides an offset.
  bool HasExternalLayout = false;
  if (ExternalLayout) {
    llvm::DenseMap<const CXXRecordDecl *, CharUnits>::iterator Known;
    if (Base->IsVirtual) {
      Known = ExternalVirtualBaseOffsets.find(Base->Class);
      if (Known != ExternalVirtualBaseOffsets.end()) {
        Offset = Known->second;
        HasExternalLayout = true;
      }
    } else {
      Known = ExternalBaseOffsets.find(Base->Class);
      if (Known != ExternalBaseOffsets.end()) {
        Offset = Known->second;
        HasExternalLayout = true;
      }
    }
  }

  CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
  CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;

  // If we have an empty base class, try to place it at offset 0.
  if (Base->Class->isEmpty() &&
      (!HasExternalLayout || Offset == CharUnits::Zero()) &&
      EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
    setSize(std::max(getSize(), Layout.getSize()));
    UpdateAlignment(BaseAlign, UnpackedBaseAlign);

    return CharUnits::Zero();
  }

  // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) { + BaseAlign = std::min(BaseAlign, MaxFieldAlignment); + UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment); + } + + if (!HasExternalLayout) { + // Round up the current record size to the base's alignment boundary. + Offset = getDataSize().RoundUpToAlignment(BaseAlign); + + // Try to place the base. + while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset)) + Offset += BaseAlign; + } else { + bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset); + (void)Allowed; + assert(Allowed && "Base subobject externally placed at overlapping offset"); + + if (InferAlignment && Offset < getDataSize().RoundUpToAlignment(BaseAlign)){ + // The externally-supplied base offset is before the base offset we + // computed. Assume that the structure is packed. + Alignment = CharUnits::One(); + InferAlignment = false; + } + } + + if (!Base->Class->isEmpty()) { + // Update the data size. + setDataSize(Offset + Layout.getNonVirtualSize()); + + setSize(std::max(getSize(), getDataSize())); + } else + setSize(std::max(getSize(), Offset + Layout.getSize())); + + // Remember max struct/class alignment. + UpdateAlignment(BaseAlign, UnpackedBaseAlign); + + return Offset; +} + +void RecordLayoutBuilder::InitializeLayout(const Decl *D) { + if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) { + IsUnion = RD->isUnion(); + IsMsStruct = RD->isMsStruct(Context); + } + + Packed = D->hasAttr<PackedAttr>(); + + // Honor the default struct packing maximum alignment flag. + if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) { + MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment); + } + + // mac68k alignment supersedes maximum field alignment and attribute aligned, + // and forces all structures to have 2-byte alignment. The IBM docs on it + // allude to additional (more complicated) semantics, especially with regard + // to bit-fields, but gcc appears not to follow that. 
+ if (D->hasAttr<AlignMac68kAttr>()) { + IsMac68kAlign = true; + MaxFieldAlignment = CharUnits::fromQuantity(2); + Alignment = CharUnits::fromQuantity(2); + } else { + if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>()) + MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment()); + + if (unsigned MaxAlign = D->getMaxAlignment()) + UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign)); + } + + // If there is an external AST source, ask it for the various offsets. + if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) + if (ExternalASTSource *External = Context.getExternalSource()) { + ExternalLayout = External->layoutRecordType(RD, + ExternalSize, + ExternalAlign, + ExternalFieldOffsets, + ExternalBaseOffsets, + ExternalVirtualBaseOffsets); + + // Update based on external alignment. + if (ExternalLayout) { + if (ExternalAlign > 0) { + Alignment = Context.toCharUnitsFromBits(ExternalAlign); + } else { + // The external source didn't have alignment information; infer it. + InferAlignment = true; + } + } + } +} + +void RecordLayoutBuilder::Layout(const RecordDecl *D) { + InitializeLayout(D); + LayoutFields(D); + + // Finally, round the size of the total struct up to the alignment of the + // struct itself. + FinishLayout(D); +} + +void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) { + InitializeLayout(RD); + + // Lay out the vtable and the non-virtual bases. + LayoutNonVirtualBases(RD); + + LayoutFields(RD); + + NonVirtualSize = Context.toCharUnitsFromBits( + llvm::RoundUpToAlignment(getSizeInBits(), + Context.getTargetInfo().getCharAlign())); + NonVirtualAlignment = Alignment; + + // Lay out the virtual bases and add the primary virtual base offsets. + LayoutVirtualBases(RD, RD); + + // Finally, round the size of the total struct up to the alignment + // of the struct itself. + FinishLayout(RD); + +#ifndef NDEBUG + // Check that we have base offsets for all bases. 
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + if (I->isVirtual()) + continue; + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + assert(Bases.count(BaseDecl) && "Did not find base offset!"); + } + + // And all virtual bases. + for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), + E = RD->vbases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + assert(VBases.count(BaseDecl) && "Did not find base offset!"); + } +#endif +} + +void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) { + if (ObjCInterfaceDecl *SD = D->getSuperClass()) { + const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD); + + UpdateAlignment(SL.getAlignment()); + + // We start laying out ivars not at the end of the superclass + // structure, but at the next byte following the last field. + setSize(SL.getDataSize()); + setDataSize(getSize()); + } + + InitializeLayout(D); + // Layout each ivar sequentially. + for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD; + IVD = IVD->getNextIvar()) + LayoutField(IVD); + + // Finally, round the size of the total struct up to the alignment of the + // struct itself. + FinishLayout(D); +} + +void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) { + // Layout each field, for now, just sequentially, respecting alignment. In + // the future, this will need to be tweakable by targets. 
+ for (RecordDecl::field_iterator Field = D->field_begin(), + FieldEnd = D->field_end(); Field != FieldEnd; ++Field) + LayoutField(*Field); +} + +void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize, + uint64_t TypeSize, + bool FieldPacked, + const FieldDecl *D) { + assert(Context.getLangOpts().CPlusPlus && + "Can only have wide bit-fields in C++!"); + + // Itanium C++ ABI 2.4: + // If sizeof(T)*8 < n, let T' be the largest integral POD type with + // sizeof(T')*8 <= n. + + QualType IntegralPODTypes[] = { + Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy, + Context.UnsignedLongTy, Context.UnsignedLongLongTy + }; + + QualType Type; + for (unsigned I = 0, E = llvm::array_lengthof(IntegralPODTypes); + I != E; ++I) { + uint64_t Size = Context.getTypeSize(IntegralPODTypes[I]); + + if (Size > FieldSize) + break; + + Type = IntegralPODTypes[I]; + } + assert(!Type.isNull() && "Did not find a type!"); + + CharUnits TypeAlign = Context.getTypeAlignInChars(Type); + + // We're not going to use any of the unfilled bits in the last byte. + UnfilledBitsInLastUnit = 0; + LastBitfieldTypeSize = 0; + + uint64_t FieldOffset; + uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; + + if (IsUnion) { + setDataSize(std::max(getDataSizeInBits(), FieldSize)); + FieldOffset = 0; + } else { + // The bitfield is allocated starting at the next offset aligned + // appropriately for T', with length n bits. + FieldOffset = llvm::RoundUpToAlignment(getDataSizeInBits(), + Context.toBits(TypeAlign)); + + uint64_t NewSizeInBits = FieldOffset + FieldSize; + + setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, + Context.getTargetInfo().getCharAlign())); + UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits; + } + + // Place this field at the current location. + FieldOffsets.push_back(FieldOffset); + + CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset, + Context.toBits(TypeAlign), FieldPacked, D); + + // Update the size. 
+ setSize(std::max(getSizeInBits(), getDataSizeInBits())); + + // Remember max struct/class alignment. + UpdateAlignment(TypeAlign); +} + +void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { + bool FieldPacked = Packed || D->hasAttr<PackedAttr>(); + uint64_t FieldSize = D->getBitWidthValue(Context); + std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType()); + uint64_t TypeSize = FieldInfo.first; + unsigned FieldAlign = FieldInfo.second; + + if (IsMsStruct) { + // The field alignment for integer types in ms_struct structs is + // always the size. + FieldAlign = TypeSize; + // Ignore zero-length bitfields after non-bitfields in ms_struct structs. + if (!FieldSize && !LastBitfieldTypeSize) + FieldAlign = 1; + // If a bitfield is followed by a bitfield of a different size, don't + // pack the bits together in ms_struct structs. + if (LastBitfieldTypeSize != TypeSize) { + UnfilledBitsInLastUnit = 0; + LastBitfieldTypeSize = 0; + } + } + + uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; + uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset; + + bool ZeroLengthBitfield = false; + if (!Context.getTargetInfo().useBitFieldTypeAlignment() && + Context.getTargetInfo().useZeroLengthBitfieldAlignment() && + FieldSize == 0) { + // The alignment of a zero-length bitfield affects the alignment + // of the next member. The alignment is the max of the zero + // length bitfield's alignment and a target specific fixed value. + ZeroLengthBitfield = true; + unsigned ZeroLengthBitfieldBoundary = + Context.getTargetInfo().getZeroLengthBitfieldBoundary(); + if (ZeroLengthBitfieldBoundary > FieldAlign) + FieldAlign = ZeroLengthBitfieldBoundary; + } + + if (FieldSize > TypeSize) { + LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D); + return; + } + + // The align if the field is not packed. This is to check if the attribute + // was unnecessary (-Wpacked). 
+ unsigned UnpackedFieldAlign = FieldAlign; + uint64_t UnpackedFieldOffset = FieldOffset; + if (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield) + UnpackedFieldAlign = 1; + + if (FieldPacked || + (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield)) + FieldAlign = 1; + FieldAlign = std::max(FieldAlign, D->getMaxAlignment()); + UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment()); + + // The maximum field alignment overrides the aligned attribute. + if (!MaxFieldAlignment.isZero() && FieldSize != 0) { + unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment); + FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits); + UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits); + } + + // ms_struct bitfields always have to start at a round alignment. + if (IsMsStruct && !LastBitfieldTypeSize) { + FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign); + UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset, + UnpackedFieldAlign); + } + + // Check if we need to add padding to give the field the correct alignment. + if (FieldSize == 0 || + (MaxFieldAlignment.isZero() && + (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) + FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign); + + if (FieldSize == 0 || + (MaxFieldAlignment.isZero() && + (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize)) + UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset, + UnpackedFieldAlign); + + // Padding members don't affect overall alignment, unless zero length bitfield + // alignment is enabled. + if (!D->getIdentifier() && + !Context.getTargetInfo().useZeroLengthBitfieldAlignment() && + !IsMsStruct) + FieldAlign = UnpackedFieldAlign = 1; + + if (ExternalLayout) + FieldOffset = updateExternalFieldOffset(D, FieldOffset); + + // Place this field at the current location. 
+ FieldOffsets.push_back(FieldOffset); + + if (!ExternalLayout) + CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset, + UnpackedFieldAlign, FieldPacked, D); + + // Update DataSize to include the last byte containing (part of) the bitfield. + if (IsUnion) { + // FIXME: I think FieldSize should be TypeSize here. + setDataSize(std::max(getDataSizeInBits(), FieldSize)); + } else { + if (IsMsStruct && FieldSize) { + // Under ms_struct, a bitfield always takes up space equal to the size + // of the type. We can't just change the alignment computation on the + // other codepath because of the way this interacts with #pragma pack: + // in a packed struct, we need to allocate misaligned space in the + // struct to hold the bitfield. + if (!UnfilledBitsInLastUnit) { + setDataSize(FieldOffset + TypeSize); + UnfilledBitsInLastUnit = TypeSize - FieldSize; + } else if (UnfilledBitsInLastUnit < FieldSize) { + setDataSize(getDataSizeInBits() + TypeSize); + UnfilledBitsInLastUnit = TypeSize - FieldSize; + } else { + UnfilledBitsInLastUnit -= FieldSize; + } + LastBitfieldTypeSize = TypeSize; + } else { + uint64_t NewSizeInBits = FieldOffset + FieldSize; + uint64_t BitfieldAlignment = Context.getTargetInfo().getCharAlign(); + setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, BitfieldAlignment)); + UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits; + LastBitfieldTypeSize = 0; + } + } + + // Update the size. + setSize(std::max(getSizeInBits(), getDataSizeInBits())); + + // Remember max struct/class alignment. + UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign), + Context.toCharUnitsFromBits(UnpackedFieldAlign)); +} + +void RecordLayoutBuilder::LayoutField(const FieldDecl *D) { + if (D->isBitField()) { + LayoutBitField(D); + return; + } + + uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit; + + // Reset the unfilled bits. 
+ UnfilledBitsInLastUnit = 0; + LastBitfieldTypeSize = 0; + + bool FieldPacked = Packed || D->hasAttr<PackedAttr>(); + CharUnits FieldOffset = + IsUnion ? CharUnits::Zero() : getDataSize(); + CharUnits FieldSize; + CharUnits FieldAlign; + + if (D->getType()->isIncompleteArrayType()) { + // This is a flexible array member; we can't directly + // query getTypeInfo about these, so we figure it out here. + // Flexible array members don't have any size, but they + // have to be aligned appropriately for their element type. + FieldSize = CharUnits::Zero(); + const ArrayType* ATy = Context.getAsArrayType(D->getType()); + FieldAlign = Context.getTypeAlignInChars(ATy->getElementType()); + } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) { + unsigned AS = RT->getPointeeType().getAddressSpace(); + FieldSize = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS)); + FieldAlign = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS)); + } else { + std::pair<CharUnits, CharUnits> FieldInfo = + Context.getTypeInfoInChars(D->getType()); + FieldSize = FieldInfo.first; + FieldAlign = FieldInfo.second; + + if (IsMsStruct) { + // If MS bitfield layout is required, figure out what type is being + // laid out and align the field to the width of that type. + + // Resolve all typedefs down to their base type and round up the field + // alignment if necessary. + QualType T = Context.getBaseElementType(D->getType()); + if (const BuiltinType *BTy = T->getAs<BuiltinType>()) { + CharUnits TypeSize = Context.getTypeSizeInChars(BTy); + if (TypeSize > FieldAlign) + FieldAlign = TypeSize; + } + } + } + + // The align if the field is not packed. This is to check if the attribute + // was unnecessary (-Wpacked). 
+ CharUnits UnpackedFieldAlign = FieldAlign; + CharUnits UnpackedFieldOffset = FieldOffset; + + if (FieldPacked) + FieldAlign = CharUnits::One(); + CharUnits MaxAlignmentInChars = + Context.toCharUnitsFromBits(D->getMaxAlignment()); + FieldAlign = std::max(FieldAlign, MaxAlignmentInChars); + UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars); + + // The maximum field alignment overrides the aligned attribute. + if (!MaxFieldAlignment.isZero()) { + FieldAlign = std::min(FieldAlign, MaxFieldAlignment); + UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment); + } + + // Round up the current record size to the field's alignment boundary. + FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign); + UnpackedFieldOffset = + UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign); + + if (ExternalLayout) { + FieldOffset = Context.toCharUnitsFromBits( + updateExternalFieldOffset(D, Context.toBits(FieldOffset))); + + if (!IsUnion && EmptySubobjects) { + // Record the fact that we're placing a field at this offset. + bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset); + (void)Allowed; + assert(Allowed && "Externally-placed field cannot be placed here"); + } + } else { + if (!IsUnion && EmptySubobjects) { + // Check if we can place the field at this offset. + while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) { + // We couldn't place the field at the offset. Try again at a new offset. + FieldOffset += FieldAlign; + } + } + } + + // Place this field at the current location. + FieldOffsets.push_back(Context.toBits(FieldOffset)); + + if (!ExternalLayout) + CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset, + Context.toBits(UnpackedFieldOffset), + Context.toBits(UnpackedFieldAlign), FieldPacked, D); + + // Reserve space for this field. 
+ uint64_t FieldSizeInBits = Context.toBits(FieldSize); + if (IsUnion) + setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits)); + else + setDataSize(FieldOffset + FieldSize); + + // Update the size. + setSize(std::max(getSizeInBits(), getDataSizeInBits())); + + // Remember max struct/class alignment. + UpdateAlignment(FieldAlign, UnpackedFieldAlign); +} + +void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) { + // In C++, records cannot be of size 0. + if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) { + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + // Compatibility with gcc requires a class (pod or non-pod) + // which is not empty but of size 0; such as having fields of + // array of zero-length, remains of Size 0 + if (RD->isEmpty()) + setSize(CharUnits::One()); + } + else + setSize(CharUnits::One()); + } + + // Finally, round the size of the record up to the alignment of the + // record itself. + uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit; + uint64_t UnpackedSizeInBits = + llvm::RoundUpToAlignment(getSizeInBits(), + Context.toBits(UnpackedAlignment)); + CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits); + uint64_t RoundedSize + = llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment)); + + if (ExternalLayout) { + // If we're inferring alignment, and the external size is smaller than + // our size after we've rounded up to alignment, conservatively set the + // alignment to 1. + if (InferAlignment && ExternalSize < RoundedSize) { + Alignment = CharUnits::One(); + InferAlignment = false; + } + setSize(ExternalSize); + return; + } + + // Set the size to the final size. + setSize(RoundedSize); + + unsigned CharBitNum = Context.getTargetInfo().getCharWidth(); + if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) { + // Warn if padding was introduced to the struct/class/union. 
+ if (getSizeInBits() > UnpaddedSize) { + unsigned PadSize = getSizeInBits() - UnpaddedSize; + bool InBits = true; + if (PadSize % CharBitNum == 0) { + PadSize = PadSize / CharBitNum; + InBits = false; + } + Diag(RD->getLocation(), diag::warn_padded_struct_size) + << Context.getTypeDeclType(RD) + << PadSize + << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not + } + + // Warn if we packed it unnecessarily. If the alignment is 1 byte don't + // bother since there won't be alignment issues. + if (Packed && UnpackedAlignment > CharUnits::One() && + getSize() == UnpackedSize) + Diag(D->getLocation(), diag::warn_unnecessary_packed) + << Context.getTypeDeclType(RD); + } +} + +void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment, + CharUnits UnpackedNewAlignment) { + // The alignment is not modified when using 'mac68k' alignment or when + // we have an externally-supplied layout that also provides overall alignment. + if (IsMac68kAlign || (ExternalLayout && !InferAlignment)) + return; + + if (NewAlignment > Alignment) { + assert(llvm::isPowerOf2_32(NewAlignment.getQuantity() && + "Alignment not a power of 2")); + Alignment = NewAlignment; + } + + if (UnpackedNewAlignment > UnpackedAlignment) { + assert(llvm::isPowerOf2_32(UnpackedNewAlignment.getQuantity() && + "Alignment not a power of 2")); + UnpackedAlignment = UnpackedNewAlignment; + } +} + +uint64_t +RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field, + uint64_t ComputedOffset) { + assert(ExternalFieldOffsets.find(Field) != ExternalFieldOffsets.end() && + "Field does not have an external offset"); + + uint64_t ExternalFieldOffset = ExternalFieldOffsets[Field]; + + if (InferAlignment && ExternalFieldOffset < ComputedOffset) { + // The externally-supplied field offset is before the field offset we + // computed. Assume that the structure is packed. + Alignment = CharUnits::One(); + InferAlignment = false; + } + + // Use the externally-supplied field offset. 
+ return ExternalFieldOffset; +} + +/// \brief Get diagnostic %select index for tag kind for +/// field padding diagnostic message. +/// WARNING: Indexes apply to particular diagnostics only! +/// +/// \returns diagnostic %select index. +static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) { + switch (Tag) { + case TTK_Struct: return 0; + case TTK_Interface: return 1; + case TTK_Class: return 2; + default: llvm_unreachable("Invalid tag kind for field padding diagnostic!"); + } +} + +void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset, + uint64_t UnpaddedOffset, + uint64_t UnpackedOffset, + unsigned UnpackedAlign, + bool isPacked, + const FieldDecl *D) { + // We let objc ivars without warning, objc interfaces generally are not used + // for padding tricks. + if (isa<ObjCIvarDecl>(D)) + return; + + // Don't warn about structs created without a SourceLocation. This can + // be done by clients of the AST, such as codegen. + if (D->getLocation().isInvalid()) + return; + + unsigned CharBitNum = Context.getTargetInfo().getCharWidth(); + + // Warn if padding was introduced to the struct/class. + if (!IsUnion && Offset > UnpaddedOffset) { + unsigned PadSize = Offset - UnpaddedOffset; + bool InBits = true; + if (PadSize % CharBitNum == 0) { + PadSize = PadSize / CharBitNum; + InBits = false; + } + if (D->getIdentifier()) + Diag(D->getLocation(), diag::warn_padded_struct_field) + << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) + << Context.getTypeDeclType(D->getParent()) + << PadSize + << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not + << D->getIdentifier(); + else + Diag(D->getLocation(), diag::warn_padded_struct_anon_field) + << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) + << Context.getTypeDeclType(D->getParent()) + << PadSize + << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not + } + + // Warn if we packed it unnecessarily. 
If the alignment is 1 byte don't + // bother since there won't be alignment issues. + if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset) + Diag(D->getLocation(), diag::warn_unnecessary_packed) + << D->getIdentifier(); +} + +static const CXXMethodDecl *computeKeyFunction(ASTContext &Context, + const CXXRecordDecl *RD) { + // If a class isn't polymorphic it doesn't have a key function. + if (!RD->isPolymorphic()) + return 0; + + // A class that is not externally visible doesn't have a key function. (Or + // at least, there's no point to assigning a key function to such a class; + // this doesn't affect the ABI.) + if (!RD->isExternallyVisible()) + return 0; + + // Template instantiations don't have key functions,see Itanium C++ ABI 5.2.6. + // Same behavior as GCC. + TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind(); + if (TSK == TSK_ImplicitInstantiation || + TSK == TSK_ExplicitInstantiationDefinition) + return 0; + + bool allowInlineFunctions = + Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline(); + + for (CXXRecordDecl::method_iterator I = RD->method_begin(), + E = RD->method_end(); I != E; ++I) { + const CXXMethodDecl *MD = *I; + + if (!MD->isVirtual()) + continue; + + if (MD->isPure()) + continue; + + // Ignore implicit member functions, they are always marked as inline, but + // they don't have a body until they're defined. + if (MD->isImplicit()) + continue; + + if (MD->isInlineSpecified()) + continue; + + if (MD->hasInlineBody()) + continue; + + // Ignore inline deleted or defaulted functions. + if (!MD->isUserProvided()) + continue; + + // In certain ABIs, ignore functions with out-of-line inline definitions. + if (!allowInlineFunctions) { + const FunctionDecl *Def; + if (MD->hasBody(Def) && Def->isInlineSpecified()) + continue; + } + + // We found it. 
+ return MD; + } + + return 0; +} + +DiagnosticBuilder +RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) { + return Context.getDiagnostics().Report(Loc, DiagID); +} + +/// Does the target C++ ABI require us to skip over the tail-padding +/// of the given class (considering it as a base class) when allocating +/// objects? +static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) { + switch (ABI.getTailPaddingUseRules()) { + case TargetCXXABI::AlwaysUseTailPadding: + return false; + + case TargetCXXABI::UseTailPaddingUnlessPOD03: + // FIXME: To the extent that this is meant to cover the Itanium ABI + // rules, we should implement the restrictions about over-sized + // bitfields: + // + // http://mentorembedded.github.com/cxx-abi/abi.html#POD : + // In general, a type is considered a POD for the purposes of + // layout if it is a POD type (in the sense of ISO C++ + // [basic.types]). However, a POD-struct or POD-union (in the + // sense of ISO C++ [class]) with a bitfield member whose + // declared width is wider than the declared type of the + // bitfield is not a POD for the purpose of layout. Similarly, + // an array type is not a POD for the purpose of layout if the + // element type of the array is not a POD for the purpose of + // layout. + // + // Where references to the ISO C++ are made in this paragraph, + // the Technical Corrigendum 1 version of the standard is + // intended. + return RD->isPOD(); + + case TargetCXXABI::UseTailPaddingUnlessPOD11: + // This is equivalent to RD->getTypeForDecl().isCXX11PODType(), + // but with a lot of abstraction penalty stripped off. This does + // assume that these properties are set correctly even in C++98 + // mode; fortunately, that is true because we want to assign + // consistently semantics to the type-traits intrinsics (or at + // least as many of them as possible). 
+    return RD->isTrivial() && RD->isStandardLayout();
+  }
+
+  llvm_unreachable("bad tail-padding use kind");
+}
+
+static bool isMsLayout(const RecordDecl* D) {
+  return D->getASTContext().getTargetInfo().getCXXABI().isMicrosoft();
+}
+
+// This section contains an implementation of struct layout that is, up to the
+// included tests, compatible with cl.exe (2012).  The layout produced is
+// significantly different from those produced by the Itanium ABI.  Here we note
+// the most important differences.
+//
+// * The alignment of bitfields in unions is ignored when computing the
+//   alignment of the union.
+// * The existence of a zero-width bitfield that occurs after anything other
+//   than a non-zero length bitfield is ignored.
+// * The Itanium equivalent vtable pointers are split into a vfptr (virtual
+//   function pointer) and a vbptr (virtual base pointer).  They can each be
+//   shared with a non-virtual base.  These bases need not be the same.  vfptrs
+//   always occur at offset 0.  vbptrs can occur at an
+//   arbitrary offset and are placed after non-virtual bases but before fields.
+// * Virtual bases sometimes require a 'vtordisp' field that is laid out before
+//   the virtual base and is used in conjunction with virtual overrides during
+//   construction and destruction.
+// * vfptrs are allocated in a block of memory equal to the alignment of the
+//   fields and non-virtual bases at offset 0 in 32 bit mode and in a pointer
+//   sized block of memory in 64 bit mode.
+// * vbptrs are allocated in a block of memory equal to the alignment of the
+//   fields and non-virtual bases.  This block is at a potentially unaligned
+//   offset.  If the allocation slot is unaligned and the alignment is less than
+//   or equal to the pointer size, additional space is allocated so that the
+//   pointer can be aligned properly.  This causes very strange effects on the
+//   placement of objects after the allocated block. (see the code).
+// * vtordisps are allocated in a block of memory with size and alignment equal
+//   to the alignment of the completed structure (before applying __declspec(
+//   align())).  The vtordisp always occurs at the end of the allocation block,
+//   immediately prior to the virtual base.
+// * The last zero sized non-virtual base is allocated after the placement of
+//   vbptr if one exists and can be placed at the end of the struct, potentially
+//   aliasing either the first member or another struct allocated after this
+//   one.
+// * The last zero size virtual base may be placed at the end of the struct
+//   and can potentially alias a zero sized type in the next struct.
+// * If the last field is a non-zero length bitfield and we have any virtual
+//   bases then some extra padding is added before the virtual bases for no
+//   obvious reason.
+// * When laying out empty non-virtual bases, an extra byte of padding is added
+//   if the non-virtual base before the empty non-virtual base has a vbptr.
+
+
+namespace {
+struct MicrosoftRecordLayoutBuilder {
+  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+  MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
+private:
+  MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &)
+  LLVM_DELETED_FUNCTION;
+  void operator=(const MicrosoftRecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
+public:
+
+  void layout(const RecordDecl *RD);
+  void cxxLayout(const CXXRecordDecl *RD);
+  /// \brief Initializes size and alignment and honors some flags.
+  void initializeLayout(const RecordDecl *RD);
+  /// \brief Initializes C++ layout, computes alignment and virtual alignment
+  /// and the existence of vfptrs and vbptrs.  Alignment is needed before the
+  /// vfptr is laid out.
+  void initializeCXXLayout(const CXXRecordDecl *RD);
+  void layoutVFPtr(const CXXRecordDecl *RD);
+  void layoutNonVirtualBases(const CXXRecordDecl *RD);
+  void layoutNonVirtualBase(const CXXRecordDecl *RD);
+  void layoutVBPtr(const CXXRecordDecl *RD);
+  /// \brief Lays out the fields of the record.  Also rounds size up to
+  /// alignment.
+  void layoutFields(const RecordDecl *RD);
+  void layoutField(const FieldDecl *FD);
+  void layoutBitField(const FieldDecl *FD);
+  /// \brief Lays out a single zero-width bit-field in the record and handles
+  /// special cases associated with zero-width bit-fields.
+  void layoutZeroWidthBitField(const FieldDecl *FD);
+  void layoutVirtualBases(const CXXRecordDecl *RD);
+  void layoutVirtualBase(const CXXRecordDecl *RD, bool HasVtordisp);
+  /// \brief Flushes the lazy virtual base and conditionally rounds up to
+  /// alignment.
+  void finalizeCXXLayout(const CXXRecordDecl *RD);
+  void honorDeclspecAlign(const RecordDecl *RD);
+
+  /// \brief Updates the alignment of the type.  This function doesn't take any
+  /// properties (such as packedness) into account.  getAdjustedFieldInfo()
+  /// adjusts for packedness.
+  void updateAlignment(CharUnits NewAlignment) {
+    Alignment = std::max(Alignment, NewAlignment);
+  }
+  /// \brief Gets the size and alignment taking attributes into account.
+  std::pair<CharUnits, CharUnits> getAdjustedFieldInfo(const FieldDecl *FD);
+  /// \brief Places a field at offset 0.
+  void placeFieldAtZero() { FieldOffsets.push_back(0); }
+  /// \brief Places a field at an offset in CharUnits.
+  void placeFieldAtOffset(CharUnits FieldOffset) {
+    FieldOffsets.push_back(Context.toBits(FieldOffset));
+  }
+  /// \brief Places a bitfield at a bit offset.
+  void placeFieldAtBitOffset(uint64_t FieldOffset) {
+    FieldOffsets.push_back(FieldOffset);
+  }
+  /// \brief Compute the set of virtual bases for which vtordisps are required.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> + computeVtorDispSet(const CXXRecordDecl *RD); + + const ASTContext &Context; + /// \brief The size of the record being laid out. + CharUnits Size; + /// \brief The current alignment of the record layout. + CharUnits Alignment; + /// \brief The collection of field offsets. + SmallVector<uint64_t, 16> FieldOffsets; + /// \brief The maximum allowed field alignment. This is set by #pragma pack. + CharUnits MaxFieldAlignment; + /// \brief Alignment does not occur for virtual bases unless something + /// forces it to by explicitly using __declspec(align()) + bool AlignAfterVBases : 1; + bool IsUnion : 1; + /// \brief True if the last field laid out was a bitfield and was not 0 + /// width. + bool LastFieldIsNonZeroWidthBitfield : 1; + /// \brief The size of the allocation of the currently active bitfield. + /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield + /// is true. + CharUnits CurrentBitfieldSize; + /// \brief The number of remaining bits in our last bitfield allocation. + /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is + /// true. + unsigned RemainingBitsInField; + + /// \brief The data alignment of the record layout. + CharUnits DataSize; + /// \brief The alignment of the non-virtual portion of the record layout + /// without the impact of the virtual pointers. + /// Only used for C++ layouts. + CharUnits BasesAndFieldsAlignment; + /// \brief The alignment of the non-virtual portion of the record layout + /// Only used for C++ layouts. + CharUnits NonVirtualAlignment; + /// \brief The additional alignment imposed by the virtual bases. + CharUnits VirtualAlignment; + /// \brief The primary base class (if one exists). + const CXXRecordDecl *PrimaryBase; + /// \brief The class we share our vb-pointer with. + const CXXRecordDecl *SharedVBPtrBase; + /// \brief True if the class has a vftable pointer that can be extended + /// by this class or classes derived from it. 
Such a vfptr will always occur + /// at offset 0. + bool HasExtendableVFPtr : 1; + /// \brief True if the class has a (not necessarily its own) vbtable pointer. + bool HasVBPtr : 1; + /// \brief Offset to the virtual base table pointer (if one exists). + CharUnits VBPtrOffset; + /// \brief Base classes and their offsets in the record. + BaseOffsetsMapTy Bases; + /// \brief virtual base classes and their offsets in the record. + ASTRecordLayout::VBaseOffsetsMapTy VBases; + /// \brief The size of a pointer. + CharUnits PointerSize; + /// \brief The alignment of a pointer. + CharUnits PointerAlignment; + /// \brief Holds an empty base we haven't yet laid out. + const CXXRecordDecl *LazyEmptyBase; + /// \brief Lets us know if the last base we laid out was empty. Only used + /// when adjusting the placement of a last zero-sized base in 64 bit mode. + bool LastBaseWasEmpty; + /// \brief Lets us know if we're in 64-bit mode + bool Is64BitMode; + /// \brief True if the last non-virtual base has a vbptr. + bool LastNonVirtualBaseHasVBPtr; +}; +} // namespace + +std::pair<CharUnits, CharUnits> +MicrosoftRecordLayoutBuilder::getAdjustedFieldInfo(const FieldDecl *FD) { + std::pair<CharUnits, CharUnits> FieldInfo = + Context.getTypeInfoInChars(FD->getType()); + + // If we're not on win32 and using ms_struct the field alignment will be wrong + // for 64 bit types, so we fix that here. + if (FD->getASTContext().getTargetInfo().getTriple().getOS() != + llvm::Triple::Win32) { + QualType T = Context.getBaseElementType(FD->getType()); + if (const BuiltinType *BTy = T->getAs<BuiltinType>()) { + CharUnits TypeSize = Context.getTypeSizeInChars(BTy); + if (TypeSize > FieldInfo.second) + FieldInfo.second = TypeSize; + } + } + + // Respect packed attribute. + if (FD->hasAttr<PackedAttr>()) + FieldInfo.second = CharUnits::One(); + // Respect pack pragma. 
+ else if (!MaxFieldAlignment.isZero()) + FieldInfo.second = std::min(FieldInfo.second, MaxFieldAlignment); + // Respect alignment attributes. + if (unsigned fieldAlign = FD->getMaxAlignment()) { + CharUnits FieldAlign = Context.toCharUnitsFromBits(fieldAlign); + AlignAfterVBases = true; + FieldInfo.second = std::max(FieldInfo.second, FieldAlign); + } + return FieldInfo; +} + +void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { + IsUnion = RD->isUnion(); + Is64BitMode = Context.getTargetInfo().getPointerWidth(0) == 64; + + Size = CharUnits::Zero(); + Alignment = CharUnits::One(); + AlignAfterVBases = false; + + // Compute the maximum field alignment. + MaxFieldAlignment = CharUnits::Zero(); + // Honor the default struct packing maximum alignment flag. + if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) + MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment); + // Honor the packing attribute. + if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()) + MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment()); + // Packed attribute forces max field alignment to be 1. + if (RD->hasAttr<PackedAttr>()) + MaxFieldAlignment = CharUnits::One(); +} + +void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) { + initializeLayout(RD); + layoutFields(RD); + honorDeclspecAlign(RD); +} + +void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) { + initializeLayout(RD); + initializeCXXLayout(RD); + layoutVFPtr(RD); + layoutNonVirtualBases(RD); + layoutVBPtr(RD); + layoutFields(RD); + DataSize = Size; + NonVirtualAlignment = Alignment; + layoutVirtualBases(RD); + finalizeCXXLayout(RD); + honorDeclspecAlign(RD); +} + +void +MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) { + // Calculate pointer size and alignment. 
+ PointerSize = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + PointerAlignment = PointerSize; + if (!MaxFieldAlignment.isZero()) + PointerAlignment = std::min(PointerAlignment, MaxFieldAlignment); + + // Initialize information about the bases. + HasVBPtr = false; + HasExtendableVFPtr = false; + SharedVBPtrBase = 0; + PrimaryBase = 0; + VirtualAlignment = CharUnits::One(); + AlignAfterVBases = Is64BitMode; + + // If the record has a dynamic base class, attempt to choose a primary base + // class. It is the first (in direct base class order) non-virtual dynamic + // base class, if one exists. + for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), + e = RD->bases_end(); + i != e; ++i) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl); + // Handle forced alignment. + if (Layout.getAlignAfterVBases()) + AlignAfterVBases = true; + // Handle virtual bases. + if (i->isVirtual()) { + VirtualAlignment = std::max(VirtualAlignment, Layout.getAlignment()); + HasVBPtr = true; + continue; + } + // We located a primary base class! + if (!PrimaryBase && Layout.hasExtendableVFPtr()) { + PrimaryBase = BaseDecl; + HasExtendableVFPtr = true; + } + // We located a base to share a VBPtr with! + if (!SharedVBPtrBase && Layout.hasVBPtr()) { + SharedVBPtrBase = BaseDecl; + HasVBPtr = true; + } + updateAlignment(Layout.getAlignment()); + } + + // Use LayoutFields to compute the alignment of the fields. The layout + // is discarded. This is the simplest way to get all of the bit-field + // behavior correct and is not actually very expensive. 
+ layoutFields(RD); + Size = CharUnits::Zero(); + BasesAndFieldsAlignment = Alignment; + FieldOffsets.clear(); +} + +void MicrosoftRecordLayoutBuilder::layoutVFPtr(const CXXRecordDecl *RD) { + // If we have a primary base then our VFPtr was already laid out + if (PrimaryBase) + return; + + // Look at all of our methods to determine if we need a VFPtr. We need a + // vfptr if we define a new virtual function. + if (!HasExtendableVFPtr && RD->isDynamicClass()) + for (CXXRecordDecl::method_iterator i = RD->method_begin(), + e = RD->method_end(); + !HasExtendableVFPtr && i != e; ++i) + HasExtendableVFPtr = i->isVirtual() && i->size_overridden_methods() == 0; + if (!HasExtendableVFPtr) + return; + + // MSVC 32 (but not 64) potentially over-aligns the vf-table pointer by giving + // it the max alignment of all the non-virtual data in the class. The + // resulting layout is essentially { vftbl, { nvdata } }. This is completely + // unnecessary, but we're not here to pass judgment. + updateAlignment(PointerAlignment); + if (Is64BitMode) + Size = Size.RoundUpToAlignment(PointerAlignment) + PointerSize; + else + Size = Size.RoundUpToAlignment(PointerAlignment) + Alignment; +} + +void +MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) { + LazyEmptyBase = 0; + LastBaseWasEmpty = false; + LastNonVirtualBaseHasVBPtr = false; + + // Lay out the primary base first. + if (PrimaryBase) + layoutNonVirtualBase(PrimaryBase); + + // Iterate through the bases and lay out the non-virtual ones. + for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), + e = RD->bases_end(); + i != e; ++i) { + if (i->isVirtual()) + continue; + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl()); + if (BaseDecl != PrimaryBase) + layoutNonVirtualBase(BaseDecl); + } +} + +void +MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(const CXXRecordDecl *RD) { + const ASTRecordLayout *Layout = RD ? 
&Context.getASTRecordLayout(RD) : 0; + + // If we have a lazy empty base we haven't laid out yet, do that now. + if (LazyEmptyBase) { + const ASTRecordLayout &LazyLayout = + Context.getASTRecordLayout(LazyEmptyBase); + Size = Size.RoundUpToAlignment(LazyLayout.getAlignment()); + // If the last non-virtual base has a vbptr we add a byte of padding for no + // obvious reason. + if (LastNonVirtualBaseHasVBPtr) + Size++; + Bases.insert(std::make_pair(LazyEmptyBase, Size)); + // Empty bases only consume space when followed by another empty base. + if (RD && Layout->getNonVirtualSize().isZero()) { + LastBaseWasEmpty = true; + Size++; + } + LazyEmptyBase = 0; + LastNonVirtualBaseHasVBPtr = false; + } + + // RD is null when flushing the final lazy base. + if (!RD) + return; + + if (Layout->getNonVirtualSize().isZero()) { + LazyEmptyBase = RD; + return; + } + + // Insert the base here. + CharUnits BaseOffset = Size.RoundUpToAlignment(Layout->getAlignment()); + Bases.insert(std::make_pair(RD, BaseOffset)); + Size = BaseOffset + Layout->getDataSize(); + // Note: we don't update alignment here because it was accounted + // for during initalization. + LastBaseWasEmpty = false; + LastNonVirtualBaseHasVBPtr = Layout->hasVBPtr(); +} + +void MicrosoftRecordLayoutBuilder::layoutVBPtr(const CXXRecordDecl *RD) { + if (!HasVBPtr) + VBPtrOffset = CharUnits::fromQuantity(-1); + else if (SharedVBPtrBase) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase); + VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset(); + } else { + VBPtrOffset = Size.RoundUpToAlignment(PointerAlignment); + CharUnits OldSize = Size; + Size = VBPtrOffset + PointerSize; + if (BasesAndFieldsAlignment <= PointerAlignment) { + // Handle strange padding rules for the lazily placed base. I have no + // explanation for why the last virtual base is padded in such an odd way. 
+ // Two things to note about this padding are that the rules are different + // if the alignment of the bases+fields is <= to the alignemnt of a + // pointer and that the rule in 64-bit mode behaves differently depending + // on if the second to last base was also zero sized. + Size += OldSize % BasesAndFieldsAlignment.getQuantity(); + } else { + if (Is64BitMode) + Size += LastBaseWasEmpty ? CharUnits::One() : CharUnits::Zero(); + else + Size = OldSize + BasesAndFieldsAlignment; + } + updateAlignment(PointerAlignment); + } + + // Flush the lazy empty base. + layoutNonVirtualBase(0); +} + +void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) { + LastFieldIsNonZeroWidthBitfield = false; + for (RecordDecl::field_iterator Field = RD->field_begin(), + FieldEnd = RD->field_end(); + Field != FieldEnd; ++Field) + layoutField(*Field); + Size = Size.RoundUpToAlignment(Alignment); +} + +void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) { + if (FD->isBitField()) { + layoutBitField(FD); + return; + } + LastFieldIsNonZeroWidthBitfield = false; + + std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD); + CharUnits FieldSize = FieldInfo.first; + CharUnits FieldAlign = FieldInfo.second; + + updateAlignment(FieldAlign); + if (IsUnion) { + placeFieldAtZero(); + Size = std::max(Size, FieldSize); + } else { + // Round up the current record size to the field's alignment boundary. 
+ CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign); + placeFieldAtOffset(FieldOffset); + Size = FieldOffset + FieldSize; + } +} + +void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) { + unsigned Width = FD->getBitWidthValue(Context); + if (Width == 0) { + layoutZeroWidthBitField(FD); + return; + } + + std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD); + CharUnits FieldSize = FieldInfo.first; + CharUnits FieldAlign = FieldInfo.second; + + // Clamp the bitfield to a containable size for the sake of being able + // to lay them out. Sema will throw an error. + if (Width > Context.toBits(FieldSize)) + Width = Context.toBits(FieldSize); + + // Check to see if this bitfield fits into an existing allocation. Note: + // MSVC refuses to pack bitfields of formal types with different sizes + // into the same allocation. + if (!IsUnion && LastFieldIsNonZeroWidthBitfield && + CurrentBitfieldSize == FieldSize && Width <= RemainingBitsInField) { + placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField); + RemainingBitsInField -= Width; + return; + } + + LastFieldIsNonZeroWidthBitfield = true; + CurrentBitfieldSize = FieldSize; + if (IsUnion) { + placeFieldAtZero(); + Size = std::max(Size, FieldSize); + // TODO: Add a Sema warning that MS ignores bitfield alignment in unions. + } else { + // Allocate a new block of memory and place the bitfield in it. + CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign); + placeFieldAtOffset(FieldOffset); + Size = FieldOffset + FieldSize; + updateAlignment(FieldAlign); + RemainingBitsInField = Context.toBits(FieldSize) - Width; + } +} + +void +MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) { + // Zero-width bitfields are ignored unless they follow a non-zero-width + // bitfield. 
+ std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD); + CharUnits FieldSize = FieldInfo.first; + CharUnits FieldAlign = FieldInfo.second; + + if (!LastFieldIsNonZeroWidthBitfield) { + placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size); + // TODO: Add a Sema warning that MS ignores alignment for zero + // sized bitfields that occur after zero-size bitfields or non bitfields. + return; + } + + LastFieldIsNonZeroWidthBitfield = false; + if (IsUnion) { + placeFieldAtZero(); + Size = std::max(Size, FieldSize); + } else { + // Round up the current record size to the field's alignment boundary. + CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign); + placeFieldAtOffset(FieldOffset); + Size = FieldOffset; + updateAlignment(FieldAlign); + } +} + +void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) { + if (!HasVBPtr) + return; + + updateAlignment(VirtualAlignment); + + // Zero-sized v-bases obey the alignment attribute so apply it here. The + // alignment attribute is normally accounted for in FinalizeLayout. + if (unsigned MaxAlign = RD->getMaxAlignment()) + updateAlignment(Context.toCharUnitsFromBits(MaxAlign)); + + llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp = + computeVtorDispSet(RD); + + // If the last field we laid out was a non-zero length bitfield then add some + // extra padding for no obvious reason. + if (LastFieldIsNonZeroWidthBitfield) + Size += CurrentBitfieldSize; + + // Iterate through the virtual bases and lay them out. 
+ for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(), + e = RD->vbases_end(); + i != e; ++i) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl()); + layoutVirtualBase(BaseDecl, HasVtordisp.count(BaseDecl)); + } +} + +void MicrosoftRecordLayoutBuilder::layoutVirtualBase(const CXXRecordDecl *RD, + bool HasVtordisp) { + if (LazyEmptyBase) { + const ASTRecordLayout &LazyLayout = + Context.getASTRecordLayout(LazyEmptyBase); + Size = Size.RoundUpToAlignment(LazyLayout.getAlignment()); + VBases.insert( + std::make_pair(LazyEmptyBase, ASTRecordLayout::VBaseInfo(Size, false))); + // Empty bases only consume space when followed by another empty base. + // The space consumed is in an Alignment sized/aligned block and the v-base + // is placed at its alignment offset into the chunk, unless its alignment + // is less than 4 bytes, at which it is placed at 4 byte offset in the + // chunk. We have no idea why. + if (RD && Context.getASTRecordLayout(RD).getNonVirtualSize().isZero()) + Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4); + LazyEmptyBase = 0; + } + + // RD is null when flushing the final lazy virtual base. + if (!RD) + return; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + if (Layout.getNonVirtualSize().isZero() && !HasVtordisp) { + LazyEmptyBase = RD; + return; + } + + CharUnits BaseNVSize = Layout.getNonVirtualSize(); + CharUnits BaseAlign = Layout.getAlignment(); + + // vtordisps are always 4 bytes (even in 64-bit mode) + if (HasVtordisp) + Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4); + Size = Size.RoundUpToAlignment(BaseAlign); + + // Insert the base here. 
+ CharUnits BaseOffset = Size.RoundUpToAlignment(BaseAlign); + VBases.insert( + std::make_pair(RD, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp))); + Size = BaseOffset + BaseNVSize; + // Note: we don't update alignment here because it was accounted for in + // InitializeLayout. +} + +void MicrosoftRecordLayoutBuilder::finalizeCXXLayout(const CXXRecordDecl *RD) { + // Flush the lazy virtual base. + layoutVirtualBase(0, false); + + if (RD->vbases_begin() == RD->vbases_end() || AlignAfterVBases) + Size = Size.RoundUpToAlignment(Alignment); + + if (Size.isZero()) + Size = Alignment; +} + +void MicrosoftRecordLayoutBuilder::honorDeclspecAlign(const RecordDecl *RD) { + if (unsigned MaxAlign = RD->getMaxAlignment()) { + AlignAfterVBases = true; + updateAlignment(Context.toCharUnitsFromBits(MaxAlign)); + Size = Size.RoundUpToAlignment(Alignment); + } +} + +static bool +RequiresVtordisp(const llvm::SmallPtrSet<const CXXRecordDecl *, 2> &HasVtordisp, + const CXXRecordDecl *RD) { + if (HasVtordisp.count(RD)) + return true; + // If any of a virtual bases non-virtual bases (recursively) requires a + // vtordisp than so does this virtual base. + for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), + e = RD->bases_end(); + i != e; ++i) + if (!i->isVirtual() && + RequiresVtordisp( + HasVtordisp, + cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()))) + return true; + return false; +} + +llvm::SmallPtrSet<const CXXRecordDecl *, 2> +MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) { + llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp; + + // If any of our bases need a vtordisp for this type, so do we. Check our + // direct bases for vtordisp requirements. 
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), + e = RD->bases_end(); + i != e; ++i) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl); + for (ASTRecordLayout::VBaseOffsetsMapTy::const_iterator + bi = Layout.getVBaseOffsetsMap().begin(), + be = Layout.getVBaseOffsetsMap().end(); + bi != be; ++bi) + if (bi->second.hasVtorDisp()) + HasVtordisp.insert(bi->first); + } + + // If we define a constructor or destructor and override a function that is + // defined in a virtual base's vtable, that virtual bases need a vtordisp. + // Here we collect a list of classes with vtables for which our virtual bases + // actually live. The virtual bases with this property will require + // vtordisps. In addition, virtual bases that contain non-virtual bases that + // define functions we override also require vtordisps, this case is checked + // explicitly below. + if (RD->hasUserDeclaredConstructor() || RD->hasUserDeclaredDestructor()) { + llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work; + // Seed the working set with our non-destructor virtual methods. + for (CXXRecordDecl::method_iterator i = RD->method_begin(), + e = RD->method_end(); + i != e; ++i) + if ((*i)->isVirtual() && !isa<CXXDestructorDecl>(*i)) + Work.insert(*i); + while (!Work.empty()) { + const CXXMethodDecl *MD = *Work.begin(); + CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(), + e = MD->end_overridden_methods(); + if (i == e) + // If a virtual method has no-overrides it lives in its parent's vtable. + HasVtordisp.insert(MD->getParent()); + else + Work.insert(i, e); + // We've finished processing this element, remove it from the working set. + Work.erase(MD); + } + } + + // Re-check all of our vbases for vtordisp requirements (in case their + // non-virtual bases have vtordisp requirements). 
+ for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(), + e = RD->vbases_end(); + i != e; ++i) { + const CXXRecordDecl *BaseDecl = i->getType()->getAsCXXRecordDecl(); + if (!HasVtordisp.count(BaseDecl) && RequiresVtordisp(HasVtordisp, BaseDecl)) + HasVtordisp.insert(BaseDecl); + } + + return HasVtordisp; +} + +/// \brief Get or compute information about the layout of the specified record +/// (struct/union/class), which indicates its size and field position +/// information. +const ASTRecordLayout * +ASTContext::BuildMicrosoftASTRecordLayout(const RecordDecl *D) const { + MicrosoftRecordLayoutBuilder Builder(*this); + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + Builder.cxxLayout(RD); + return new (*this) ASTRecordLayout( + *this, Builder.Size, Builder.Alignment, + Builder.HasExtendableVFPtr && !Builder.PrimaryBase, + Builder.HasExtendableVFPtr, + Builder.VBPtrOffset, Builder.DataSize, Builder.FieldOffsets.data(), + Builder.FieldOffsets.size(), Builder.DataSize, + Builder.NonVirtualAlignment, CharUnits::Zero(), Builder.PrimaryBase, + false, Builder.SharedVBPtrBase, Builder.AlignAfterVBases, Builder.Bases, + Builder.VBases); + } else { + Builder.layout(D); + return new (*this) ASTRecordLayout( + *this, Builder.Size, Builder.Alignment, Builder.Size, + Builder.FieldOffsets.data(), Builder.FieldOffsets.size()); + } +} + +/// getASTRecordLayout - Get or compute information about the layout of the +/// specified record (struct/union/class), which indicates its size and field +/// position information. +const ASTRecordLayout & +ASTContext::getASTRecordLayout(const RecordDecl *D) const { + // These asserts test different things. A record has a definition + // as soon as we begin to parse the definition. That definition is + // not a complete definition (which is what isDefinition() tests) + // until we *finish* parsing the definition. 
+ + if (D->hasExternalLexicalStorage() && !D->getDefinition()) + getExternalSource()->CompleteType(const_cast<RecordDecl*>(D)); + + D = D->getDefinition(); + assert(D && "Cannot get layout of forward declarations!"); + assert(!D->isInvalidDecl() && "Cannot get layout of invalid decl!"); + assert(D->isCompleteDefinition() && "Cannot layout type before complete!"); + + // Look up this layout, if already laid out, return what we have. + // Note that we can't save a reference to the entry because this function + // is recursive. + const ASTRecordLayout *Entry = ASTRecordLayouts[D]; + if (Entry) return *Entry; + + const ASTRecordLayout *NewEntry = 0; + + if (isMsLayout(D) && !D->getASTContext().getExternalSource()) { + NewEntry = BuildMicrosoftASTRecordLayout(D); + } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + EmptySubobjectMap EmptySubobjects(*this, RD); + RecordLayoutBuilder Builder(*this, &EmptySubobjects); + Builder.Layout(RD); + + // In certain situations, we are allowed to lay out objects in the + // tail-padding of base classes. This is ABI-dependent. + // FIXME: this should be stored in the record layout. + bool skipTailPadding = + mustSkipTailPadding(getTargetInfo().getCXXABI(), cast<CXXRecordDecl>(D)); + + // FIXME: This should be done in FinalizeLayout. + CharUnits DataSize = + skipTailPadding ? Builder.getSize() : Builder.getDataSize(); + CharUnits NonVirtualSize = + skipTailPadding ? 
DataSize : Builder.NonVirtualSize; + NewEntry = + new (*this) ASTRecordLayout(*this, Builder.getSize(), + Builder.Alignment, + Builder.HasOwnVFPtr, + RD->isDynamicClass(), + CharUnits::fromQuantity(-1), + DataSize, + Builder.FieldOffsets.data(), + Builder.FieldOffsets.size(), + NonVirtualSize, + Builder.NonVirtualAlignment, + EmptySubobjects.SizeOfLargestEmptySubobject, + Builder.PrimaryBase, + Builder.PrimaryBaseIsVirtual, + 0, true, + Builder.Bases, Builder.VBases); + } else { + RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0); + Builder.Layout(D); + + NewEntry = + new (*this) ASTRecordLayout(*this, Builder.getSize(), + Builder.Alignment, + Builder.getSize(), + Builder.FieldOffsets.data(), + Builder.FieldOffsets.size()); + } + + ASTRecordLayouts[D] = NewEntry; + + if (getLangOpts().DumpRecordLayouts) { + llvm::outs() << "\n*** Dumping AST Record Layout\n"; + DumpRecordLayout(D, llvm::outs(), getLangOpts().DumpRecordLayoutsSimple); + } + + return *NewEntry; +} + +const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) { + if (!getTargetInfo().getCXXABI().hasKeyFunctions()) + return 0; + + assert(RD->getDefinition() && "Cannot get key function for forward decl!"); + RD = cast<CXXRecordDecl>(RD->getDefinition()); + + LazyDeclPtr &Entry = KeyFunctions[RD]; + if (!Entry) + Entry = const_cast<CXXMethodDecl*>(computeKeyFunction(*this, RD)); + + return cast_or_null<CXXMethodDecl>(Entry.get(getExternalSource())); +} + +void ASTContext::setNonKeyFunction(const CXXMethodDecl *Method) { + assert(Method == Method->getFirstDecl() && + "not working with method declaration from class definition"); + + // Look up the cache entry. Since we're working with the first + // declaration, its parent must be the class definition, which is + // the correct key for the KeyFunctions hash. + llvm::DenseMap<const CXXRecordDecl*, LazyDeclPtr>::iterator + I = KeyFunctions.find(Method->getParent()); + + // If it's not cached, there's nothing to do. 
+ if (I == KeyFunctions.end()) return; + + // If it is cached, check whether it's the target method, and if so, + // remove it from the cache. + if (I->second.get(getExternalSource()) == Method) { + // FIXME: remember that we did this for module / chained PCH state? + KeyFunctions.erase(I); + } +} + +static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) { + const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent()); + return Layout.getFieldOffset(FD->getFieldIndex()); +} + +uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const { + uint64_t OffsetInBits; + if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) { + OffsetInBits = ::getFieldOffset(*this, FD); + } else { + const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD); + + OffsetInBits = 0; + for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(), + CE = IFD->chain_end(); + CI != CE; ++CI) + OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI)); + } + + return OffsetInBits; +} + +/// getObjCLayout - Get or compute information about the layout of the +/// given interface. +/// +/// \param Impl - If given, also include the layout of the interface's +/// implementation. This may differ by including synthesized ivars. +const ASTRecordLayout & +ASTContext::getObjCLayout(const ObjCInterfaceDecl *D, + const ObjCImplementationDecl *Impl) const { + // Retrieve the definition + if (D->hasExternalLexicalStorage() && !D->getDefinition()) + getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D)); + D = D->getDefinition(); + assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!"); + + // Look up this layout, if already laid out, return what we have. + const ObjCContainerDecl *Key = + Impl ? (const ObjCContainerDecl*) Impl : (const ObjCContainerDecl*) D; + if (const ASTRecordLayout *Entry = ObjCLayouts[Key]) + return *Entry; + + // Add in synthesized ivar count if laying out an implementation. 
+ if (Impl) { + unsigned SynthCount = CountNonClassIvars(D); + // If there aren't any sythesized ivars then reuse the interface + // entry. Note we can't cache this because we simply free all + // entries later; however we shouldn't look up implementations + // frequently. + if (SynthCount == 0) + return getObjCLayout(D, 0); + } + + RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0); + Builder.Layout(D); + + const ASTRecordLayout *NewEntry = + new (*this) ASTRecordLayout(*this, Builder.getSize(), + Builder.Alignment, + Builder.getDataSize(), + Builder.FieldOffsets.data(), + Builder.FieldOffsets.size()); + + ObjCLayouts[Key] = NewEntry; + + return *NewEntry; +} + +static void PrintOffset(raw_ostream &OS, + CharUnits Offset, unsigned IndentLevel) { + OS << llvm::format("%4" PRId64 " | ", (int64_t)Offset.getQuantity()); + OS.indent(IndentLevel * 2); +} + +static void PrintIndentNoOffset(raw_ostream &OS, unsigned IndentLevel) { + OS << " | "; + OS.indent(IndentLevel * 2); +} + +static void DumpCXXRecordLayout(raw_ostream &OS, + const CXXRecordDecl *RD, const ASTContext &C, + CharUnits Offset, + unsigned IndentLevel, + const char* Description, + bool IncludeVirtualBases) { + const ASTRecordLayout &Layout = C.getASTRecordLayout(RD); + + PrintOffset(OS, Offset, IndentLevel); + OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString(); + if (Description) + OS << ' ' << Description; + if (RD->isEmpty()) + OS << " (empty)"; + OS << '\n'; + + IndentLevel++; + + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); + bool HasOwnVFPtr = Layout.hasOwnVFPtr(); + bool HasOwnVBPtr = Layout.hasOwnVBPtr(); + + // Vtable pointer. 
+ if (RD->isDynamicClass() && !PrimaryBase && !isMsLayout(RD)) { + PrintOffset(OS, Offset, IndentLevel); + OS << '(' << *RD << " vtable pointer)\n"; + } else if (HasOwnVFPtr) { + PrintOffset(OS, Offset, IndentLevel); + // vfptr (for Microsoft C++ ABI) + OS << '(' << *RD << " vftable pointer)\n"; + } + + // Dump (non-virtual) bases + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + assert(!I->getType()->isDependentType() && + "Cannot layout class with dependent bases."); + if (I->isVirtual()) + continue; + + const CXXRecordDecl *Base = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base); + + DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel, + Base == PrimaryBase ? "(primary base)" : "(base)", + /*IncludeVirtualBases=*/false); + } + + // vbptr (for Microsoft C++ ABI) + if (HasOwnVBPtr) { + PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel); + OS << '(' << *RD << " vbtable pointer)\n"; + } + + // Dump fields. + uint64_t FieldNo = 0; + for (CXXRecordDecl::field_iterator I = RD->field_begin(), + E = RD->field_end(); I != E; ++I, ++FieldNo) { + const FieldDecl &Field = **I; + CharUnits FieldOffset = Offset + + C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo)); + + if (const RecordType *RT = Field.getType()->getAs<RecordType>()) { + if (const CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(RT->getDecl())) { + DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel, + Field.getName().data(), + /*IncludeVirtualBases=*/true); + continue; + } + } + + PrintOffset(OS, FieldOffset, IndentLevel); + OS << Field.getType().getAsString() << ' ' << Field << '\n'; + } + + if (!IncludeVirtualBases) + return; + + // Dump virtual bases. 
+ const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps = + Layout.getVBaseOffsetsMap(); + for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), + E = RD->vbases_end(); I != E; ++I) { + assert(I->isVirtual() && "Found non-virtual class!"); + const CXXRecordDecl *VBase = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase); + + if (vtordisps.find(VBase)->second.hasVtorDisp()) { + PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel); + OS << "(vtordisp for vbase " << *VBase << ")\n"; + } + + DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel, + VBase == PrimaryBase ? + "(primary virtual base)" : "(virtual base)", + /*IncludeVirtualBases=*/false); + } + + PrintIndentNoOffset(OS, IndentLevel - 1); + OS << "[sizeof=" << Layout.getSize().getQuantity(); + if (!isMsLayout(RD)) + OS << ", dsize=" << Layout.getDataSize().getQuantity(); + OS << ", align=" << Layout.getAlignment().getQuantity() << '\n'; + + PrintIndentNoOffset(OS, IndentLevel - 1); + OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity(); + OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << "]\n"; + OS << '\n'; +} + +void ASTContext::DumpRecordLayout(const RecordDecl *RD, + raw_ostream &OS, + bool Simple) const { + const ASTRecordLayout &Info = getASTRecordLayout(RD); + + if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) + if (!Simple) + return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0, + /*IncludeVirtualBases=*/true); + + OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n"; + if (!Simple) { + OS << "Record: "; + RD->dump(); + } + OS << "\nLayout: "; + OS << "<ASTRecordLayout\n"; + OS << " Size:" << toBits(Info.getSize()) << "\n"; + if (!isMsLayout(RD)) + OS << " DataSize:" << toBits(Info.getDataSize()) << "\n"; + OS << " Alignment:" << toBits(Info.getAlignment()) << "\n"; + OS << " FieldOffsets: ["; + for (unsigned i = 0, e = 
Info.getFieldCount(); i != e; ++i) { + if (i) OS << ", "; + OS << Info.getFieldOffset(i); + } + OS << "]>\n"; +} diff --git a/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp new file mode 100644 index 000000000000..671207a7f2d9 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp @@ -0,0 +1,128 @@ +//===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Describes whether the identifier locations for a selector are "standard" +// or not. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/SelectorLocationsKind.h" +#include "clang/AST/Expr.h" + +using namespace clang; + +static SourceLocation getStandardSelLoc(unsigned Index, + Selector Sel, + bool WithArgSpace, + SourceLocation ArgLoc, + SourceLocation EndLoc) { + unsigned NumSelArgs = Sel.getNumArgs(); + if (NumSelArgs == 0) { + assert(Index == 0); + if (EndLoc.isInvalid()) + return SourceLocation(); + IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0); + unsigned Len = II ? II->getLength() : 0; + return EndLoc.getLocWithOffset(-Len); + } + + assert(Index < NumSelArgs); + if (ArgLoc.isInvalid()) + return SourceLocation(); + IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); + unsigned Len = /* selector id */ (II ? 
II->getLength() : 0) + /* ':' */ 1; + if (WithArgSpace) + ++Len; + return ArgLoc.getLocWithOffset(-Len); +} + +namespace { + +template <typename T> +SourceLocation getArgLoc(T* Arg); + +template <> +SourceLocation getArgLoc<Expr>(Expr *Arg) { + return Arg->getLocStart(); +} + +template <> +SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) { + SourceLocation Loc = Arg->getLocStart(); + if (Loc.isInvalid()) + return Loc; + // -1 to point to left paren of the method parameter's type. + return Loc.getLocWithOffset(-1); +} + +template <typename T> +SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) { + return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation(); +} + +template <typename T> +SelectorLocationsKind hasStandardSelLocs(Selector Sel, + ArrayRef<SourceLocation> SelLocs, + ArrayRef<T *> Args, + SourceLocation EndLoc) { + // Are selector locations in standard position with no space between args ? + unsigned i; + for (i = 0; i != SelLocs.size(); ++i) { + if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/false, + Args, EndLoc)) + break; + } + if (i == SelLocs.size()) + return SelLoc_StandardNoSpace; + + // Are selector locations in standard position with space between args ? 
+ for (i = 0; i != SelLocs.size(); ++i) { + if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/true, + Args, EndLoc)) + return SelLoc_NonStandard; + } + + return SelLoc_StandardWithSpace; +} + +} // anonymous namespace + +SelectorLocationsKind +clang::hasStandardSelectorLocs(Selector Sel, + ArrayRef<SourceLocation> SelLocs, + ArrayRef<Expr *> Args, + SourceLocation EndLoc) { + return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc); +} + +SourceLocation clang::getStandardSelectorLoc(unsigned Index, + Selector Sel, + bool WithArgSpace, + ArrayRef<Expr *> Args, + SourceLocation EndLoc) { + return getStandardSelLoc(Index, Sel, WithArgSpace, + getArgLoc(Index, Args), EndLoc); +} + +SelectorLocationsKind +clang::hasStandardSelectorLocs(Selector Sel, + ArrayRef<SourceLocation> SelLocs, + ArrayRef<ParmVarDecl *> Args, + SourceLocation EndLoc) { + return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc); +} + +SourceLocation clang::getStandardSelectorLoc(unsigned Index, + Selector Sel, + bool WithArgSpace, + ArrayRef<ParmVarDecl *> Args, + SourceLocation EndLoc) { + return getStandardSelLoc(Index, Sel, WithArgSpace, + getArgLoc(Index, Args), EndLoc); +} diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp new file mode 100644 index 000000000000..de851615cb7a --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp @@ -0,0 +1,1220 @@ +//===--- Stmt.cpp - Statement AST Node Implementation ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Stmt class and statement subclasses. 
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;

// Per-statement-class bookkeeping used by -print-stats: the class name, how
// many instances were created, and the sizeof of each instance.
static struct StmtClassNameTable {
  const char *Name;
  unsigned Counter;
  unsigned Size;
} StmtClassInfo[Stmt::lastStmtConstant+1];

/// Return the stats-table entry for statement class \p E, lazily filling in
/// the Name and Size columns for every class on first use.
/// NOTE(review): not thread-safe — the first-use initialization is guarded
/// only by a plain bool, which matches the rest of this stats machinery.
static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
  static bool Initialized = false;
  if (Initialized)
    return StmtClassInfo[E];

  // Initialize the table on the first use.
  Initialized = true;
#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
  StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS;    \
  StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
#include "clang/AST/StmtNodes.inc"

  return StmtClassInfo[E];
}

// Statements are allocated in the ASTContext's arena, never with global new.
void *Stmt::operator new(size_t bytes, const ASTContext& C,
                         unsigned alignment) {
  return ::operator new(bytes, C, alignment);
}

const char *Stmt::getStmtClassName() const {
  return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
}

void Stmt::PrintStats() {
  // Ensure the table is primed.
+ getStmtInfoTableEntry(Stmt::NullStmtClass); + + unsigned sum = 0; + llvm::errs() << "\n*** Stmt/Expr Stats:\n"; + for (int i = 0; i != Stmt::lastStmtConstant+1; i++) { + if (StmtClassInfo[i].Name == 0) continue; + sum += StmtClassInfo[i].Counter; + } + llvm::errs() << " " << sum << " stmts/exprs total.\n"; + sum = 0; + for (int i = 0; i != Stmt::lastStmtConstant+1; i++) { + if (StmtClassInfo[i].Name == 0) continue; + if (StmtClassInfo[i].Counter == 0) continue; + llvm::errs() << " " << StmtClassInfo[i].Counter << " " + << StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size + << " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size + << " bytes)\n"; + sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size; + } + + llvm::errs() << "Total bytes = " << sum << "\n"; +} + +void Stmt::addStmtClass(StmtClass s) { + ++getStmtInfoTableEntry(s).Counter; +} + +bool Stmt::StatisticsEnabled = false; +void Stmt::EnableStatistics() { + StatisticsEnabled = true; +} + +Stmt *Stmt::IgnoreImplicit() { + Stmt *s = this; + + if (ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(s)) + s = ewc->getSubExpr(); + + while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(s)) + s = ice->getSubExpr(); + + return s; +} + +/// \brief Strip off all label-like statements. +/// +/// This will strip off label statements, case statements, attributed +/// statements and default statements recursively. +const Stmt *Stmt::stripLabelLikeStatements() const { + const Stmt *S = this; + while (true) { + if (const LabelStmt *LS = dyn_cast<LabelStmt>(S)) + S = LS->getSubStmt(); + else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) + S = SC->getSubStmt(); + else if (const AttributedStmt *AS = dyn_cast<AttributedStmt>(S)) + S = AS->getSubStmt(); + else + return S; + } +} + +namespace { + struct good {}; + struct bad {}; + + // These silly little functions have to be static inline to suppress + // unused warnings, and they have to be defined to suppress other + // warnings. 
+ static inline good is_good(good) { return good(); } + + typedef Stmt::child_range children_t(); + template <class T> good implements_children(children_t T::*) { + return good(); + } + LLVM_ATTRIBUTE_UNUSED + static inline bad implements_children(children_t Stmt::*) { + return bad(); + } + + typedef SourceLocation getLocStart_t() const; + template <class T> good implements_getLocStart(getLocStart_t T::*) { + return good(); + } + LLVM_ATTRIBUTE_UNUSED + static inline bad implements_getLocStart(getLocStart_t Stmt::*) { + return bad(); + } + + typedef SourceLocation getLocEnd_t() const; + template <class T> good implements_getLocEnd(getLocEnd_t T::*) { + return good(); + } + LLVM_ATTRIBUTE_UNUSED + static inline bad implements_getLocEnd(getLocEnd_t Stmt::*) { + return bad(); + } + +#define ASSERT_IMPLEMENTS_children(type) \ + (void) is_good(implements_children(&type::children)) +#define ASSERT_IMPLEMENTS_getLocStart(type) \ + (void) is_good(implements_getLocStart(&type::getLocStart)) +#define ASSERT_IMPLEMENTS_getLocEnd(type) \ + (void) is_good(implements_getLocEnd(&type::getLocEnd)) +} + +/// Check whether the various Stmt classes implement their member +/// functions. +LLVM_ATTRIBUTE_UNUSED +static inline void check_implementations() { +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + ASSERT_IMPLEMENTS_children(type); \ + ASSERT_IMPLEMENTS_getLocStart(type); \ + ASSERT_IMPLEMENTS_getLocEnd(type); +#include "clang/AST/StmtNodes.inc" +} + +Stmt::child_range Stmt::children() { + switch (getStmtClass()) { + case Stmt::NoStmtClass: llvm_unreachable("statement without class"); +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + case Stmt::type##Class: \ + return static_cast<type*>(this)->children(); +#include "clang/AST/StmtNodes.inc" + } + llvm_unreachable("unknown statement kind!"); +} + +// Amusing macro metaprogramming hack: check whether a class provides +// a more specific implementation of getSourceRange. +// +// See also Expr.cpp:getExprLoc(). 
+namespace { + /// This implementation is used when a class provides a custom + /// implementation of getSourceRange. + template <class S, class T> + SourceRange getSourceRangeImpl(const Stmt *stmt, + SourceRange (T::*v)() const) { + return static_cast<const S*>(stmt)->getSourceRange(); + } + + /// This implementation is used when a class doesn't provide a custom + /// implementation of getSourceRange. Overload resolution should pick it over + /// the implementation above because it's more specialized according to + /// function template partial ordering. + template <class S> + SourceRange getSourceRangeImpl(const Stmt *stmt, + SourceRange (Stmt::*v)() const) { + return SourceRange(static_cast<const S*>(stmt)->getLocStart(), + static_cast<const S*>(stmt)->getLocEnd()); + } +} + +SourceRange Stmt::getSourceRange() const { + switch (getStmtClass()) { + case Stmt::NoStmtClass: llvm_unreachable("statement without class"); +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + case Stmt::type##Class: \ + return getSourceRangeImpl<type>(this, &type::getSourceRange); +#include "clang/AST/StmtNodes.inc" + } + llvm_unreachable("unknown statement kind!"); +} + +SourceLocation Stmt::getLocStart() const { +// llvm::errs() << "getLocStart() for " << getStmtClassName() << "\n"; + switch (getStmtClass()) { + case Stmt::NoStmtClass: llvm_unreachable("statement without class"); +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + case Stmt::type##Class: \ + return static_cast<const type*>(this)->getLocStart(); +#include "clang/AST/StmtNodes.inc" + } + llvm_unreachable("unknown statement kind"); +} + +SourceLocation Stmt::getLocEnd() const { + switch (getStmtClass()) { + case Stmt::NoStmtClass: llvm_unreachable("statement without class"); +#define ABSTRACT_STMT(type) +#define STMT(type, base) \ + case Stmt::type##Class: \ + return static_cast<const type*>(this)->getLocEnd(); +#include "clang/AST/StmtNodes.inc" + } + llvm_unreachable("unknown statement kind"); +} + 
+CompoundStmt::CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, + SourceLocation LB, SourceLocation RB) + : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) { + CompoundStmtBits.NumStmts = Stmts.size(); + assert(CompoundStmtBits.NumStmts == Stmts.size() && + "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!"); + + if (Stmts.size() == 0) { + Body = 0; + return; + } + + Body = new (C) Stmt*[Stmts.size()]; + std::copy(Stmts.begin(), Stmts.end(), Body); +} + +void CompoundStmt::setStmts(const ASTContext &C, Stmt **Stmts, + unsigned NumStmts) { + if (this->Body) + C.Deallocate(Body); + this->CompoundStmtBits.NumStmts = NumStmts; + + Body = new (C) Stmt*[NumStmts]; + memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts); +} + +const char *LabelStmt::getName() const { + return getDecl()->getIdentifier()->getNameStart(); +} + +AttributedStmt *AttributedStmt::Create(const ASTContext &C, SourceLocation Loc, + ArrayRef<const Attr*> Attrs, + Stmt *SubStmt) { + void *Mem = C.Allocate(sizeof(AttributedStmt) + + sizeof(Attr*) * (Attrs.size() - 1), + llvm::alignOf<AttributedStmt>()); + return new (Mem) AttributedStmt(Loc, Attrs, SubStmt); +} + +AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C, + unsigned NumAttrs) { + assert(NumAttrs > 0 && "NumAttrs should be greater than zero"); + void *Mem = C.Allocate(sizeof(AttributedStmt) + + sizeof(Attr*) * (NumAttrs - 1), + llvm::alignOf<AttributedStmt>()); + return new (Mem) AttributedStmt(EmptyShell(), NumAttrs); +} + +std::string AsmStmt::generateAsmString(const ASTContext &C) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return gccAsmStmt->generateAsmString(C); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->generateAsmString(C); + llvm_unreachable("unknown asm statement kind!"); +} + +StringRef AsmStmt::getOutputConstraint(unsigned i) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return 
gccAsmStmt->getOutputConstraint(i); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->getOutputConstraint(i); + llvm_unreachable("unknown asm statement kind!"); +} + +const Expr *AsmStmt::getOutputExpr(unsigned i) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return gccAsmStmt->getOutputExpr(i); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->getOutputExpr(i); + llvm_unreachable("unknown asm statement kind!"); +} + +StringRef AsmStmt::getInputConstraint(unsigned i) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return gccAsmStmt->getInputConstraint(i); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->getInputConstraint(i); + llvm_unreachable("unknown asm statement kind!"); +} + +const Expr *AsmStmt::getInputExpr(unsigned i) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return gccAsmStmt->getInputExpr(i); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->getInputExpr(i); + llvm_unreachable("unknown asm statement kind!"); +} + +StringRef AsmStmt::getClobber(unsigned i) const { + if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this)) + return gccAsmStmt->getClobber(i); + if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this)) + return msAsmStmt->getClobber(i); + llvm_unreachable("unknown asm statement kind!"); +} + +/// getNumPlusOperands - Return the number of output operands that have a "+" +/// constraint. 
+unsigned AsmStmt::getNumPlusOperands() const { + unsigned Res = 0; + for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) + if (isOutputPlusConstraint(i)) + ++Res; + return Res; +} + +StringRef GCCAsmStmt::getClobber(unsigned i) const { + return getClobberStringLiteral(i)->getString(); +} + +Expr *GCCAsmStmt::getOutputExpr(unsigned i) { + return cast<Expr>(Exprs[i]); +} + +/// getOutputConstraint - Return the constraint string for the specified +/// output operand. All output constraints are known to be non-empty (either +/// '=' or '+'). +StringRef GCCAsmStmt::getOutputConstraint(unsigned i) const { + return getOutputConstraintLiteral(i)->getString(); +} + +Expr *GCCAsmStmt::getInputExpr(unsigned i) { + return cast<Expr>(Exprs[i + NumOutputs]); +} +void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) { + Exprs[i + NumOutputs] = E; +} + +/// getInputConstraint - Return the specified input constraint. Unlike output +/// constraints, these can be empty. +StringRef GCCAsmStmt::getInputConstraint(unsigned i) const { + return getInputConstraintLiteral(i)->getString(); +} + +void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C, + IdentifierInfo **Names, + StringLiteral **Constraints, + Stmt **Exprs, + unsigned NumOutputs, + unsigned NumInputs, + StringLiteral **Clobbers, + unsigned NumClobbers) { + this->NumOutputs = NumOutputs; + this->NumInputs = NumInputs; + this->NumClobbers = NumClobbers; + + unsigned NumExprs = NumOutputs + NumInputs; + + C.Deallocate(this->Names); + this->Names = new (C) IdentifierInfo*[NumExprs]; + std::copy(Names, Names + NumExprs, this->Names); + + C.Deallocate(this->Exprs); + this->Exprs = new (C) Stmt*[NumExprs]; + std::copy(Exprs, Exprs + NumExprs, this->Exprs); + + C.Deallocate(this->Constraints); + this->Constraints = new (C) StringLiteral*[NumExprs]; + std::copy(Constraints, Constraints + NumExprs, this->Constraints); + + C.Deallocate(this->Clobbers); + this->Clobbers = new (C) StringLiteral*[NumClobbers]; + 
std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers); +} + +/// getNamedOperand - Given a symbolic operand reference like %[foo], +/// translate this into a numeric value needed to reference the same operand. +/// This returns -1 if the operand name is invalid. +int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const { + unsigned NumPlusOperands = 0; + + // Check if this is an output operand. + for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) { + if (getOutputName(i) == SymbolicName) + return i; + } + + for (unsigned i = 0, e = getNumInputs(); i != e; ++i) + if (getInputName(i) == SymbolicName) + return getNumOutputs() + NumPlusOperands + i; + + // Not found. + return -1; +} + +/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing +/// it into pieces. If the asm string is erroneous, emit errors and return +/// true, otherwise return false. +unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces, + const ASTContext &C, unsigned &DiagOffs) const { + StringRef Str = getAsmString()->getString(); + const char *StrStart = Str.begin(); + const char *StrEnd = Str.end(); + const char *CurPtr = StrStart; + + // "Simple" inline asms have no constraints or operands, just convert the asm + // string to escape $'s. + if (isSimple()) { + std::string Result; + for (; CurPtr != StrEnd; ++CurPtr) { + switch (*CurPtr) { + case '$': + Result += "$$"; + break; + default: + Result += *CurPtr; + break; + } + } + Pieces.push_back(AsmStringPiece(Result)); + return 0; + } + + // CurStringPiece - The current string that we are building up as we scan the + // asm string. + std::string CurStringPiece; + + bool HasVariants = !C.getTargetInfo().hasNoAsmVariants(); + + while (1) { + // Done with the string? 
+ if (CurPtr == StrEnd) { + if (!CurStringPiece.empty()) + Pieces.push_back(AsmStringPiece(CurStringPiece)); + return 0; + } + + char CurChar = *CurPtr++; + switch (CurChar) { + case '$': CurStringPiece += "$$"; continue; + case '{': CurStringPiece += (HasVariants ? "$(" : "{"); continue; + case '|': CurStringPiece += (HasVariants ? "$|" : "|"); continue; + case '}': CurStringPiece += (HasVariants ? "$)" : "}"); continue; + case '%': + break; + default: + CurStringPiece += CurChar; + continue; + } + + // Escaped "%" character in asm string. + if (CurPtr == StrEnd) { + // % at end of string is invalid (no escape). + DiagOffs = CurPtr-StrStart-1; + return diag::err_asm_invalid_escape; + } + + char EscapedChar = *CurPtr++; + if (EscapedChar == '%') { // %% -> % + // Escaped percentage sign. + CurStringPiece += '%'; + continue; + } + + if (EscapedChar == '=') { // %= -> Generate an unique ID. + CurStringPiece += "${:uid}"; + continue; + } + + // Otherwise, we have an operand. If we have accumulated a string so far, + // add it to the Pieces list. + if (!CurStringPiece.empty()) { + Pieces.push_back(AsmStringPiece(CurStringPiece)); + CurStringPiece.clear(); + } + + // Handle %x4 and %x[foo] by capturing x as the modifier character. + char Modifier = '\0'; + if (isLetter(EscapedChar)) { + if (CurPtr == StrEnd) { // Premature end. + DiagOffs = CurPtr-StrStart-1; + return diag::err_asm_invalid_escape; + } + Modifier = EscapedChar; + EscapedChar = *CurPtr++; + } + + if (isDigit(EscapedChar)) { + // %n - Assembler operand n + unsigned N = 0; + + --CurPtr; + while (CurPtr != StrEnd && isDigit(*CurPtr)) + N = N*10 + ((*CurPtr++)-'0'); + + unsigned NumOperands = + getNumOutputs() + getNumPlusOperands() + getNumInputs(); + if (N >= NumOperands) { + DiagOffs = CurPtr-StrStart-1; + return diag::err_asm_invalid_operand_number; + } + + Pieces.push_back(AsmStringPiece(N, Modifier)); + continue; + } + + // Handle %[foo], a symbolic operand reference. 
+ if (EscapedChar == '[') { + DiagOffs = CurPtr-StrStart-1; + + // Find the ']'. + const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr); + if (NameEnd == 0) + return diag::err_asm_unterminated_symbolic_operand_name; + if (NameEnd == CurPtr) + return diag::err_asm_empty_symbolic_operand_name; + + StringRef SymbolicName(CurPtr, NameEnd - CurPtr); + + int N = getNamedOperand(SymbolicName); + if (N == -1) { + // Verify that an operand with that name exists. + DiagOffs = CurPtr-StrStart; + return diag::err_asm_unknown_symbolic_operand_name; + } + Pieces.push_back(AsmStringPiece(N, Modifier)); + + CurPtr = NameEnd+1; + continue; + } + + DiagOffs = CurPtr-StrStart-1; + return diag::err_asm_invalid_escape; + } +} + +/// Assemble final IR asm string (GCC-style). +std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const { + // Analyze the asm string to decompose it into its pieces. We know that Sema + // has already done this, so it is guaranteed to be successful. + SmallVector<GCCAsmStmt::AsmStringPiece, 4> Pieces; + unsigned DiagOffs; + AnalyzeAsmString(Pieces, C, DiagOffs); + + std::string AsmString; + for (unsigned i = 0, e = Pieces.size(); i != e; ++i) { + if (Pieces[i].isString()) + AsmString += Pieces[i].getString(); + else if (Pieces[i].getModifier() == '\0') + AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo()); + else + AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' + + Pieces[i].getModifier() + '}'; + } + return AsmString; +} + +/// Assemble final IR asm string (MS-style). +std::string MSAsmStmt::generateAsmString(const ASTContext &C) const { + // FIXME: This needs to be translated into the IR string representation. 
+ return AsmStr; +} + +Expr *MSAsmStmt::getOutputExpr(unsigned i) { + return cast<Expr>(Exprs[i]); +} + +Expr *MSAsmStmt::getInputExpr(unsigned i) { + return cast<Expr>(Exprs[i + NumOutputs]); +} +void MSAsmStmt::setInputExpr(unsigned i, Expr *E) { + Exprs[i + NumOutputs] = E; +} + +QualType CXXCatchStmt::getCaughtType() const { + if (ExceptionDecl) + return ExceptionDecl->getType(); + return QualType(); +} + +//===----------------------------------------------------------------------===// +// Constructors +//===----------------------------------------------------------------------===// + +GCCAsmStmt::GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, + bool issimple, bool isvolatile, unsigned numoutputs, + unsigned numinputs, IdentifierInfo **names, + StringLiteral **constraints, Expr **exprs, + StringLiteral *asmstr, unsigned numclobbers, + StringLiteral **clobbers, SourceLocation rparenloc) + : AsmStmt(GCCAsmStmtClass, asmloc, issimple, isvolatile, numoutputs, + numinputs, numclobbers), RParenLoc(rparenloc), AsmStr(asmstr) { + + unsigned NumExprs = NumOutputs + NumInputs; + + Names = new (C) IdentifierInfo*[NumExprs]; + std::copy(names, names + NumExprs, Names); + + Exprs = new (C) Stmt*[NumExprs]; + std::copy(exprs, exprs + NumExprs, Exprs); + + Constraints = new (C) StringLiteral*[NumExprs]; + std::copy(constraints, constraints + NumExprs, Constraints); + + Clobbers = new (C) StringLiteral*[NumClobbers]; + std::copy(clobbers, clobbers + NumClobbers, Clobbers); +} + +MSAsmStmt::MSAsmStmt(const ASTContext &C, SourceLocation asmloc, + SourceLocation lbraceloc, bool issimple, bool isvolatile, + ArrayRef<Token> asmtoks, unsigned numoutputs, + unsigned numinputs, + ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, + StringRef asmstr, ArrayRef<StringRef> clobbers, + SourceLocation endloc) + : AsmStmt(MSAsmStmtClass, asmloc, issimple, isvolatile, numoutputs, + numinputs, clobbers.size()), LBraceLoc(lbraceloc), + EndLoc(endloc), NumAsmToks(asmtoks.size()) { + 
+ initialize(C, asmstr, asmtoks, constraints, exprs, clobbers); +} + +static StringRef copyIntoContext(const ASTContext &C, StringRef str) { + size_t size = str.size(); + char *buffer = new (C) char[size]; + memcpy(buffer, str.data(), size); + return StringRef(buffer, size); +} + +void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr, + ArrayRef<Token> asmtoks, + ArrayRef<StringRef> constraints, + ArrayRef<Expr*> exprs, + ArrayRef<StringRef> clobbers) { + assert(NumAsmToks == asmtoks.size()); + assert(NumClobbers == clobbers.size()); + + unsigned NumExprs = exprs.size(); + assert(NumExprs == NumOutputs + NumInputs); + assert(NumExprs == constraints.size()); + + AsmStr = copyIntoContext(C, asmstr); + + Exprs = new (C) Stmt*[NumExprs]; + for (unsigned i = 0, e = NumExprs; i != e; ++i) + Exprs[i] = exprs[i]; + + AsmToks = new (C) Token[NumAsmToks]; + for (unsigned i = 0, e = NumAsmToks; i != e; ++i) + AsmToks[i] = asmtoks[i]; + + Constraints = new (C) StringRef[NumExprs]; + for (unsigned i = 0, e = NumExprs; i != e; ++i) { + Constraints[i] = copyIntoContext(C, constraints[i]); + } + + Clobbers = new (C) StringRef[NumClobbers]; + for (unsigned i = 0, e = NumClobbers; i != e; ++i) { + // FIXME: Avoid the allocation/copy if at all possible. 
+ Clobbers[i] = copyIntoContext(C, clobbers[i]); + } +} + +ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect, + Stmt *Body, SourceLocation FCL, + SourceLocation RPL) +: Stmt(ObjCForCollectionStmtClass) { + SubExprs[ELEM] = Elem; + SubExprs[COLLECTION] = Collect; + SubExprs[BODY] = Body; + ForLoc = FCL; + RParenLoc = RPL; +} + +ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt, + Stmt **CatchStmts, unsigned NumCatchStmts, + Stmt *atFinallyStmt) + : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc), + NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != 0) +{ + Stmt **Stmts = getStmts(); + Stmts[0] = atTryStmt; + for (unsigned I = 0; I != NumCatchStmts; ++I) + Stmts[I + 1] = CatchStmts[I]; + + if (HasFinally) + Stmts[NumCatchStmts + 1] = atFinallyStmt; +} + +ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context, + SourceLocation atTryLoc, + Stmt *atTryStmt, + Stmt **CatchStmts, + unsigned NumCatchStmts, + Stmt *atFinallyStmt) { + unsigned Size = sizeof(ObjCAtTryStmt) + + (1 + NumCatchStmts + (atFinallyStmt != 0)) * sizeof(Stmt *); + void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>()); + return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts, + atFinallyStmt); +} + +ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context, + unsigned NumCatchStmts, + bool HasFinally) { + unsigned Size = sizeof(ObjCAtTryStmt) + + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *); + void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>()); + return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally); +} + +SourceLocation ObjCAtTryStmt::getLocEnd() const { + if (HasFinally) + return getFinallyStmt()->getLocEnd(); + if (NumCatchStmts) + return getCatchStmt(NumCatchStmts - 1)->getLocEnd(); + return getTryBody()->getLocEnd(); +} + +CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc, + Stmt *tryBlock, ArrayRef<Stmt*> handlers) { + std::size_t Size = 
sizeof(CXXTryStmt); + Size += ((handlers.size() + 1) * sizeof(Stmt)); + + void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>()); + return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers); +} + +CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty, + unsigned numHandlers) { + std::size_t Size = sizeof(CXXTryStmt); + Size += ((numHandlers + 1) * sizeof(Stmt)); + + void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>()); + return new (Mem) CXXTryStmt(Empty, numHandlers); +} + +CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock, + ArrayRef<Stmt*> handlers) + : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) { + Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1); + Stmts[0] = tryBlock; + std::copy(handlers.begin(), handlers.end(), Stmts + 1); +} + +CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt, + Expr *Cond, Expr *Inc, DeclStmt *LoopVar, + Stmt *Body, SourceLocation FL, + SourceLocation CL, SourceLocation RPL) + : Stmt(CXXForRangeStmtClass), ForLoc(FL), ColonLoc(CL), RParenLoc(RPL) { + SubExprs[RANGE] = Range; + SubExprs[BEGINEND] = BeginEndStmt; + SubExprs[COND] = Cond; + SubExprs[INC] = Inc; + SubExprs[LOOPVAR] = LoopVar; + SubExprs[BODY] = Body; +} + +Expr *CXXForRangeStmt::getRangeInit() { + DeclStmt *RangeStmt = getRangeStmt(); + VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl()); + assert(RangeDecl &&& "for-range should have a single var decl"); + return RangeDecl->getInit(); +} + +const Expr *CXXForRangeStmt::getRangeInit() const { + return const_cast<CXXForRangeStmt*>(this)->getRangeInit(); +} + +VarDecl *CXXForRangeStmt::getLoopVariable() { + Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl(); + assert(LV && "No loop variable in CXXForRangeStmt"); + return cast<VarDecl>(LV); +} + +const VarDecl *CXXForRangeStmt::getLoopVariable() const { + return const_cast<CXXForRangeStmt*>(this)->getLoopVariable(); +} + +IfStmt::IfStmt(const ASTContext &C, 
SourceLocation IL, VarDecl *var, Expr *cond, + Stmt *then, SourceLocation EL, Stmt *elsev) + : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL) +{ + setConditionVariable(C, var); + SubExprs[COND] = cond; + SubExprs[THEN] = then; + SubExprs[ELSE] = elsev; +} + +VarDecl *IfStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SourceRange VarRange = V->getSourceRange(); + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(), + VarRange.getEnd()); +} + +ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, + Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, + SourceLocation RP) + : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP) +{ + SubExprs[INIT] = Init; + setConditionVariable(C, condVar); + SubExprs[COND] = Cond; + SubExprs[INC] = Inc; + SubExprs[BODY] = Body; +} + +VarDecl *ForStmt::getConditionVariable() const { + if (!SubExprs[CONDVAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[CONDVAR] = 0; + return; + } + + SourceRange VarRange = V->getSourceRange(); + SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(), + VarRange.getEnd()); +} + +SwitchStmt::SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond) + : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0) +{ + setConditionVariable(C, Var); + SubExprs[COND] = cond; + SubExprs[BODY] = NULL; +} + +VarDecl *SwitchStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void SwitchStmt::setConditionVariable(const 
ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SourceRange VarRange = V->getSourceRange(); + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(), + VarRange.getEnd()); +} + +Stmt *SwitchCase::getSubStmt() { + if (isa<CaseStmt>(this)) + return cast<CaseStmt>(this)->getSubStmt(); + return cast<DefaultStmt>(this)->getSubStmt(); +} + +WhileStmt::WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, + SourceLocation WL) + : Stmt(WhileStmtClass) { + setConditionVariable(C, Var); + SubExprs[COND] = cond; + SubExprs[BODY] = body; + WhileLoc = WL; +} + +VarDecl *WhileStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SourceRange VarRange = V->getSourceRange(); + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(), + VarRange.getEnd()); +} + +// IndirectGotoStmt +LabelDecl *IndirectGotoStmt::getConstantTarget() { + if (AddrLabelExpr *E = + dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts())) + return E->getLabel(); + return 0; +} + +// ReturnStmt +const Expr* ReturnStmt::getRetValue() const { + return cast_or_null<Expr>(RetExpr); +} +Expr* ReturnStmt::getRetValue() { + return cast_or_null<Expr>(RetExpr); +} + +SEHTryStmt::SEHTryStmt(bool IsCXXTry, + SourceLocation TryLoc, + Stmt *TryBlock, + Stmt *Handler) + : Stmt(SEHTryStmtClass), + IsCXXTry(IsCXXTry), + TryLoc(TryLoc) +{ + Children[TRY] = TryBlock; + Children[HANDLER] = Handler; +} + +SEHTryStmt* SEHTryStmt::Create(const ASTContext &C, bool IsCXXTry, + SourceLocation TryLoc, Stmt *TryBlock, + Stmt *Handler) { + return new(C) SEHTryStmt(IsCXXTry,TryLoc,TryBlock,Handler); +} + +SEHExceptStmt* SEHTryStmt::getExceptHandler() const { + return dyn_cast<SEHExceptStmt>(getHandler()); +} + 
+SEHFinallyStmt* SEHTryStmt::getFinallyHandler() const { + return dyn_cast<SEHFinallyStmt>(getHandler()); +} + +SEHExceptStmt::SEHExceptStmt(SourceLocation Loc, + Expr *FilterExpr, + Stmt *Block) + : Stmt(SEHExceptStmtClass), + Loc(Loc) +{ + Children[FILTER_EXPR] = FilterExpr; + Children[BLOCK] = Block; +} + +SEHExceptStmt* SEHExceptStmt::Create(const ASTContext &C, SourceLocation Loc, + Expr *FilterExpr, Stmt *Block) { + return new(C) SEHExceptStmt(Loc,FilterExpr,Block); +} + +SEHFinallyStmt::SEHFinallyStmt(SourceLocation Loc, + Stmt *Block) + : Stmt(SEHFinallyStmtClass), + Loc(Loc), + Block(Block) +{} + +SEHFinallyStmt* SEHFinallyStmt::Create(const ASTContext &C, SourceLocation Loc, + Stmt *Block) { + return new(C)SEHFinallyStmt(Loc,Block); +} + +CapturedStmt::Capture *CapturedStmt::getStoredCaptures() const { + unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1); + + // Offset of the first Capture object. + unsigned FirstCaptureOffset = + llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>()); + + return reinterpret_cast<Capture *>( + reinterpret_cast<char *>(const_cast<CapturedStmt *>(this)) + + FirstCaptureOffset); +} + +CapturedStmt::CapturedStmt(Stmt *S, CapturedRegionKind Kind, + ArrayRef<Capture> Captures, + ArrayRef<Expr *> CaptureInits, + CapturedDecl *CD, + RecordDecl *RD) + : Stmt(CapturedStmtClass), NumCaptures(Captures.size()), + CapDeclAndKind(CD, Kind), TheRecordDecl(RD) { + assert( S && "null captured statement"); + assert(CD && "null captured declaration for captured statement"); + assert(RD && "null record declaration for captured statement"); + + // Copy initialization expressions. + Stmt **Stored = getStoredStmts(); + for (unsigned I = 0, N = NumCaptures; I != N; ++I) + *Stored++ = CaptureInits[I]; + + // Copy the statement being captured. + *Stored = S; + + // Copy all Capture objects. 
+ Capture *Buffer = getStoredCaptures(); + std::copy(Captures.begin(), Captures.end(), Buffer); +} + +CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures) + : Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures), + CapDeclAndKind(0, CR_Default), TheRecordDecl(0) { + getStoredStmts()[NumCaptures] = 0; +} + +CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S, + CapturedRegionKind Kind, + ArrayRef<Capture> Captures, + ArrayRef<Expr *> CaptureInits, + CapturedDecl *CD, + RecordDecl *RD) { + // The layout is + // + // ----------------------------------------------------------- + // | CapturedStmt, Init, ..., Init, S, Capture, ..., Capture | + // ----------------^-------------------^---------------------- + // getStoredStmts() getStoredCaptures() + // + // where S is the statement being captured. + // + assert(CaptureInits.size() == Captures.size() && "wrong number of arguments"); + + unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (Captures.size() + 1); + if (!Captures.empty()) { + // Realign for the following Capture array. + Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>()); + Size += sizeof(Capture) * Captures.size(); + } + + void *Mem = Context.Allocate(Size); + return new (Mem) CapturedStmt(S, Kind, Captures, CaptureInits, CD, RD); +} + +CapturedStmt *CapturedStmt::CreateDeserialized(const ASTContext &Context, + unsigned NumCaptures) { + unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1); + if (NumCaptures > 0) { + // Realign for the following Capture array. + Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>()); + Size += sizeof(Capture) * NumCaptures; + } + + void *Mem = Context.Allocate(Size); + return new (Mem) CapturedStmt(EmptyShell(), NumCaptures); +} + +Stmt::child_range CapturedStmt::children() { + // Children are captured field initilizers. 
+ return child_range(getStoredStmts(), getStoredStmts() + NumCaptures); +} + +bool CapturedStmt::capturesVariable(const VarDecl *Var) const { + for (const_capture_iterator I = capture_begin(), + E = capture_end(); I != E; ++I) { + if (!I->capturesVariable()) + continue; + + // This does not handle variable redeclarations. This should be + // extended to capture variables with redeclarations, for example + // a thread-private variable in OpenMP. + if (I->getCapturedVar() == Var) + return true; + } + + return false; +} + +StmtRange OMPClause::children() { + switch(getClauseKind()) { + default : break; +#define OPENMP_CLAUSE(Name, Class) \ + case OMPC_ ## Name : return static_cast<Class *>(this)->children(); +#include "clang/Basic/OpenMPKinds.def" + } + llvm_unreachable("unknown OMPClause"); +} + +OMPPrivateClause *OMPPrivateClause::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc, + ArrayRef<Expr *> VL) { + void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * VL.size(), + llvm::alignOf<OMPPrivateClause>()); + OMPPrivateClause *Clause = new (Mem) OMPPrivateClause(StartLoc, LParenLoc, + EndLoc, VL.size()); + Clause->setVarRefs(VL); + return Clause; +} + +OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C, + unsigned N) { + void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * N, + llvm::alignOf<OMPPrivateClause>()); + return new (Mem) OMPPrivateClause(N); +} + +OMPFirstprivateClause *OMPFirstprivateClause::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc, + ArrayRef<Expr *> VL) { + void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) + + sizeof(Expr *) * VL.size(), + llvm::alignOf<OMPFirstprivateClause>()); + OMPFirstprivateClause *Clause = new (Mem) OMPFirstprivateClause(StartLoc, + LParenLoc, + EndLoc, + VL.size()); + Clause->setVarRefs(VL); + return Clause; +} + +OMPFirstprivateClause 
*OMPFirstprivateClause::CreateEmpty(const ASTContext &C, + unsigned N) { + void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) + sizeof(Expr *) * N, + llvm::alignOf<OMPFirstprivateClause>()); + return new (Mem) OMPFirstprivateClause(N); +} + +OMPSharedClause *OMPSharedClause::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc, + ArrayRef<Expr *> VL) { + void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * VL.size(), + llvm::alignOf<OMPSharedClause>()); + OMPSharedClause *Clause = new (Mem) OMPSharedClause(StartLoc, LParenLoc, + EndLoc, VL.size()); + Clause->setVarRefs(VL); + return Clause; +} + +OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, + unsigned N) { + void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * N, + llvm::alignOf<OMPSharedClause>()); + return new (Mem) OMPSharedClause(N); +} + +void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) { + assert(Clauses.size() == this->Clauses.size() && + "Number of clauses is not the same as the preallocated buffer"); + std::copy(Clauses.begin(), Clauses.end(), this->Clauses.begin()); +} + +OMPParallelDirective *OMPParallelDirective::Create( + const ASTContext &C, + SourceLocation StartLoc, + SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt) { + void *Mem = C.Allocate(sizeof(OMPParallelDirective) + + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *), + llvm::alignOf<OMPParallelDirective>()); + OMPParallelDirective *Dir = new (Mem) OMPParallelDirective(StartLoc, EndLoc, + Clauses.size()); + Dir->setClauses(Clauses); + Dir->setAssociatedStmt(AssociatedStmt); + return Dir; +} + +OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C, + unsigned N, + EmptyShell) { + void *Mem = C.Allocate(sizeof(OMPParallelDirective) + + sizeof(OMPClause *) * N + sizeof(Stmt *), + llvm::alignOf<OMPParallelDirective>()); + return new (Mem) OMPParallelDirective(N); +} 
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp new file mode 100644 index 000000000000..6e85375ed22f --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp @@ -0,0 +1,114 @@ +//===--- StmtIterator.cpp - Iterators for Statements ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines internal methods for StmtIterator. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/StmtIterator.h" +#include "clang/AST/Decl.h" + +using namespace clang; + +// FIXME: Add support for dependent-sized array types in C++? +// Does it even make sense to build a CFG for an uninstantiated template? +static inline const VariableArrayType *FindVA(const Type* t) { + while (const ArrayType *vt = dyn_cast<ArrayType>(t)) { + if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt)) + if (vat->getSizeExpr()) + return vat; + + t = vt->getElementType().getTypePtr(); + } + + return NULL; +} + +void StmtIteratorBase::NextVA() { + assert (getVAPtr()); + + const VariableArrayType *p = getVAPtr(); + p = FindVA(p->getElementType().getTypePtr()); + setVAPtr(p); + + if (p) + return; + + if (inDeclGroup()) { + if (VarDecl* VD = dyn_cast<VarDecl>(*DGI)) + if (VD->Init) + return; + + NextDecl(); + } + else { + assert(inSizeOfTypeVA()); + RawVAPtr = 0; + } +} + +void StmtIteratorBase::NextDecl(bool ImmediateAdvance) { + assert (getVAPtr() == NULL); + assert(inDeclGroup()); + + if (ImmediateAdvance) + ++DGI; + + for ( ; DGI != DGE; ++DGI) + if (HandleDecl(*DGI)) + return; + + RawVAPtr = 0; +} + +bool StmtIteratorBase::HandleDecl(Decl* D) { + if (VarDecl* VD = dyn_cast<VarDecl>(D)) { + if (const VariableArrayType* VAPtr = 
FindVA(VD->getType().getTypePtr())) { + setVAPtr(VAPtr); + return true; + } + + if (VD->getInit()) + return true; + } + else if (TypedefNameDecl* TD = dyn_cast<TypedefNameDecl>(D)) { + if (const VariableArrayType* VAPtr = + FindVA(TD->getUnderlyingType().getTypePtr())) { + setVAPtr(VAPtr); + return true; + } + } + else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) { + if (ECD->getInitExpr()) + return true; + } + + return false; +} + +StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge) + : stmt(0), DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) { + NextDecl(false); +} + +StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t) + : stmt(0), DGI(0), RawVAPtr(SizeOfTypeVAMode) { + RawVAPtr |= reinterpret_cast<uintptr_t>(t); +} + +Stmt*& StmtIteratorBase::GetDeclExpr() const { + if (const VariableArrayType* VAPtr = getVAPtr()) { + assert (VAPtr->SizeExpr); + return const_cast<Stmt*&>(VAPtr->SizeExpr); + } + + assert (inDeclGroup()); + VarDecl* VD = cast<VarDecl>(*DGI); + return *VD->getInitAddress(); +} diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp new file mode 100644 index 000000000000..0ecb5b52c24f --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp @@ -0,0 +1,2011 @@ +//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which +// pretty print the AST back out to C code. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/CharInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/Format.h" +using namespace clang; + +//===----------------------------------------------------------------------===// +// StmtPrinter Visitor +//===----------------------------------------------------------------------===// + +namespace { + class StmtPrinter : public StmtVisitor<StmtPrinter> { + raw_ostream &OS; + unsigned IndentLevel; + clang::PrinterHelper* Helper; + PrintingPolicy Policy; + + public: + StmtPrinter(raw_ostream &os, PrinterHelper* helper, + const PrintingPolicy &Policy, + unsigned Indentation = 0) + : OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy) {} + + void PrintStmt(Stmt *S) { + PrintStmt(S, Policy.Indentation); + } + + void PrintStmt(Stmt *S, int SubIndent) { + IndentLevel += SubIndent; + if (S && isa<Expr>(S)) { + // If this is an expr used in a stmt context, indent and newline it. 
+ Indent(); + Visit(S); + OS << ";\n"; + } else if (S) { + Visit(S); + } else { + Indent() << "<<<NULL STATEMENT>>>\n"; + } + IndentLevel -= SubIndent; + } + + void PrintRawCompoundStmt(CompoundStmt *S); + void PrintRawDecl(Decl *D); + void PrintRawDeclStmt(const DeclStmt *S); + void PrintRawIfStmt(IfStmt *If); + void PrintRawCXXCatchStmt(CXXCatchStmt *Catch); + void PrintCallArgs(CallExpr *E); + void PrintRawSEHExceptHandler(SEHExceptStmt *S); + void PrintRawSEHFinallyStmt(SEHFinallyStmt *S); + + void PrintExpr(Expr *E) { + if (E) + Visit(E); + else + OS << "<null expr>"; + } + + raw_ostream &Indent(int Delta = 0) { + for (int i = 0, e = IndentLevel+Delta; i < e; ++i) + OS << " "; + return OS; + } + + void Visit(Stmt* S) { + if (Helper && Helper->handledStmt(S,OS)) + return; + else StmtVisitor<StmtPrinter>::Visit(S); + } + + void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED { + Indent() << "<<unknown stmt type>>\n"; + } + void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED { + OS << "<<unknown expr type>>"; + } + void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node); + +#define ABSTRACT_STMT(CLASS) +#define STMT(CLASS, PARENT) \ + void Visit##CLASS(CLASS *Node); +#include "clang/AST/StmtNodes.inc" + }; +} + +//===----------------------------------------------------------------------===// +// Stmt printing methods. +//===----------------------------------------------------------------------===// + +/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and +/// with no newline after the }. 
+void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) { + OS << "{\n"; + for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end(); + I != E; ++I) + PrintStmt(*I); + + Indent() << "}"; +} + +void StmtPrinter::PrintRawDecl(Decl *D) { + D->print(OS, Policy, IndentLevel); +} + +void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) { + DeclStmt::const_decl_iterator Begin = S->decl_begin(), End = S->decl_end(); + SmallVector<Decl*, 2> Decls; + for ( ; Begin != End; ++Begin) + Decls.push_back(*Begin); + + Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel); +} + +void StmtPrinter::VisitNullStmt(NullStmt *Node) { + Indent() << ";\n"; +} + +void StmtPrinter::VisitDeclStmt(DeclStmt *Node) { + Indent(); + PrintRawDeclStmt(Node); + OS << ";\n"; +} + +void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) { + Indent(); + PrintRawCompoundStmt(Node); + OS << "\n"; +} + +void StmtPrinter::VisitCaseStmt(CaseStmt *Node) { + Indent(-1) << "case "; + PrintExpr(Node->getLHS()); + if (Node->getRHS()) { + OS << " ... 
"; + PrintExpr(Node->getRHS()); + } + OS << ":\n"; + + PrintStmt(Node->getSubStmt(), 0); +} + +void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) { + Indent(-1) << "default:\n"; + PrintStmt(Node->getSubStmt(), 0); +} + +void StmtPrinter::VisitLabelStmt(LabelStmt *Node) { + Indent(-1) << Node->getName() << ":\n"; + PrintStmt(Node->getSubStmt(), 0); +} + +void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) { + OS << "[["; + bool first = true; + for (ArrayRef<const Attr*>::iterator it = Node->getAttrs().begin(), + end = Node->getAttrs().end(); + it != end; ++it) { + if (!first) { + OS << ", "; + first = false; + } + // TODO: check this + (*it)->printPretty(OS, Policy); + } + OS << "]] "; + PrintStmt(Node->getSubStmt(), 0); +} + +void StmtPrinter::PrintRawIfStmt(IfStmt *If) { + OS << "if ("; + if (const DeclStmt *DS = If->getConditionVariableDeclStmt()) + PrintRawDeclStmt(DS); + else + PrintExpr(If->getCond()); + OS << ')'; + + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) { + OS << ' '; + PrintRawCompoundStmt(CS); + OS << (If->getElse() ? ' ' : '\n'); + } else { + OS << '\n'; + PrintStmt(If->getThen()); + if (If->getElse()) Indent(); + } + + if (Stmt *Else = If->getElse()) { + OS << "else"; + + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) { + OS << ' '; + PrintRawCompoundStmt(CS); + OS << '\n'; + } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) { + OS << ' '; + PrintRawIfStmt(ElseIf); + } else { + OS << '\n'; + PrintStmt(If->getElse()); + } + } +} + +void StmtPrinter::VisitIfStmt(IfStmt *If) { + Indent(); + PrintRawIfStmt(If); +} + +void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) { + Indent() << "switch ("; + if (const DeclStmt *DS = Node->getConditionVariableDeclStmt()) + PrintRawDeclStmt(DS); + else + PrintExpr(Node->getCond()); + OS << ")"; + + // Pretty print compoundstmt bodies (very common). 
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) { + OS << " "; + PrintRawCompoundStmt(CS); + OS << "\n"; + } else { + OS << "\n"; + PrintStmt(Node->getBody()); + } +} + +void StmtPrinter::VisitWhileStmt(WhileStmt *Node) { + Indent() << "while ("; + if (const DeclStmt *DS = Node->getConditionVariableDeclStmt()) + PrintRawDeclStmt(DS); + else + PrintExpr(Node->getCond()); + OS << ")\n"; + PrintStmt(Node->getBody()); +} + +void StmtPrinter::VisitDoStmt(DoStmt *Node) { + Indent() << "do "; + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) { + PrintRawCompoundStmt(CS); + OS << " "; + } else { + OS << "\n"; + PrintStmt(Node->getBody()); + Indent(); + } + + OS << "while ("; + PrintExpr(Node->getCond()); + OS << ");\n"; +} + +void StmtPrinter::VisitForStmt(ForStmt *Node) { + Indent() << "for ("; + if (Node->getInit()) { + if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit())) + PrintRawDeclStmt(DS); + else + PrintExpr(cast<Expr>(Node->getInit())); + } + OS << ";"; + if (Node->getCond()) { + OS << " "; + PrintExpr(Node->getCond()); + } + OS << ";"; + if (Node->getInc()) { + OS << " "; + PrintExpr(Node->getInc()); + } + OS << ") "; + + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) { + PrintRawCompoundStmt(CS); + OS << "\n"; + } else { + OS << "\n"; + PrintStmt(Node->getBody()); + } +} + +void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) { + Indent() << "for ("; + if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement())) + PrintRawDeclStmt(DS); + else + PrintExpr(cast<Expr>(Node->getElement())); + OS << " in "; + PrintExpr(Node->getCollection()); + OS << ") "; + + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) { + PrintRawCompoundStmt(CS); + OS << "\n"; + } else { + OS << "\n"; + PrintStmt(Node->getBody()); + } +} + +void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) { + Indent() << "for ("; + PrintingPolicy SubPolicy(Policy); + SubPolicy.SuppressInitializers = true; + 
Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel); + OS << " : "; + PrintExpr(Node->getRangeInit()); + OS << ") {\n"; + PrintStmt(Node->getBody()); + Indent() << "}\n"; +} + +void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) { + Indent(); + if (Node->isIfExists()) + OS << "__if_exists ("; + else + OS << "__if_not_exists ("; + + if (NestedNameSpecifier *Qualifier + = Node->getQualifierLoc().getNestedNameSpecifier()) + Qualifier->print(OS, Policy); + + OS << Node->getNameInfo() << ") "; + + PrintRawCompoundStmt(Node->getSubStmt()); +} + +void StmtPrinter::VisitGotoStmt(GotoStmt *Node) { + Indent() << "goto " << Node->getLabel()->getName() << ";\n"; +} + +void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) { + Indent() << "goto *"; + PrintExpr(Node->getTarget()); + OS << ";\n"; +} + +void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) { + Indent() << "continue;\n"; +} + +void StmtPrinter::VisitBreakStmt(BreakStmt *Node) { + Indent() << "break;\n"; +} + + +void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) { + Indent() << "return"; + if (Node->getRetValue()) { + OS << " "; + PrintExpr(Node->getRetValue()); + } + OS << ";\n"; +} + + +void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) { + Indent() << "asm "; + + if (Node->isVolatile()) + OS << "volatile "; + + OS << "("; + VisitStringLiteral(Node->getAsmString()); + + // Outputs + if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 || + Node->getNumClobbers() != 0) + OS << " : "; + + for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) { + if (i != 0) + OS << ", "; + + if (!Node->getOutputName(i).empty()) { + OS << '['; + OS << Node->getOutputName(i); + OS << "] "; + } + + VisitStringLiteral(Node->getOutputConstraintLiteral(i)); + OS << " "; + Visit(Node->getOutputExpr(i)); + } + + // Inputs + if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0) + OS << " : "; + + for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) { + if (i != 0) + OS << 
", "; + + if (!Node->getInputName(i).empty()) { + OS << '['; + OS << Node->getInputName(i); + OS << "] "; + } + + VisitStringLiteral(Node->getInputConstraintLiteral(i)); + OS << " "; + Visit(Node->getInputExpr(i)); + } + + // Clobbers + if (Node->getNumClobbers() != 0) + OS << " : "; + + for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) { + if (i != 0) + OS << ", "; + + VisitStringLiteral(Node->getClobberStringLiteral(i)); + } + + OS << ");\n"; +} + +void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) { + // FIXME: Implement MS style inline asm statement printer. + Indent() << "__asm "; + if (Node->hasBraces()) + OS << "{\n"; + OS << Node->getAsmString() << "\n"; + if (Node->hasBraces()) + Indent() << "}\n"; +} + +void StmtPrinter::VisitCapturedStmt(CapturedStmt *Node) { + PrintStmt(Node->getCapturedDecl()->getBody()); +} + +void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) { + Indent() << "@try"; + if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) { + PrintRawCompoundStmt(TS); + OS << "\n"; + } + + for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) { + ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I); + Indent() << "@catch("; + if (catchStmt->getCatchParamDecl()) { + if (Decl *DS = catchStmt->getCatchParamDecl()) + PrintRawDecl(DS); + } + OS << ")"; + if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) { + PrintRawCompoundStmt(CS); + OS << "\n"; + } + } + + if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>( + Node->getFinallyStmt())) { + Indent() << "@finally"; + PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody())); + OS << "\n"; + } +} + +void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) { +} + +void StmtPrinter::VisitObjCAtCatchStmt (ObjCAtCatchStmt *Node) { + Indent() << "@catch (...) 
{ /* todo */ } \n"; +} + +void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) { + Indent() << "@throw"; + if (Node->getThrowExpr()) { + OS << " "; + PrintExpr(Node->getThrowExpr()); + } + OS << ";\n"; +} + +void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) { + Indent() << "@synchronized ("; + PrintExpr(Node->getSynchExpr()); + OS << ")"; + PrintRawCompoundStmt(Node->getSynchBody()); + OS << "\n"; +} + +void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) { + Indent() << "@autoreleasepool"; + PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt())); + OS << "\n"; +} + +void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) { + OS << "catch ("; + if (Decl *ExDecl = Node->getExceptionDecl()) + PrintRawDecl(ExDecl); + else + OS << "..."; + OS << ") "; + PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock())); +} + +void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) { + Indent(); + PrintRawCXXCatchStmt(Node); + OS << "\n"; +} + +void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) { + Indent() << "try "; + PrintRawCompoundStmt(Node->getTryBlock()); + for (unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) { + OS << " "; + PrintRawCXXCatchStmt(Node->getHandler(i)); + } + OS << "\n"; +} + +void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) { + Indent() << (Node->getIsCXXTry() ? 
"try " : "__try "); + PrintRawCompoundStmt(Node->getTryBlock()); + SEHExceptStmt *E = Node->getExceptHandler(); + SEHFinallyStmt *F = Node->getFinallyHandler(); + if(E) + PrintRawSEHExceptHandler(E); + else { + assert(F && "Must have a finally block..."); + PrintRawSEHFinallyStmt(F); + } + OS << "\n"; +} + +void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) { + OS << "__finally "; + PrintRawCompoundStmt(Node->getBlock()); + OS << "\n"; +} + +void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) { + OS << "__except ("; + VisitExpr(Node->getFilterExpr()); + OS << ")\n"; + PrintRawCompoundStmt(Node->getBlock()); + OS << "\n"; +} + +void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) { + Indent(); + PrintRawSEHExceptHandler(Node); + OS << "\n"; +} + +void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) { + Indent(); + PrintRawSEHFinallyStmt(Node); + OS << "\n"; +} + +//===----------------------------------------------------------------------===// +// OpenMP clauses printing methods +//===----------------------------------------------------------------------===// + +namespace { +class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> { + raw_ostream &OS; + /// \brief Process clauses with list of variables. + template <typename T> + void VisitOMPClauseList(T *Node, char StartSym); +public: + OMPClausePrinter(raw_ostream &OS) : OS(OS) { } +#define OPENMP_CLAUSE(Name, Class) \ + void Visit##Class(Class *S); +#include "clang/Basic/OpenMPKinds.def" +}; + +void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) { + OS << "default(" + << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind()) + << ")"; +} + +template<typename T> +void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) { + for (typename T::varlist_iterator I = Node->varlist_begin(), + E = Node->varlist_end(); + I != E; ++I) + OS << (I == Node->varlist_begin() ? 
StartSym : ',') + << *cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl()); +} + +void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) { + if (!Node->varlist_empty()) { + OS << "private"; + VisitOMPClauseList(Node, '('); + OS << ")"; + } +} + +void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) { + if (!Node->varlist_empty()) { + OS << "firstprivate"; + VisitOMPClauseList(Node, '('); + OS << ")"; + } +} + +void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) { + if (!Node->varlist_empty()) { + OS << "shared"; + VisitOMPClauseList(Node, '('); + OS << ")"; + } +} + +} + +//===----------------------------------------------------------------------===// +// OpenMP directives printing methods +//===----------------------------------------------------------------------===// + +void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) { + Indent() << "#pragma omp parallel "; + + OMPClausePrinter Printer(OS); + ArrayRef<OMPClause *> Clauses = Node->clauses(); + for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end(); + I != E; ++I) + if (*I && !(*I)->isImplicit()) { + Printer.Visit(*I); + OS << ' '; + } + OS << "\n"; + if (Node->getAssociatedStmt()) { + assert(isa<CapturedStmt>(Node->getAssociatedStmt()) && + "Expected captured statement!"); + Stmt *CS = cast<CapturedStmt>(Node->getAssociatedStmt())->getCapturedStmt(); + PrintStmt(CS); + } +} +//===----------------------------------------------------------------------===// +// Expr printing methods. 
+//===----------------------------------------------------------------------===// + +void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) { + if (NestedNameSpecifier *Qualifier = Node->getQualifier()) + Qualifier->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} + +void StmtPrinter::VisitDependentScopeDeclRefExpr( + DependentScopeDeclRefExpr *Node) { + if (NestedNameSpecifier *Qualifier = Node->getQualifier()) + Qualifier->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} + +void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) { + if (Node->getQualifier()) + Node->getQualifier()->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} + +void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) { + if (Node->getBase()) { + PrintExpr(Node->getBase()); + OS << (Node->isArrow() ? 
"->" : "."); + } + OS << *Node->getDecl(); +} + +void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) { + if (Node->isSuperReceiver()) + OS << "super."; + else if (Node->getBase()) { + PrintExpr(Node->getBase()); + OS << "."; + } + + if (Node->isImplicitProperty()) + OS << Node->getImplicitPropertyGetter()->getSelector().getAsString(); + else + OS << Node->getExplicitProperty()->getName(); +} + +void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) { + + PrintExpr(Node->getBaseExpr()); + OS << "["; + PrintExpr(Node->getKeyExpr()); + OS << "]"; +} + +void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) { + switch (Node->getIdentType()) { + default: + llvm_unreachable("unknown case"); + case PredefinedExpr::Func: + OS << "__func__"; + break; + case PredefinedExpr::Function: + OS << "__FUNCTION__"; + break; + case PredefinedExpr::FuncDName: + OS << "__FUNCDNAME__"; + break; + case PredefinedExpr::LFunction: + OS << "L__FUNCTION__"; + break; + case PredefinedExpr::PrettyFunction: + OS << "__PRETTY_FUNCTION__"; + break; + } +} + +void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) { + unsigned value = Node->getValue(); + + switch (Node->getKind()) { + case CharacterLiteral::Ascii: break; // no prefix. + case CharacterLiteral::Wide: OS << 'L'; break; + case CharacterLiteral::UTF16: OS << 'u'; break; + case CharacterLiteral::UTF32: OS << 'U'; break; + } + + switch (value) { + case '\\': + OS << "'\\\\'"; + break; + case '\'': + OS << "'\\''"; + break; + case '\a': + // TODO: K&R: the meaning of '\\a' is different in traditional C + OS << "'\\a'"; + break; + case '\b': + OS << "'\\b'"; + break; + // Nonstandard escape sequence. 
+ /*case '\e': + OS << "'\\e'"; + break;*/ + case '\f': + OS << "'\\f'"; + break; + case '\n': + OS << "'\\n'"; + break; + case '\r': + OS << "'\\r'"; + break; + case '\t': + OS << "'\\t'"; + break; + case '\v': + OS << "'\\v'"; + break; + default: + if (value < 256 && isPrintable((unsigned char)value)) + OS << "'" << (char)value << "'"; + else if (value < 256) + OS << "'\\x" << llvm::format("%02x", value) << "'"; + else if (value <= 0xFFFF) + OS << "'\\u" << llvm::format("%04x", value) << "'"; + else + OS << "'\\U" << llvm::format("%08x", value) << "'"; + } +} + +void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) { + bool isSigned = Node->getType()->isSignedIntegerType(); + OS << Node->getValue().toString(10, isSigned); + + // Emit suffixes. Integer literals are always a builtin integer type. + switch (Node->getType()->getAs<BuiltinType>()->getKind()) { + default: llvm_unreachable("Unexpected type for integer literal!"); + // FIXME: The Short and UShort cases are to handle cases where a short + // integeral literal is formed during template instantiation. They should + // be removed when template instantiation no longer needs integer literals. + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: break; // no suffix. + case BuiltinType::UInt: OS << 'U'; break; + case BuiltinType::Long: OS << 'L'; break; + case BuiltinType::ULong: OS << "UL"; break; + case BuiltinType::LongLong: OS << "LL"; break; + case BuiltinType::ULongLong: OS << "ULL"; break; + case BuiltinType::Int128: OS << "i128"; break; + case BuiltinType::UInt128: OS << "Ui128"; break; + } +} + +static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node, + bool PrintSuffix) { + SmallString<16> Str; + Node->getValue().toString(Str); + OS << Str; + if (Str.find_first_not_of("-0123456789") == StringRef::npos) + OS << '.'; // Trailing dot in order to separate from ints. + + if (!PrintSuffix) + return; + + // Emit suffixes. 
Float literals are always a builtin float type. + switch (Node->getType()->getAs<BuiltinType>()->getKind()) { + default: llvm_unreachable("Unexpected type for float literal!"); + case BuiltinType::Half: break; // FIXME: suffix? + case BuiltinType::Double: break; // no suffix. + case BuiltinType::Float: OS << 'F'; break; + case BuiltinType::LongDouble: OS << 'L'; break; + } +} + +void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) { + PrintFloatingLiteral(OS, Node, /*PrintSuffix=*/true); +} + +void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) { + PrintExpr(Node->getSubExpr()); + OS << "i"; +} + +void StmtPrinter::VisitStringLiteral(StringLiteral *Str) { + Str->outputString(OS); +} +void StmtPrinter::VisitParenExpr(ParenExpr *Node) { + OS << "("; + PrintExpr(Node->getSubExpr()); + OS << ")"; +} +void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) { + if (!Node->isPostfix()) { + OS << UnaryOperator::getOpcodeStr(Node->getOpcode()); + + // Print a space if this is an "identifier operator" like __real, or if + // it might be concatenated incorrectly like '+'. 
+ switch (Node->getOpcode()) { + default: break; + case UO_Real: + case UO_Imag: + case UO_Extension: + OS << ' '; + break; + case UO_Plus: + case UO_Minus: + if (isa<UnaryOperator>(Node->getSubExpr())) + OS << ' '; + break; + } + } + PrintExpr(Node->getSubExpr()); + + if (Node->isPostfix()) + OS << UnaryOperator::getOpcodeStr(Node->getOpcode()); +} + +void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) { + OS << "__builtin_offsetof("; + Node->getTypeSourceInfo()->getType().print(OS, Policy); + OS << ", "; + bool PrintedSomething = false; + for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) { + OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i); + if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Array) { + // Array node + OS << "["; + PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex())); + OS << "]"; + PrintedSomething = true; + continue; + } + + // Skip implicit base indirections. + if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Base) + continue; + + // Field or identifier node. 
+ IdentifierInfo *Id = ON.getFieldName(); + if (!Id) + continue; + + if (PrintedSomething) + OS << "."; + else + PrintedSomething = true; + OS << Id->getName(); + } + OS << ")"; +} + +void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){ + switch(Node->getKind()) { + case UETT_SizeOf: + OS << "sizeof"; + break; + case UETT_AlignOf: + if (Policy.LangOpts.CPlusPlus) + OS << "alignof"; + else if (Policy.LangOpts.C11) + OS << "_Alignof"; + else + OS << "__alignof"; + break; + case UETT_VecStep: + OS << "vec_step"; + break; + } + if (Node->isArgumentType()) { + OS << '('; + Node->getArgumentType().print(OS, Policy); + OS << ')'; + } else { + OS << " "; + PrintExpr(Node->getArgumentExpr()); + } +} + +void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) { + OS << "_Generic("; + PrintExpr(Node->getControllingExpr()); + for (unsigned i = 0; i != Node->getNumAssocs(); ++i) { + OS << ", "; + QualType T = Node->getAssocType(i); + if (T.isNull()) + OS << "default"; + else + T.print(OS, Policy); + OS << ": "; + PrintExpr(Node->getAssocExpr(i)); + } + OS << ")"; +} + +void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) { + PrintExpr(Node->getLHS()); + OS << "["; + PrintExpr(Node->getRHS()); + OS << "]"; +} + +void StmtPrinter::PrintCallArgs(CallExpr *Call) { + for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) { + if (isa<CXXDefaultArgExpr>(Call->getArg(i))) { + // Don't print any defaulted arguments + break; + } + + if (i) OS << ", "; + PrintExpr(Call->getArg(i)); + } +} + +void StmtPrinter::VisitCallExpr(CallExpr *Call) { + PrintExpr(Call->getCallee()); + OS << "("; + PrintCallArgs(Call); + OS << ")"; +} +void StmtPrinter::VisitMemberExpr(MemberExpr *Node) { + // FIXME: Suppress printing implicit bases (like "this") + PrintExpr(Node->getBase()); + + MemberExpr *ParentMember = dyn_cast<MemberExpr>(Node->getBase()); + FieldDecl *ParentDecl = ParentMember + ? 
dyn_cast<FieldDecl>(ParentMember->getMemberDecl()) : NULL; + + if (!ParentDecl || !ParentDecl->isAnonymousStructOrUnion()) + OS << (Node->isArrow() ? "->" : "."); + + if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl())) + if (FD->isAnonymousStructOrUnion()) + return; + + if (NestedNameSpecifier *Qualifier = Node->getQualifier()) + Qualifier->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getMemberNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} +void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) { + PrintExpr(Node->getBase()); + OS << (Node->isArrow() ? "->isa" : ".isa"); +} + +void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) { + PrintExpr(Node->getBase()); + OS << "."; + OS << Node->getAccessor().getName(); +} +void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) { + OS << '('; + Node->getTypeAsWritten().print(OS, Policy); + OS << ')'; + PrintExpr(Node->getSubExpr()); +} +void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) { + OS << '('; + Node->getType().print(OS, Policy); + OS << ')'; + PrintExpr(Node->getInitializer()); +} +void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) { + // No need to print anything, simply forward to the sub expression. + PrintExpr(Node->getSubExpr()); +} +void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) { + PrintExpr(Node->getLHS()); + OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " "; + PrintExpr(Node->getRHS()); +} +void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) { + PrintExpr(Node->getLHS()); + OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " "; + PrintExpr(Node->getRHS()); +} +void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) { + PrintExpr(Node->getCond()); + OS << " ? 
"; + PrintExpr(Node->getLHS()); + OS << " : "; + PrintExpr(Node->getRHS()); +} + +// GNU extensions. + +void +StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) { + PrintExpr(Node->getCommon()); + OS << " ?: "; + PrintExpr(Node->getFalseExpr()); +} +void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) { + OS << "&&" << Node->getLabel()->getName(); +} + +void StmtPrinter::VisitStmtExpr(StmtExpr *E) { + OS << "("; + PrintRawCompoundStmt(E->getSubStmt()); + OS << ")"; +} + +void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) { + OS << "__builtin_choose_expr("; + PrintExpr(Node->getCond()); + OS << ", "; + PrintExpr(Node->getLHS()); + OS << ", "; + PrintExpr(Node->getRHS()); + OS << ")"; +} + +void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) { + OS << "__null"; +} + +void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) { + OS << "__builtin_shufflevector("; + for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) { + if (i) OS << ", "; + PrintExpr(Node->getExpr(i)); + } + OS << ")"; +} + +void StmtPrinter::VisitConvertVectorExpr(ConvertVectorExpr *Node) { + OS << "__builtin_convertvector("; + PrintExpr(Node->getSrcExpr()); + OS << ", "; + Node->getType().print(OS, Policy); + OS << ")"; +} + +void StmtPrinter::VisitInitListExpr(InitListExpr* Node) { + if (Node->getSyntacticForm()) { + Visit(Node->getSyntacticForm()); + return; + } + + OS << "{ "; + for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) { + if (i) OS << ", "; + if (Node->getInit(i)) + PrintExpr(Node->getInit(i)); + else + OS << "0"; + } + OS << " }"; +} + +void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) { + OS << "( "; + for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) { + if (i) OS << ", "; + PrintExpr(Node->getExpr(i)); + } + OS << " )"; +} + +void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) { + for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(), + DEnd = Node->designators_end(); + D 
!= DEnd; ++D) { + if (D->isFieldDesignator()) { + if (D->getDotLoc().isInvalid()) + OS << D->getFieldName()->getName() << ":"; + else + OS << "." << D->getFieldName()->getName(); + } else { + OS << "["; + if (D->isArrayDesignator()) { + PrintExpr(Node->getArrayIndex(*D)); + } else { + PrintExpr(Node->getArrayRangeStart(*D)); + OS << " ... "; + PrintExpr(Node->getArrayRangeEnd(*D)); + } + OS << "]"; + } + } + + OS << " = "; + PrintExpr(Node->getInit()); +} + +void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) { + if (Policy.LangOpts.CPlusPlus) { + OS << "/*implicit*/"; + Node->getType().print(OS, Policy); + OS << "()"; + } else { + OS << "/*implicit*/("; + Node->getType().print(OS, Policy); + OS << ')'; + if (Node->getType()->isRecordType()) + OS << "{}"; + else + OS << 0; + } +} + +void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) { + OS << "__builtin_va_arg("; + PrintExpr(Node->getSubExpr()); + OS << ", "; + Node->getType().print(OS, Policy); + OS << ")"; +} + +void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) { + PrintExpr(Node->getSyntacticForm()); +} + +void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) { + const char *Name = 0; + switch (Node->getOp()) { +#define BUILTIN(ID, TYPE, ATTRS) +#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ + case AtomicExpr::AO ## ID: \ + Name = #ID "("; \ + break; +#include "clang/Basic/Builtins.def" + } + OS << Name; + + // AtomicExpr stores its subexpressions in a permuted order. 
+ PrintExpr(Node->getPtr()); + if (Node->getOp() != AtomicExpr::AO__c11_atomic_load && + Node->getOp() != AtomicExpr::AO__atomic_load_n) { + OS << ", "; + PrintExpr(Node->getVal1()); + } + if (Node->getOp() == AtomicExpr::AO__atomic_exchange || + Node->isCmpXChg()) { + OS << ", "; + PrintExpr(Node->getVal2()); + } + if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange || + Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) { + OS << ", "; + PrintExpr(Node->getWeak()); + } + if (Node->getOp() != AtomicExpr::AO__c11_atomic_init) { + OS << ", "; + PrintExpr(Node->getOrder()); + } + if (Node->isCmpXChg()) { + OS << ", "; + PrintExpr(Node->getOrderFail()); + } + OS << ")"; +} + +// C++ +void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) { + const char *OpStrings[NUM_OVERLOADED_OPERATORS] = { + "", +#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \ + Spelling, +#include "clang/Basic/OperatorKinds.def" + }; + + OverloadedOperatorKind Kind = Node->getOperator(); + if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) { + if (Node->getNumArgs() == 1) { + OS << OpStrings[Kind] << ' '; + PrintExpr(Node->getArg(0)); + } else { + PrintExpr(Node->getArg(0)); + OS << ' ' << OpStrings[Kind]; + } + } else if (Kind == OO_Arrow) { + PrintExpr(Node->getArg(0)); + } else if (Kind == OO_Call) { + PrintExpr(Node->getArg(0)); + OS << '('; + for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) { + if (ArgIdx > 1) + OS << ", "; + if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx))) + PrintExpr(Node->getArg(ArgIdx)); + } + OS << ')'; + } else if (Kind == OO_Subscript) { + PrintExpr(Node->getArg(0)); + OS << '['; + PrintExpr(Node->getArg(1)); + OS << ']'; + } else if (Node->getNumArgs() == 1) { + OS << OpStrings[Kind] << ' '; + PrintExpr(Node->getArg(0)); + } else if (Node->getNumArgs() == 2) { + PrintExpr(Node->getArg(0)); + OS << ' ' << OpStrings[Kind] << ' '; + PrintExpr(Node->getArg(1)); + } else { + 
llvm_unreachable("unknown overloaded operator"); + } +} + +void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) { + VisitCallExpr(cast<CallExpr>(Node)); +} + +void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) { + PrintExpr(Node->getCallee()); + OS << "<<<"; + PrintCallArgs(Node->getConfig()); + OS << ">>>("; + PrintCallArgs(Node); + OS << ")"; +} + +void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) { + OS << Node->getCastName() << '<'; + Node->getTypeAsWritten().print(OS, Policy); + OS << ">("; + PrintExpr(Node->getSubExpr()); + OS << ")"; +} + +void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) { + VisitCXXNamedCastExpr(Node); +} + +void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) { + VisitCXXNamedCastExpr(Node); +} + +void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) { + VisitCXXNamedCastExpr(Node); +} + +void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) { + VisitCXXNamedCastExpr(Node); +} + +void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) { + OS << "typeid("; + if (Node->isTypeOperand()) { + Node->getTypeOperandSourceInfo()->getType().print(OS, Policy); + } else { + PrintExpr(Node->getExprOperand()); + } + OS << ")"; +} + +void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) { + OS << "__uuidof("; + if (Node->isTypeOperand()) { + Node->getTypeOperandSourceInfo()->getType().print(OS, Policy); + } else { + PrintExpr(Node->getExprOperand()); + } + OS << ")"; +} + +void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) { + PrintExpr(Node->getBaseExpr()); + if (Node->isArrow()) + OS << "->"; + else + OS << "."; + if (NestedNameSpecifier *Qualifier = + Node->getQualifierLoc().getNestedNameSpecifier()) + Qualifier->print(OS, Policy); + OS << Node->getPropertyDecl()->getDeclName(); +} + +void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) { + switch (Node->getLiteralOperatorKind()) { + case 
UserDefinedLiteral::LOK_Raw: + OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString(); + break; + case UserDefinedLiteral::LOK_Template: { + DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts()); + const TemplateArgumentList *Args = + cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs(); + assert(Args); + const TemplateArgument &Pack = Args->get(0); + for (TemplateArgument::pack_iterator I = Pack.pack_begin(), + E = Pack.pack_end(); I != E; ++I) { + char C = (char)I->getAsIntegral().getZExtValue(); + OS << C; + } + break; + } + case UserDefinedLiteral::LOK_Integer: { + // Print integer literal without suffix. + IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral()); + OS << Int->getValue().toString(10, /*isSigned*/false); + break; + } + case UserDefinedLiteral::LOK_Floating: { + // Print floating literal without suffix. + FloatingLiteral *Float = cast<FloatingLiteral>(Node->getCookedLiteral()); + PrintFloatingLiteral(OS, Float, /*PrintSuffix=*/false); + break; + } + case UserDefinedLiteral::LOK_String: + case UserDefinedLiteral::LOK_Character: + PrintExpr(Node->getCookedLiteral()); + break; + } + OS << Node->getUDSuffix()->getName(); +} + +void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) { + OS << (Node->getValue() ? "true" : "false"); +} + +void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) { + OS << "nullptr"; +} + +void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) { + OS << "this"; +} + +void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) { + if (Node->getSubExpr() == 0) + OS << "throw"; + else { + OS << "throw "; + PrintExpr(Node->getSubExpr()); + } +} + +void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) { + // Nothing to print: we picked up the default argument. +} + +void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) { + // Nothing to print: we picked up the default initializer. 
+} + +void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) { + Node->getType().print(OS, Policy); + OS << "("; + PrintExpr(Node->getSubExpr()); + OS << ")"; +} + +void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) { + PrintExpr(Node->getSubExpr()); +} + +void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) { + Node->getType().print(OS, Policy); + OS << "("; + for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(), + ArgEnd = Node->arg_end(); + Arg != ArgEnd; ++Arg) { + if (Arg->isDefaultArgument()) + break; + if (Arg != Node->arg_begin()) + OS << ", "; + PrintExpr(*Arg); + } + OS << ")"; +} + +void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { + OS << '['; + bool NeedComma = false; + switch (Node->getCaptureDefault()) { + case LCD_None: + break; + + case LCD_ByCopy: + OS << '='; + NeedComma = true; + break; + + case LCD_ByRef: + OS << '&'; + NeedComma = true; + break; + } + for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(), + CEnd = Node->explicit_capture_end(); + C != CEnd; + ++C) { + if (NeedComma) + OS << ", "; + NeedComma = true; + + switch (C->getCaptureKind()) { + case LCK_This: + OS << "this"; + break; + + case LCK_ByRef: + if (Node->getCaptureDefault() != LCD_ByRef || C->isInitCapture()) + OS << '&'; + OS << C->getCapturedVar()->getName(); + break; + + case LCK_ByCopy: + OS << C->getCapturedVar()->getName(); + break; + } + + if (C->isInitCapture()) + PrintExpr(C->getCapturedVar()->getInit()); + } + OS << ']'; + + if (Node->hasExplicitParameters()) { + OS << " ("; + CXXMethodDecl *Method = Node->getCallOperator(); + NeedComma = false; + for (CXXMethodDecl::param_iterator P = Method->param_begin(), + PEnd = Method->param_end(); + P != PEnd; ++P) { + if (NeedComma) { + OS << ", "; + } else { + NeedComma = true; + } + std::string ParamStr = (*P)->getNameAsString(); + (*P)->getOriginalType().print(OS, Policy, ParamStr); + } + if (Method->isVariadic()) { + if 
(NeedComma) + OS << ", "; + OS << "..."; + } + OS << ')'; + + if (Node->isMutable()) + OS << " mutable"; + + const FunctionProtoType *Proto + = Method->getType()->getAs<FunctionProtoType>(); + Proto->printExceptionSpecification(OS, Policy); + + // FIXME: Attributes + + // Print the trailing return type if it was specified in the source. + if (Node->hasExplicitResultType()) { + OS << " -> "; + Proto->getResultType().print(OS, Policy); + } + } + + // Print the body. + CompoundStmt *Body = Node->getBody(); + OS << ' '; + PrintStmt(Body); +} + +void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) { + if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo()) + TSInfo->getType().print(OS, Policy); + else + Node->getType().print(OS, Policy); + OS << "()"; +} + +void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) { + if (E->isGlobalNew()) + OS << "::"; + OS << "new "; + unsigned NumPlace = E->getNumPlacementArgs(); + if (NumPlace > 0 && !isa<CXXDefaultArgExpr>(E->getPlacementArg(0))) { + OS << "("; + PrintExpr(E->getPlacementArg(0)); + for (unsigned i = 1; i < NumPlace; ++i) { + if (isa<CXXDefaultArgExpr>(E->getPlacementArg(i))) + break; + OS << ", "; + PrintExpr(E->getPlacementArg(i)); + } + OS << ") "; + } + if (E->isParenTypeId()) + OS << "("; + std::string TypeS; + if (Expr *Size = E->getArraySize()) { + llvm::raw_string_ostream s(TypeS); + s << '['; + Size->printPretty(s, Helper, Policy); + s << ']'; + } + E->getAllocatedType().print(OS, Policy, TypeS); + if (E->isParenTypeId()) + OS << ")"; + + CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle(); + if (InitStyle) { + if (InitStyle == CXXNewExpr::CallInit) + OS << "("; + PrintExpr(E->getInitializer()); + if (InitStyle == CXXNewExpr::CallInit) + OS << ")"; + } +} + +void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) { + if (E->isGlobalDelete()) + OS << "::"; + OS << "delete "; + if (E->isArrayForm()) + OS << "[] "; + PrintExpr(E->getArgument()); +} + +void 
StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { + PrintExpr(E->getBase()); + if (E->isArrow()) + OS << "->"; + else + OS << '.'; + if (E->getQualifier()) + E->getQualifier()->print(OS, Policy); + OS << "~"; + + if (IdentifierInfo *II = E->getDestroyedTypeIdentifier()) + OS << II->getName(); + else + E->getDestroyedType().print(OS, Policy); +} + +void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) { + if (E->isListInitialization()) + OS << "{ "; + + for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { + if (isa<CXXDefaultArgExpr>(E->getArg(i))) { + // Don't print any defaulted arguments + break; + } + + if (i) OS << ", "; + PrintExpr(E->getArg(i)); + } + + if (E->isListInitialization()) + OS << " }"; +} + +void StmtPrinter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { + PrintExpr(E->getSubExpr()); +} + +void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) { + // Just forward to the sub expression. + PrintExpr(E->getSubExpr()); +} + +void +StmtPrinter::VisitCXXUnresolvedConstructExpr( + CXXUnresolvedConstructExpr *Node) { + Node->getTypeAsWritten().print(OS, Policy); + OS << "("; + for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(), + ArgEnd = Node->arg_end(); + Arg != ArgEnd; ++Arg) { + if (Arg != Node->arg_begin()) + OS << ", "; + PrintExpr(*Arg); + } + OS << ")"; +} + +void StmtPrinter::VisitCXXDependentScopeMemberExpr( + CXXDependentScopeMemberExpr *Node) { + if (!Node->isImplicitAccess()) { + PrintExpr(Node->getBase()); + OS << (Node->isArrow() ? 
"->" : "."); + } + if (NestedNameSpecifier *Qualifier = Node->getQualifier()) + Qualifier->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getMemberNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} + +void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) { + if (!Node->isImplicitAccess()) { + PrintExpr(Node->getBase()); + OS << (Node->isArrow() ? "->" : "."); + } + if (NestedNameSpecifier *Qualifier = Node->getQualifier()) + Qualifier->print(OS, Policy); + if (Node->hasTemplateKeyword()) + OS << "template "; + OS << Node->getMemberNameInfo(); + if (Node->hasExplicitTemplateArgs()) + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy); +} + +static const char *getTypeTraitName(UnaryTypeTrait UTT) { + switch (UTT) { + case UTT_HasNothrowAssign: return "__has_nothrow_assign"; + case UTT_HasNothrowMoveAssign: return "__has_nothrow_move_assign"; + case UTT_HasNothrowConstructor: return "__has_nothrow_constructor"; + case UTT_HasNothrowCopy: return "__has_nothrow_copy"; + case UTT_HasTrivialAssign: return "__has_trivial_assign"; + case UTT_HasTrivialMoveAssign: return "__has_trivial_move_assign"; + case UTT_HasTrivialMoveConstructor: return "__has_trivial_move_constructor"; + case UTT_HasTrivialDefaultConstructor: return "__has_trivial_constructor"; + case UTT_HasTrivialCopy: return "__has_trivial_copy"; + case UTT_HasTrivialDestructor: return "__has_trivial_destructor"; + case UTT_HasVirtualDestructor: return "__has_virtual_destructor"; + case UTT_IsAbstract: return "__is_abstract"; + case UTT_IsArithmetic: return "__is_arithmetic"; + case UTT_IsArray: return "__is_array"; + case UTT_IsClass: return "__is_class"; + case UTT_IsCompleteType: return "__is_complete_type"; + case UTT_IsCompound: return "__is_compound"; + 
case UTT_IsConst: return "__is_const"; + case UTT_IsEmpty: return "__is_empty"; + case UTT_IsEnum: return "__is_enum"; + case UTT_IsFinal: return "__is_final"; + case UTT_IsFloatingPoint: return "__is_floating_point"; + case UTT_IsFunction: return "__is_function"; + case UTT_IsFundamental: return "__is_fundamental"; + case UTT_IsIntegral: return "__is_integral"; + case UTT_IsInterfaceClass: return "__is_interface_class"; + case UTT_IsLiteral: return "__is_literal"; + case UTT_IsLvalueReference: return "__is_lvalue_reference"; + case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer"; + case UTT_IsMemberObjectPointer: return "__is_member_object_pointer"; + case UTT_IsMemberPointer: return "__is_member_pointer"; + case UTT_IsObject: return "__is_object"; + case UTT_IsPOD: return "__is_pod"; + case UTT_IsPointer: return "__is_pointer"; + case UTT_IsPolymorphic: return "__is_polymorphic"; + case UTT_IsReference: return "__is_reference"; + case UTT_IsRvalueReference: return "__is_rvalue_reference"; + case UTT_IsScalar: return "__is_scalar"; + case UTT_IsSealed: return "__is_sealed"; + case UTT_IsSigned: return "__is_signed"; + case UTT_IsStandardLayout: return "__is_standard_layout"; + case UTT_IsTrivial: return "__is_trivial"; + case UTT_IsTriviallyCopyable: return "__is_trivially_copyable"; + case UTT_IsUnion: return "__is_union"; + case UTT_IsUnsigned: return "__is_unsigned"; + case UTT_IsVoid: return "__is_void"; + case UTT_IsVolatile: return "__is_volatile"; + } + llvm_unreachable("Type trait not covered by switch statement"); +} + +static const char *getTypeTraitName(BinaryTypeTrait BTT) { + switch (BTT) { + case BTT_IsBaseOf: return "__is_base_of"; + case BTT_IsConvertible: return "__is_convertible"; + case BTT_IsSame: return "__is_same"; + case BTT_TypeCompatible: return "__builtin_types_compatible_p"; + case BTT_IsConvertibleTo: return "__is_convertible_to"; + case BTT_IsTriviallyAssignable: return "__is_trivially_assignable"; + } + 
llvm_unreachable("Binary type trait not covered by switch"); +} + +static const char *getTypeTraitName(TypeTrait TT) { + switch (TT) { + case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible"; + } + llvm_unreachable("Type trait not covered by switch"); +} + +static const char *getTypeTraitName(ArrayTypeTrait ATT) { + switch (ATT) { + case ATT_ArrayRank: return "__array_rank"; + case ATT_ArrayExtent: return "__array_extent"; + } + llvm_unreachable("Array type trait not covered by switch"); +} + +static const char *getExpressionTraitName(ExpressionTrait ET) { + switch (ET) { + case ET_IsLValueExpr: return "__is_lvalue_expr"; + case ET_IsRValueExpr: return "__is_rvalue_expr"; + } + llvm_unreachable("Expression type trait not covered by switch"); +} + +void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) { + OS << getTypeTraitName(E->getTrait()) << '('; + E->getQueriedType().print(OS, Policy); + OS << ')'; +} + +void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) { + OS << getTypeTraitName(E->getTrait()) << '('; + E->getLhsType().print(OS, Policy); + OS << ','; + E->getRhsType().print(OS, Policy); + OS << ')'; +} + +void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) { + OS << getTypeTraitName(E->getTrait()) << "("; + for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) { + if (I > 0) + OS << ", "; + E->getArg(I)->getType().print(OS, Policy); + } + OS << ")"; +} + +void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) { + OS << getTypeTraitName(E->getTrait()) << '('; + E->getQueriedType().print(OS, Policy); + OS << ')'; +} + +void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) { + OS << getExpressionTraitName(E->getTrait()) << '('; + PrintExpr(E->getQueriedExpression()); + OS << ')'; +} + +void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { + OS << "noexcept("; + PrintExpr(E->getOperand()); + OS << ")"; +} + +void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) { + 
PrintExpr(E->getPattern()); + OS << "..."; +} + +void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) { + OS << "sizeof...(" << *E->getPack() << ")"; +} + +void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr( + SubstNonTypeTemplateParmPackExpr *Node) { + OS << *Node->getParameterPack(); +} + +void StmtPrinter::VisitSubstNonTypeTemplateParmExpr( + SubstNonTypeTemplateParmExpr *Node) { + Visit(Node->getReplacement()); +} + +void StmtPrinter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) { + OS << *E->getParameterPack(); +} + +void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){ + PrintExpr(Node->GetTemporaryExpr()); +} + +// Obj-C + +void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) { + OS << "@"; + VisitStringLiteral(Node->getString()); +} + +void StmtPrinter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) { + OS << "@"; + Visit(E->getSubExpr()); +} + +void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) { + OS << "@[ "; + StmtRange ch = E->children(); + if (ch.first != ch.second) { + while (1) { + Visit(*ch.first); + ++ch.first; + if (ch.first == ch.second) break; + OS << ", "; + } + } + OS << " ]"; +} + +void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { + OS << "@{ "; + for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) { + if (I > 0) + OS << ", "; + + ObjCDictionaryElement Element = E->getKeyValueElement(I); + Visit(Element.Key); + OS << " : "; + Visit(Element.Value); + if (Element.isPackExpansion()) + OS << "..."; + } + OS << " }"; +} + +void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) { + OS << "@encode("; + Node->getEncodedType().print(OS, Policy); + OS << ')'; +} + +void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) { + OS << "@selector(" << Node->getSelector().getAsString() << ')'; +} + +void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) { + OS << "@protocol(" << *Node->getProtocol() << ')'; +} + +void 
StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) { + OS << "["; + switch (Mess->getReceiverKind()) { + case ObjCMessageExpr::Instance: + PrintExpr(Mess->getInstanceReceiver()); + break; + + case ObjCMessageExpr::Class: + Mess->getClassReceiver().print(OS, Policy); + break; + + case ObjCMessageExpr::SuperInstance: + case ObjCMessageExpr::SuperClass: + OS << "Super"; + break; + } + + OS << ' '; + Selector selector = Mess->getSelector(); + if (selector.isUnarySelector()) { + OS << selector.getNameForSlot(0); + } else { + for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) { + if (i < selector.getNumArgs()) { + if (i > 0) OS << ' '; + if (selector.getIdentifierInfoForSlot(i)) + OS << selector.getIdentifierInfoForSlot(i)->getName() << ':'; + else + OS << ":"; + } + else OS << ", "; // Handle variadic methods. + + PrintExpr(Mess->getArg(i)); + } + } + OS << "]"; +} + +void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) { + OS << (Node->getValue() ? "__objc_yes" : "__objc_no"); +} + +void +StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) { + PrintExpr(E->getSubExpr()); +} + +void +StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) { + OS << '(' << E->getBridgeKindName(); + E->getType().print(OS, Policy); + OS << ')'; + PrintExpr(E->getSubExpr()); +} + +void StmtPrinter::VisitBlockExpr(BlockExpr *Node) { + BlockDecl *BD = Node->getBlockDecl(); + OS << "^"; + + const FunctionType *AFT = Node->getFunctionType(); + + if (isa<FunctionNoProtoType>(AFT)) { + OS << "()"; + } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) { + OS << '('; + for (BlockDecl::param_iterator AI = BD->param_begin(), + E = BD->param_end(); AI != E; ++AI) { + if (AI != BD->param_begin()) OS << ", "; + std::string ParamStr = (*AI)->getNameAsString(); + (*AI)->getType().print(OS, Policy, ParamStr); + } + + const FunctionProtoType *FT = cast<FunctionProtoType>(AFT); + if (FT->isVariadic()) { + if 
(!BD->param_empty()) OS << ", "; + OS << "..."; + } + OS << ')'; + } + OS << "{ }"; +} + +void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) { + PrintExpr(Node->getSourceExpr()); +} + +void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) { + OS << "__builtin_astype("; + PrintExpr(Node->getSrcExpr()); + OS << ", "; + Node->getType().print(OS, Policy); + OS << ")"; +} + +//===----------------------------------------------------------------------===// +// Stmt method implementations +//===----------------------------------------------------------------------===// + +void Stmt::dumpPretty(const ASTContext &Context) const { + printPretty(llvm::errs(), 0, PrintingPolicy(Context.getLangOpts())); +} + +void Stmt::printPretty(raw_ostream &OS, + PrinterHelper *Helper, + const PrintingPolicy &Policy, + unsigned Indentation) const { + if (this == 0) { + OS << "<NULL>"; + return; + } + + StmtPrinter P(OS, Helper, Policy, Indentation); + P.Visit(const_cast<Stmt*>(this)); +} + +//===----------------------------------------------------------------------===// +// PrinterHelper +//===----------------------------------------------------------------------===// + +// Implement virtual destructor. +PrinterHelper::~PrinterHelper() {} diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp new file mode 100644 index 000000000000..6805e62befb0 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp @@ -0,0 +1,1282 @@ +//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the Stmt::Profile method, which builds a unique bit +// representation that identifies a statement/expression. 
+// +//===----------------------------------------------------------------------===// +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/ADT/FoldingSet.h" +using namespace clang; + +namespace { + class StmtProfiler : public ConstStmtVisitor<StmtProfiler> { + llvm::FoldingSetNodeID &ID; + const ASTContext &Context; + bool Canonical; + + public: + StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context, + bool Canonical) + : ID(ID), Context(Context), Canonical(Canonical) { } + + void VisitStmt(const Stmt *S); + +#define STMT(Node, Base) void Visit##Node(const Node *S); +#include "clang/AST/StmtNodes.inc" + + /// \brief Visit a declaration that is referenced within an expression + /// or statement. + void VisitDecl(const Decl *D); + + /// \brief Visit a type that is referenced within an expression or + /// statement. + void VisitType(QualType T); + + /// \brief Visit a name that occurs within an expression or statement. + void VisitName(DeclarationName Name); + + /// \brief Visit a nested-name-specifier that occurs within an expression + /// or statement. + void VisitNestedNameSpecifier(NestedNameSpecifier *NNS); + + /// \brief Visit a template name that occurs within an expression or + /// statement. + void VisitTemplateName(TemplateName Name); + + /// \brief Visit template arguments that occur within an expression or + /// statement. + void VisitTemplateArguments(const TemplateArgumentLoc *Args, + unsigned NumArgs); + + /// \brief Visit a single template argument. 
+ void VisitTemplateArgument(const TemplateArgument &Arg); + }; +} + +void StmtProfiler::VisitStmt(const Stmt *S) { + ID.AddInteger(S->getStmtClass()); + for (Stmt::const_child_range C = S->children(); C; ++C) { + if (*C) + Visit(*C); + else + ID.AddInteger(0); + } +} + +void StmtProfiler::VisitDeclStmt(const DeclStmt *S) { + VisitStmt(S); + for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end(); + D != DEnd; ++D) + VisitDecl(*D); +} + +void StmtProfiler::VisitNullStmt(const NullStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitSwitchCase(const SwitchCase *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitCaseStmt(const CaseStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitLabelStmt(const LabelStmt *S) { + VisitStmt(S); + VisitDecl(S->getDecl()); +} + +void StmtProfiler::VisitAttributedStmt(const AttributedStmt *S) { + VisitStmt(S); + // TODO: maybe visit attributes? 
+} + +void StmtProfiler::VisitIfStmt(const IfStmt *S) { + VisitStmt(S); + VisitDecl(S->getConditionVariable()); +} + +void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) { + VisitStmt(S); + VisitDecl(S->getConditionVariable()); +} + +void StmtProfiler::VisitWhileStmt(const WhileStmt *S) { + VisitStmt(S); + VisitDecl(S->getConditionVariable()); +} + +void StmtProfiler::VisitDoStmt(const DoStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitForStmt(const ForStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitGotoStmt(const GotoStmt *S) { + VisitStmt(S); + VisitDecl(S->getLabel()); +} + +void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitBreakStmt(const BreakStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) { + VisitStmt(S); + ID.AddBoolean(S->isVolatile()); + ID.AddBoolean(S->isSimple()); + VisitStringLiteral(S->getAsmString()); + ID.AddInteger(S->getNumOutputs()); + for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) { + ID.AddString(S->getOutputName(I)); + VisitStringLiteral(S->getOutputConstraintLiteral(I)); + } + ID.AddInteger(S->getNumInputs()); + for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) { + ID.AddString(S->getInputName(I)); + VisitStringLiteral(S->getInputConstraintLiteral(I)); + } + ID.AddInteger(S->getNumClobbers()); + for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I) + VisitStringLiteral(S->getClobberStringLiteral(I)); +} + +void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) { + // FIXME: Implement MS style inline asm statement profiler. 
+ VisitStmt(S); +} + +void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) { + VisitStmt(S); + VisitType(S->getCaughtType()); +} + +void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) { + VisitStmt(S); + ID.AddBoolean(S->isIfExists()); + VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier()); + VisitName(S->getNameInfo().getName()); +} + +void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitCapturedStmt(const CapturedStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) { + VisitStmt(S); + ID.AddBoolean(S->hasEllipsis()); + if (S->getCatchParamDecl()) + VisitType(S->getCatchParamDecl()->getType()); +} + +void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) { + VisitStmt(S); +} + +void +StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) { + VisitStmt(S); +} + +void +StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) { + VisitStmt(S); +} + +namespace { +class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> { + StmtProfiler *Profiler; + /// \brief Process clauses with list of variables. 
+ template <typename T> + void VisitOMPClauseList(T *Node); +public: + OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { } +#define OPENMP_CLAUSE(Name, Class) \ + void Visit##Class(const Class *C); +#include "clang/Basic/OpenMPKinds.def" +}; + +void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { } + +template<typename T> +void OMPClauseProfiler::VisitOMPClauseList(T *Node) { + for (typename T::varlist_const_iterator I = Node->varlist_begin(), + E = Node->varlist_end(); + I != E; ++I) + Profiler->VisitStmt(*I); +} + +void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) { + VisitOMPClauseList(C); +} +void OMPClauseProfiler::VisitOMPFirstprivateClause( + const OMPFirstprivateClause *C) { + VisitOMPClauseList(C); +} +void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) { + VisitOMPClauseList(C); +} +} + +void +StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) { + VisitStmt(S); + OMPClauseProfiler P(this); + ArrayRef<OMPClause *> Clauses = S->clauses(); + for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end(); + I != E; ++I) + if (*I) + P.Visit(*I); +} + +void StmtProfiler::VisitExpr(const Expr *S) { + VisitStmt(S); +} + +void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) { + VisitExpr(S); + if (!Canonical) + VisitNestedNameSpecifier(S->getQualifier()); + VisitDecl(S->getDecl()); + if (!Canonical) + VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); +} + +void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getIdentType()); +} + +void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) { + VisitExpr(S); + S->getValue().Profile(ID); +} + +void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) { + VisitExpr(S); + ID.AddInteger(S->getKind()); + ID.AddInteger(S->getValue()); +} + +void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) { + VisitExpr(S); + 
S->getValue().Profile(ID); + ID.AddBoolean(S->isExact()); +} + +void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitStringLiteral(const StringLiteral *S) { + VisitExpr(S); + ID.AddString(S->getBytes()); + ID.AddInteger(S->getKind()); +} + +void StmtProfiler::VisitParenExpr(const ParenExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) { + VisitExpr(S); + ID.AddInteger(S->getOpcode()); +} + +void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) { + VisitType(S->getTypeSourceInfo()->getType()); + unsigned n = S->getNumComponents(); + for (unsigned i = 0; i < n; ++i) { + const OffsetOfExpr::OffsetOfNode& ON = S->getComponent(i); + ID.AddInteger(ON.getKind()); + switch (ON.getKind()) { + case OffsetOfExpr::OffsetOfNode::Array: + // Expressions handled below. + break; + + case OffsetOfExpr::OffsetOfNode::Field: + VisitDecl(ON.getField()); + break; + + case OffsetOfExpr::OffsetOfNode::Identifier: + ID.AddPointer(ON.getFieldName()); + break; + + case OffsetOfExpr::OffsetOfNode::Base: + // These nodes are implicit, and therefore don't need profiling. 
+ break; + } + } + + VisitExpr(S); +} + +void +StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getKind()); + if (S->isArgumentType()) + VisitType(S->getArgumentType()); +} + +void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCallExpr(const CallExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitMemberExpr(const MemberExpr *S) { + VisitExpr(S); + VisitDecl(S->getMemberDecl()); + if (!Canonical) + VisitNestedNameSpecifier(S->getQualifier()); + ID.AddBoolean(S->isArrow()); +} + +void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->isFileScope()); +} + +void StmtProfiler::VisitCastExpr(const CastExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) { + VisitCastExpr(S); + ID.AddInteger(S->getValueKind()); +} + +void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) { + VisitCastExpr(S); + VisitType(S->getTypeAsWritten()); +} + +void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) { + VisitExplicitCastExpr(S); +} + +void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) { + VisitExpr(S); + ID.AddInteger(S->getOpcode()); +} + +void +StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) { + VisitBinaryOperator(S); +} + +void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitBinaryConditionalOperator( + const BinaryConditionalOperator *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) { + VisitExpr(S); + VisitDecl(S->getLabel()); +} + +void StmtProfiler::VisitStmtExpr(const StmtExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitConvertVectorExpr(const ConvertVectorExpr *S) { 
+ VisitExpr(S); +} + +void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitInitListExpr(const InitListExpr *S) { + if (S->getSyntacticForm()) { + VisitInitListExpr(S->getSyntacticForm()); + return; + } + + VisitExpr(S); +} + +void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->usesGNUSyntax()); + for (DesignatedInitExpr::const_designators_iterator D = + S->designators_begin(), DEnd = S->designators_end(); + D != DEnd; ++D) { + if (D->isFieldDesignator()) { + ID.AddInteger(0); + VisitName(D->getFieldName()); + continue; + } + + if (D->isArrayDesignator()) { + ID.AddInteger(1); + } else { + assert(D->isArrayRangeDesignator()); + ID.AddInteger(2); + } + ID.AddInteger(D->getFirstExprIndex()); + } +} + +void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) { + VisitExpr(S); + VisitName(&S->getAccessor()); +} + +void StmtProfiler::VisitBlockExpr(const BlockExpr *S) { + VisitExpr(S); + VisitDecl(S->getBlockDecl()); +} + +void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) { + VisitExpr(S); + for (unsigned i = 0; i != S->getNumAssocs(); ++i) { + QualType T = S->getAssocType(i); + if (T.isNull()) + ID.AddPointer(0); + else + VisitType(T); + VisitExpr(S->getAssocExpr(i)); + } +} + +void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) { + VisitExpr(S); + for (PseudoObjectExpr::const_semantics_iterator + i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i) + // Normally, we would not profile the source expressions of OVEs. 
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i)) + Visit(OVE->getSourceExpr()); +} + +void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getOp()); +} + +static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, + UnaryOperatorKind &UnaryOp, + BinaryOperatorKind &BinaryOp) { + switch (S->getOperator()) { + case OO_None: + case OO_New: + case OO_Delete: + case OO_Array_New: + case OO_Array_Delete: + case OO_Arrow: + case OO_Call: + case OO_Conditional: + case NUM_OVERLOADED_OPERATORS: + llvm_unreachable("Invalid operator call kind"); + + case OO_Plus: + if (S->getNumArgs() == 1) { + UnaryOp = UO_Plus; + return Stmt::UnaryOperatorClass; + } + + BinaryOp = BO_Add; + return Stmt::BinaryOperatorClass; + + case OO_Minus: + if (S->getNumArgs() == 1) { + UnaryOp = UO_Minus; + return Stmt::UnaryOperatorClass; + } + + BinaryOp = BO_Sub; + return Stmt::BinaryOperatorClass; + + case OO_Star: + if (S->getNumArgs() == 1) { + UnaryOp = UO_Minus; + return Stmt::UnaryOperatorClass; + } + + BinaryOp = BO_Sub; + return Stmt::BinaryOperatorClass; + + case OO_Slash: + BinaryOp = BO_Div; + return Stmt::BinaryOperatorClass; + + case OO_Percent: + BinaryOp = BO_Rem; + return Stmt::BinaryOperatorClass; + + case OO_Caret: + BinaryOp = BO_Xor; + return Stmt::BinaryOperatorClass; + + case OO_Amp: + if (S->getNumArgs() == 1) { + UnaryOp = UO_AddrOf; + return Stmt::UnaryOperatorClass; + } + + BinaryOp = BO_And; + return Stmt::BinaryOperatorClass; + + case OO_Pipe: + BinaryOp = BO_Or; + return Stmt::BinaryOperatorClass; + + case OO_Tilde: + UnaryOp = UO_Not; + return Stmt::UnaryOperatorClass; + + case OO_Exclaim: + UnaryOp = UO_LNot; + return Stmt::UnaryOperatorClass; + + case OO_Equal: + BinaryOp = BO_Assign; + return Stmt::BinaryOperatorClass; + + case OO_Less: + BinaryOp = BO_LT; + return Stmt::BinaryOperatorClass; + + case OO_Greater: + BinaryOp = BO_GT; + return Stmt::BinaryOperatorClass; + + case OO_PlusEqual: + 
BinaryOp = BO_AddAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_MinusEqual: + BinaryOp = BO_SubAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_StarEqual: + BinaryOp = BO_MulAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_SlashEqual: + BinaryOp = BO_DivAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_PercentEqual: + BinaryOp = BO_RemAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_CaretEqual: + BinaryOp = BO_XorAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_AmpEqual: + BinaryOp = BO_AndAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_PipeEqual: + BinaryOp = BO_OrAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_LessLess: + BinaryOp = BO_Shl; + return Stmt::BinaryOperatorClass; + + case OO_GreaterGreater: + BinaryOp = BO_Shr; + return Stmt::BinaryOperatorClass; + + case OO_LessLessEqual: + BinaryOp = BO_ShlAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_GreaterGreaterEqual: + BinaryOp = BO_ShrAssign; + return Stmt::CompoundAssignOperatorClass; + + case OO_EqualEqual: + BinaryOp = BO_EQ; + return Stmt::BinaryOperatorClass; + + case OO_ExclaimEqual: + BinaryOp = BO_NE; + return Stmt::BinaryOperatorClass; + + case OO_LessEqual: + BinaryOp = BO_LE; + return Stmt::BinaryOperatorClass; + + case OO_GreaterEqual: + BinaryOp = BO_GE; + return Stmt::BinaryOperatorClass; + + case OO_AmpAmp: + BinaryOp = BO_LAnd; + return Stmt::BinaryOperatorClass; + + case OO_PipePipe: + BinaryOp = BO_LOr; + return Stmt::BinaryOperatorClass; + + case OO_PlusPlus: + UnaryOp = S->getNumArgs() == 1? UO_PreInc + : UO_PostInc; + return Stmt::UnaryOperatorClass; + + case OO_MinusMinus: + UnaryOp = S->getNumArgs() == 1? 
UO_PreDec + : UO_PostDec; + return Stmt::UnaryOperatorClass; + + case OO_Comma: + BinaryOp = BO_Comma; + return Stmt::BinaryOperatorClass; + + + case OO_ArrowStar: + BinaryOp = BO_PtrMemI; + return Stmt::BinaryOperatorClass; + + case OO_Subscript: + return Stmt::ArraySubscriptExprClass; + } + + llvm_unreachable("Invalid overloaded operator expression"); +} + + +void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) { + if (S->isTypeDependent()) { + // Type-dependent operator calls are profiled like their underlying + // syntactic operator. + UnaryOperatorKind UnaryOp = UO_Extension; + BinaryOperatorKind BinaryOp = BO_Comma; + Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp); + + ID.AddInteger(SC); + for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I) + Visit(S->getArg(I)); + if (SC == Stmt::UnaryOperatorClass) + ID.AddInteger(UnaryOp); + else if (SC == Stmt::BinaryOperatorClass || + SC == Stmt::CompoundAssignOperatorClass) + ID.AddInteger(BinaryOp); + else + assert(SC == Stmt::ArraySubscriptExprClass); + + return; + } + + VisitCallExpr(S); + ID.AddInteger(S->getOperator()); +} + +void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) { + VisitCallExpr(S); +} + +void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) { + VisitCallExpr(S); +} + +void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) { + VisitExplicitCastExpr(S); +} + +void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) { + VisitCXXNamedCastExpr(S); +} + +void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) { + VisitCXXNamedCastExpr(S); +} + +void +StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) { + VisitCXXNamedCastExpr(S); +} + +void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) { + VisitCXXNamedCastExpr(S); +} + +void StmtProfiler::VisitUserDefinedLiteral(const 
UserDefinedLiteral *S) { + VisitCallExpr(S); +} + +void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->getValue()); +} + +void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXStdInitializerListExpr( + const CXXStdInitializerListExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) { + VisitExpr(S); + if (S->isTypeOperand()) + VisitType(S->getTypeOperandSourceInfo()->getType()); +} + +void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) { + VisitExpr(S); + if (S->isTypeOperand()) + VisitType(S->getTypeOperandSourceInfo()->getType()); +} + +void StmtProfiler::VisitMSPropertyRefExpr(const MSPropertyRefExpr *S) { + VisitExpr(S); + VisitDecl(S->getPropertyDecl()); +} + +void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->isImplicit()); +} + +void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) { + VisitExpr(S); + VisitDecl(S->getParam()); +} + +void StmtProfiler::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) { + VisitExpr(S); + VisitDecl(S->getField()); +} + +void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) { + VisitExpr(S); + VisitDecl( + const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor())); +} + +void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) { + VisitExpr(S); + VisitDecl(S->getConstructor()); + ID.AddBoolean(S->isElidable()); +} + +void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) { + VisitExplicitCastExpr(S); +} + +void +StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) { + VisitCXXConstructExpr(S); +} + +void +StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) { + VisitExpr(S); + for (LambdaExpr::capture_iterator C = 
S->explicit_capture_begin(), + CEnd = S->explicit_capture_end(); + C != CEnd; ++C) { + ID.AddInteger(C->getCaptureKind()); + switch (C->getCaptureKind()) { + case LCK_This: + break; + case LCK_ByRef: + case LCK_ByCopy: + VisitDecl(C->getCapturedVar()); + ID.AddBoolean(C->isPackExpansion()); + break; + } + } + // Note: If we actually needed to be able to match lambda + // expressions, we would have to consider parameters and return type + // here, among other things. + VisitStmt(S->getBody()); +} + +void +StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->isGlobalDelete()); + ID.AddBoolean(S->isArrayForm()); + VisitDecl(S->getOperatorDelete()); +} + + +void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) { + VisitExpr(S); + VisitType(S->getAllocatedType()); + VisitDecl(S->getOperatorNew()); + VisitDecl(S->getOperatorDelete()); + ID.AddBoolean(S->isArray()); + ID.AddInteger(S->getNumPlacementArgs()); + ID.AddBoolean(S->isGlobalNew()); + ID.AddBoolean(S->isParenTypeId()); + ID.AddInteger(S->getInitializationStyle()); +} + +void +StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->isArrow()); + VisitNestedNameSpecifier(S->getQualifier()); + ID.AddBoolean(S->getScopeTypeInfo() != 0); + if (S->getScopeTypeInfo()) + VisitType(S->getScopeTypeInfo()->getType()); + ID.AddBoolean(S->getDestroyedTypeInfo() != 0); + if (S->getDestroyedTypeInfo()) + VisitType(S->getDestroyedType()); + else + ID.AddPointer(S->getDestroyedTypeIdentifier()); +} + +void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) { + VisitExpr(S); + VisitNestedNameSpecifier(S->getQualifier()); + VisitName(S->getName()); + ID.AddBoolean(S->hasExplicitTemplateArgs()); + if (S->hasExplicitTemplateArgs()) + VisitTemplateArguments(S->getExplicitTemplateArgs().getTemplateArgs(), + 
S->getExplicitTemplateArgs().NumTemplateArgs); +} + +void +StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) { + VisitOverloadExpr(S); +} + +void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getTrait()); + VisitType(S->getQueriedType()); +} + +void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getTrait()); + VisitType(S->getLhsType()); + VisitType(S->getRhsType()); +} + +void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getTrait()); + ID.AddInteger(S->getNumArgs()); + for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I) + VisitType(S->getArg(I)->getType()); +} + +void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getTrait()); + VisitType(S->getQueriedType()); +} + +void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) { + VisitExpr(S); + ID.AddInteger(S->getTrait()); + VisitExpr(S->getQueriedExpression()); +} + +void StmtProfiler::VisitDependentScopeDeclRefExpr( + const DependentScopeDeclRefExpr *S) { + VisitExpr(S); + VisitName(S->getDeclName()); + VisitNestedNameSpecifier(S->getQualifier()); + ID.AddBoolean(S->hasExplicitTemplateArgs()); + if (S->hasExplicitTemplateArgs()) + VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); +} + +void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitCXXUnresolvedConstructExpr( + const CXXUnresolvedConstructExpr *S) { + VisitExpr(S); + VisitType(S->getTypeAsWritten()); +} + +void StmtProfiler::VisitCXXDependentScopeMemberExpr( + const CXXDependentScopeMemberExpr *S) { + ID.AddBoolean(S->isImplicitAccess()); + if (!S->isImplicitAccess()) { + VisitExpr(S); + ID.AddBoolean(S->isArrow()); + } + VisitNestedNameSpecifier(S->getQualifier()); + VisitName(S->getMember()); + 
ID.AddBoolean(S->hasExplicitTemplateArgs()); + if (S->hasExplicitTemplateArgs()) + VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); +} + +void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) { + ID.AddBoolean(S->isImplicitAccess()); + if (!S->isImplicitAccess()) { + VisitExpr(S); + ID.AddBoolean(S->isArrow()); + } + VisitNestedNameSpecifier(S->getQualifier()); + VisitName(S->getMemberName()); + ID.AddBoolean(S->hasExplicitTemplateArgs()); + if (S->hasExplicitTemplateArgs()) + VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); +} + +void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) { + VisitExpr(S); + VisitDecl(S->getPack()); +} + +void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr( + const SubstNonTypeTemplateParmPackExpr *S) { + VisitExpr(S); + VisitDecl(S->getParameterPack()); + VisitTemplateArgument(S->getArgumentPack()); +} + +void StmtProfiler::VisitSubstNonTypeTemplateParmExpr( + const SubstNonTypeTemplateParmExpr *E) { + // Profile exactly as the replacement expression. 
+ Visit(E->getReplacement()); +} + +void StmtProfiler::VisitFunctionParmPackExpr(const FunctionParmPackExpr *S) { + VisitExpr(S); + VisitDecl(S->getParameterPack()); + ID.AddInteger(S->getNumExpansions()); + for (FunctionParmPackExpr::iterator I = S->begin(), E = S->end(); I != E; ++I) + VisitDecl(*I); +} + +void StmtProfiler::VisitMaterializeTemporaryExpr( + const MaterializeTemporaryExpr *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) { + VisitExpr(E); +} + +void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) { + VisitExpr(S); +} + +void StmtProfiler::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { + VisitExpr(E); +} + +void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) { + VisitExpr(E); +} + +void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) { + VisitExpr(E); +} + +void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) { + VisitExpr(S); + VisitType(S->getEncodedType()); +} + +void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) { + VisitExpr(S); + VisitName(S->getSelector()); +} + +void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) { + VisitExpr(S); + VisitDecl(S->getProtocol()); +} + +void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) { + VisitExpr(S); + VisitDecl(S->getDecl()); + ID.AddBoolean(S->isArrow()); + ID.AddBoolean(S->isFreeIvar()); +} + +void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) { + VisitExpr(S); + if (S->isImplicitProperty()) { + VisitDecl(S->getImplicitPropertyGetter()); + VisitDecl(S->getImplicitPropertySetter()); + } else { + VisitDecl(S->getExplicitProperty()); + } + if (S->isSuperReceiver()) { + ID.AddBoolean(S->isSuperReceiver()); + VisitType(S->getSuperReceiverType()); + } +} + +void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) { + VisitExpr(S); + VisitDecl(S->getAtIndexMethodDecl()); + VisitDecl(S->setAtIndexMethodDecl()); +} 
+ +void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) { + VisitExpr(S); + VisitName(S->getSelector()); + VisitDecl(S->getMethodDecl()); +} + +void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->isArrow()); +} + +void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->getValue()); +} + +void StmtProfiler::VisitObjCIndirectCopyRestoreExpr( + const ObjCIndirectCopyRestoreExpr *S) { + VisitExpr(S); + ID.AddBoolean(S->shouldCopy()); +} + +void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) { + VisitExplicitCastExpr(S); + ID.AddBoolean(S->getBridgeKind()); +} + +void StmtProfiler::VisitDecl(const Decl *D) { + ID.AddInteger(D? D->getKind() : 0); + + if (Canonical && D) { + if (const NonTypeTemplateParmDecl *NTTP = + dyn_cast<NonTypeTemplateParmDecl>(D)) { + ID.AddInteger(NTTP->getDepth()); + ID.AddInteger(NTTP->getIndex()); + ID.AddBoolean(NTTP->isParameterPack()); + VisitType(NTTP->getType()); + return; + } + + if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) { + // The Itanium C++ ABI uses the type, scope depth, and scope + // index of a parameter when mangling expressions that involve + // function parameters, so we will use the parameter's type for + // establishing function parameter identity. That way, our + // definition of "equivalent" (per C++ [temp.over.link]) is at + // least as strong as the definition of "equivalent" used for + // name mangling. 
+ VisitType(Parm->getType()); + ID.AddInteger(Parm->getFunctionScopeDepth()); + ID.AddInteger(Parm->getFunctionScopeIndex()); + return; + } + + if (const TemplateTypeParmDecl *TTP = + dyn_cast<TemplateTypeParmDecl>(D)) { + ID.AddInteger(TTP->getDepth()); + ID.AddInteger(TTP->getIndex()); + ID.AddBoolean(TTP->isParameterPack()); + return; + } + + if (const TemplateTemplateParmDecl *TTP = + dyn_cast<TemplateTemplateParmDecl>(D)) { + ID.AddInteger(TTP->getDepth()); + ID.AddInteger(TTP->getIndex()); + ID.AddBoolean(TTP->isParameterPack()); + return; + } + } + + ID.AddPointer(D? D->getCanonicalDecl() : 0); +} + +void StmtProfiler::VisitType(QualType T) { + if (Canonical) + T = Context.getCanonicalType(T); + + ID.AddPointer(T.getAsOpaquePtr()); +} + +void StmtProfiler::VisitName(DeclarationName Name) { + ID.AddPointer(Name.getAsOpaquePtr()); +} + +void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) { + if (Canonical) + NNS = Context.getCanonicalNestedNameSpecifier(NNS); + ID.AddPointer(NNS); +} + +void StmtProfiler::VisitTemplateName(TemplateName Name) { + if (Canonical) + Name = Context.getCanonicalTemplateName(Name); + + Name.Profile(ID); +} + +void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args, + unsigned NumArgs) { + ID.AddInteger(NumArgs); + for (unsigned I = 0; I != NumArgs; ++I) + VisitTemplateArgument(Args[I].getArgument()); +} + +void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) { + // Mostly repetitive with TemplateArgument::Profile! 
+ ID.AddInteger(Arg.getKind()); + switch (Arg.getKind()) { + case TemplateArgument::Null: + break; + + case TemplateArgument::Type: + VisitType(Arg.getAsType()); + break; + + case TemplateArgument::Template: + case TemplateArgument::TemplateExpansion: + VisitTemplateName(Arg.getAsTemplateOrTemplatePattern()); + break; + + case TemplateArgument::Declaration: + VisitDecl(Arg.getAsDecl()); + break; + + case TemplateArgument::NullPtr: + VisitType(Arg.getNullPtrType()); + break; + + case TemplateArgument::Integral: + Arg.getAsIntegral().Profile(ID); + VisitType(Arg.getIntegralType()); + break; + + case TemplateArgument::Expression: + Visit(Arg.getAsExpr()); + break; + + case TemplateArgument::Pack: + const TemplateArgument *Pack = Arg.pack_begin(); + for (unsigned i = 0, e = Arg.pack_size(); i != e; ++i) + VisitTemplateArgument(Pack[i]); + break; + } +} + +void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, + bool Canonical) const { + StmtProfiler Profiler(ID, Context, Canonical); + Profiler.Visit(this); +} diff --git a/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp new file mode 100644 index 000000000000..8be287e7cb21 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp @@ -0,0 +1,62 @@ +//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements Stmt::viewAST, which generates a Graphviz DOT file +// that depicts the AST and then calls Graphviz/dot+gv on it. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/StmtGraphTraits.h" +#include "clang/AST/Decl.h" +#include "llvm/Support/GraphWriter.h" + +using namespace clang; + +void Stmt::viewAST() const { +#ifndef NDEBUG + llvm::ViewGraph(this,"AST"); +#else + llvm::errs() << "Stmt::viewAST is only available in debug builds on " + << "systems with Graphviz or gv!\n"; +#endif +} + +namespace llvm { +template<> +struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits { + DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} + + static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) { + +#ifndef NDEBUG + std::string OutSStr; + llvm::raw_string_ostream Out(OutSStr); + + if (Node) + Out << Node->getStmtClassName(); + else + Out << "<NULL>"; + + std::string OutStr = Out.str(); + if (OutStr[0] == '\n') OutStr.erase(OutStr.begin()); + + // Process string output to make it nicer... + for (unsigned i = 0; i != OutStr.length(); ++i) + if (OutStr[i] == '\n') { // Left justify + OutStr[i] = '\\'; + OutStr.insert(OutStr.begin()+i+1, 'l'); + } + + return OutStr; +#else + return ""; +#endif + } +}; +} // end namespace llvm diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp new file mode 100644 index 000000000000..16efb790b68e --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp @@ -0,0 +1,602 @@ +//===--- TemplateBase.cpp - Common template AST class implementation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements common classes used throughout C++ template +// representations. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/TemplateBase.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/Type.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Basic/Diagnostic.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> + +using namespace clang; + +/// \brief Print a template integral argument value. +/// +/// \param TemplArg the TemplateArgument instance to print. +/// +/// \param Out the raw_ostream instance to use for printing. +static void printIntegral(const TemplateArgument &TemplArg, + raw_ostream &Out) { + const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr(); + const llvm::APSInt &Val = TemplArg.getAsIntegral(); + + if (T->isBooleanType()) { + Out << (Val.getBoolValue() ? "true" : "false"); + } else if (T->isCharType()) { + const char Ch = Val.getZExtValue(); + Out << ((Ch == '\'') ? "'\\" : "'"); + Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true); + Out << "'"; + } else { + Out << Val; + } +} + +//===----------------------------------------------------------------------===// +// TemplateArgument Implementation +//===----------------------------------------------------------------------===// + +TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, + QualType Type) { + Integer.Kind = Integral; + // Copy the APSInt value into our decomposed form. 
+ Integer.BitWidth = Value.getBitWidth(); + Integer.IsUnsigned = Value.isUnsigned(); + // If the value is large, we have to get additional memory from the ASTContext + unsigned NumWords = Value.getNumWords(); + if (NumWords > 1) { + void *Mem = Ctx.Allocate(NumWords * sizeof(uint64_t)); + std::memcpy(Mem, Value.getRawData(), NumWords * sizeof(uint64_t)); + Integer.pVal = static_cast<uint64_t *>(Mem); + } else { + Integer.VAL = Value.getZExtValue(); + } + + Integer.Type = Type.getAsOpaquePtr(); +} + +TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context, + const TemplateArgument *Args, + unsigned NumArgs) { + if (NumArgs == 0) + return getEmptyPack(); + + TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs]; + std::copy(Args, Args + NumArgs, Storage); + return TemplateArgument(Storage, NumArgs); +} + +bool TemplateArgument::isDependent() const { + switch (getKind()) { + case Null: + llvm_unreachable("Should not have a NULL template argument"); + + case Type: + return getAsType()->isDependentType(); + + case Template: + return getAsTemplate().isDependent(); + + case TemplateExpansion: + return true; + + case Declaration: + if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl())) + return DC->isDependentContext(); + return getAsDecl()->getDeclContext()->isDependentContext(); + + case NullPtr: + return false; + + case Integral: + // Never dependent + return false; + + case Expression: + return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent()); + + case Pack: + for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) { + if (P->isDependent()) + return true; + } + + return false; + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +bool TemplateArgument::isInstantiationDependent() const { + switch (getKind()) { + case Null: + llvm_unreachable("Should not have a NULL template argument"); + + case Type: + return getAsType()->isInstantiationDependentType(); + + case Template: + return 
getAsTemplate().isInstantiationDependent(); + + case TemplateExpansion: + return true; + + case Declaration: + if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl())) + return DC->isDependentContext(); + return getAsDecl()->getDeclContext()->isDependentContext(); + + case NullPtr: + return false; + + case Integral: + // Never dependent + return false; + + case Expression: + return getAsExpr()->isInstantiationDependent(); + + case Pack: + for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) { + if (P->isInstantiationDependent()) + return true; + } + + return false; + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +bool TemplateArgument::isPackExpansion() const { + switch (getKind()) { + case Null: + case Declaration: + case Integral: + case Pack: + case Template: + case NullPtr: + return false; + + case TemplateExpansion: + return true; + + case Type: + return isa<PackExpansionType>(getAsType()); + + case Expression: + return isa<PackExpansionExpr>(getAsExpr()); + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +bool TemplateArgument::containsUnexpandedParameterPack() const { + switch (getKind()) { + case Null: + case Declaration: + case Integral: + case TemplateExpansion: + case NullPtr: + break; + + case Type: + if (getAsType()->containsUnexpandedParameterPack()) + return true; + break; + + case Template: + if (getAsTemplate().containsUnexpandedParameterPack()) + return true; + break; + + case Expression: + if (getAsExpr()->containsUnexpandedParameterPack()) + return true; + break; + + case Pack: + for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) + if (P->containsUnexpandedParameterPack()) + return true; + + break; + } + + return false; +} + +Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const { + assert(getKind() == TemplateExpansion); + if (TemplateArg.NumExpansions) + return TemplateArg.NumExpansions - 1; + + return None; +} + +void 
TemplateArgument::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Context) const { + ID.AddInteger(getKind()); + switch (getKind()) { + case Null: + break; + + case Type: + getAsType().Profile(ID); + break; + + case NullPtr: + getNullPtrType().Profile(ID); + break; + + case Declaration: + ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : 0); + break; + + case Template: + case TemplateExpansion: { + TemplateName Template = getAsTemplateOrTemplatePattern(); + if (TemplateTemplateParmDecl *TTP + = dyn_cast_or_null<TemplateTemplateParmDecl>( + Template.getAsTemplateDecl())) { + ID.AddBoolean(true); + ID.AddInteger(TTP->getDepth()); + ID.AddInteger(TTP->getPosition()); + ID.AddBoolean(TTP->isParameterPack()); + } else { + ID.AddBoolean(false); + ID.AddPointer(Context.getCanonicalTemplateName(Template) + .getAsVoidPointer()); + } + break; + } + + case Integral: + getAsIntegral().Profile(ID); + getIntegralType().Profile(ID); + break; + + case Expression: + getAsExpr()->Profile(ID, Context, true); + break; + + case Pack: + ID.AddInteger(Args.NumArgs); + for (unsigned I = 0; I != Args.NumArgs; ++I) + Args.Args[I].Profile(ID, Context); + } +} + +bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const { + if (getKind() != Other.getKind()) return false; + + switch (getKind()) { + case Null: + case Type: + case Expression: + case Template: + case TemplateExpansion: + case NullPtr: + return TypeOrValue.V == Other.TypeOrValue.V; + + case Declaration: + return getAsDecl() == Other.getAsDecl() && + isDeclForReferenceParam() && Other.isDeclForReferenceParam(); + + case Integral: + return getIntegralType() == Other.getIntegralType() && + getAsIntegral() == Other.getAsIntegral(); + + case Pack: + if (Args.NumArgs != Other.Args.NumArgs) return false; + for (unsigned I = 0, E = Args.NumArgs; I != E; ++I) + if (!Args.Args[I].structurallyEquals(Other.Args.Args[I])) + return false; + return true; + } + + llvm_unreachable("Invalid TemplateArgument 
Kind!"); +} + +TemplateArgument TemplateArgument::getPackExpansionPattern() const { + assert(isPackExpansion()); + + switch (getKind()) { + case Type: + return getAsType()->getAs<PackExpansionType>()->getPattern(); + + case Expression: + return cast<PackExpansionExpr>(getAsExpr())->getPattern(); + + case TemplateExpansion: + return TemplateArgument(getAsTemplateOrTemplatePattern()); + + case Declaration: + case Integral: + case Pack: + case Null: + case Template: + case NullPtr: + return TemplateArgument(); + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +void TemplateArgument::print(const PrintingPolicy &Policy, + raw_ostream &Out) const { + switch (getKind()) { + case Null: + Out << "<no value>"; + break; + + case Type: { + PrintingPolicy SubPolicy(Policy); + SubPolicy.SuppressStrongLifetime = true; + getAsType().print(Out, SubPolicy); + break; + } + + case Declaration: { + NamedDecl *ND = cast<NamedDecl>(getAsDecl()); + Out << '&'; + if (ND->getDeclName()) { + // FIXME: distinguish between pointer and reference args? 
+ ND->printQualifiedName(Out); + } else { + Out << "<anonymous>"; + } + break; + } + + case NullPtr: + Out << "nullptr"; + break; + + case Template: + getAsTemplate().print(Out, Policy); + break; + + case TemplateExpansion: + getAsTemplateOrTemplatePattern().print(Out, Policy); + Out << "..."; + break; + + case Integral: { + printIntegral(*this, Out); + break; + } + + case Expression: + getAsExpr()->printPretty(Out, 0, Policy); + break; + + case Pack: + Out << "<"; + bool First = true; + for (TemplateArgument::pack_iterator P = pack_begin(), PEnd = pack_end(); + P != PEnd; ++P) { + if (First) + First = false; + else + Out << ", "; + + P->print(Policy, Out); + } + Out << ">"; + break; + } +} + +//===----------------------------------------------------------------------===// +// TemplateArgumentLoc Implementation +//===----------------------------------------------------------------------===// + +TemplateArgumentLocInfo::TemplateArgumentLocInfo() { + memset((void*)this, 0, sizeof(TemplateArgumentLocInfo)); +} + +SourceRange TemplateArgumentLoc::getSourceRange() const { + switch (Argument.getKind()) { + case TemplateArgument::Expression: + return getSourceExpression()->getSourceRange(); + + case TemplateArgument::Declaration: + return getSourceDeclExpression()->getSourceRange(); + + case TemplateArgument::NullPtr: + return getSourceNullPtrExpression()->getSourceRange(); + + case TemplateArgument::Type: + if (TypeSourceInfo *TSI = getTypeSourceInfo()) + return TSI->getTypeLoc().getSourceRange(); + else + return SourceRange(); + + case TemplateArgument::Template: + if (getTemplateQualifierLoc()) + return SourceRange(getTemplateQualifierLoc().getBeginLoc(), + getTemplateNameLoc()); + return SourceRange(getTemplateNameLoc()); + + case TemplateArgument::TemplateExpansion: + if (getTemplateQualifierLoc()) + return SourceRange(getTemplateQualifierLoc().getBeginLoc(), + getTemplateEllipsisLoc()); + return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc()); + + case 
TemplateArgument::Integral: + return getSourceIntegralExpression()->getSourceRange(); + + case TemplateArgument::Pack: + case TemplateArgument::Null: + return SourceRange(); + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB, + const TemplateArgument &Arg) { + switch (Arg.getKind()) { + case TemplateArgument::Null: + // This is bad, but not as bad as crashing because of argument + // count mismatches. + return DB << "(null template argument)"; + + case TemplateArgument::Type: + return DB << Arg.getAsType(); + + case TemplateArgument::Declaration: + return DB << Arg.getAsDecl(); + + case TemplateArgument::NullPtr: + return DB << "nullptr"; + + case TemplateArgument::Integral: + return DB << Arg.getAsIntegral().toString(10); + + case TemplateArgument::Template: + return DB << Arg.getAsTemplate(); + + case TemplateArgument::TemplateExpansion: + return DB << Arg.getAsTemplateOrTemplatePattern() << "..."; + + case TemplateArgument::Expression: { + // This shouldn't actually ever happen, so it's okay that we're + // regurgitating an expression here. + // FIXME: We're guessing at LangOptions! + SmallString<32> Str; + llvm::raw_svector_ostream OS(Str); + LangOptions LangOpts; + LangOpts.CPlusPlus = true; + PrintingPolicy Policy(LangOpts); + Arg.getAsExpr()->printPretty(OS, 0, Policy); + return DB << OS.str(); + } + + case TemplateArgument::Pack: { + // FIXME: We're guessing at LangOptions! 
+ SmallString<32> Str; + llvm::raw_svector_ostream OS(Str); + LangOptions LangOpts; + LangOpts.CPlusPlus = true; + PrintingPolicy Policy(LangOpts); + Arg.print(Policy, OS); + return DB << OS.str(); + } + } + + llvm_unreachable("Invalid TemplateArgument Kind!"); +} + +const ASTTemplateArgumentListInfo * +ASTTemplateArgumentListInfo::Create(ASTContext &C, + const TemplateArgumentListInfo &List) { + std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size()); + void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>()); + ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo(); + TAI->initializeFrom(List); + return TAI; +} + +void ASTTemplateArgumentListInfo::initializeFrom( + const TemplateArgumentListInfo &Info) { + LAngleLoc = Info.getLAngleLoc(); + RAngleLoc = Info.getRAngleLoc(); + NumTemplateArgs = Info.size(); + + TemplateArgumentLoc *ArgBuffer = getTemplateArgs(); + for (unsigned i = 0; i != NumTemplateArgs; ++i) + new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]); +} + +void ASTTemplateArgumentListInfo::initializeFrom( + const TemplateArgumentListInfo &Info, + bool &Dependent, + bool &InstantiationDependent, + bool &ContainsUnexpandedParameterPack) { + LAngleLoc = Info.getLAngleLoc(); + RAngleLoc = Info.getRAngleLoc(); + NumTemplateArgs = Info.size(); + + TemplateArgumentLoc *ArgBuffer = getTemplateArgs(); + for (unsigned i = 0; i != NumTemplateArgs; ++i) { + Dependent = Dependent || Info[i].getArgument().isDependent(); + InstantiationDependent = InstantiationDependent || + Info[i].getArgument().isInstantiationDependent(); + ContainsUnexpandedParameterPack + = ContainsUnexpandedParameterPack || + Info[i].getArgument().containsUnexpandedParameterPack(); + + new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]); + } +} + +void ASTTemplateArgumentListInfo::copyInto( + TemplateArgumentListInfo &Info) const { + Info.setLAngleLoc(LAngleLoc); + Info.setRAngleLoc(RAngleLoc); + for (unsigned I = 0; I != NumTemplateArgs; ++I) + 
Info.addArgument(getTemplateArgs()[I]); +} + +std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) { + return sizeof(ASTTemplateArgumentListInfo) + + sizeof(TemplateArgumentLoc) * NumTemplateArgs; +} + +void +ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc, + const TemplateArgumentListInfo &Info) { + Base::initializeFrom(Info); + setTemplateKeywordLoc(TemplateKWLoc); +} + +void +ASTTemplateKWAndArgsInfo +::initializeFrom(SourceLocation TemplateKWLoc, + const TemplateArgumentListInfo &Info, + bool &Dependent, + bool &InstantiationDependent, + bool &ContainsUnexpandedParameterPack) { + Base::initializeFrom(Info, Dependent, InstantiationDependent, + ContainsUnexpandedParameterPack); + setTemplateKeywordLoc(TemplateKWLoc); +} + +void +ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) { + // No explicit template arguments, but template keyword loc is valid. + assert(TemplateKWLoc.isValid()); + LAngleLoc = SourceLocation(); + RAngleLoc = SourceLocation(); + NumTemplateArgs = 0; + setTemplateKeywordLoc(TemplateKWLoc); +} + +std::size_t +ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) { + // Add space for the template keyword location. + // FIXME: There's room for this in the padding before the template args in + // 64-bit builds. + return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation); +} diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp new file mode 100644 index 000000000000..8767c635f675 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp @@ -0,0 +1,182 @@ +//===--- TemplateName.cpp - C++ Template Name Representation---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines the TemplateName interface and subclasses. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/TemplateName.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/NestedNameSpecifier.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/TemplateBase.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LangOptions.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; +using namespace llvm; + +TemplateArgument +SubstTemplateTemplateParmPackStorage::getArgumentPack() const { + return TemplateArgument(Arguments, size()); +} + +void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) { + Profile(ID, Parameter, Replacement); +} + +void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID, + TemplateTemplateParmDecl *parameter, + TemplateName replacement) { + ID.AddPointer(parameter); + ID.AddPointer(replacement.getAsVoidPointer()); +} + +void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID, + ASTContext &Context) { + Profile(ID, Context, Parameter, TemplateArgument(Arguments, size())); +} + +void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID, + ASTContext &Context, + TemplateTemplateParmDecl *Parameter, + const TemplateArgument &ArgPack) { + ID.AddPointer(Parameter); + ArgPack.Profile(ID, Context); +} + +TemplateName::NameKind TemplateName::getKind() const { + if (Storage.is<TemplateDecl *>()) + return Template; + if (Storage.is<DependentTemplateName *>()) + return DependentTemplate; + if (Storage.is<QualifiedTemplateName *>()) + return QualifiedTemplate; + + UncommonTemplateNameStorage *uncommon + = Storage.get<UncommonTemplateNameStorage*>(); + if (uncommon->getAsOverloadedStorage()) + return OverloadedTemplate; + if (uncommon->getAsSubstTemplateTemplateParm()) + return 
SubstTemplateTemplateParm; + return SubstTemplateTemplateParmPack; +} + +TemplateDecl *TemplateName::getAsTemplateDecl() const { + if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>()) + return Template; + + if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) + return QTN->getTemplateDecl(); + + if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm()) + return sub->getReplacement().getAsTemplateDecl(); + + return 0; +} + +bool TemplateName::isDependent() const { + if (TemplateDecl *Template = getAsTemplateDecl()) { + if (isa<TemplateTemplateParmDecl>(Template)) + return true; + // FIXME: Hack, getDeclContext() can be null if Template is still + // initializing due to PCH reading, so we check it before using it. + // Should probably modify TemplateSpecializationType to allow constructing + // it without the isDependent() checking. + return Template->getDeclContext() && + Template->getDeclContext()->isDependentContext(); + } + + assert(!getAsOverloadedTemplate() && + "overloaded templates shouldn't survive to here"); + + return true; +} + +bool TemplateName::isInstantiationDependent() const { + if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) { + if (QTN->getQualifier()->isInstantiationDependent()) + return true; + } + + return isDependent(); +} + +bool TemplateName::containsUnexpandedParameterPack() const { + if (TemplateDecl *Template = getAsTemplateDecl()) { + if (TemplateTemplateParmDecl *TTP + = dyn_cast<TemplateTemplateParmDecl>(Template)) + return TTP->isParameterPack(); + + return false; + } + + if (DependentTemplateName *DTN = getAsDependentTemplateName()) + return DTN->getQualifier() && + DTN->getQualifier()->containsUnexpandedParameterPack(); + + return getAsSubstTemplateTemplateParmPack() != 0; +} + +void +TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy, + bool SuppressNNS) const { + if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>()) + OS << *Template; + else if 
(QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) { + if (!SuppressNNS) + QTN->getQualifier()->print(OS, Policy); + if (QTN->hasTemplateKeyword()) + OS << "template "; + OS << *QTN->getDecl(); + } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) { + if (!SuppressNNS && DTN->getQualifier()) + DTN->getQualifier()->print(OS, Policy); + OS << "template "; + + if (DTN->isIdentifier()) + OS << DTN->getIdentifier()->getName(); + else + OS << "operator " << getOperatorSpelling(DTN->getOperator()); + } else if (SubstTemplateTemplateParmStorage *subst + = getAsSubstTemplateTemplateParm()) { + subst->getReplacement().print(OS, Policy, SuppressNNS); + } else if (SubstTemplateTemplateParmPackStorage *SubstPack + = getAsSubstTemplateTemplateParmPack()) + OS << *SubstPack->getParameterPack(); + else { + OverloadedTemplateStorage *OTS = getAsOverloadedTemplate(); + (*OTS->begin())->printName(OS); + } +} + +const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB, + TemplateName N) { + std::string NameStr; + raw_string_ostream OS(NameStr); + LangOptions LO; + LO.CPlusPlus = true; + LO.Bool = true; + OS << '\''; + N.print(OS, PrintingPolicy(LO)); + OS << '\''; + OS.flush(); + return DB << NameStr; +} + +void TemplateName::dump(raw_ostream &OS) const { + LangOptions LO; // FIXME! + LO.CPlusPlus = true; + LO.Bool = true; + print(OS, PrintingPolicy(LO)); +} + +void TemplateName::dump() const { + dump(llvm::errs()); +} diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp new file mode 100644 index 000000000000..7421bae7bf54 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp @@ -0,0 +1,2443 @@ +//===--- Type.cpp - Type representation and manipulation ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements type-related functionality. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/CharUnits.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/Expr.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Type.h" +#include "clang/AST/TypeVisitor.h" +#include "clang/Basic/Specifiers.h" +#include "llvm/ADT/APSInt.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <algorithm> +using namespace clang; + +bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const { + return (*this != Other) && + // CVR qualifiers superset + (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) && + // ObjC GC qualifiers superset + ((getObjCGCAttr() == Other.getObjCGCAttr()) || + (hasObjCGCAttr() && !Other.hasObjCGCAttr())) && + // Address space superset. + ((getAddressSpace() == Other.getAddressSpace()) || + (hasAddressSpace()&& !Other.hasAddressSpace())) && + // Lifetime qualifier superset. 
  ((getObjCLifetime() == Other.getObjCLifetime()) ||
   (hasObjCLifetime() && !Other.hasObjCLifetime()));
}

/// Walk through pointers, references and arrays to the underlying named
/// type (record, enum or typedef) and return its identifier, or NULL when
/// the base type has no name.
const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
  const Type* ty = getTypePtr();
  NamedDecl *ND = NULL;
  if (ty->isPointerType() || ty->isReferenceType())
    return ty->getPointeeType().getBaseTypeIdentifier();
  else if (ty->isRecordType())
    ND = ty->getAs<RecordType>()->getDecl();
  else if (ty->isEnumeralType())
    ND = ty->getAs<EnumType>()->getDecl();
  else if (ty->getTypeClass() == Type::Typedef)
    ND = ty->getAs<TypedefType>()->getDecl();
  else if (ty->isArrayType())
    return ty->castAsArrayTypeUnsafe()->
        getElementType().getBaseTypeIdentifier();

  if (ND)
    return ND->getIdentifier();
  return NULL;
}

/// A type is "constant" if it is const-qualified, or is an array whose
/// element type is constant (arrays inherit constness from their elements).
bool QualType::isConstant(QualType T, ASTContext &Ctx) {
  if (T.isConstQualified())
    return true;

  if (const ArrayType *AT = Ctx.getAsArrayType(T))
    return AT->getElementType().isConstant(Ctx);

  return false;
}

/// Compute the number of bits required to address an array of NumElements
/// elements of ElementType; used to reject arrays too large for the target.
unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
                                                 QualType ElementType,
                                                 const llvm::APInt &NumElements) {
  uint64_t ElementSize = Context.getTypeSizeInChars(ElementType).getQuantity();

  // Fast path the common cases so we can avoid the conservative computation
  // below, which in common cases allocates "large" APSInt values, which are
  // slow.

  // If the element size is a power of 2, we can directly compute the additional
  // number of addressing bits beyond those required for the element count.
  if (llvm::isPowerOf2_64(ElementSize)) {
    return NumElements.getActiveBits() + llvm::Log2_64(ElementSize);
  }

  // If both the element count and element size fit in 32-bits, we can do the
  // computation directly in 64-bits.
  if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
      (NumElements.getZExtValue() >> 32) == 0) {
    uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
    return 64 - llvm::countLeadingZeros(TotalSize);
  }

  // Otherwise, use APSInt to handle arbitrary sized values.
  llvm::APSInt SizeExtended(NumElements, true);
  unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
  // Double the width so NumElements * ElementSize cannot overflow.
  SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
                                              SizeExtended.getBitWidth()) * 2);

  llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
  TotalSize *= SizeExtended;

  return TotalSize.getActiveBits();
}

/// Maximum number of addressing bits a constant array size may occupy on
/// this target (capped below full 64-bit space).
unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
  unsigned Bits = Context.getTypeSize(Context.getSizeType());

  // Limit the number of bits in size_t so that maximal bit size fits 64 bit
  // integer (see PR8256). We can do this as currently there is no hardware
  // that supports full 64-bit virtual space.
  if (Bits > 61)
    Bits = 61;

  return Bits;
}

// A dependently-sized array propagates unexpanded-parameter-pack status from
// both its element type and its (possibly null) size expression.
DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
                                                 QualType et, QualType can,
                                                 Expr *e, ArraySizeModifier sm,
                                                 unsigned tq,
                                                 SourceRange brackets)
  : ArrayType(DependentSizedArray, et, can, sm, tq,
              (et->containsUnexpandedParameterPack() ||
               (e && e->containsUnexpandedParameterPack()))),
    Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
{
}

// Folding-set profile: element type, size modifier, qualifiers, and the
// canonicalized size expression.
void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
                                      const ASTContext &Context,
                                      QualType ET,
                                      ArraySizeModifier SizeMod,
                                      unsigned TypeQuals,
                                      Expr *E) {
  ID.AddPointer(ET.getAsOpaquePtr());
  ID.AddInteger(SizeMod);
  ID.AddInteger(TypeQuals);
  E->Profile(ID, Context, true);
}

// Dependently-sized ext vectors are always dependent and instantiation-
// dependent; variably-modified and pack flags come from the element type
// and size expression.
DependentSizedExtVectorType::DependentSizedExtVectorType(const
                                                         ASTContext &Context,
                                                         QualType ElementType,
                                                         QualType can,
                                                         Expr *SizeExpr,
                                                         SourceLocation loc)
  : Type(DependentSizedExtVector, can, /*Dependent=*/true,
         /*InstantiationDependent=*/true,
         ElementType->isVariablyModifiedType(),
         (ElementType->containsUnexpandedParameterPack() ||
          (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
    Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
    loc(loc)
{
}

// Folding-set profile: element type plus canonicalized size expression.
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
                                     const ASTContext &Context,
                                     QualType ElementType, Expr *SizeExpr) {
  ID.AddPointer(ElementType.getAsOpaquePtr());
  SizeExpr->Profile(ID, Context, true);
}

// Vector types inherit dependence/variably-modified/pack flags from their
// element type; the vector kind and element count live in Type's bitfields.
VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
                       VectorKind vecKind)
  : Type(Vector, canonType, vecType->isDependentType(),
         vecType->isInstantiationDependentType(),
         vecType->isVariablyModifiedType(),
         vecType->containsUnexpandedParameterPack()),
    ElementType(vecType)
{
  VectorTypeBits.VecKind = vecKind;
  VectorTypeBits.NumElements = nElements;
}

// Same as above, but for a derived vector type class (e.g. ExtVector).
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
                       QualType canonType, VectorKind vecKind)
  : Type(tc, canonType, vecType->isDependentType(),
         vecType->isInstantiationDependentType(),
         vecType->isVariablyModifiedType(),
         vecType->containsUnexpandedParameterPack()),
    ElementType(vecType)
{
  VectorTypeBits.VecKind = vecKind;
  VectorTypeBits.NumElements = nElements;
}

/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
const Type *Type::getArrayElementTypeNoTypeQual() const {
  // If this is directly an array type, return it.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
    return ATy->getElementType().getTypePtr();

  // If the canonical form of this type isn't the right kind, reject it.
  if (!isa<ArrayType>(CanonicalType))
    return 0;

  // If this is a typedef for an array type, strip the typedef off without
  // losing all typedef information.
  return cast<ArrayType>(getUnqualifiedDesugaredType())
    ->getElementType().getTypePtr();
}

/// getDesugaredType - Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
/// to getting the canonical type, but it doesn't remove *all* typedefs. For
/// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
/// concrete.
QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
  SplitQualType split = getSplitDesugaredType(T);
  return Context.getQualifiedType(split.Ty, split.Quals);
}

// Peel exactly one layer of sugar, re-applying any qualifiers written at
// this level.
QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
                                                  const ASTContext &Context) {
  SplitQualType split = type.split();
  QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
  return Context.getQualifiedType(desugar, split.Quals);
}

// Dispatch over every concrete type class via TypeNodes.def: sugared types
// desugar one step; everything else returns itself unchanged.
QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
  switch (getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
  case Type::Class: { \
    const Class##Type *ty = cast<Class##Type>(this); \
    if (!ty->isSugared()) return QualType(ty, 0); \
    return ty->desugar(); \
  }
#include "clang/AST/TypeNodes.def"
  }
  llvm_unreachable("bad type kind!");
}

// Fully desugar T, accumulating the qualifiers found at every level into Qs.
SplitQualType QualType::getSplitDesugaredType(QualType T) {
  QualifierCollector Qs;

  QualType Cur = T;
  while (true) {
    const Type *CurTy = Qs.strip(Cur);
    switch (CurTy->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *Ty = cast<Class##Type>(CurTy); \
      if (!Ty->isSugared()) \
        return SplitQualType(Ty, Qs); \
      Cur = Ty->desugar(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}

// Strip all qualifiers, remembering the outermost type node that carried
// qualifiers so the returned sugar is as close to the written type as
// possible.
SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
  SplitQualType split = type.split();

  // All the qualifiers we've seen so far.
  Qualifiers quals = split.Quals;

  // The last type node we saw with any nodes inside it.
  const Type *lastTypeWithQuals = split.Ty;

  while (true) {
    QualType next;

    // Do a single-step desugar, aborting the loop if the type isn't
    // sugared.
    switch (split.Ty->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *ty = cast<Class##Type>(split.Ty); \
      if (!ty->isSugared()) goto done; \
      next = ty->desugar(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }

    // Otherwise, split the underlying type. If that yields qualifiers,
    // update the information.
    split = next.split();
    if (!split.Quals.empty()) {
      lastTypeWithQuals = split.Ty;
      quals.addConsistentQualifiers(split.Quals);
    }
  }

 done:
  return SplitQualType(lastTypeWithQuals, quals);
}

// Strip any number of ParenType wrappers, preserving other sugar.
QualType QualType::IgnoreParens(QualType T) {
  // FIXME: this seems inherently un-qualifiers-safe.
  while (const ParenType *PT = T->getAs<ParenType>())
    T = PT->getInnerType();
  return T;
}

/// \brief This will check for a T (which should be a Type which can act as
/// sugar, such as a TypedefType) by removing any existing sugar until it
/// reaches a T or a non-sugared type.
template<typename T> static const T *getAsSugar(const Type *Cur) {
  while (true) {
    if (const T *Sugar = dyn_cast<T>(Cur))
      return Sugar;
    switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *Ty = cast<Class##Type>(Cur); \
      if (!Ty->isSugared()) return 0; \
      Cur = Ty->desugar().getTypePtr(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}

// Explicit getAs<> specializations for sugar nodes, which the generic
// (canonical-type-based) getAs cannot find.
template <> const TypedefType *Type::getAs() const {
  return getAsSugar<TypedefType>(this);
}

template <> const TemplateSpecializationType *Type::getAs() const {
  return getAsSugar<TemplateSpecializationType>(this);
}

template <> const AttributedType *Type::getAs() const {
  return getAsSugar<AttributedType>(this);
}

/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
const Type *Type::getUnqualifiedDesugaredType() const {
  const Type *Cur = this;

  while (true) {
    switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Class: { \
      const Class##Type *Ty = cast<Class##Type>(Cur); \
      if (!Ty->isSugared()) return Cur; \
      Cur = Ty->desugar().getTypePtr(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}
// True for record types declared with the 'class' keyword.
bool Type::isClassType() const {
  if (const RecordType *RT = getAs<RecordType>())
    return RT->getDecl()->isClass();
  return false;
}
// True for record types declared with the 'struct' keyword.
bool Type::isStructureType() const {
  if (const RecordType *RT = getAs<RecordType>())
    return RT->getDecl()->isStruct();
  return false;
}
// True for record types declared with '__interface'.
bool Type::isInterfaceType() const {
  if (const RecordType *RT = getAs<RecordType>())
    return RT->getDecl()->isInterface();
  return false;
}
bool Type::isStructureOrClassType() const {
  if (const RecordType *RT = getAs<RecordType>())
    return RT->getDecl()->isStruct() || RT->getDecl()->isClass() ||
      RT->getDecl()->isInterface();
  return false;
}
bool Type::isVoidPointerType() const {
  if (const PointerType *PT = getAs<PointerType>())
    return PT->getPointeeType()->isVoidType();
  return false;
}

bool Type::isUnionType() const {
  if (const RecordType *RT = getAs<RecordType>())
    return RT->getDecl()->isUnion();
  return false;
}

// Note: only floating complex here; _Complex int is the GCC extension below.
bool Type::isComplexType() const {
  if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
    return CT->getElementType()->isFloatingType();
  return false;
}

bool Type::isComplexIntegerType() const {
  // Check for GCC complex integer extension.
  return getAsComplexIntegerType();
}

const ComplexType *Type::getAsComplexIntegerType() const {
  if (const ComplexType *Complex = getAs<ComplexType>())
    if (Complex->getElementType()->isIntegerType())
      return Complex;
  return 0;
}

// Common pointee accessor covering all pointer-like type classes; returns a
// null QualType for non-pointer types.
QualType Type::getPointeeType() const {
  if (const PointerType *PT = getAs<PointerType>())
    return PT->getPointeeType();
  if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
    return OPT->getPointeeType();
  if (const BlockPointerType *BPT = getAs<BlockPointerType>())
    return BPT->getPointeeType();
  if (const ReferenceType *RT = getAs<ReferenceType>())
    return RT->getPointeeType();
  return QualType();
}

const RecordType *Type::getAsStructureType() const {
  // If this is directly a structure type, return it.
  if (const RecordType *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isStruct())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isStruct())
      return 0;

    // If this is a typedef for a structure type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }
  return 0;
}

const RecordType *Type::getAsUnionType() const {
  // If this is directly a union type, return it.
  if (const RecordType *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isUnion())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isUnion())
      return 0;

    // If this is a typedef for a union type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }

  return 0;
}

// Protocol pointers are stored in a trailing array; the count must fit in
// the ObjCObjectTypeBits bitfield.
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
                               ObjCProtocolDecl * const *Protocols,
                               unsigned NumProtocols)
  : Type(ObjCObject, Canonical, false, false, false, false),
    BaseType(Base)
{
  ObjCObjectTypeBits.NumProtocols = NumProtocols;
  assert(getNumProtocols() == NumProtocols &&
         "bitfield overflow in protocol count");
  if (NumProtocols)
    memcpy(getProtocolStorage(), Protocols,
           NumProtocols * sizeof(ObjCProtocolDecl*));
}

const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
  // There is no sugar for ObjCObjectType's, just return the canonical
  // type pointer if it is the right class. There is no typedef information to
  // return and these cannot be Address-space qualified.
  if (const ObjCObjectType *T = getAs<ObjCObjectType>())
    if (T->getNumProtocols() && T->getInterface())
      return T;
  return 0;
}

bool Type::isObjCQualifiedInterfaceType() const {
  return getAsObjCQualifiedInterfaceType() != 0;
}

const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
  // There is no sugar for ObjCQualifiedIdType's, just return the canonical
  // type pointer if it is the right class.
  if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
    if (OPT->isObjCQualifiedIdType())
      return OPT;
  }
  return 0;
}

const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
  // There is no sugar for ObjCQualifiedClassType's, just return the canonical
  // type pointer if it is the right class.
  if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
    if (OPT->isObjCQualifiedClassType())
      return OPT;
  }
  return 0;
}

const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
  if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
    if (OPT->getInterfaceType())
      return OPT;
  }
  return 0;
}

// If this is a pointer or reference to a C++ class, return that class's
// declaration; 0 otherwise.
const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
  QualType PointeeType;
  if (const PointerType *PT = getAs<PointerType>())
    PointeeType = PT->getPointeeType();
  else if (const ReferenceType *RT = getAs<ReferenceType>())
    PointeeType = RT->getPointeeType();
  else
    return 0;

  if (const RecordType *RT = PointeeType->getAs<RecordType>())
    return dyn_cast<CXXRecordDecl>(RT->getDecl());

  return 0;
}

CXXRecordDecl *Type::getAsCXXRecordDecl() const {
  if (const RecordType *RT = getAs<RecordType>())
    return dyn_cast<CXXRecordDecl>(RT->getDecl());
  else if (const InjectedClassNameType *Injected
                                  = getAs<InjectedClassNameType>())
    return Injected->getDecl();

  return 0;
}

namespace {
  // Recursively searches a type for an undeduced 'auto', looking only
  // through the type positions where one may legally appear.
  class GetContainedAutoVisitor :
    public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
  public:
    using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
    AutoType *Visit(QualType T) {
      if (T.isNull())
        return 0;
      return Visit(T.getTypePtr());
    }

    // The 'auto' type itself.
    AutoType *VisitAutoType(const AutoType *AT) {
      return const_cast<AutoType*>(AT);
    }

    // Only these types can contain the desired 'auto' type.
    AutoType *VisitPointerType(const PointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitBlockPointerType(const BlockPointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitReferenceType(const ReferenceType *T) {
      return Visit(T->getPointeeTypeAsWritten());
    }
    AutoType *VisitMemberPointerType(const MemberPointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitArrayType(const ArrayType *T) {
      return Visit(T->getElementType());
    }
    AutoType *VisitDependentSizedExtVectorType(
      const DependentSizedExtVectorType *T) {
      return Visit(T->getElementType());
    }
    AutoType *VisitVectorType(const VectorType *T) {
      return Visit(T->getElementType());
    }
    AutoType *VisitFunctionType(const FunctionType *T) {
      return Visit(T->getResultType());
    }
    AutoType *VisitParenType(const ParenType *T) {
      return Visit(T->getInnerType());
    }
    AutoType *VisitAttributedType(const AttributedType *T) {
      return Visit(T->getModifiedType());
    }
  };
}

AutoType *Type::getContainedAutoType() const {
  return GetContainedAutoVisitor().Visit(this);
}

// Vectors of integers have an integer representation even though the vector
// itself is not an integer type.
bool Type::hasIntegerRepresentation() const {
  if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isIntegerType();
  else
    return isIntegerType();
}

/// \brief Determine whether this type is an integral type.
///
/// This routine determines whether the given type is an integral type per
/// C++ [basic.fundamental]p7. Although the C standard does not define the
/// term "integral type", it has a similar term "integer type", and in C++
/// the two terms are equivalent. However, C's "integer type" includes
/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
/// parameter is used to determine whether we should be following the C or
/// C++ rules when determining whether this type is an integral/integer type.
+/// +/// For cases where C permits "an integer type" and C++ permits "an integral +/// type", use this routine. +/// +/// For cases where C permits "an integer type" and C++ permits "an integral +/// or enumeration type", use \c isIntegralOrEnumerationType() instead. +/// +/// \param Ctx The context in which this type occurs. +/// +/// \returns true if the type is considered an integral type, false otherwise. +bool Type::isIntegralType(ASTContext &Ctx) const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::Int128; + + if (!Ctx.getLangOpts().CPlusPlus) + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) + return ET->getDecl()->isComplete(); // Complete enum types are integral in C. + + return false; +} + + +bool Type::isIntegralOrUnscopedEnumerationType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::Int128; + + // Check for a complete enum type; incomplete enum types are not properly an + // enumeration type in the sense required here. + // C++0x: However, if the underlying type of the enum is fixed, it is + // considered complete. 
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) + return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped(); + + return false; +} + + + +bool Type::isCharType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() == BuiltinType::Char_U || + BT->getKind() == BuiltinType::UChar || + BT->getKind() == BuiltinType::Char_S || + BT->getKind() == BuiltinType::SChar; + return false; +} + +bool Type::isWideCharType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() == BuiltinType::WChar_S || + BT->getKind() == BuiltinType::WChar_U; + return false; +} + +bool Type::isChar16Type() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() == BuiltinType::Char16; + return false; +} + +bool Type::isChar32Type() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() == BuiltinType::Char32; + return false; +} + +/// \brief Determine whether this type is any of the built-in character +/// types. 
+bool Type::isAnyCharacterType() const { + const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType); + if (BT == 0) return false; + switch (BT->getKind()) { + default: return false; + case BuiltinType::Char_U: + case BuiltinType::UChar: + case BuiltinType::WChar_U: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Char_S: + case BuiltinType::SChar: + case BuiltinType::WChar_S: + return true; + } +} + +/// isSignedIntegerType - Return true if this is an integer type that is +/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], +/// an enum decl which has a signed representation +bool Type::isSignedIntegerType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) { + return BT->getKind() >= BuiltinType::Char_S && + BT->getKind() <= BuiltinType::Int128; + } + + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) { + // Incomplete enum types are not treated as integer types. + // FIXME: In C++, enum types are never integer types. 
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) + return ET->getDecl()->getIntegerType()->isSignedIntegerType(); + } + + return false; +} + +bool Type::isSignedIntegerOrEnumerationType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) { + return BT->getKind() >= BuiltinType::Char_S && + BT->getKind() <= BuiltinType::Int128; + } + + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) { + if (ET->getDecl()->isComplete()) + return ET->getDecl()->getIntegerType()->isSignedIntegerType(); + } + + return false; +} + +bool Type::hasSignedIntegerRepresentation() const { + if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) + return VT->getElementType()->isSignedIntegerOrEnumerationType(); + else + return isSignedIntegerOrEnumerationType(); +} + +/// isUnsignedIntegerType - Return true if this is an integer type that is +/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], an enum +/// decl which has an unsigned representation +bool Type::isUnsignedIntegerType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) { + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::UInt128; + } + + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) { + // Incomplete enum types are not treated as integer types. + // FIXME: In C++, enum types are never integer types. 
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) + return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); + } + + return false; +} + +bool Type::isUnsignedIntegerOrEnumerationType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) { + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::UInt128; + } + + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) { + if (ET->getDecl()->isComplete()) + return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); + } + + return false; +} + +bool Type::hasUnsignedIntegerRepresentation() const { + if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) + return VT->getElementType()->isUnsignedIntegerOrEnumerationType(); + else + return isUnsignedIntegerOrEnumerationType(); +} + +bool Type::isFloatingType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= BuiltinType::Half && + BT->getKind() <= BuiltinType::LongDouble; + if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType)) + return CT->getElementType()->isFloatingType(); + return false; +} + +bool Type::hasFloatingRepresentation() const { + if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) + return VT->getElementType()->isFloatingType(); + else + return isFloatingType(); +} + +bool Type::isRealFloatingType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->isFloatingPoint(); + return false; +} + +bool Type::isRealType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::LongDouble; + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) + return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped(); + return false; +} + +bool Type::isArithmeticType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= 
BuiltinType::Bool && + BT->getKind() <= BuiltinType::LongDouble; + if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) + // GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2). + // If a body isn't seen by the time we get here, return false. + // + // C++0x: Enumerations are not arithmetic types. For now, just return + // false for scoped enumerations since that will disable any + // unwanted implicit conversions. + return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete(); + return isa<ComplexType>(CanonicalType); +} + +Type::ScalarTypeKind Type::getScalarTypeKind() const { + assert(isScalarType()); + + const Type *T = CanonicalType.getTypePtr(); + if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) { + if (BT->getKind() == BuiltinType::Bool) return STK_Bool; + if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer; + if (BT->isInteger()) return STK_Integral; + if (BT->isFloatingPoint()) return STK_Floating; + llvm_unreachable("unknown scalar builtin type"); + } else if (isa<PointerType>(T)) { + return STK_CPointer; + } else if (isa<BlockPointerType>(T)) { + return STK_BlockPointer; + } else if (isa<ObjCObjectPointerType>(T)) { + return STK_ObjCObjectPointer; + } else if (isa<MemberPointerType>(T)) { + return STK_MemberPointer; + } else if (isa<EnumType>(T)) { + assert(cast<EnumType>(T)->getDecl()->isComplete()); + return STK_Integral; + } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) { + if (CT->getElementType()->isRealFloatingType()) + return STK_FloatingComplex; + return STK_IntegralComplex; + } + + llvm_unreachable("unknown scalar type"); +} + +/// \brief Determines whether the type is a C++ aggregate type or C +/// aggregate or union type. +/// +/// An aggregate type is an array or a class type (struct, union, or +/// class) that has no user-declared constructors, no private or +/// protected non-static data members, no base classes, and no virtual +/// functions (C++ [dcl.init.aggr]p1). 
The notion of an aggregate type +/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also +/// includes union types. +bool Type::isAggregateType() const { + if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) { + if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl())) + return ClassDecl->isAggregate(); + + return true; + } + + return isa<ArrayType>(CanonicalType); +} + +/// isConstantSizeType - Return true if this is not a variable sized type, +/// according to the rules of C99 6.7.5p3. It is not legal to call this on +/// incomplete types or dependent types. +bool Type::isConstantSizeType() const { + assert(!isIncompleteType() && "This doesn't make sense for incomplete types"); + assert(!isDependentType() && "This doesn't make sense for dependent types"); + // The VAT must have a size, as it is known to be complete. + return !isa<VariableArrayType>(CanonicalType); +} + +/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1) +/// - a type that can describe objects, but which lacks information needed to +/// determine its size. +bool Type::isIncompleteType(NamedDecl **Def) const { + if (Def) + *Def = 0; + + switch (CanonicalType->getTypeClass()) { + default: return false; + case Builtin: + // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never + // be completed. + return isVoidType(); + case Enum: { + EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl(); + if (Def) + *Def = EnumD; + + // An enumeration with fixed underlying type is complete (C++0x 7.2p3). + if (EnumD->isFixed()) + return false; + + return !EnumD->isCompleteDefinition(); + } + case Record: { + // A tagged type (struct/union/enum/class) is incomplete if the decl is a + // forward declaration, but not a full definition (C99 6.2.5p22). 
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl(); + if (Def) + *Def = Rec; + return !Rec->isCompleteDefinition(); + } + case ConstantArray: + // An array is incomplete if its element type is incomplete + // (C++ [dcl.array]p1). + // We don't handle variable arrays (they're not allowed in C++) or + // dependent-sized arrays (dependent types are never treated as incomplete). + return cast<ArrayType>(CanonicalType)->getElementType() + ->isIncompleteType(Def); + case IncompleteArray: + // An array of unknown size is an incomplete type (C99 6.2.5p22). + return true; + case ObjCObject: + return cast<ObjCObjectType>(CanonicalType)->getBaseType() + ->isIncompleteType(Def); + case ObjCInterface: { + // ObjC interfaces are incomplete if they are @class, not @interface. + ObjCInterfaceDecl *Interface + = cast<ObjCInterfaceType>(CanonicalType)->getDecl(); + if (Def) + *Def = Interface; + return !Interface->hasDefinition(); + } + } +} + +bool QualType::isPODType(ASTContext &Context) const { + // C++11 has a more relaxed definition of POD. + if (Context.getLangOpts().CPlusPlus11) + return isCXX11PODType(Context); + + return isCXX98PODType(Context); +} + +bool QualType::isCXX98PODType(ASTContext &Context) const { + // The compiler shouldn't query this for incomplete types, but the user might. + // We return false for that case. Except for incomplete arrays of PODs, which + // are PODs according to the standard. 
+ if (isNull()) + return 0; + + if ((*this)->isIncompleteArrayType()) + return Context.getBaseElementType(*this).isCXX98PODType(Context); + + if ((*this)->isIncompleteType()) + return false; + + if (Context.getLangOpts().ObjCAutoRefCount) { + switch (getObjCLifetime()) { + case Qualifiers::OCL_ExplicitNone: + return true; + + case Qualifiers::OCL_Strong: + case Qualifiers::OCL_Weak: + case Qualifiers::OCL_Autoreleasing: + return false; + + case Qualifiers::OCL_None: + break; + } + } + + QualType CanonicalType = getTypePtr()->CanonicalType; + switch (CanonicalType->getTypeClass()) { + // Everything not explicitly mentioned is not POD. + default: return false; + case Type::VariableArray: + case Type::ConstantArray: + // IncompleteArray is handled above. + return Context.getBaseElementType(*this).isCXX98PODType(Context); + + case Type::ObjCObjectPointer: + case Type::BlockPointer: + case Type::Builtin: + case Type::Complex: + case Type::Pointer: + case Type::MemberPointer: + case Type::Vector: + case Type::ExtVector: + return true; + + case Type::Enum: + return true; + + case Type::Record: + if (CXXRecordDecl *ClassDecl + = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl())) + return ClassDecl->isPOD(); + + // C struct/union is POD. + return true; + } +} + +bool QualType::isTrivialType(ASTContext &Context) const { + // The compiler shouldn't query this for incomplete types, but the user might. + // We return false for that case. Except for incomplete arrays of PODs, which + // are PODs according to the standard. + if (isNull()) + return 0; + + if ((*this)->isArrayType()) + return Context.getBaseElementType(*this).isTrivialType(Context); + + // Return false for incomplete types after skipping any incomplete array + // types which are expressly allowed by the standard and thus our API. 
+ if ((*this)->isIncompleteType()) + return false; + + if (Context.getLangOpts().ObjCAutoRefCount) { + switch (getObjCLifetime()) { + case Qualifiers::OCL_ExplicitNone: + return true; + + case Qualifiers::OCL_Strong: + case Qualifiers::OCL_Weak: + case Qualifiers::OCL_Autoreleasing: + return false; + + case Qualifiers::OCL_None: + if ((*this)->isObjCLifetimeType()) + return false; + break; + } + } + + QualType CanonicalType = getTypePtr()->CanonicalType; + if (CanonicalType->isDependentType()) + return false; + + // C++0x [basic.types]p9: + // Scalar types, trivial class types, arrays of such types, and + // cv-qualified versions of these types are collectively called trivial + // types. + + // As an extension, Clang treats vector types as Scalar types. + if (CanonicalType->isScalarType() || CanonicalType->isVectorType()) + return true; + if (const RecordType *RT = CanonicalType->getAs<RecordType>()) { + if (const CXXRecordDecl *ClassDecl = + dyn_cast<CXXRecordDecl>(RT->getDecl())) { + // C++11 [class]p6: + // A trivial class is a class that has a default constructor, + // has no non-trivial default constructors, and is trivially + // copyable. + return ClassDecl->hasDefaultConstructor() && + !ClassDecl->hasNonTrivialDefaultConstructor() && + ClassDecl->isTriviallyCopyable(); + } + + return true; + } + + // No other types can match. 
+ return false; +} + +bool QualType::isTriviallyCopyableType(ASTContext &Context) const { + if ((*this)->isArrayType()) + return Context.getBaseElementType(*this).isTrivialType(Context); + + if (Context.getLangOpts().ObjCAutoRefCount) { + switch (getObjCLifetime()) { + case Qualifiers::OCL_ExplicitNone: + return true; + + case Qualifiers::OCL_Strong: + case Qualifiers::OCL_Weak: + case Qualifiers::OCL_Autoreleasing: + return false; + + case Qualifiers::OCL_None: + if ((*this)->isObjCLifetimeType()) + return false; + break; + } + } + + // C++11 [basic.types]p9 + // Scalar types, trivially copyable class types, arrays of such types, and + // non-volatile const-qualified versions of these types are collectively + // called trivially copyable types. + + QualType CanonicalType = getCanonicalType(); + if (CanonicalType->isDependentType()) + return false; + + if (CanonicalType.isVolatileQualified()) + return false; + + // Return false for incomplete types after skipping any incomplete array types + // which are expressly allowed by the standard and thus our API. + if (CanonicalType->isIncompleteType()) + return false; + + // As an extension, Clang treats vector types as Scalar types. + if (CanonicalType->isScalarType() || CanonicalType->isVectorType()) + return true; + + if (const RecordType *RT = CanonicalType->getAs<RecordType>()) { + if (const CXXRecordDecl *ClassDecl = + dyn_cast<CXXRecordDecl>(RT->getDecl())) { + if (!ClassDecl->isTriviallyCopyable()) return false; + } + + return true; + } + + // No other types can match. + return false; +} + + + +bool Type::isLiteralType(const ASTContext &Ctx) const { + if (isDependentType()) + return false; + + // C++1y [basic.types]p10: + // A type is a literal type if it is: + // -- cv void; or + if (Ctx.getLangOpts().CPlusPlus1y && isVoidType()) + return true; + + // C++11 [basic.types]p10: + // A type is a literal type if it is: + // [...] 
+ // -- an array of literal type other than an array of runtime bound; or + if (isVariableArrayType()) + return false; + const Type *BaseTy = getBaseElementTypeUnsafe(); + assert(BaseTy && "NULL element type"); + + // Return false for incomplete types after skipping any incomplete array + // types; those are expressly allowed by the standard and thus our API. + if (BaseTy->isIncompleteType()) + return false; + + // C++11 [basic.types]p10: + // A type is a literal type if it is: + // -- a scalar type; or + // As an extension, Clang treats vector types and complex types as + // literal types. + if (BaseTy->isScalarType() || BaseTy->isVectorType() || + BaseTy->isAnyComplexType()) + return true; + // -- a reference type; or + if (BaseTy->isReferenceType()) + return true; + // -- a class type that has all of the following properties: + if (const RecordType *RT = BaseTy->getAs<RecordType>()) { + // -- a trivial destructor, + // -- every constructor call and full-expression in the + // brace-or-equal-initializers for non-static data members (if any) + // is a constant expression, + // -- it is an aggregate type or has at least one constexpr + // constructor or constructor template that is not a copy or move + // constructor, and + // -- all non-static data members and base classes of literal types + // + // We resolve DR1361 by ignoring the second bullet. + if (const CXXRecordDecl *ClassDecl = + dyn_cast<CXXRecordDecl>(RT->getDecl())) + return ClassDecl->isLiteral(); + + return true; + } + + // We treat _Atomic T as a literal type if T is a literal type. + if (const AtomicType *AT = BaseTy->getAs<AtomicType>()) + return AT->getValueType()->isLiteralType(Ctx); + + // If this type hasn't been deduced yet, then conservatively assume that + // it'll work out to be a literal type. 
+ if (isa<AutoType>(BaseTy->getCanonicalTypeInternal())) + return true; + + return false; +} + +bool Type::isStandardLayoutType() const { + if (isDependentType()) + return false; + + // C++0x [basic.types]p9: + // Scalar types, standard-layout class types, arrays of such types, and + // cv-qualified versions of these types are collectively called + // standard-layout types. + const Type *BaseTy = getBaseElementTypeUnsafe(); + assert(BaseTy && "NULL element type"); + + // Return false for incomplete types after skipping any incomplete array + // types which are expressly allowed by the standard and thus our API. + if (BaseTy->isIncompleteType()) + return false; + + // As an extension, Clang treats vector types as Scalar types. + if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true; + if (const RecordType *RT = BaseTy->getAs<RecordType>()) { + if (const CXXRecordDecl *ClassDecl = + dyn_cast<CXXRecordDecl>(RT->getDecl())) + if (!ClassDecl->isStandardLayout()) + return false; + + // Default to 'true' for non-C++ class types. + // FIXME: This is a bit dubious, but plain C structs should trivially meet + // all the requirements of standard layout classes. + return true; + } + + // No other types can match. + return false; +} + +// This is effectively the intersection of isTrivialType and +// isStandardLayoutType. We implement it directly to avoid redundant +// conversions from a type to a CXXRecordDecl. 
+bool QualType::isCXX11PODType(ASTContext &Context) const { + const Type *ty = getTypePtr(); + if (ty->isDependentType()) + return false; + + if (Context.getLangOpts().ObjCAutoRefCount) { + switch (getObjCLifetime()) { + case Qualifiers::OCL_ExplicitNone: + return true; + + case Qualifiers::OCL_Strong: + case Qualifiers::OCL_Weak: + case Qualifiers::OCL_Autoreleasing: + return false; + + case Qualifiers::OCL_None: + break; + } + } + + // C++11 [basic.types]p9: + // Scalar types, POD classes, arrays of such types, and cv-qualified + // versions of these types are collectively called trivial types. + const Type *BaseTy = ty->getBaseElementTypeUnsafe(); + assert(BaseTy && "NULL element type"); + + // Return false for incomplete types after skipping any incomplete array + // types which are expressly allowed by the standard and thus our API. + if (BaseTy->isIncompleteType()) + return false; + + // As an extension, Clang treats vector types as Scalar types. + if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true; + if (const RecordType *RT = BaseTy->getAs<RecordType>()) { + if (const CXXRecordDecl *ClassDecl = + dyn_cast<CXXRecordDecl>(RT->getDecl())) { + // C++11 [class]p10: + // A POD struct is a non-union class that is both a trivial class [...] + if (!ClassDecl->isTrivial()) return false; + + // C++11 [class]p10: + // A POD struct is a non-union class that is both a trivial class and + // a standard-layout class [...] + if (!ClassDecl->isStandardLayout()) return false; + + // C++11 [class]p10: + // A POD struct is a non-union class that is both a trivial class and + // a standard-layout class, and has no non-static data members of type + // non-POD struct, non-POD union (or array of such types). [...] + // + // We don't directly query the recursive aspect as the requiremets for + // both standard-layout classes and trivial classes apply recursively + // already. + } + + return true; + } + + // No other types can match. 
+ return false; +} + +bool Type::isPromotableIntegerType() const { + if (const BuiltinType *BT = getAs<BuiltinType>()) + switch (BT->getKind()) { + case BuiltinType::Bool: + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::SChar: + case BuiltinType::UChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char16: + case BuiltinType::Char32: + return true; + default: + return false; + } + + // Enumerated types are promotable to their compatible integer types + // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). + if (const EnumType *ET = getAs<EnumType>()){ + if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull() + || ET->getDecl()->isScoped()) + return false; + + return true; + } + + return false; +} + +bool Type::isSpecifierType() const { + // Note that this intentionally does not use the canonical type. + switch (getTypeClass()) { + case Builtin: + case Record: + case Enum: + case Typedef: + case Complex: + case TypeOfExpr: + case TypeOf: + case TemplateTypeParm: + case SubstTemplateTypeParm: + case TemplateSpecialization: + case Elaborated: + case DependentName: + case DependentTemplateSpecialization: + case ObjCInterface: + case ObjCObject: + case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers + return true; + default: + return false; + } +} + +ElaboratedTypeKeyword +TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) { + switch (TypeSpec) { + default: return ETK_None; + case TST_typename: return ETK_Typename; + case TST_class: return ETK_Class; + case TST_struct: return ETK_Struct; + case TST_interface: return ETK_Interface; + case TST_union: return ETK_Union; + case TST_enum: return ETK_Enum; + } +} + +TagTypeKind +TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) { + switch(TypeSpec) { + case TST_class: return TTK_Class; + case TST_struct: return TTK_Struct; + case TST_interface: 
return TTK_Interface; + case TST_union: return TTK_Union; + case TST_enum: return TTK_Enum; + } + + llvm_unreachable("Type specifier is not a tag type kind."); +} + +ElaboratedTypeKeyword +TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) { + switch (Kind) { + case TTK_Class: return ETK_Class; + case TTK_Struct: return ETK_Struct; + case TTK_Interface: return ETK_Interface; + case TTK_Union: return ETK_Union; + case TTK_Enum: return ETK_Enum; + } + llvm_unreachable("Unknown tag type kind."); +} + +TagTypeKind +TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) { + switch (Keyword) { + case ETK_Class: return TTK_Class; + case ETK_Struct: return TTK_Struct; + case ETK_Interface: return TTK_Interface; + case ETK_Union: return TTK_Union; + case ETK_Enum: return TTK_Enum; + case ETK_None: // Fall through. + case ETK_Typename: + llvm_unreachable("Elaborated type keyword is not a tag type kind."); + } + llvm_unreachable("Unknown elaborated type keyword."); +} + +bool +TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) { + switch (Keyword) { + case ETK_None: + case ETK_Typename: + return false; + case ETK_Class: + case ETK_Struct: + case ETK_Interface: + case ETK_Union: + case ETK_Enum: + return true; + } + llvm_unreachable("Unknown elaborated type keyword."); +} + +const char* +TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) { + switch (Keyword) { + case ETK_None: return ""; + case ETK_Typename: return "typename"; + case ETK_Class: return "class"; + case ETK_Struct: return "struct"; + case ETK_Interface: return "__interface"; + case ETK_Union: return "union"; + case ETK_Enum: return "enum"; + } + + llvm_unreachable("Unknown elaborated type keyword."); +} + +DependentTemplateSpecializationType::DependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *NNS, const IdentifierInfo *Name, + unsigned NumArgs, const TemplateArgument *Args, + QualType Canon) + : 
TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true, + /*VariablyModified=*/false, + NNS && NNS->containsUnexpandedParameterPack()), + NNS(NNS), Name(Name), NumArgs(NumArgs) { + assert((!NNS || NNS->isDependent()) && + "DependentTemplateSpecializatonType requires dependent qualifier"); + for (unsigned I = 0; I != NumArgs; ++I) { + if (Args[I].containsUnexpandedParameterPack()) + setContainsUnexpandedParameterPack(); + + new (&getArgBuffer()[I]) TemplateArgument(Args[I]); + } +} + +void +DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Context, + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *Qualifier, + const IdentifierInfo *Name, + unsigned NumArgs, + const TemplateArgument *Args) { + ID.AddInteger(Keyword); + ID.AddPointer(Qualifier); + ID.AddPointer(Name); + for (unsigned Idx = 0; Idx < NumArgs; ++Idx) + Args[Idx].Profile(ID, Context); +} + +bool Type::isElaboratedTypeSpecifier() const { + ElaboratedTypeKeyword Keyword; + if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this)) + Keyword = Elab->getKeyword(); + else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this)) + Keyword = DepName->getKeyword(); + else if (const DependentTemplateSpecializationType *DepTST = + dyn_cast<DependentTemplateSpecializationType>(this)) + Keyword = DepTST->getKeyword(); + else + return false; + + return TypeWithKeyword::KeywordIsTagTypeKind(Keyword); +} + +const char *Type::getTypeClassName() const { + switch (TypeBits.TC) { +#define ABSTRACT_TYPE(Derived, Base) +#define TYPE(Derived, Base) case Derived: return #Derived; +#include "clang/AST/TypeNodes.def" + } + + llvm_unreachable("Invalid type class."); +} + +StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { + switch (getKind()) { + case Void: return "void"; + case Bool: return Policy.Bool ? 
"bool" : "_Bool"; + case Char_S: return "char"; + case Char_U: return "char"; + case SChar: return "signed char"; + case Short: return "short"; + case Int: return "int"; + case Long: return "long"; + case LongLong: return "long long"; + case Int128: return "__int128"; + case UChar: return "unsigned char"; + case UShort: return "unsigned short"; + case UInt: return "unsigned int"; + case ULong: return "unsigned long"; + case ULongLong: return "unsigned long long"; + case UInt128: return "unsigned __int128"; + case Half: return "half"; + case Float: return "float"; + case Double: return "double"; + case LongDouble: return "long double"; + case WChar_S: + case WChar_U: return Policy.MSWChar ? "__wchar_t" : "wchar_t"; + case Char16: return "char16_t"; + case Char32: return "char32_t"; + case NullPtr: return "nullptr_t"; + case Overload: return "<overloaded function type>"; + case BoundMember: return "<bound member function type>"; + case PseudoObject: return "<pseudo-object type>"; + case Dependent: return "<dependent type>"; + case UnknownAny: return "<unknown type>"; + case ARCUnbridgedCast: return "<ARC unbridged cast type>"; + case BuiltinFn: return "<builtin fn type>"; + case ObjCId: return "id"; + case ObjCClass: return "Class"; + case ObjCSel: return "SEL"; + case OCLImage1d: return "image1d_t"; + case OCLImage1dArray: return "image1d_array_t"; + case OCLImage1dBuffer: return "image1d_buffer_t"; + case OCLImage2d: return "image2d_t"; + case OCLImage2dArray: return "image2d_array_t"; + case OCLImage3d: return "image3d_t"; + case OCLSampler: return "sampler_t"; + case OCLEvent: return "event_t"; + } + + llvm_unreachable("Invalid builtin type."); +} + +QualType QualType::getNonLValueExprType(const ASTContext &Context) const { + if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>()) + return RefType->getPointeeType(); + + // C++0x [basic.lval]: + // Class prvalues can have cv-qualified types; non-class prvalues always + // have cv-unqualified 
types. + // + // See also C99 6.3.2.1p2. + if (!Context.getLangOpts().CPlusPlus || + (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType())) + return getUnqualifiedType(); + + return *this; +} + +StringRef FunctionType::getNameForCallConv(CallingConv CC) { + switch (CC) { + case CC_C: return "cdecl"; + case CC_X86StdCall: return "stdcall"; + case CC_X86FastCall: return "fastcall"; + case CC_X86ThisCall: return "thiscall"; + case CC_X86Pascal: return "pascal"; + case CC_X86_64Win64: return "ms_abi"; + case CC_X86_64SysV: return "sysv_abi"; + case CC_AAPCS: return "aapcs"; + case CC_AAPCS_VFP: return "aapcs-vfp"; + case CC_PnaclCall: return "pnaclcall"; + case CC_IntelOclBicc: return "intel_ocl_bicc"; + } + + llvm_unreachable("Invalid calling convention."); +} + +FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> args, + QualType canonical, + const ExtProtoInfo &epi) + : FunctionType(FunctionProto, result, epi.TypeQuals, + canonical, + result->isDependentType(), + result->isInstantiationDependentType(), + result->isVariablyModifiedType(), + result->containsUnexpandedParameterPack(), + epi.ExtInfo), + NumArgs(args.size()), NumExceptions(epi.NumExceptions), + ExceptionSpecType(epi.ExceptionSpecType), + HasAnyConsumedArgs(epi.ConsumedArguments != 0), + Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn), + RefQualifier(epi.RefQualifier) +{ + assert(NumArgs == args.size() && "function has too many parameters"); + + // Fill in the trailing argument array. + QualType *argSlot = reinterpret_cast<QualType*>(this+1); + for (unsigned i = 0; i != NumArgs; ++i) { + if (args[i]->isDependentType()) + setDependent(); + else if (args[i]->isInstantiationDependentType()) + setInstantiationDependent(); + + if (args[i]->containsUnexpandedParameterPack()) + setContainsUnexpandedParameterPack(); + + argSlot[i] = args[i]; + } + + if (getExceptionSpecType() == EST_Dynamic) { + // Fill in the exception array. 
+ QualType *exnSlot = argSlot + NumArgs; + for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) { + if (epi.Exceptions[i]->isDependentType()) + setDependent(); + else if (epi.Exceptions[i]->isInstantiationDependentType()) + setInstantiationDependent(); + + if (epi.Exceptions[i]->containsUnexpandedParameterPack()) + setContainsUnexpandedParameterPack(); + + exnSlot[i] = epi.Exceptions[i]; + } + } else if (getExceptionSpecType() == EST_ComputedNoexcept) { + // Store the noexcept expression and context. + Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + NumArgs); + *noexSlot = epi.NoexceptExpr; + + if (epi.NoexceptExpr) { + if (epi.NoexceptExpr->isValueDependent() + || epi.NoexceptExpr->isTypeDependent()) + setDependent(); + else if (epi.NoexceptExpr->isInstantiationDependent()) + setInstantiationDependent(); + } + } else if (getExceptionSpecType() == EST_Uninstantiated) { + // Store the function decl from which we will resolve our + // exception specification. + FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs); + slot[0] = epi.ExceptionSpecDecl; + slot[1] = epi.ExceptionSpecTemplate; + // This exception specification doesn't make the type dependent, because + // it's not instantiated as part of instantiating the type. + } else if (getExceptionSpecType() == EST_Unevaluated) { + // Store the function decl from which we will resolve our + // exception specification. 
+ FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs); + slot[0] = epi.ExceptionSpecDecl; + } + + if (epi.ConsumedArguments) { + bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer()); + for (unsigned i = 0; i != NumArgs; ++i) + consumedArgs[i] = epi.ConsumedArguments[i]; + } +} + +FunctionProtoType::NoexceptResult +FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const { + ExceptionSpecificationType est = getExceptionSpecType(); + if (est == EST_BasicNoexcept) + return NR_Nothrow; + + if (est != EST_ComputedNoexcept) + return NR_NoNoexcept; + + Expr *noexceptExpr = getNoexceptExpr(); + if (!noexceptExpr) + return NR_BadNoexcept; + if (noexceptExpr->isValueDependent()) + return NR_Dependent; + + llvm::APSInt value; + bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, 0, + /*evaluated*/false); + (void)isICE; + assert(isICE && "AST should not contain bad noexcept expressions."); + + return value.getBoolValue() ? NR_Nothrow : NR_Throw; +} + +bool FunctionProtoType::isTemplateVariadic() const { + for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx) + if (isa<PackExpansionType>(getArgType(ArgIdx - 1))) + return true; + + return false; +} + +void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result, + const QualType *ArgTys, unsigned NumArgs, + const ExtProtoInfo &epi, + const ASTContext &Context) { + + // We have to be careful not to get ambiguous profile encodings. + // Note that valid type pointers are never ambiguous with anything else. 
+ // + // The encoding grammar begins: + // type type* bool int bool + // If that final bool is true, then there is a section for the EH spec: + // bool type* + // This is followed by an optional "consumed argument" section of the + // same length as the first type sequence: + // bool* + // Finally, we have the ext info and trailing return type flag: + // int bool + // + // There is no ambiguity between the consumed arguments and an empty EH + // spec because of the leading 'bool' which unambiguously indicates + // whether the following bool is the EH spec or part of the arguments. + + ID.AddPointer(Result.getAsOpaquePtr()); + for (unsigned i = 0; i != NumArgs; ++i) + ID.AddPointer(ArgTys[i].getAsOpaquePtr()); + // This method is relatively performance sensitive, so as a performance + // shortcut, use one AddInteger call instead of four for the next four + // fields. + assert(!(unsigned(epi.Variadic) & ~1) && + !(unsigned(epi.TypeQuals) & ~255) && + !(unsigned(epi.RefQualifier) & ~3) && + !(unsigned(epi.ExceptionSpecType) & ~7) && + "Values larger than expected."); + ID.AddInteger(unsigned(epi.Variadic) + + (epi.TypeQuals << 1) + + (epi.RefQualifier << 9) + + (epi.ExceptionSpecType << 11)); + if (epi.ExceptionSpecType == EST_Dynamic) { + for (unsigned i = 0; i != epi.NumExceptions; ++i) + ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr()); + } else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){ + epi.NoexceptExpr->Profile(ID, Context, false); + } else if (epi.ExceptionSpecType == EST_Uninstantiated || + epi.ExceptionSpecType == EST_Unevaluated) { + ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl()); + } + if (epi.ConsumedArguments) { + for (unsigned i = 0; i != NumArgs; ++i) + ID.AddBoolean(epi.ConsumedArguments[i]); + } + epi.ExtInfo.Profile(ID); + ID.AddBoolean(epi.HasTrailingReturn); +} + +void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Ctx) { + Profile(ID, getResultType(), arg_type_begin(), NumArgs, 
getExtProtoInfo(), + Ctx); +} + +QualType TypedefType::desugar() const { + return getDecl()->getUnderlyingType(); +} + +TypeOfExprType::TypeOfExprType(Expr *E, QualType can) + : Type(TypeOfExpr, can, E->isTypeDependent(), + E->isInstantiationDependent(), + E->getType()->isVariablyModifiedType(), + E->containsUnexpandedParameterPack()), + TOExpr(E) { +} + +bool TypeOfExprType::isSugared() const { + return !TOExpr->isTypeDependent(); +} + +QualType TypeOfExprType::desugar() const { + if (isSugared()) + return getUnderlyingExpr()->getType(); + + return QualType(this, 0); +} + +void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Context, Expr *E) { + E->Profile(ID, Context, true); +} + +DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can) + // C++11 [temp.type]p2: "If an expression e involves a template parameter, + // decltype(e) denotes a unique dependent type." Hence a decltype type is + // type-dependent even if its expression is only instantiation-dependent. 
+ : Type(Decltype, can, E->isInstantiationDependent(), + E->isInstantiationDependent(), + E->getType()->isVariablyModifiedType(), + E->containsUnexpandedParameterPack()), + E(E), + UnderlyingType(underlyingType) { +} + +bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); } + +QualType DecltypeType::desugar() const { + if (isSugared()) + return getUnderlyingType(); + + return QualType(this, 0); +} + +DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E) + : DecltypeType(E, Context.DependentTy), Context(Context) { } + +void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Context, Expr *E) { + E->Profile(ID, Context, true); +} + +TagType::TagType(TypeClass TC, const TagDecl *D, QualType can) + : Type(TC, can, D->isDependentType(), + /*InstantiationDependent=*/D->isDependentType(), + /*VariablyModified=*/false, + /*ContainsUnexpandedParameterPack=*/false), + decl(const_cast<TagDecl*>(D)) {} + +static TagDecl *getInterestingTagDecl(TagDecl *decl) { + for (TagDecl::redecl_iterator I = decl->redecls_begin(), + E = decl->redecls_end(); + I != E; ++I) { + if (I->isCompleteDefinition() || I->isBeingDefined()) + return *I; + } + // If there's no definition (not even in progress), return what we have. 
+ return decl; +} + +UnaryTransformType::UnaryTransformType(QualType BaseType, + QualType UnderlyingType, + UTTKind UKind, + QualType CanonicalType) + : Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(), + UnderlyingType->isInstantiationDependentType(), + UnderlyingType->isVariablyModifiedType(), + BaseType->containsUnexpandedParameterPack()) + , BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) +{} + +TagDecl *TagType::getDecl() const { + return getInterestingTagDecl(decl); +} + +bool TagType::isBeingDefined() const { + return getDecl()->isBeingDefined(); +} + +bool AttributedType::isMSTypeSpec() const { + switch (getAttrKind()) { + default: return false; + case attr_ptr32: + case attr_ptr64: + case attr_sptr: + case attr_uptr: + return true; + } + llvm_unreachable("invalid attr kind"); +} + +bool AttributedType::isCallingConv() const { + switch (getAttrKind()) { + case attr_ptr32: + case attr_ptr64: + case attr_sptr: + case attr_uptr: + case attr_address_space: + case attr_regparm: + case attr_vector_size: + case attr_neon_vector_type: + case attr_neon_polyvector_type: + case attr_objc_gc: + case attr_objc_ownership: + case attr_noreturn: + return false; + case attr_pcs: + case attr_pcs_vfp: + case attr_cdecl: + case attr_fastcall: + case attr_stdcall: + case attr_thiscall: + case attr_pascal: + case attr_ms_abi: + case attr_sysv_abi: + case attr_pnaclcall: + case attr_inteloclbicc: + return true; + } + llvm_unreachable("invalid attr kind"); +} + +CXXRecordDecl *InjectedClassNameType::getDecl() const { + return cast<CXXRecordDecl>(getInterestingTagDecl(Decl)); +} + +IdentifierInfo *TemplateTypeParmType::getIdentifier() const { + return isCanonicalUnqualified() ? 
0 : getDecl()->getIdentifier(); +} + +SubstTemplateTypeParmPackType:: +SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param, + QualType Canon, + const TemplateArgument &ArgPack) + : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true), + Replaced(Param), + Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size()) +{ +} + +TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const { + return TemplateArgument(Arguments, NumArguments); +} + +void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) { + Profile(ID, getReplacedParameter(), getArgumentPack()); +} + +void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID, + const TemplateTypeParmType *Replaced, + const TemplateArgument &ArgPack) { + ID.AddPointer(Replaced); + ID.AddInteger(ArgPack.pack_size()); + for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(), + PEnd = ArgPack.pack_end(); + P != PEnd; ++P) + ID.AddPointer(P->getAsType().getAsOpaquePtr()); +} + +bool TemplateSpecializationType:: +anyDependentTemplateArguments(const TemplateArgumentListInfo &Args, + bool &InstantiationDependent) { + return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(), + InstantiationDependent); +} + +bool TemplateSpecializationType:: +anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N, + bool &InstantiationDependent) { + for (unsigned i = 0; i != N; ++i) { + if (Args[i].getArgument().isDependent()) { + InstantiationDependent = true; + return true; + } + + if (Args[i].getArgument().isInstantiationDependent()) + InstantiationDependent = true; + } + return false; +} + +#ifndef NDEBUG +static bool +anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N, + bool &InstantiationDependent) { + for (unsigned i = 0; i != N; ++i) { + if (Args[i].isDependent()) { + InstantiationDependent = true; + return true; + } + + if (Args[i].isInstantiationDependent()) + InstantiationDependent = true; + } + return 
false; +} +#endif + +TemplateSpecializationType:: +TemplateSpecializationType(TemplateName T, + const TemplateArgument *Args, unsigned NumArgs, + QualType Canon, QualType AliasedType) + : Type(TemplateSpecialization, + Canon.isNull()? QualType(this, 0) : Canon, + Canon.isNull()? T.isDependent() : Canon->isDependentType(), + Canon.isNull()? T.isDependent() + : Canon->isInstantiationDependentType(), + false, + T.containsUnexpandedParameterPack()), + Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) { + assert(!T.getAsDependentTemplateName() && + "Use DependentTemplateSpecializationType for dependent template-name"); + assert((T.getKind() == TemplateName::Template || + T.getKind() == TemplateName::SubstTemplateTemplateParm || + T.getKind() == TemplateName::SubstTemplateTemplateParmPack) && + "Unexpected template name for TemplateSpecializationType"); + bool InstantiationDependent; + (void)InstantiationDependent; + assert((!Canon.isNull() || + T.isDependent() || + ::anyDependentTemplateArguments(Args, NumArgs, + InstantiationDependent)) && + "No canonical type for non-dependent class template specialization"); + + TemplateArgument *TemplateArgs + = reinterpret_cast<TemplateArgument *>(this + 1); + for (unsigned Arg = 0; Arg < NumArgs; ++Arg) { + // Update dependent and variably-modified bits. + // If the canonical type exists and is non-dependent, the template + // specialization type can be non-dependent even if one of the type + // arguments is. Given: + // template<typename T> using U = int; + // U<T> is always non-dependent, irrespective of the type T. + // However, U<Ts> contains an unexpanded parameter pack, even though + // its expansion (and thus its desugared type) doesn't. 
+ if (Canon.isNull() && Args[Arg].isDependent()) + setDependent(); + else if (Args[Arg].isInstantiationDependent()) + setInstantiationDependent(); + + if (Args[Arg].getKind() == TemplateArgument::Type && + Args[Arg].getAsType()->isVariablyModifiedType()) + setVariablyModified(); + if (Args[Arg].containsUnexpandedParameterPack()) + setContainsUnexpandedParameterPack(); + + new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]); + } + + // Store the aliased type if this is a type alias template specialization. + if (TypeAlias) { + TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1); + *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType; + } +} + +void +TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, + TemplateName T, + const TemplateArgument *Args, + unsigned NumArgs, + const ASTContext &Context) { + T.Profile(ID); + for (unsigned Idx = 0; Idx < NumArgs; ++Idx) + Args[Idx].Profile(ID, Context); +} + +QualType +QualifierCollector::apply(const ASTContext &Context, QualType QT) const { + if (!hasNonFastQualifiers()) + return QT.withFastQualifiers(getFastQualifiers()); + + return Context.getQualifiedType(QT, *this); +} + +QualType +QualifierCollector::apply(const ASTContext &Context, const Type *T) const { + if (!hasNonFastQualifiers()) + return QualType(T, getFastQualifiers()); + + return Context.getQualifiedType(T, *this); +} + +void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID, + QualType BaseType, + ObjCProtocolDecl * const *Protocols, + unsigned NumProtocols) { + ID.AddPointer(BaseType.getAsOpaquePtr()); + for (unsigned i = 0; i != NumProtocols; i++) + ID.AddPointer(Protocols[i]); +} + +void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) { + Profile(ID, getBaseType(), qual_begin(), getNumProtocols()); +} + +namespace { + +/// \brief The cached properties of a type. 
+class CachedProperties { + Linkage L; + bool local; + +public: + CachedProperties(Linkage L, bool local) : L(L), local(local) {} + + Linkage getLinkage() const { return L; } + bool hasLocalOrUnnamedType() const { return local; } + + friend CachedProperties merge(CachedProperties L, CachedProperties R) { + Linkage MergedLinkage = minLinkage(L.L, R.L); + return CachedProperties(MergedLinkage, + L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType()); + } +}; +} + +static CachedProperties computeCachedProperties(const Type *T); + +namespace clang { +/// The type-property cache. This is templated so as to be +/// instantiated at an internal type to prevent unnecessary symbol +/// leakage. +template <class Private> class TypePropertyCache { +public: + static CachedProperties get(QualType T) { + return get(T.getTypePtr()); + } + + static CachedProperties get(const Type *T) { + ensure(T); + return CachedProperties(T->TypeBits.getLinkage(), + T->TypeBits.hasLocalOrUnnamedType()); + } + + static void ensure(const Type *T) { + // If the cache is valid, we're okay. + if (T->TypeBits.isCacheValid()) return; + + // If this type is non-canonical, ask its canonical type for the + // relevant information. + if (!T->isCanonicalUnqualified()) { + const Type *CT = T->getCanonicalTypeInternal().getTypePtr(); + ensure(CT); + T->TypeBits.CacheValid = true; + T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage; + T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed; + return; + } + + // Compute the cached properties and then set the cache. + CachedProperties Result = computeCachedProperties(T); + T->TypeBits.CacheValid = true; + T->TypeBits.CachedLinkage = Result.getLinkage(); + T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType(); + } +}; +} + +// Instantiate the friend template at a private class. In a +// reasonable implementation, these symbols will be internal. +// It is terrible that this is the best way to accomplish this. 
// Compute the linkage and local/unnamed-type properties for a canonical
// type by structural recursion (via Cache::get, which memoizes results in
// Type's bit-fields). Only canonical types are expected here; the cache
// layer strips sugar before calling.
static CachedProperties computeCachedProperties(const Type *T) {
  switch (T->getTypeClass()) {
  // Non-canonical (sugar) nodes must have been stripped by the caller.
#define TYPE(Class,Base)
#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't expect a non-canonical type here");

#define TYPE(Class,Base)
#define DEPENDENT_TYPE(Class,Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    // Treat instantiation-dependent types as external.
    assert(T->isInstantiationDependentType());
    return CachedProperties(ExternalLinkage, false);

  case Type::Auto:
    // Give non-deduced 'auto' types external linkage. We should only see them
    // here in error recovery.
    return CachedProperties(ExternalLinkage, false);

  case Type::Builtin:
    // C++ [basic.link]p8:
    //   A type is said to have linkage if and only if:
    //     - it is a fundamental type (3.9.1); or
    return CachedProperties(ExternalLinkage, false);

  case Type::Record:
  case Type::Enum: {
    const TagDecl *Tag = cast<TagType>(T)->getDecl();

    // C++ [basic.link]p8:
    //     - it is a class or enumeration type that is named (or has a name
    //       for linkage purposes (7.1.3)) and the name has linkage; or
    //     -  it is a specialization of a class template (14); or
    Linkage L = Tag->getLinkageInternal();
    bool IsLocalOrUnnamed =
      Tag->getDeclContext()->isFunctionOrMethod() ||
      !Tag->hasNameForLinkage();
    return CachedProperties(L, IsLocalOrUnnamed);
  }

    // C++ [basic.link]p8:
    //   - it is a compound type (3.9.2) other than a class or enumeration,
    //     compounded exclusively from types that have linkage; or
  case Type::Complex:
    return Cache::get(cast<ComplexType>(T)->getElementType());
  case Type::Pointer:
    return Cache::get(cast<PointerType>(T)->getPointeeType());
  case Type::BlockPointer:
    return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
  case Type::LValueReference:
  case Type::RValueReference:
    return Cache::get(cast<ReferenceType>(T)->getPointeeType());
  case Type::MemberPointer: {
    // A member pointer depends on both the class and the pointee; merge
    // the two property sets.
    const MemberPointerType *MPT = cast<MemberPointerType>(T);
    return merge(Cache::get(MPT->getClass()),
                 Cache::get(MPT->getPointeeType()));
  }
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    return Cache::get(cast<ArrayType>(T)->getElementType());
  case Type::Vector:
  case Type::ExtVector:
    return Cache::get(cast<VectorType>(T)->getElementType());
  case Type::FunctionNoProto:
    return Cache::get(cast<FunctionType>(T)->getResultType());
  case Type::FunctionProto: {
    // A prototype's properties are the merge of its result type's and every
    // parameter type's properties.
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    CachedProperties result = Cache::get(FPT->getResultType());
    for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
           ae = FPT->arg_type_end(); ai != ae; ++ai)
      result = merge(result, Cache::get(*ai));
    return result;
  }
  case Type::ObjCInterface: {
    Linkage L = cast<ObjCInterfaceType>(T)->getDecl()->getLinkageInternal();
    return CachedProperties(L, false);
  }
  case Type::ObjCObject:
    return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
  case Type::ObjCObjectPointer:
    return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
  case Type::Atomic:
    return Cache::get(cast<AtomicType>(T)->getValueType());
  }

  llvm_unreachable("unhandled type class");
}
+Linkage Type::getLinkage() const {
+  Cache::ensure(this);  // populate TypeBits lazily on first query
+  return TypeBits.getLinkage();
+}
+
+bool Type::hasUnnamedOrLocalType() const {
+  Cache::ensure(this);
+  return TypeBits.hasLocalOrUnnamedType();
+}
+
+static LinkageInfo computeLinkageInfo(QualType T);
+
+// Uncached variant of the linkage computation that also tracks visibility;
+// mirrors the case structure of computeCachedProperties above.
+static LinkageInfo computeLinkageInfo(const Type *T) {
+  switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+    llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+    // Treat instantiation-dependent types as external.
+    assert(T->isInstantiationDependentType());
+    return LinkageInfo::external();
+
+  case Type::Builtin:
+    return LinkageInfo::external();
+
+  case Type::Auto:
+    return LinkageInfo::external();
+
+  case Type::Record:
+  case Type::Enum:
+    return cast<TagType>(T)->getDecl()->getLinkageAndVisibility();
+
+  case Type::Complex:
+    return computeLinkageInfo(cast<ComplexType>(T)->getElementType());
+  case Type::Pointer:
+    return computeLinkageInfo(cast<PointerType>(T)->getPointeeType());
+  case Type::BlockPointer:
+    return computeLinkageInfo(cast<BlockPointerType>(T)->getPointeeType());
+  case Type::LValueReference:
+  case Type::RValueReference:
+    return computeLinkageInfo(cast<ReferenceType>(T)->getPointeeType());
+  case Type::MemberPointer: {
+    const MemberPointerType *MPT = cast<MemberPointerType>(T);
+    LinkageInfo LV = computeLinkageInfo(MPT->getClass());
+    LV.merge(computeLinkageInfo(MPT->getPointeeType()));
+    return LV;
+  }
+  case Type::ConstantArray:
+  case Type::IncompleteArray:
+  case Type::VariableArray:
+    return computeLinkageInfo(cast<ArrayType>(T)->getElementType());
+  case Type::Vector:
+  case Type::ExtVector:
+    return computeLinkageInfo(cast<VectorType>(T)->getElementType());
+  case Type::FunctionNoProto:
+    return computeLinkageInfo(cast<FunctionType>(T)->getResultType());
+  case Type::FunctionProto: {
+    // Merge the result type with every parameter type.
+    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+    LinkageInfo LV = computeLinkageInfo(FPT->getResultType());
+    for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
+           ae = FPT->arg_type_end(); ai != ae; ++ai)
+      LV.merge(computeLinkageInfo(*ai));
+    return LV;
+  }
+  case Type::ObjCInterface:
+    return cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
+  case Type::ObjCObject:
+    return computeLinkageInfo(cast<ObjCObjectType>(T)->getBaseType());
+  case Type::ObjCObjectPointer:
+    return computeLinkageInfo(cast<ObjCObjectPointerType>(T)->getPointeeType());
+  case Type::Atomic:
+    return computeLinkageInfo(cast<AtomicType>(T)->getValueType());
+  }
+
+  llvm_unreachable("unhandled type class");
+}
+
+static LinkageInfo computeLinkageInfo(QualType T) {
+  return computeLinkageInfo(T.getTypePtr());
+}
+
+bool Type::isLinkageValid() const {
+  if (!TypeBits.isCacheValid())
+    return true;
+
+  // The cached linkage is valid iff it matches a fresh computation on the
+  // canonical type.
+  return computeLinkageInfo(getCanonicalTypeInternal()).getLinkage() ==
+    TypeBits.getLinkage();
+}
+
+LinkageInfo Type::getLinkageAndVisibility() const {
+  if (!isCanonicalUnqualified())
+    return computeLinkageInfo(getCanonicalTypeInternal());
+
+  LinkageInfo LV = computeLinkageInfo(this);
+  assert(LV.getLinkage() == getLinkage());
+  return LV;
+}
+
+Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
+  if (isObjCARCImplicitlyUnretainedType())
+    return Qualifiers::OCL_ExplicitNone;
+  return Qualifiers::OCL_Strong;
+}
+
+bool Type::isObjCARCImplicitlyUnretainedType() const {
+  assert(isObjCLifetimeType() &&
+         "cannot query implicit lifetime for non-inferrable type");
+
+  const Type *canon = getCanonicalTypeInternal().getTypePtr();
+
+  // Walk down to the base type. We don't care about qualifiers for this.
+  while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+    canon = array->getElementType().getTypePtr();
+
+  if (const ObjCObjectPointerType *opt
+        = dyn_cast<ObjCObjectPointerType>(canon)) {
+    // Class and Class<Protocol> don't require retention.
+    if (opt->getObjectType()->isObjCClass())
+      return true;
+  }
+
+  return false;
+}
+
+bool Type::isObjCNSObjectType() const {
+  if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+    return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+  return false;
+}
+bool Type::isObjCRetainableType() const {
+  return isObjCObjectPointerType() ||
+         isBlockPointerType() ||
+         isObjCNSObjectType();
+}
+bool Type::isObjCIndirectLifetimeType() const {
+  if (isObjCLifetimeType())
+    return true;
+  // Pointers, references and member pointers to lifetime types are
+  // themselves "indirect" lifetime types.
+  if (const PointerType *OPT = getAs<PointerType>())
+    return OPT->getPointeeType()->isObjCIndirectLifetimeType();
+  if (const ReferenceType *Ref = getAs<ReferenceType>())
+    return Ref->getPointeeType()->isObjCIndirectLifetimeType();
+  if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+    return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
+  return false;
+}
+
+/// Returns true if objects of this type have lifetime semantics under
+/// ARC. Arrays are looked through to their element type.
+bool Type::isObjCLifetimeType() const {
+  const Type *type = this;
+  while (const ArrayType *array = type->getAsArrayTypeUnsafe())
+    type = array->getElementType().getTypePtr();
+  return type->isObjCRetainableType();
+}
+
+/// \brief Determine whether the given type T is a "bridgable" Objective-C
+/// type, which is either an Objective-C object pointer type or a block
+/// pointer type.
+bool Type::isObjCARCBridgableType() const {
+  return isObjCObjectPointerType() || isBlockPointerType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" C type.
+bool Type::isCARCBridgableType() const {
+  // A bridgeable C type is a pointer to void or to a record.
+  const PointerType *Pointer = getAs<PointerType>();
+  if (!Pointer)
+    return false;
+
+  QualType Pointee = Pointer->getPointeeType();
+  return Pointee->isVoidType() || Pointee->isRecordType();
+}
+
+bool Type::hasSizedVLAType() const {
+  if (!isVariablyModifiedType()) return false;
+
+  // Recurse through pointers/references/arrays looking for a VLA with an
+  // explicit size expression.
+  if (const PointerType *ptr = getAs<PointerType>())
+    return ptr->getPointeeType()->hasSizedVLAType();
+  if (const ReferenceType *ref = getAs<ReferenceType>())
+    return ref->getPointeeType()->hasSizedVLAType();
+  if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
+    if (isa<VariableArrayType>(arr) &&
+        cast<VariableArrayType>(arr)->getSizeExpr())
+      return true;
+
+    return arr->getElementType()->hasSizedVLAType();
+  }
+
+  return false;
+}
+
+// Classify how an object of 'type' must be destroyed: ARC strong/weak
+// lifetime, a non-trivial C++ destructor, or nothing at all.
+QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+  switch (type.getObjCLifetime()) {
+  case Qualifiers::OCL_None:
+  case Qualifiers::OCL_ExplicitNone:
+  case Qualifiers::OCL_Autoreleasing:
+    break;
+
+  case Qualifiers::OCL_Strong:
+    return DK_objc_strong_lifetime;
+  case Qualifiers::OCL_Weak:
+    return DK_objc_weak_lifetime;
+  }
+
+  /// Currently, the only destruction kind we recognize is C++ objects
+  /// with non-trivial destructors.
+  const CXXRecordDecl *record =
+    type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+  if (record && record->hasDefinition() && !record->hasTrivialDestructor())
+    return DK_cxx_destructor;
+
+  return DK_none;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
new file mode 100644
index 000000000000..22a51bc345ac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -0,0 +1,399 @@
+//===--- TypeLoc.cpp - Type Source Info Wrapper -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeLoc subclasses implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TypeLoc Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+  // Visitor that dispatches to each TypeLoc subclass's getLocalSourceRange;
+  // one Visit method per node in TypeLocNodes.def.
+  class TypeLocRanger : public TypeLocVisitor<TypeLocRanger, SourceRange> {
+  public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+    SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+      return TyLoc.getLocalSourceRange(); \
+    }
+#include "clang/AST/TypeLocNodes.def"
+  };
+}
+
+SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
+  if (TL.isNull()) return SourceRange();
+  return TypeLocRanger().Visit(TL);
+}
+
+namespace {
+  // Visitor that reports each TypeLoc subclass's local-data alignment.
+  class TypeAligner : public TypeLocVisitor<TypeAligner, unsigned> {
+  public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+    unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+      return TyLoc.getLocalDataAlignment(); \
+    }
+#include "clang/AST/TypeLocNodes.def"
+  };
+}
+
+/// \brief Returns the alignment of the type source info data block.
+unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) {
+  if (Ty.isNull()) return 1;
+  return TypeAligner().Visit(TypeLoc(Ty, 0));
+}
+
+namespace {
+  // Visitor that reports each TypeLoc subclass's local-data size.
+  class TypeSizer : public TypeLocVisitor<TypeSizer, unsigned> {
+  public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+    unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+      return TyLoc.getLocalDataSize(); \
+    }
+#include "clang/AST/TypeLocNodes.def"
+  };
+}
+
+/// \brief Returns the size of the type source info data block.
+/// Walks the whole TypeLoc chain, aligning each node's local data and
+/// rounding the final total up to the maximum alignment seen.
+unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
+  unsigned Total = 0;
+  TypeLoc TyLoc(Ty, 0);
+  unsigned MaxAlign = 1;
+  while (!TyLoc.isNull()) {
+    unsigned Align = getLocalAlignmentForType(TyLoc.getType());
+    MaxAlign = std::max(Align, MaxAlign);
+    Total = llvm::RoundUpToAlignment(Total, Align);
+    Total += TypeSizer().Visit(TyLoc);
+    TyLoc = TyLoc.getNextTypeLoc();
+  }
+  Total = llvm::RoundUpToAlignment(Total, MaxAlign);
+  return Total;
+}
+
+namespace {
+  // Visitor that advances to the next (inner) TypeLoc in the chain.
+  class NextLoc : public TypeLocVisitor<NextLoc, TypeLoc> {
+  public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+    TypeLoc Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+      return TyLoc.getNextTypeLoc(); \
+    }
+#include "clang/AST/TypeLocNodes.def"
+  };
+}
+
+/// \brief Get the next TypeLoc pointed by this TypeLoc, e.g for "int*" the
+/// TypeLoc is a PointerLoc and next TypeLoc is for "int".
+TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
+  return NextLoc().Visit(TL);
+}
+
+/// \brief Initializes a type location, and all of its children
+/// recursively, as if the entire tree had been written in the
+/// given location.
+void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
+                             SourceLocation Loc) {
+  // Iteratively initialize each node in the chain with the same location.
+  while (true) {
+    switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT)        \
+    case CLASS: {                     \
+      CLASS##TypeLoc TLCasted = TL.castAs<CLASS##TypeLoc>(); \
+      TLCasted.initializeLocal(Context, Loc);  \
+      TL = TLCasted.getNextTypeLoc(); \
+      if (!TL) return;      \
+      continue;             \
+    }
+#include "clang/AST/TypeLocNodes.def"
+    }
+  }
+}
+
+// Walk toward the innermost TypeLoc, remembering the leftmost node with a
+// valid begin location; nodes whose written form starts to the right of the
+// declarator (functions, arrays) are skipped over.
+SourceLocation TypeLoc::getBeginLoc() const {
+  TypeLoc Cur = *this;
+  TypeLoc LeftMost = Cur;
+  while (true) {
+    switch (Cur.getTypeLocClass()) {
+    case Elaborated:
+      LeftMost = Cur;
+      break;
+    case FunctionProto:
+      if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
+            ->hasTrailingReturn()) {
+        LeftMost = Cur;
+        break;
+      }
+      /* Fall through */
+    case FunctionNoProto:
+    case ConstantArray:
+    case DependentSizedArray:
+    case IncompleteArray:
+    case VariableArray:
+      // FIXME: Currently QualifiedTypeLoc does not have a source range
+    case Qualified:
+      Cur = Cur.getNextTypeLoc();
+      continue;
+    default:
+      if (!Cur.getLocalSourceRange().getBegin().isInvalid())
+        LeftMost = Cur;
+      Cur = Cur.getNextTypeLoc();
+      if (Cur.isNull())
+        break;
+      continue;
+    } // switch
+    break;
+  } // while
+  return LeftMost.getLocalSourceRange().getBegin();
+}
+
+// Walk the chain tracking 'Last', the node whose local range ends rightmost
+// in the written form; return its end location once a terminating node is hit.
+SourceLocation TypeLoc::getEndLoc() const {
+  TypeLoc Cur = *this;
+  TypeLoc Last;
+  while (true) {
+    switch (Cur.getTypeLocClass()) {
+    default:
+      if (!Last)
+        Last = Cur;
+      return Last.getLocalSourceRange().getEnd();
+    case Paren:
+    case ConstantArray:
+    case DependentSizedArray:
+    case IncompleteArray:
+    case VariableArray:
+    case FunctionNoProto:
+      // These suffix forms always extend the end of the written type.
+      Last = Cur;
+      break;
+    case FunctionProto:
+      // A trailing return type means the end comes from an inner node.
+      if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()->hasTrailingReturn())
+        Last = TypeLoc();
+      else
+        Last = Cur;
+      break;
+    case Pointer:
+    case BlockPointer:
+    case MemberPointer:
+    case LValueReference:
+    case RValueReference:
+    case PackExpansion:
+      if (!Last)
+        Last = Cur;
+      break;
+    case Qualified:
+    case Elaborated:
+      break;
+    }
+    Cur = Cur.getNextTypeLoc();
+  }
+}
+
+
+namespace {
+  struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
+    // Overload resolution does the real work for us.
+    static bool isTypeSpec(TypeSpecTypeLoc _) { return true; }
+    static bool isTypeSpec(TypeLoc _) { return false; }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+    bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+      return isTypeSpec(TyLoc); \
+    }
+#include "clang/AST/TypeLocNodes.def"
+  };
+}
+
+
+/// \brief Determines if the given type loc corresponds to a
+/// TypeSpecTypeLoc.  Since there is not actually a TypeSpecType in
+/// the type hierarchy, this is made somewhat complicated.
+///
+/// There are a lot of types that currently use TypeSpecTypeLoc
+/// because it's a convenient base class.  Ideally we would not accept
+/// those here, but ideally we would have better implementations for
+/// them.
+bool TypeSpecTypeLoc::isKind(const TypeLoc &TL) {
+  if (TL.getType().hasLocalQualifiers()) return false;
+  return TSTChecker().Visit(TL);
+}
+
+// Reimplemented to account for GNU/C++ extension
+//     typeof unary-expression
+// where there are no parentheses.
+SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
+  if (getRParenLoc().isValid())
+    return SourceRange(getTypeofLoc(), getRParenLoc());
+  else
+    // Parenthesis-free GNU form: extend to the end of the operand expression.
+    return SourceRange(getTypeofLoc(),
+                       getUnderlyingExpr()->getSourceRange().getEnd());
+}
+
+
+TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
+  // When extra local data is stored, it records exactly what was written.
+  if (needsExtraLocalData())
+    return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
+  switch (getTypePtr()->getKind()) {
+  case BuiltinType::Void:
+    return TST_void;
+  case BuiltinType::Bool:
+    return TST_bool;
+  case BuiltinType::Char_U:
+  case BuiltinType::Char_S:
+    return TST_char;
+  case BuiltinType::Char16:
+    return TST_char16;
+  case BuiltinType::Char32:
+    return TST_char32;
+  case BuiltinType::WChar_S:
+  case BuiltinType::WChar_U:
+    return TST_wchar;
+  case BuiltinType::UChar:
+  case BuiltinType::UShort:
+  case BuiltinType::UInt:
+  case BuiltinType::ULong:
+  case BuiltinType::ULongLong:
+  case BuiltinType::UInt128:
+  case BuiltinType::SChar:
+  case BuiltinType::Short:
+  case BuiltinType::Int:
+  case BuiltinType::Long:
+  case BuiltinType::LongLong:
+  case BuiltinType::Int128:
+  case BuiltinType::Half:
+  case BuiltinType::Float:
+  case BuiltinType::Double:
+  case BuiltinType::LongDouble:
+    llvm_unreachable("Builtin type needs extra local data!");
+    // Fall through, if the impossible happens.
+
+  case BuiltinType::NullPtr:
+  case BuiltinType::Overload:
+  case BuiltinType::Dependent:
+  case BuiltinType::BoundMember:
+  case BuiltinType::UnknownAny:
+  case BuiltinType::ARCUnbridgedCast:
+  case BuiltinType::PseudoObject:
+  case BuiltinType::ObjCId:
+  case BuiltinType::ObjCClass:
+  case BuiltinType::ObjCSel:
+  case BuiltinType::OCLImage1d:
+  case BuiltinType::OCLImage1dArray:
+  case BuiltinType::OCLImage1dBuffer:
+  case BuiltinType::OCLImage2d:
+  case BuiltinType::OCLImage2dArray:
+  case BuiltinType::OCLImage3d:
+  case BuiltinType::OCLSampler:
+  case BuiltinType::OCLEvent:
+  case BuiltinType::BuiltinFn:
+    return TST_unspecified;
+  }
+
+  llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
+  while (ParenTypeLoc PTL = TL.getAs<ParenTypeLoc>())
+    TL = PTL.getInnerLoc();
+  return TL;
+}
+
+void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
+                                        SourceLocation Loc) {
+  setElaboratedKeywordLoc(Loc);
+  NestedNameSpecifierLocBuilder Builder;
+  Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+  setQualifierLoc(Builder.getWithLocInContext(Context));
+}
+
+void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
+                                           SourceLocation Loc) {
+  setElaboratedKeywordLoc(Loc);
+  NestedNameSpecifierLocBuilder Builder;
+  Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+  setQualifierLoc(Builder.getWithLocInContext(Context));
+  setNameLoc(Loc);
+}
+
+void
+DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+                                                        SourceLocation Loc) {
+  setElaboratedKeywordLoc(Loc);
+  if (getTypePtr()->getQualifier()) {
+    NestedNameSpecifierLocBuilder Builder;
+    Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+    setQualifierLoc(Builder.getWithLocInContext(Context));
+  } else {
+    setQualifierLoc(NestedNameSpecifierLoc());
+  }
+  setTemplateKeywordLoc(Loc);
+  setTemplateNameLoc(Loc);
+  setLAngleLoc(Loc);
+  setRAngleLoc(Loc);
+  TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
+                                                   getTypePtr()->getArgs(),
+                                                   getArgInfos(), Loc);
+}
+
+// Build trivial (single-location) location info for each template argument,
+// dispatching on the argument kind.
+void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
+                                                      unsigned NumArgs,
+                                                  const TemplateArgument *Args,
+                                              TemplateArgumentLocInfo *ArgInfos,
+                                                      SourceLocation Loc) {
+  for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+    switch (Args[i].getKind()) {
+    case TemplateArgument::Null:
+      llvm_unreachable("Impossible TemplateArgument");
+
+    case TemplateArgument::Integral:
+    case TemplateArgument::Declaration:
+    case TemplateArgument::NullPtr:
+      ArgInfos[i] = TemplateArgumentLocInfo();
+      break;
+
+    case TemplateArgument::Expression:
+      ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
+      break;
+
+    case TemplateArgument::Type:
+      ArgInfos[i] = TemplateArgumentLocInfo(
+                      Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
+                                                       Loc));
+      break;
+
+    case TemplateArgument::Template:
+    case TemplateArgument::TemplateExpansion: {
+      NestedNameSpecifierLocBuilder Builder;
+      TemplateName Template = Args[i].getAsTemplateOrTemplatePattern();
+      if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+        Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
+      else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+        Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
+
+      ArgInfos[i] = TemplateArgumentLocInfo(
+          Builder.getWithLocInContext(Context), Loc,
+          Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
+                                                          : Loc);
+      break;
+    }
+
+    case TemplateArgument::Pack:
+      ArgInfos[i] = TemplateArgumentLocInfo();
+      break;
+    }
+  }
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
new file mode 100644
index 000000000000..571e3db0289f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -0,0 +1,1551 @@
+//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to print types from Clang's type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+  /// \brief RAII object that enables printing of the ARC __strong lifetime
+  /// qualifier.
+  class IncludeStrongLifetimeRAII {
+    PrintingPolicy &Policy;
+    bool Old;
+
+  public:
+    explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+      : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+        if (!Policy.SuppressLifetimeQualifiers)
+          Policy.SuppressStrongLifetime = false;
+    }
+
+    ~IncludeStrongLifetimeRAII() {
+      Policy.SuppressStrongLifetime = Old;
+    }
+  };
+
+  /// RAII object that clears SuppressSpecifiers while printing parameters.
+  class ParamPolicyRAII {
+    PrintingPolicy &Policy;
+    bool Old;
+
+  public:
+    explicit ParamPolicyRAII(PrintingPolicy &Policy)
+      : Policy(Policy), Old(Policy.SuppressSpecifiers) {
+      Policy.SuppressSpecifiers = false;
+    }
+
+    ~ParamPolicyRAII() {
+      Policy.SuppressSpecifiers = Old;
+    }
+  };
+
+  /// RAII object that suppresses tag keywords and scope qualifiers while
+  /// printing the named type of an elaborated type.
+  class ElaboratedTypePolicyRAII {
+    PrintingPolicy &Policy;
+    bool SuppressTagKeyword;
+    bool SuppressScope;
+
+  public:
+    explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
+      SuppressTagKeyword = Policy.SuppressTagKeyword;
+      SuppressScope = Policy.SuppressScope;
+      Policy.SuppressTagKeyword = true;
+      Policy.SuppressScope = true;
+    }
+
+    ~ElaboratedTypePolicyRAII() {
+      Policy.SuppressTagKeyword = SuppressTagKeyword;
+      Policy.SuppressScope = SuppressScope;
+    }
+  };
+
+  /// Prints a type as the pair of strings that go before and after a
+  /// declarator name ("int " and "[10]" for "int foo[10]").
+  class TypePrinter {
+    PrintingPolicy Policy;
+    bool HasEmptyPlaceHolder;
+    bool InsideCCAttribute;
+
+  public:
+    explicit TypePrinter(const PrintingPolicy &Policy)
+      : Policy(Policy), HasEmptyPlaceHolder(false), InsideCCAttribute(false) { }
+
+    void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
+               StringRef PlaceHolder);
+    void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
+
+    static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
+    void spaceBeforePlaceHolder(raw_ostream &OS);
+    void printTypeSpec(const NamedDecl *D, raw_ostream &OS);
+
+    void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
+    void printBefore(QualType T, raw_ostream &OS);
+    void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
+    void printAfter(QualType T, raw_ostream &OS);
+    void AppendScope(DeclContext *DC, raw_ostream &OS);
+    void printTag(TagDecl *T, raw_ostream &OS);
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) \
+    void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
+    void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
+#include "clang/AST/TypeNodes.def"
+  };
+}
+
+// Emit the CVR qualifiers in TypeQuals as space-separated keywords.
+static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals) {
+  bool appendSpace = false;
+  if (TypeQuals & Qualifiers::Const) {
+    OS << "const";
+    appendSpace = true;
+  }
+  if (TypeQuals & Qualifiers::Volatile) {
+    if (appendSpace) OS << ' ';
+    OS << "volatile";
+    appendSpace = true;
+  }
+  if (TypeQuals & Qualifiers::Restrict) {
+    if (appendSpace) OS << ' ';
+    OS << "restrict";
+  }
+}
+
+void TypePrinter::spaceBeforePlaceHolder(raw_ostream &OS) {
+  if (!HasEmptyPlaceHolder)
+    OS << ' ';
+}
+
+void TypePrinter::print(QualType t, raw_ostream &OS, StringRef PlaceHolder) {
+  SplitQualType split = t.split();
+  print(split.Ty, split.Quals, OS, PlaceHolder);
+}
+
+void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
+                        StringRef PlaceHolder) {
+  if (!T) {
+    OS << "NULL TYPE";
+    return;
+  }
+
+  SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
+
+  printBefore(T, Quals, OS);
+  OS << PlaceHolder;
+  printAfter(T, Quals, OS);
+}
+
+bool TypePrinter::canPrefixQualifiers(const Type *T,
+                                      bool &NeedARCStrongQualifier) {
+  // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
+  // so that we get "const int" instead of "int const", but we can't do this if
+  // the type is complex.  For example if the type is "int*", we *must* print
+  // "int * const", printing "const int *" is different.  Only do this when the
+  // type expands to a simple string.
+  bool CanPrefixQualifiers = false;
+  NeedARCStrongQualifier = false;
+  Type::TypeClass TC = T->getTypeClass();
+  // Classify sugar types by what they will print as.
+  if (const AutoType *AT = dyn_cast<AutoType>(T))
+    TC = AT->desugar()->getTypeClass();
+  if (const SubstTemplateTypeParmType *Subst
+        = dyn_cast<SubstTemplateTypeParmType>(T))
+    TC = Subst->getReplacementType()->getTypeClass();
+
+  switch (TC) {
+    case Type::Auto:
+    case Type::Builtin:
+    case Type::Complex:
+    case Type::UnresolvedUsing:
+    case Type::Typedef:
+    case Type::TypeOfExpr:
+    case Type::TypeOf:
+    case Type::Decltype:
+    case Type::UnaryTransform:
+    case Type::Record:
+    case Type::Enum:
+    case Type::Elaborated:
+    case Type::TemplateTypeParm:
+    case Type::SubstTemplateTypeParmPack:
+    case Type::TemplateSpecialization:
+    case Type::InjectedClassName:
+    case Type::DependentName:
+    case Type::DependentTemplateSpecialization:
+    case Type::ObjCObject:
+    case Type::ObjCInterface:
+    case Type::Atomic:
+      CanPrefixQualifiers = true;
+      break;
+
+    case Type::ObjCObjectPointer:
+      CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
+        T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
+      break;
+
+    case Type::ConstantArray:
+    case Type::IncompleteArray:
+    case Type::VariableArray:
+    case Type::DependentSizedArray:
+      NeedARCStrongQualifier = true;
+      // Fall through
+
+    case Type::Decayed:
+    case Type::Pointer:
+    case Type::BlockPointer:
+    case Type::LValueReference:
+    case Type::RValueReference:
+    case Type::MemberPointer:
+    case Type::DependentSizedExtVector:
+    case Type::Vector:
+    case Type::ExtVector:
+    case Type::FunctionProto:
+    case Type::FunctionNoProto:
+    case Type::Paren:
+    case Type::Attributed:
+    case Type::PackExpansion:
+    case Type::SubstTemplateTypeParm:
+      CanPrefixQualifiers = false;
+      break;
+  }
+
+  return CanPrefixQualifiers;
+}
+
+void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
+  SplitQualType Split = T.split();
+
+  // If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
+  // at this level.
+  Qualifiers Quals = Split.Quals;
+  if (const SubstTemplateTypeParmType *Subst =
+        dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
+    Quals -= QualType(Subst, 0).getQualifiers();
+
+  printBefore(Split.Ty, Quals, OS);
+}
+
+/// \brief Prints the part of the type string before an identifier, e.g. for
+/// "int foo[10]" it prints "int ".
+void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) {
+  if (Policy.SuppressSpecifiers && T->isSpecifierType())
+    return;
+
+  SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder);
+
+  // Print qualifiers as appropriate.
+
+  bool CanPrefixQualifiers = false;
+  bool NeedARCStrongQualifier = false;
+  CanPrefixQualifiers = canPrefixQualifiers(T, NeedARCStrongQualifier);
+
+  if (CanPrefixQualifiers && !Quals.empty()) {
+    if (NeedARCStrongQualifier) {
+      IncludeStrongLifetimeRAII Strong(Policy);
+      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
+    } else {
+      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
+    }
+  }
+
+  bool hasAfterQuals = false;
+  if (!CanPrefixQualifiers && !Quals.empty()) {
+    hasAfterQuals = !Quals.isEmptyWhenPrinted(Policy);
+    if (hasAfterQuals)
+      HasEmptyPlaceHolder = false;
+  }
+
+  switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+    print##CLASS##Before(cast<CLASS##Type>(T), OS); \
+    break;
+#include "clang/AST/TypeNodes.def"
+  }
+
+  if (hasAfterQuals) {
+    if (NeedARCStrongQualifier) {
+      IncludeStrongLifetimeRAII Strong(Policy);
+      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
+    } else {
+      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
+    }
+  }
+}
+
+void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
+  SplitQualType split = t.split();
+  printAfter(split.Ty, split.Quals, OS);
+}
+
+/// \brief Prints the part of the type string after an identifier, e.g. for
+/// "int foo[10]" it prints "[10]".
+void TypePrinter::printAfter(const Type *T, Qualifiers Quals, raw_ostream &OS) {
+  // Dispatch to the per-class printer via the TypeNodes.def X-macro.
+  switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+    print##CLASS##After(cast<CLASS##Type>(T), OS); \
+    break;
+#include "clang/AST/TypeNodes.def"
+  }
+}
+
+void TypePrinter::printBuiltinBefore(const BuiltinType *T, raw_ostream &OS) {
+  OS << T->getName(Policy);
+  spaceBeforePlaceHolder(OS);
+}
+void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) { }
+
+void TypePrinter::printComplexBefore(const ComplexType *T, raw_ostream &OS) {
+  OS << "_Complex ";
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getPointeeType(), OS);
+  // Handle things like 'int (*A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeType()))
+    OS << '(';
+  OS << '*';
+}
+void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  // Handle things like 'int (*A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeType()))
+    OS << ')';
+  printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
+                                          raw_ostream &OS) {
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getPointeeType(), OS);
+  OS << '^';
+}
+void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
+                                          raw_ostream &OS) {
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
+                                             raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getPointeeTypeAsWritten(), OS);
+  // Handle things like 'int (&A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+    OS << '(';
+  OS << '&';
+}
+void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
+                                            raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  // Handle things like 'int (&A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+    OS << ')';
+  printAfter(T->getPointeeTypeAsWritten(), OS);
+}
+
+void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
+                                             raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getPointeeTypeAsWritten(), OS);
+  // Handle things like 'int (&&A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+    OS << '(';
+  OS << "&&";
+}
+void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
+                                            raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  // Handle things like 'int (&&A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+    OS << ')';
+  printAfter(T->getPointeeTypeAsWritten(), OS);
+}
+
+void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
+                                           raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getPointeeType(), OS);
+  // Handle things like 'int (Cls::*A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeType()))
+    OS << '(';
+
+  // Print the class name with a nested printer so tags are not suppressed.
+  PrintingPolicy InnerPolicy(Policy);
+  InnerPolicy.SuppressTag = false;
+  TypePrinter(InnerPolicy).print(QualType(T->getClass(), 0), OS, StringRef());
+
+  OS << "::*";
+}
+void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
+                                          raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  // Handle things like 'int (Cls::*A)[4];' correctly.
+  // FIXME: this should include vectors, but vectors use attributes I guess.
+  if (isa<ArrayType>(T->getPointeeType()))
+    OS << ')';
+  printAfter(T->getPointeeType(), OS);
+}
+
+void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
+                                           raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
+                                          raw_ostream &OS) {
+  OS << '[' << T->getSize().getZExtValue() << ']';
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
+                                             raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
+                                            raw_ostream &OS) {
+  OS << "[]";
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
+                                           raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
+                                          raw_ostream &OS) {
+  OS << '[';
+  // C99 allows qualifiers and 'static'/'*' inside the array brackets of a
+  // parameter declarator; reproduce them here.
+  if (T->getIndexTypeQualifiers().hasQualifiers()) {
+    AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers());
+    OS << ' ';
+  }
+
+  if (T->getSizeModifier() == VariableArrayType::Static)
+    OS << "static";
+  else if (T->getSizeModifier() == VariableArrayType::Star)
+    OS << '*';
+
+  if (T->getSizeExpr())
+    T->getSizeExpr()->printPretty(OS, 0, Policy);
+  OS << ']';
+
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
+  // Print as though it's a pointer.
+  printBefore(T->getDecayedType(), OS);
+}
+void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
+  printAfter(T->getDecayedType(), OS);
+}
+
+void TypePrinter::printDependentSizedArrayBefore(
+                                               const DependentSizedArrayType *T,
+                                               raw_ostream &OS) {
+  IncludeStrongLifetimeRAII Strong(Policy);
+  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedArrayAfter(
+                                               const DependentSizedArrayType *T,
+                                               raw_ostream &OS) {
+  OS << '[';
+  if (T->getSizeExpr())
+    T->getSizeExpr()->printPretty(OS, 0, Policy);
+  OS << ']';
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printDependentSizedExtVectorBefore(
+                                          const DependentSizedExtVectorType *T,
+                                          raw_ostream &OS) {
+  printBefore(T->getElementType(), OS);
+}
+void TypePrinter::printDependentSizedExtVectorAfter(
+                                          const DependentSizedExtVectorType *T,
+                                          raw_ostream &OS) {
+  OS << " __attribute__((ext_vector_type(";
+  if (T->getSizeExpr())
+    T->getSizeExpr()->printPretty(OS, 0, Policy);
+  OS << ")))";
+  printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
+  // Each vector flavor has its own written syntax.
+  switch (T->getVectorKind()) {
+  case VectorType::AltiVecPixel:
+    OS << "__vector __pixel ";
+    break;
+  case VectorType::AltiVecBool:
+    OS << "__vector __bool ";
+    printBefore(T->getElementType(), OS);
+    break;
+  case VectorType::AltiVecVector:
+    OS << "__vector ";
+    printBefore(T->getElementType(), OS);
+    break;
+  case VectorType::NeonVector:
+    OS << "__attribute__((neon_vector_type("
+       << T->getNumElements() << "))) ";
+    printBefore(T->getElementType(), OS);
+    break;
+  case VectorType::NeonPolyVector:
+    OS << "__attribute__((neon_polyvector_type(" <<
+          T->getNumElements() << "))) ";
+    printBefore(T->getElementType(), OS);
+    break;
+  case VectorType::GenericVector: {
+    // FIXME: We prefer to print the size directly here, but have no way
+    // to get the size of the type.
+ OS << "__attribute__((__vector_size__(" + << T->getNumElements() + << " * sizeof("; + print(T->getElementType(), OS, StringRef()); + OS << ")))) "; + printBefore(T->getElementType(), OS); + break; + } + } +} +void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) { + printAfter(T->getElementType(), OS); +} + +void TypePrinter::printExtVectorBefore(const ExtVectorType *T, + raw_ostream &OS) { + printBefore(T->getElementType(), OS); +} +void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) { + printAfter(T->getElementType(), OS); + OS << " __attribute__((ext_vector_type("; + OS << T->getNumElements(); + OS << ")))"; +} + +void +FunctionProtoType::printExceptionSpecification(raw_ostream &OS, + const PrintingPolicy &Policy) + const { + + if (hasDynamicExceptionSpec()) { + OS << " throw("; + if (getExceptionSpecType() == EST_MSAny) + OS << "..."; + else + for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) { + if (I) + OS << ", "; + + OS << getExceptionType(I).stream(Policy); + } + OS << ')'; + } else if (isNoexceptExceptionSpec(getExceptionSpecType())) { + OS << " noexcept"; + if (getExceptionSpecType() == EST_ComputedNoexcept) { + OS << '('; + getNoexceptExpr()->printPretty(OS, 0, Policy); + OS << ')'; + } + } +} + +void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T, + raw_ostream &OS) { + if (T->hasTrailingReturn()) { + OS << "auto "; + if (!HasEmptyPlaceHolder) + OS << '('; + } else { + // If needed for precedence reasons, wrap the inner part in grouping parens. + SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false); + printBefore(T->getResultType(), OS); + if (!PrevPHIsEmpty.get()) + OS << '('; + } +} + +void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, + raw_ostream &OS) { + // If needed for precedence reasons, wrap the inner part in grouping parens. 
+ if (!HasEmptyPlaceHolder) + OS << ')'; + SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + + OS << '('; + { + ParamPolicyRAII ParamPolicy(Policy); + for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) { + if (i) OS << ", "; + print(T->getArgType(i), OS, StringRef()); + } + } + + if (T->isVariadic()) { + if (T->getNumArgs()) + OS << ", "; + OS << "..."; + } else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) { + // Do not emit int() if we have a proto, emit 'int(void)'. + OS << "void"; + } + + OS << ')'; + + FunctionType::ExtInfo Info = T->getExtInfo(); + + if (!InsideCCAttribute) { + switch (Info.getCC()) { + case CC_C: + // The C calling convention is the default on the vast majority of platforms + // we support. If the user wrote it explicitly, it will usually be printed + // while traversing the AttributedType. If the type has been desugared, let + // the canonical spelling be the implicit calling convention. + // FIXME: It would be better to be explicit in certain contexts, such as a + // cdecl function typedef used to declare a member function with the + // Microsoft C++ ABI. 
+ break; + case CC_X86StdCall: + OS << " __attribute__((stdcall))"; + break; + case CC_X86FastCall: + OS << " __attribute__((fastcall))"; + break; + case CC_X86ThisCall: + OS << " __attribute__((thiscall))"; + break; + case CC_X86Pascal: + OS << " __attribute__((pascal))"; + break; + case CC_AAPCS: + OS << " __attribute__((pcs(\"aapcs\")))"; + break; + case CC_AAPCS_VFP: + OS << " __attribute__((pcs(\"aapcs-vfp\")))"; + break; + case CC_PnaclCall: + OS << " __attribute__((pnaclcall))"; + break; + case CC_IntelOclBicc: + OS << " __attribute__((intel_ocl_bicc))"; + break; + case CC_X86_64Win64: + OS << " __attribute__((ms_abi))"; + break; + case CC_X86_64SysV: + OS << " __attribute__((sysv_abi))"; + break; + } + } + + if (Info.getNoReturn()) + OS << " __attribute__((noreturn))"; + if (Info.getRegParm()) + OS << " __attribute__((regparm (" + << Info.getRegParm() << ")))"; + + if (unsigned quals = T->getTypeQuals()) { + OS << ' '; + AppendTypeQualList(OS, quals); + } + + switch (T->getRefQualifier()) { + case RQ_None: + break; + + case RQ_LValue: + OS << " &"; + break; + + case RQ_RValue: + OS << " &&"; + break; + } + T->printExceptionSpecification(OS, Policy); + + if (T->hasTrailingReturn()) { + OS << " -> "; + print(T->getResultType(), OS, StringRef()); + } else + printAfter(T->getResultType(), OS); +} + +void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T, + raw_ostream &OS) { + // If needed for precedence reasons, wrap the inner part in grouping parens. + SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false); + printBefore(T->getResultType(), OS); + if (!PrevPHIsEmpty.get()) + OS << '('; +} +void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T, + raw_ostream &OS) { + // If needed for precedence reasons, wrap the inner part in grouping parens. 
+ if (!HasEmptyPlaceHolder) + OS << ')'; + SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + + OS << "()"; + if (T->getNoReturnAttr()) + OS << " __attribute__((noreturn))"; + printAfter(T->getResultType(), OS); +} + +void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) { + IdentifierInfo *II = D->getIdentifier(); + OS << II->getName(); + spaceBeforePlaceHolder(OS); +} + +void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T, + raw_ostream &OS) { + printTypeSpec(T->getDecl(), OS); +} +void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T, + raw_ostream &OS) { } + +void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) { + printTypeSpec(T->getDecl(), OS); +} +void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) { } + +void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T, + raw_ostream &OS) { + OS << "typeof "; + T->getUnderlyingExpr()->printPretty(OS, 0, Policy); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T, + raw_ostream &OS) { } + +void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) { + OS << "typeof("; + print(T->getUnderlyingType(), OS, StringRef()); + OS << ')'; + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) { } + +void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) { + OS << "decltype("; + T->getUnderlyingExpr()->printPretty(OS, 0, Policy); + OS << ')'; + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) { } + +void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + + switch (T->getUTTKind()) { + case UnaryTransformType::EnumUnderlyingType: + OS << "__underlying_type("; + print(T->getBaseType(), OS, StringRef()); + OS << ')'; + 
spaceBeforePlaceHolder(OS); + return; + } + + printBefore(T->getBaseType(), OS); +} +void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + + switch (T->getUTTKind()) { + case UnaryTransformType::EnumUnderlyingType: + return; + } + + printAfter(T->getBaseType(), OS); +} + +void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) { + // If the type has been deduced, do not print 'auto'. + if (!T->getDeducedType().isNull()) { + printBefore(T->getDeducedType(), OS); + } else { + OS << (T->isDecltypeAuto() ? "decltype(auto)" : "auto"); + spaceBeforePlaceHolder(OS); + } +} +void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) { + // If the type has been deduced, do not print 'auto'. + if (!T->getDeducedType().isNull()) + printAfter(T->getDeducedType(), OS); +} + +void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + + OS << "_Atomic("; + print(T->getValueType(), OS, StringRef()); + OS << ')'; + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { } + +/// Appends the given scope to the end of a string. 
+void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) { + if (DC->isTranslationUnit()) return; + if (DC->isFunctionOrMethod()) return; + AppendScope(DC->getParent(), OS); + + if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) { + if (Policy.SuppressUnwrittenScope && + (NS->isAnonymousNamespace() || NS->isInline())) + return; + if (NS->getIdentifier()) + OS << NS->getName() << "::"; + else + OS << "<anonymous>::"; + } else if (ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(DC)) { + IncludeStrongLifetimeRAII Strong(Policy); + OS << Spec->getIdentifier()->getName(); + const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); + TemplateSpecializationType::PrintTemplateArgumentList(OS, + TemplateArgs.data(), + TemplateArgs.size(), + Policy); + OS << "::"; + } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) { + if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl()) + OS << Typedef->getIdentifier()->getName() << "::"; + else if (Tag->getIdentifier()) + OS << Tag->getIdentifier()->getName() << "::"; + else + return; + } +} + +void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) { + if (Policy.SuppressTag) + return; + + bool HasKindDecoration = false; + + // bool SuppressTagKeyword + // = Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword; + + // We don't print tags unless this is an elaborated type. + // In C, we just assume every RecordType is an elaborated type. + if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword || + D->getTypedefNameForAnonDecl())) { + HasKindDecoration = true; + OS << D->getKindName(); + OS << ' '; + } + + // Compute the full nested-name-specifier for this type. + // In C, this will always be empty except when the type + // being printed is anonymous within other Record. 
+ if (!Policy.SuppressScope) + AppendScope(D->getDeclContext(), OS); + + if (const IdentifierInfo *II = D->getIdentifier()) + OS << II->getName(); + else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) { + assert(Typedef->getIdentifier() && "Typedef without identifier?"); + OS << Typedef->getIdentifier()->getName(); + } else { + // Make an unambiguous representation for anonymous types, e.g. + // <anonymous enum at /usr/include/string.h:120:9> + + if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) { + OS << "<lambda"; + HasKindDecoration = true; + } else { + OS << "<anonymous"; + } + + if (Policy.AnonymousTagLocations) { + // Suppress the redundant tag keyword if we just printed one. + // We don't have to worry about ElaboratedTypes here because you can't + // refer to an anonymous type with one. + if (!HasKindDecoration) + OS << " " << D->getKindName(); + + PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc( + D->getLocation()); + if (PLoc.isValid()) { + OS << " at " << PLoc.getFilename() + << ':' << PLoc.getLine() + << ':' << PLoc.getColumn(); + } + } + + OS << '>'; + } + + // If this is a class template specialization, print the template + // arguments. 
+ if (ClassTemplateSpecializationDecl *Spec + = dyn_cast<ClassTemplateSpecializationDecl>(D)) { + const TemplateArgument *Args; + unsigned NumArgs; + if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) { + const TemplateSpecializationType *TST = + cast<TemplateSpecializationType>(TAW->getType()); + Args = TST->getArgs(); + NumArgs = TST->getNumArgs(); + } else { + const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); + Args = TemplateArgs.data(); + NumArgs = TemplateArgs.size(); + } + IncludeStrongLifetimeRAII Strong(Policy); + TemplateSpecializationType::PrintTemplateArgumentList(OS, + Args, NumArgs, + Policy); + } + + spaceBeforePlaceHolder(OS); +} + +void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) { + printTag(T->getDecl(), OS); +} +void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) { } + +void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) { + printTag(T->getDecl(), OS); +} +void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) { } + +void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T, + raw_ostream &OS) { + if (IdentifierInfo *Id = T->getIdentifier()) + OS << Id->getName(); + else + OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex(); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printTemplateTypeParmAfter(const TemplateTypeParmType *T, + raw_ostream &OS) { } + +void TypePrinter::printSubstTemplateTypeParmBefore( + const SubstTemplateTypeParmType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + printBefore(T->getReplacementType(), OS); +} +void TypePrinter::printSubstTemplateTypeParmAfter( + const SubstTemplateTypeParmType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + printAfter(T->getReplacementType(), OS); +} + +void TypePrinter::printSubstTemplateTypeParmPackBefore( + const SubstTemplateTypeParmPackType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + 
printTemplateTypeParmBefore(T->getReplacedParameter(), OS); +} +void TypePrinter::printSubstTemplateTypeParmPackAfter( + const SubstTemplateTypeParmPackType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + printTemplateTypeParmAfter(T->getReplacedParameter(), OS); +} + +void TypePrinter::printTemplateSpecializationBefore( + const TemplateSpecializationType *T, + raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + T->getTemplateName().print(OS, Policy); + + TemplateSpecializationType::PrintTemplateArgumentList(OS, + T->getArgs(), + T->getNumArgs(), + Policy); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printTemplateSpecializationAfter( + const TemplateSpecializationType *T, + raw_ostream &OS) { } + +void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T, + raw_ostream &OS) { + printTemplateSpecializationBefore(T->getInjectedTST(), OS); +} +void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T, + raw_ostream &OS) { } + +void TypePrinter::printElaboratedBefore(const ElaboratedType *T, + raw_ostream &OS) { + if (Policy.SuppressTag && isa<TagType>(T->getNamedType())) + return; + OS << TypeWithKeyword::getKeywordName(T->getKeyword()); + if (T->getKeyword() != ETK_None) + OS << " "; + NestedNameSpecifier* Qualifier = T->getQualifier(); + if (Qualifier) + Qualifier->print(OS, Policy); + + ElaboratedTypePolicyRAII PolicyRAII(Policy); + printBefore(T->getNamedType(), OS); +} +void TypePrinter::printElaboratedAfter(const ElaboratedType *T, + raw_ostream &OS) { + ElaboratedTypePolicyRAII PolicyRAII(Policy); + printAfter(T->getNamedType(), OS); +} + +void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) { + if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) { + printBefore(T->getInnerType(), OS); + OS << '('; + } else + printBefore(T->getInnerType(), OS); +} +void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) { + if (!HasEmptyPlaceHolder 
&& !isa<FunctionType>(T->getInnerType())) { + OS << ')'; + printAfter(T->getInnerType(), OS); + } else + printAfter(T->getInnerType(), OS); +} + +void TypePrinter::printDependentNameBefore(const DependentNameType *T, + raw_ostream &OS) { + OS << TypeWithKeyword::getKeywordName(T->getKeyword()); + if (T->getKeyword() != ETK_None) + OS << " "; + + T->getQualifier()->print(OS, Policy); + + OS << T->getIdentifier()->getName(); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printDependentNameAfter(const DependentNameType *T, + raw_ostream &OS) { } + +void TypePrinter::printDependentTemplateSpecializationBefore( + const DependentTemplateSpecializationType *T, raw_ostream &OS) { + IncludeStrongLifetimeRAII Strong(Policy); + + OS << TypeWithKeyword::getKeywordName(T->getKeyword()); + if (T->getKeyword() != ETK_None) + OS << " "; + + if (T->getQualifier()) + T->getQualifier()->print(OS, Policy); + OS << T->getIdentifier()->getName(); + TemplateSpecializationType::PrintTemplateArgumentList(OS, + T->getArgs(), + T->getNumArgs(), + Policy); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printDependentTemplateSpecializationAfter( + const DependentTemplateSpecializationType *T, raw_ostream &OS) { } + +void TypePrinter::printPackExpansionBefore(const PackExpansionType *T, + raw_ostream &OS) { + printBefore(T->getPattern(), OS); +} +void TypePrinter::printPackExpansionAfter(const PackExpansionType *T, + raw_ostream &OS) { + printAfter(T->getPattern(), OS); + OS << "..."; +} + +void TypePrinter::printAttributedBefore(const AttributedType *T, + raw_ostream &OS) { + // Prefer the macro forms of the GC and ownership qualifiers. 
+ if (T->getAttrKind() == AttributedType::attr_objc_gc || + T->getAttrKind() == AttributedType::attr_objc_ownership) + return printBefore(T->getEquivalentType(), OS); + + printBefore(T->getModifiedType(), OS); + + if (T->isMSTypeSpec()) { + switch (T->getAttrKind()) { + default: return; + case AttributedType::attr_ptr32: OS << " __ptr32"; break; + case AttributedType::attr_ptr64: OS << " __ptr64"; break; + case AttributedType::attr_sptr: OS << " __sptr"; break; + case AttributedType::attr_uptr: OS << " __uptr"; break; + } + spaceBeforePlaceHolder(OS); + } +} + +void TypePrinter::printAttributedAfter(const AttributedType *T, + raw_ostream &OS) { + // Prefer the macro forms of the GC and ownership qualifiers. + if (T->getAttrKind() == AttributedType::attr_objc_gc || + T->getAttrKind() == AttributedType::attr_objc_ownership) + return printAfter(T->getEquivalentType(), OS); + + // TODO: not all attributes are GCC-style attributes. + if (T->isMSTypeSpec()) + return; + + // If this is a calling convention attribute, don't print the implicit CC from + // the modified type. 
+ SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv()); + + printAfter(T->getModifiedType(), OS); + + OS << " __attribute__(("; + switch (T->getAttrKind()) { + default: llvm_unreachable("This attribute should have been handled already"); + case AttributedType::attr_address_space: + OS << "address_space("; + OS << T->getEquivalentType().getAddressSpace(); + OS << ')'; + break; + + case AttributedType::attr_vector_size: { + OS << "__vector_size__("; + if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) { + OS << vector->getNumElements(); + OS << " * sizeof("; + print(vector->getElementType(), OS, StringRef()); + OS << ')'; + } + OS << ')'; + break; + } + + case AttributedType::attr_neon_vector_type: + case AttributedType::attr_neon_polyvector_type: { + if (T->getAttrKind() == AttributedType::attr_neon_vector_type) + OS << "neon_vector_type("; + else + OS << "neon_polyvector_type("; + const VectorType *vector = T->getEquivalentType()->getAs<VectorType>(); + OS << vector->getNumElements(); + OS << ')'; + break; + } + + case AttributedType::attr_regparm: { + // FIXME: When Sema learns to form this AttributedType, avoid printing the + // attribute again in printFunctionProtoAfter. 
+ OS << "regparm("; + QualType t = T->getEquivalentType(); + while (!t->isFunctionType()) + t = t->getPointeeType(); + OS << t->getAs<FunctionType>()->getRegParmType(); + OS << ')'; + break; + } + + case AttributedType::attr_objc_gc: { + OS << "objc_gc("; + + QualType tmp = T->getEquivalentType(); + while (tmp.getObjCGCAttr() == Qualifiers::GCNone) { + QualType next = tmp->getPointeeType(); + if (next == tmp) break; + tmp = next; + } + + if (tmp.isObjCGCWeak()) + OS << "weak"; + else + OS << "strong"; + OS << ')'; + break; + } + + case AttributedType::attr_objc_ownership: + OS << "objc_ownership("; + switch (T->getEquivalentType().getObjCLifetime()) { + case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); + case Qualifiers::OCL_ExplicitNone: OS << "none"; break; + case Qualifiers::OCL_Strong: OS << "strong"; break; + case Qualifiers::OCL_Weak: OS << "weak"; break; + case Qualifiers::OCL_Autoreleasing: OS << "autoreleasing"; break; + } + OS << ')'; + break; + + // FIXME: When Sema learns to form this AttributedType, avoid printing the + // attribute again in printFunctionProtoAfter. + case AttributedType::attr_noreturn: OS << "noreturn"; break; + + case AttributedType::attr_cdecl: OS << "cdecl"; break; + case AttributedType::attr_fastcall: OS << "fastcall"; break; + case AttributedType::attr_stdcall: OS << "stdcall"; break; + case AttributedType::attr_thiscall: OS << "thiscall"; break; + case AttributedType::attr_pascal: OS << "pascal"; break; + case AttributedType::attr_ms_abi: OS << "ms_abi"; break; + case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break; + case AttributedType::attr_pcs: + case AttributedType::attr_pcs_vfp: { + OS << "pcs("; + QualType t = T->getEquivalentType(); + while (!t->isFunctionType()) + t = t->getPointeeType(); + OS << (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ? 
+ "\"aapcs\"" : "\"aapcs-vfp\""); + OS << ')'; + break; + } + case AttributedType::attr_pnaclcall: OS << "pnaclcall"; break; + case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break; + } + OS << "))"; +} + +void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T, + raw_ostream &OS) { + OS << T->getDecl()->getName(); + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T, + raw_ostream &OS) { } + +void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T, + raw_ostream &OS) { + if (T->qual_empty()) + return printBefore(T->getBaseType(), OS); + + print(T->getBaseType(), OS, StringRef()); + OS << '<'; + bool isFirst = true; + for (ObjCObjectType::qual_iterator + I = T->qual_begin(), E = T->qual_end(); I != E; ++I) { + if (isFirst) + isFirst = false; + else + OS << ','; + OS << (*I)->getName(); + } + OS << '>'; + spaceBeforePlaceHolder(OS); +} +void TypePrinter::printObjCObjectAfter(const ObjCObjectType *T, + raw_ostream &OS) { + if (T->qual_empty()) + return printAfter(T->getBaseType(), OS); +} + +void TypePrinter::printObjCObjectPointerBefore(const ObjCObjectPointerType *T, + raw_ostream &OS) { + T->getPointeeType().getLocalQualifiers().print(OS, Policy, + /*appendSpaceIfNonEmpty=*/true); + + if (T->isObjCIdType() || T->isObjCQualifiedIdType()) + OS << "id"; + else if (T->isObjCClassType() || T->isObjCQualifiedClassType()) + OS << "Class"; + else if (T->isObjCSelType()) + OS << "SEL"; + else + OS << T->getInterfaceDecl()->getName(); + + if (!T->qual_empty()) { + OS << '<'; + for (ObjCObjectPointerType::qual_iterator I = T->qual_begin(), + E = T->qual_end(); + I != E; ++I) { + OS << (*I)->getName(); + if (I+1 != E) + OS << ','; + } + OS << '>'; + } + + if (!T->isObjCIdType() && !T->isObjCQualifiedIdType()) { + OS << " *"; // Don't forget the implicit pointer. 
+ } else { + spaceBeforePlaceHolder(OS); + } +} +void TypePrinter::printObjCObjectPointerAfter(const ObjCObjectPointerType *T, + raw_ostream &OS) { } + +void TemplateSpecializationType:: + PrintTemplateArgumentList(raw_ostream &OS, + const TemplateArgumentListInfo &Args, + const PrintingPolicy &Policy) { + return PrintTemplateArgumentList(OS, + Args.getArgumentArray(), + Args.size(), + Policy); +} + +void +TemplateSpecializationType::PrintTemplateArgumentList( + raw_ostream &OS, + const TemplateArgument *Args, + unsigned NumArgs, + const PrintingPolicy &Policy, + bool SkipBrackets) { + if (!SkipBrackets) + OS << '<'; + + bool needSpace = false; + for (unsigned Arg = 0; Arg < NumArgs; ++Arg) { + // Print the argument into a string. + SmallString<128> Buf; + llvm::raw_svector_ostream ArgOS(Buf); + if (Args[Arg].getKind() == TemplateArgument::Pack) { + if (Args[Arg].pack_size() && Arg > 0) + OS << ", "; + PrintTemplateArgumentList(ArgOS, + Args[Arg].pack_begin(), + Args[Arg].pack_size(), + Policy, true); + } else { + if (Arg > 0) + OS << ", "; + Args[Arg].print(Policy, ArgOS); + } + StringRef ArgString = ArgOS.str(); + + // If this is the first argument and its string representation + // begins with the global scope specifier ('::foo'), add a space + // to avoid printing the diagraph '<:'. + if (!Arg && !ArgString.empty() && ArgString[0] == ':') + OS << ' '; + + OS << ArgString; + + needSpace = (!ArgString.empty() && ArgString.back() == '>'); + } + + // If the last character of our string is '>', add another space to + // keep the two '>''s separate tokens. We don't *have* to do this in + // C++0x, but it's still good hygiene. + if (needSpace) + OS << ' '; + + if (!SkipBrackets) + OS << '>'; +} + +// Sadly, repeat all that with TemplateArgLoc. 
+// Same as the TemplateArgument overload, but for template arguments that
+// carry source-location information (TemplateArgumentLoc).
+void TemplateSpecializationType::
+PrintTemplateArgumentList(raw_ostream &OS,
+                          const TemplateArgumentLoc *Args, unsigned NumArgs,
+                          const PrintingPolicy &Policy) {
+  OS << '<';
+
+  bool needSpace = false;
+  for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+    if (Arg > 0)
+      OS << ", ";
+
+    // Print the argument into a string.
+    SmallString<128> Buf;
+    llvm::raw_svector_ostream ArgOS(Buf);
+    if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+      // Expand parameter packs inline, without an extra set of brackets.
+      PrintTemplateArgumentList(ArgOS,
+                                Args[Arg].getArgument().pack_begin(),
+                                Args[Arg].getArgument().pack_size(),
+                                Policy, true);
+    } else {
+      Args[Arg].getArgument().print(Policy, ArgOS);
+    }
+    StringRef ArgString = ArgOS.str();
+
+    // If this is the first argument and its string representation
+    // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the diagraph '<:'.
+    if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+      OS << ' ';
+
+    OS << ArgString;
+
+    needSpace = (!ArgString.empty() && ArgString.back() == '>');
+  }
+
+  // If the last character of our string is '>', add another space to
+  // keep the two '>''s separate tokens. We don't *have* to do this in
+  // C++0x, but it's still good hygiene.
+  if (needSpace)
+    OS << ' ';
+
+  OS << '>';
+}
+
+// Debugging aid: print this type to llvm::errs(), prefixed by \p msg when
+// non-null, using "identifier" as the declarator placeholder.
+void QualType::dump(const char *msg) const {
+  if (msg)
+    llvm::errs() << msg << ": ";
+  LangOptions LO;
+  print(llvm::errs(), PrintingPolicy(LO), "identifier");
+  llvm::errs() << '\n';
+}
+// Debugging aid: same as above with no message prefix.
+void QualType::dump() const {
+  dump(0);
+}
+
+// Debugging aid: dump an unqualified Type via QualType::dump().
+void Type::dump() const {
+  QualType(this, 0).dump();
+}
+
+// Render these qualifiers using a default-constructed printing policy.
+std::string Qualifiers::getAsString() const {
+  LangOptions LO;
+  return getAsString(PrintingPolicy(LO));
+}
+
+// Appends qualifiers to the given string, separated by spaces. Will
+// prefix a space if the string is non-empty. Will not append a final
+// space.
+// Render these qualifiers to a std::string under \p Policy (e.g. "const
+// volatile __weak").
+std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
+  SmallString<64> Buf;
+  llvm::raw_svector_ostream StrOS(Buf);
+  print(StrOS, Policy);
+  return StrOS.str();
+}
+
+// Returns true if printing these qualifiers under \p Policy would emit
+// nothing (no CVR, address-space, GC, or printable lifetime qualifiers).
+bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
+  if (getCVRQualifiers())
+    return false;
+
+  if (getAddressSpace())
+    return false;
+
+  if (getObjCGCAttr())
+    return false;
+
+  // OCL_Strong is suppressed when the policy asks for it; anything else
+  // prints.
+  if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime())
+    if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+      return false;
+
+  return true;
+}
+
+// Appends qualifiers to the given string, separated by spaces. Will
+// prefix a space if the string is non-empty. Will not append a final
+// space.
+void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
+                       bool appendSpaceIfNonEmpty) const {
+  bool addSpace = false;
+
+  unsigned quals = getCVRQualifiers();
+  if (quals) {
+    AppendTypeQualList(OS, quals);
+    addSpace = true;
+  }
+  if (unsigned addrspace = getAddressSpace()) {
+    if (addSpace)
+      OS << ' ';
+    addSpace = true;
+    // Known OpenCL address spaces get their keyword spelling; anything else
+    // falls back to the GNU attribute form.
+    switch (addrspace) {
+      case LangAS::opencl_global:
+        OS << "__global";
+        break;
+      case LangAS::opencl_local:
+        OS << "__local";
+        break;
+      case LangAS::opencl_constant:
+        OS << "__constant";
+        break;
+      default:
+        OS << "__attribute__((address_space(";
+        OS << addrspace;
+        OS << ")))";
+    }
+  }
+  if (Qualifiers::GC gc = getObjCGCAttr()) {
+    if (addSpace)
+      OS << ' ';
+    addSpace = true;
+    if (gc == Qualifiers::Weak)
+      OS << "__weak";
+    else
+      OS << "__strong";
+  }
+  if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
+    // A suppressed __strong prints nothing, so don't emit a separator for it.
+    if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime)){
+      if (addSpace)
+        OS << ' ';
+      addSpace = true;
+    }
+
+    switch (lifetime) {
+    case Qualifiers::OCL_None: llvm_unreachable("none but true");
+    case Qualifiers::OCL_ExplicitNone: OS << "__unsafe_unretained"; break;
+    case Qualifiers::OCL_Strong:
+      if (!Policy.SuppressStrongLifetime)
+        OS << "__strong";
+      break;
+
+    case Qualifiers::OCL_Weak: OS << "__weak"; break;
+    case Qualifiers::OCL_Autoreleasing: OS << "__autoreleasing"; break;
+    }
+  }
+
+  if (appendSpaceIfNonEmpty && addSpace)
+    OS << ' ';
+}
+
+// Convenience wrapper: print this QualType into a std::string under
+// \p Policy.
+std::string QualType::getAsString(const PrintingPolicy &Policy) const {
+  std::string S;
+  getAsStringInternal(S, Policy);
+  return S;
+}
+
+// Static form taking a split (Type, Qualifiers) pair; uses a
+// default-constructed printing policy.
+std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+  std::string buffer;
+  LangOptions options;
+  getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+  return buffer;
+}
+
+// Static entry point: print \p ty with qualifiers \p qs to \p OS, placing
+// \p PlaceHolder where the declarator name would appear.
+void QualType::print(const Type *ty, Qualifiers qs,
+                     raw_ostream &OS, const PrintingPolicy &policy,
+                     const Twine &PlaceHolder) {
+  SmallString<128> PHBuf;
+  StringRef PH = PlaceHolder.toStringRef(PHBuf);
+
+  TypePrinter(policy).print(ty, qs, OS, PH);
+}
+
+// \p buffer goes in holding the placeholder (declarator name) and comes
+// out holding the full printed type.
+void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
+                                   std::string &buffer,
+                                   const PrintingPolicy &policy) {
+  SmallString<256> Buf;
+  llvm::raw_svector_ostream StrOS(Buf);
+  TypePrinter(policy).print(ty, qs, StrOS, buffer);
+  std::string str = StrOS.str();
+  buffer.swap(str);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
new file mode 100644
index 000000000000..5ca4e862ef7b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
@@ -0,0 +1,213 @@
+//===--- VTTBuilder.cpp - C++ VTT layout builder --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual table
+// tables (VTT).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTTBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Builds the VTT (table of vtable addresses, Itanium C++ ABI 2.6.2) for
// MostDerivedClass, starting from the complete object at offset zero.
+VTTBuilder::VTTBuilder(ASTContext &Ctx,
+                       const CXXRecordDecl *MostDerivedClass,
+                       bool GenerateDefinition)
+  : Ctx(Ctx), MostDerivedClass(MostDerivedClass),
+  MostDerivedClassLayout(Ctx.getASTRecordLayout(MostDerivedClass)),
+  GenerateDefinition(GenerateDefinition) {
+  // Lay out this VTT.
+  LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+            /*BaseIsVirtual=*/false);
+}
+
// Appends one vtable-pointer slot for Base to the VTT being built.
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
+                                  const CXXRecordDecl *VTableClass) {
+  // Store the vtable pointer index if we're generating the primary VTT.
+  if (VTableClass == MostDerivedClass) {
+    assert(!SecondaryVirtualPointerIndices.count(Base) &&
+           "A virtual pointer index already exists for this base subobject!");
+    SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+  }
+
   // When only computing indices (no definition requested), a blank
   // placeholder component keeps the slot numbering correct.
+  if (!GenerateDefinition) {
+    VTTComponents.push_back(VTTComponent());
+    return;
+  }
+
+  VTTComponents.push_back(VTTComponent(VTableIndex, Base));
+}
+
// Recursively lays out sub-VTTs for all direct non-virtual bases of Base.
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+  const CXXRecordDecl *RD = Base.getBase();
+
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+
+    // Don't layout virtual bases.
+    if (I->isVirtual())
+        continue;
+
+    const CXXRecordDecl *BaseDecl =
+      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+    CharUnits BaseOffset = Base.getBaseOffset() +
+      Layout.getBaseClassOffset(BaseDecl);
+
+    // Layout the VTT for this base.
+    LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+  }
+}
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Recursive worker: emits secondary virtual pointers (Itanium C++ ABI 2.6.2)
// for every base of Base that has virtual bases or is "morally virtual"
// (reachable through a virtual path), skipping non-virtual primary bases.
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+                                           bool BaseIsMorallyVirtual,
+                                           uint64_t VTableIndex,
+                                           const CXXRecordDecl *VTableClass,
+                                           VisitedVirtualBasesSetTy &VBases) {
+  const CXXRecordDecl *RD = Base.getBase();
+
+  // We're not interested in bases that don't have virtual bases, and not
+  // morally virtual bases.
+  if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+    return;
+
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXRecordDecl *BaseDecl =
+      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+    // Itanium C++ ABI 2.6.2:
+    //   Secondary virtual pointers are present for all bases with either
+    //   virtual bases or virtual function declarations overridden along a
+    //   virtual path.
+    //
+    // If the base class is not dynamic, we don't want to add it, nor any
+    // of its base classes.
+    if (!BaseDecl->isDynamicClass())
+      continue;
+
+    bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+    bool BaseDeclIsNonVirtualPrimaryBase = false;
+    CharUnits BaseOffset;
+    if (I->isVirtual()) {
+      // Ignore virtual bases that we've already visited.
       // NOTE(review): relies on this LLVM version's SmallPtrSet::insert
       // returning true only on first insertion — confirm against in-tree llvm.
+      if (!VBases.insert(BaseDecl))
+        continue;
+
+      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+      BaseDeclIsMorallyVirtual = true;
+    } else {
+      const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+
+      BaseOffset = Base.getBaseOffset() +
+        Layout.getBaseClassOffset(BaseDecl);
+
+      if (!Layout.isPrimaryBaseVirtual() &&
+          Layout.getPrimaryBase() == BaseDecl)
+        BaseDeclIsNonVirtualPrimaryBase = true;
+    }
+
+    // Itanium C++ ABI 2.6.2:
+    //   Secondary virtual pointers: for each base class X which (a) has virtual
+    //   bases or is reachable along a virtual path from D, and (b) is not a
+    //   non-virtual primary base, the address of the virtual table for X-in-D
+    //   or an appropriate construction virtual table.
+    if (!BaseDeclIsNonVirtualPrimaryBase &&
+        (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+      // Add the vtable pointer.
+      AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTableIndex,
+                       VTableClass);
+    }
+
+    // And lay out the secondary virtual pointers for the base class.
+    LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+                                   BaseDeclIsMorallyVirtual, VTableIndex,
+                                   VTableClass, VBases);
+  }
+}
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Convenience overload: starts the recursion above with a fresh visited set.
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+                                           uint64_t VTableIndex) {
+  VisitedVirtualBasesSetTy VBases;
+  LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+                                 VTableIndex, Base.getBase(), VBases);
+}
+
// Lays out sub-VTTs for every virtual base of RD (direct or indirect),
// visiting each virtual base once.
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+                                   VisitedVirtualBasesSetTy &VBases) {
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXRecordDecl *BaseDecl =
+      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+    // Check if this is a virtual base.
+    if (I->isVirtual()) {
+      // Check if we've seen this base before.
       // NOTE(review): same SmallPtrSet::insert-returns-bool reliance as above.
+      if (!VBases.insert(BaseDecl))
+        continue;
+
+      CharUnits BaseOffset =
+        MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+      LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+    }
+
+    // We only need to layout virtual VTTs for this base if it actually has
+    // virtual bases.
+    if (BaseDecl->getNumVBases())
+      LayoutVirtualVTTs(BaseDecl, VBases);
+  }
+}
+
// Lays out one (sub-)VTT for Base: primary vtable pointer, secondary VTTs,
// secondary virtual pointers, and — for the primary VTT only — virtual VTTs.
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+  const CXXRecordDecl *RD = Base.getBase();
+
+  // Itanium C++ ABI 2.6.2:
+  //   An array of virtual table addresses, called the VTT, is declared for
+  //   each class type that has indirect or direct virtual base classes.
+  if (RD->getNumVBases() == 0)
+    return;
+
+  bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+  if (!IsPrimaryVTT) {
+    // Remember the sub-VTT index.
    // (Upstream's identifier spelling "SubVTTIndicies" is kept — it is code,
    // not a comment.)
+    SubVTTIndicies[Base] = VTTComponents.size();
+  }
+
+  uint64_t VTableIndex = VTTVTables.size();
+  VTTVTables.push_back(VTTVTable(Base, BaseIsVirtual));
+
+  // Add the primary vtable pointer.
+  AddVTablePointer(Base, VTableIndex, RD);
+
+  // Add the secondary VTTs.
+  LayoutSecondaryVTTs(Base);
+
+  // Add the secondary virtual pointers.
+  LayoutSecondaryVirtualPointers(Base, VTableIndex);
+
+  // If this is the primary VTT, we want to lay out virtual VTTs as well.
+  if (IsPrimaryVTT) {
+    VisitedVirtualBasesSetTy VBases;
+    LayoutVirtualVTTs(Base.getBase(), VBases);
+  }
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
new file mode 100644
index 000000000000..5f7ae0f3ff4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -0,0 +1,3428 @@
+//===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+  /// DerivedClass - The derived class.
+  const CXXRecordDecl *DerivedClass;
+
+  /// VirtualBase - If the path from the derived class to the base class
+  /// involves virtual base classes, this holds the declaration of the last
+  /// virtual base in this path (i.e. closest to the base class).
+  const CXXRecordDecl *VirtualBase;
+
+  /// NonVirtualOffset - The offset from the derived class to the base class.
+  /// (Or the offset from the virtual base class to the base class, if the
+  /// path from the derived class to the base class involves a virtual base
+  /// class.)
+  CharUnits NonVirtualOffset;
+
+  BaseOffset() : DerivedClass(0), VirtualBase(0),
+                 NonVirtualOffset(CharUnits::Zero()) { }
+  BaseOffset(const CXXRecordDecl *DerivedClass,
+             const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
+    : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+    NonVirtualOffset(NonVirtualOffset) { }
+
  // "Empty" means no adjustment at all: no virtual step and a zero
  // non-virtual offset.
+  bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+  /// OverriderInfo - Information about a final overrider.
+  struct OverriderInfo {
+    /// Method - The method decl of the overrider.
+    const CXXMethodDecl *Method;
+
+    /// Offset - the base offset of the overrider's parent in the layout class.
+    CharUnits Offset;
+
+    OverriderInfo() : Method(0), Offset(CharUnits::Zero()) { }
+  };
+
+private:
+  /// MostDerivedClass - The most derived class for which the final overriders
+  /// are stored.
+  const CXXRecordDecl *MostDerivedClass;
+
+  /// MostDerivedClassOffset - If we're building final overriders for a
+  /// construction vtable, this holds the offset from the layout class to the
+  /// most derived class.
+  const CharUnits MostDerivedClassOffset;
+
+  /// LayoutClass - The class we're using for layout information. Will be
+  /// different than the most derived class if the final overriders are for a
+  /// construction vtable.
+  const CXXRecordDecl *LayoutClass;
+
+  ASTContext &Context;
+
+  /// MostDerivedClassLayout - the AST record layout of the most derived class.
+  const ASTRecordLayout &MostDerivedClassLayout;
+
+  /// MethodBaseOffsetPairTy - Uniquely identifies a member function
+  /// in a base subobject.
+  typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
+
+  typedef llvm::DenseMap<MethodBaseOffsetPairTy,
+                         OverriderInfo> OverridersMapTy;
+
+  /// OverridersMap - The final overriders for all virtual member functions of
+  /// all the base subobjects of the most derived class.
  // Keyed by (method, base-subobject offset) so distinct subobjects of the
  // same class type get distinct overrider entries.
+  OverridersMapTy OverridersMap;
+
+  /// SubobjectsToOffsetsMapTy - A mapping from a base subobject (represented
+  /// as a record decl and a subobject number) and its offsets in the most
+  /// derived class as well as the layout class.
+  typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+                         CharUnits> SubobjectOffsetMapTy;
+
+  typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
+
+  /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+  /// given base.
+  void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+                          CharUnits OffsetInLayoutClass,
+                          SubobjectOffsetMapTy &SubobjectOffsets,
+                          SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+                          SubobjectCountMapTy &SubobjectCounts);
+
+  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+  /// dump - dump the final overriders for a base subobject, and all its direct
+  /// and indirect base subobjects.
+  void dump(raw_ostream &Out, BaseSubobject Base,
+            VisitedVirtualBasesSetTy& VisitedVirtualBases);
+
+public:
+  FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+                  CharUnits MostDerivedClassOffset,
+                  const CXXRecordDecl *LayoutClass);
+
+  /// getOverrider - Get the final overrider for the given method declaration in
+  /// the subobject with the given base offset.
+  OverriderInfo getOverrider(const CXXMethodDecl *MD,
+                             CharUnits BaseOffset) const {
+    assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
+           "Did not find overrider!");
+
+    return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
+  }
+
+  /// dump - dump the final overriders.
+  void dump() {
+    VisitedVirtualBasesSetTy VisitedVirtualBases;
+    dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+         VisitedVirtualBases);
+  }
+
+};
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Computes all base-subobject offsets, then records the unique final
// overrider for every virtual method of every base subobject.
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+                                 CharUnits MostDerivedClassOffset,
+                                 const CXXRecordDecl *LayoutClass)
+  : MostDerivedClass(MostDerivedClass),
+  MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+  Context(MostDerivedClass->getASTContext()),
+  MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+  // Compute base offsets.
+  SubobjectOffsetMapTy SubobjectOffsets;
+  SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+  SubobjectCountMapTy SubobjectCounts;
+  ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+                     /*IsVirtual=*/false,
+                     MostDerivedClassOffset,
+                     SubobjectOffsets, SubobjectLayoutClassOffsets,
+                     SubobjectCounts);
+
+  // Get the final overriders.
+  CXXFinalOverriderMap FinalOverriders;
+  MostDerivedClass->getFinalOverriders(FinalOverriders);
+
+  for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+       E = FinalOverriders.end(); I != E; ++I) {
+    const CXXMethodDecl *MD = I->first;
+    const OverridingMethods& Methods = I->second;
+
    // NOTE(review): the inner iterator deliberately shadows the outer I/E;
    // inside this loop I refers to (subobject number -> overrider) entries.
+    for (OverridingMethods::const_iterator I = Methods.begin(),
+         E = Methods.end(); I != E; ++I) {
+      unsigned SubobjectNumber = I->first;
+      assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+                                                   SubobjectNumber)) &&
+             "Did not find subobject offset!");
+
+      CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+                                                            SubobjectNumber)];
+
+      assert(I->second.size() == 1 && "Final overrider is not unique!");
+      const UniqueVirtualMethod &Method = I->second.front();
+
+      const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+      assert(SubobjectLayoutClassOffsets.count(
+             std::make_pair(OverriderRD, Method.Subobject))
+             && "Did not find subobject offset!");
+      CharUnits OverriderOffset =
+        SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+                                                   Method.Subobject)];
+
+      OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+      assert(!Overrider.Method && "Overrider should not exist yet!");
+
+      Overrider.Offset = OverriderOffset;
+      Overrider.Method = Method.Method;
+    }
+  }
+
+#if DUMP_OVERRIDERS
+  // And dump them (for now).
+  dump();
+#endif
+}
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Computes the (virtual base, non-virtual offset) adjustment described by a
// specific inheritance path from DerivedRD down to some base.
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+                                    const CXXRecordDecl *DerivedRD,
+                                    const CXXBasePath &Path) {
+  CharUnits NonVirtualOffset = CharUnits::Zero();
+
+  unsigned NonVirtualStart = 0;
+  const CXXRecordDecl *VirtualBase = 0;
+
+  // First, look for the virtual base class.
  // Walk the path backwards so we find the virtual base *closest to the
  // base class*; everything after it contributes to the non-virtual part.
+  for (int I = Path.size(), E = 0; I != E; --I) {
+    const CXXBasePathElement &Element = Path[I - 1];
+
+    if (Element.Base->isVirtual()) {
+      NonVirtualStart = I;
+      QualType VBaseType = Element.Base->getType();
+      VirtualBase = VBaseType->getAsCXXRecordDecl();
+      break;
+    }
+  }
+
+  // Now compute the non-virtual offset.
+  for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+    const CXXBasePathElement &Element = Path[I];
+
+    // Check the base class offset.
+    const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+    const CXXRecordDecl *Base = Element.Base->getType()->getAsCXXRecordDecl();
+
+    NonVirtualOffset += Layout.getBaseClassOffset(Base);
+  }
+
+  // FIXME: This should probably use CharUnits or something. Maybe we should
+  // even change the base offsets in ASTRecordLayout to be specified in
+  // CharUnits.
+  return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
+
+}
+
// Overload taking (base, derived) classes; uses the first recorded
// inheritance path (ambiguity detection is disabled here).
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+                                    const CXXRecordDecl *BaseRD,
+                                    const CXXRecordDecl *DerivedRD) {
+  CXXBasePaths Paths(/*FindAmbiguities=*/false,
+                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+  if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
+    llvm_unreachable("Class must be derived from the passed in base class!");
+
+  return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
+
// Computes the covariant return-type adjustment (thunk offset) needed when
// DerivedMD overrides BaseMD with a pointer/reference to a derived class.
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+                                  const CXXMethodDecl *DerivedMD,
+                                  const CXXMethodDecl *BaseMD) {
+  const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+  const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+  // Canonicalize the return types.
+  CanQualType CanDerivedReturnType =
+    Context.getCanonicalType(DerivedFT->getResultType());
+  CanQualType CanBaseReturnType =
+    Context.getCanonicalType(BaseFT->getResultType());
+
+  assert(CanDerivedReturnType->getTypeClass() ==
+         CanBaseReturnType->getTypeClass() &&
+         "Types must have same type class!");
+
+  if (CanDerivedReturnType == CanBaseReturnType) {
+    // No adjustment needed.
+    return BaseOffset();
+  }
+
  // Covariant returns must both be references or both be pointers; strip one
  // level of indirection so we can compare the pointee class types.
+  if (isa<ReferenceType>(CanDerivedReturnType)) {
+    CanDerivedReturnType =
+      CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+    CanBaseReturnType =
+      CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+  } else if (isa<PointerType>(CanDerivedReturnType)) {
+    CanDerivedReturnType =
+      CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+    CanBaseReturnType =
+      CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+  } else {
+    llvm_unreachable("Unexpected return type!");
+  }
+
+  // We need to compare unqualified types here; consider
+  //   const T *Base::foo();
+  //   T *Derived::foo();
+  if (CanDerivedReturnType.getUnqualifiedType() ==
+      CanBaseReturnType.getUnqualifiedType()) {
+    // No adjustment needed.
+    return BaseOffset();
+  }
+
+  const CXXRecordDecl *DerivedRD =
+    cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+  const CXXRecordDecl *BaseRD =
+    cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+  return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
+
// Recursively records, for every base subobject of Base, its offset in the
// most derived class and in the layout class; non-virtual subobjects of the
// same type are numbered 1, 2, ... while virtual bases use number 0.
+void
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+                              CharUnits OffsetInLayoutClass,
+                              SubobjectOffsetMapTy &SubobjectOffsets,
+                              SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+                              SubobjectCountMapTy &SubobjectCounts) {
+  const CXXRecordDecl *RD = Base.getBase();
+
+  unsigned SubobjectNumber = 0;
+  if (!IsVirtual)
+    SubobjectNumber = ++SubobjectCounts[RD];
+
+  // Set up the subobject to offset mapping.
+  assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+         && "Subobject offset already exists!");
+  assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+         && "Subobject offset already exists!");
+
+  SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
+  SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+    OffsetInLayoutClass;
+
+  // Traverse our bases.
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+
+    CharUnits BaseOffset;
+    CharUnits BaseOffsetInLayoutClass;
+    if (I->isVirtual()) {
+      // Check if we've visited this virtual base before.
      // (Virtual bases always use subobject number 0 — see above.)
+      if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+        continue;
+
+      const ASTRecordLayout &LayoutClassLayout =
+        Context.getASTRecordLayout(LayoutClass);
+
+      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+      BaseOffsetInLayoutClass =
+        LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+    } else {
+      const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+      CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
+
+      BaseOffset = Base.getBaseOffset() + Offset;
+      BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+    }
+
+    ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
+                       I->isVirtual(), BaseOffsetInLayoutClass,
+                       SubobjectOffsets, SubobjectLayoutClassOffsets,
+                       SubobjectCounts);
+  }
+}
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Debug helper (reached via DUMP_OVERRIDERS): prints, bottom-up, the final
// overrider chosen for each virtual method of each base subobject.
+void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
+                           VisitedVirtualBasesSetTy &VisitedVirtualBases) {
+  const CXXRecordDecl *RD = Base.getBase();
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+
+    // Ignore bases that don't have any virtual member functions.
+    if (!BaseDecl->isPolymorphic())
+      continue;
+
+    CharUnits BaseOffset;
+    if (I->isVirtual()) {
+      if (!VisitedVirtualBases.insert(BaseDecl)) {
+        // We've visited this base before.
+        continue;
+      }
+
+      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+    } else {
+      BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
+    }
+
+    dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
+  }
+
+  Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+  Out << Base.getBaseOffset().getQuantity() << ")\n";
+
+  // Now dump the overriders for this base subobject.
+  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+       E = RD->method_end(); I != E; ++I) {
+    const CXXMethodDecl *MD = *I;
+
+    if (!MD->isVirtual())
+      continue;
+
+    OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
+
+    Out << "  " << MD->getQualifiedNameAsString() << " - (";
+    Out << Overrider.Method->getQualifiedNameAsString();
+    Out << ", " << Overrider.Offset.getQuantity() << ')';
+
+    BaseOffset Offset;
    // Pure virtual overriders are never called, so no return adjustment is
    // computed for them.
+    if (!Overrider.Method->isPure())
+      Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+
+    if (!Offset.isEmpty()) {
+      Out << " [ret-adj: ";
+      if (Offset.VirtualBase)
+        Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+
+      Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
+    }
+
+    Out << "\n";
+  }
+}
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+  typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
+
+  /// Offsets - Keeps track of methods and their offsets.
+  // FIXME: This should be a real map and not a vector.
+  SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+  /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+  /// can share the same vcall offset.
+  static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+                                         const CXXMethodDecl *RHS);
+
+public:
+  /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+  /// add was successful, or false if there was already a member function with
+  /// the same signature in the map.
+  bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
+
+  /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+  /// vtable address point) for the given virtual member function.
+  CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+  // empty - Return whether the offset map is empty or not.
+  bool empty() const { return Offsets.empty(); }
+};
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Structural signature comparison on canonical function prototypes; used
// instead of the override list because the two methods need not be related
// by inheritance.
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+                                    const CXXMethodDecl *RHS) {
+  const FunctionProtoType *LT =
+    cast<FunctionProtoType>(LHS->getType().getCanonicalType());
+  const FunctionProtoType *RT =
+    cast<FunctionProtoType>(RHS->getType().getCanonicalType());
+
+  // Fast-path matches in the canonical types.
+  if (LT == RT) return true;
+
+  // Force the signatures to match.  We can't rely on the overrides
+  // list here because there isn't necessarily an inheritance
+  // relationship between the two methods.
+  if (LT->getTypeQuals() != RT->getTypeQuals() ||
+      LT->getNumArgs() != RT->getNumArgs())
+    return false;
+  for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+    if (LT->getArgType(I) != RT->getArgType(I))
+      return false;
+  return true;
+}
+
// Two virtual methods may share one vcall offset slot when they are both
// destructors, or have the same name and signature.
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+                                                const CXXMethodDecl *RHS) {
+  assert(LHS->isVirtual() && "LHS must be virtual!");
+  assert(RHS->isVirtual() && "LHS must be virtual!");
+
+  // A destructor can share a vcall offset with another destructor.
+  if (isa<CXXDestructorDecl>(LHS))
+    return isa<CXXDestructorDecl>(RHS);
+
+  // FIXME: We need to check more things here.
+
+  // The methods must have the same name.
+  DeclarationName LHSName = LHS->getDeclName();
+  DeclarationName RHSName = RHS->getDeclName();
+  if (LHSName != RHSName)
+    return false;
+
+  // And the same signatures.
+  return HasSameVirtualSignature(LHS, RHS);
+}
+
// Linear scan over the recorded offsets (see FIXME on Offsets above).
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+                                    CharUnits OffsetOffset) {
+  // Check if we can reuse an offset.
+  for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+    if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+      return false;
+  }
+
+  // Add the offset.
+  Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+  return true;
+}
+
+CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+  // Look for an offset.
+  for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+    if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+      return Offsets[I].second;
+  }
+
+  llvm_unreachable("Should always find a vcall offset offset!");
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+    VBaseOffsetOffsetsMapTy;
+
+private:
+  /// MostDerivedClass - The most derived class for which we're building vcall
+  /// and vbase offsets.
+  const CXXRecordDecl *MostDerivedClass;
+
+  /// LayoutClass - The class we're using for layout information. Will be
+  /// different than the most derived class if we're building a construction
+  /// vtable.
+  const CXXRecordDecl *LayoutClass;
+
+  /// Context - The ASTContext which we will use for layout information.
+  ASTContext &Context;
+
+  /// Components - vcall and vbase offset components
+  typedef SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+  VTableComponentVectorTy Components;
+
+  /// VisitedVirtualBases - Visited virtual bases.
+  llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+  /// VCallOffsets - Keeps track of vcall offsets.
+  VCallOffsetMap VCallOffsets;
+
+
+  /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+  /// relative to the address point.
+  VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+  /// FinalOverriders - The final overriders of the most derived class.
+  /// (Can be null when we're not building a vtable of the most derived class).
+  const FinalOverriders *Overriders;
+
+  /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+  /// given base subobject.
+  void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+                               CharUnits RealBaseOffset);
+
+  /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+  void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
+
+  /// AddVBaseOffsets - Add vbase offsets for the given class.
+  void AddVBaseOffsets(const CXXRecordDecl *Base,
+                       CharUnits OffsetInLayoutClass);
+
+  /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+  /// chars, relative to the vtable address point.
+  CharUnits getCurrentOffsetOffset() const;
+
+public:
  // Does all the work in the constructor; results are read back through the
  // accessors below.
+  VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+                             const CXXRecordDecl *LayoutClass,
+                             const FinalOverriders *Overriders,
+                             BaseSubobject Base, bool BaseIsVirtual,
+                             CharUnits OffsetInLayoutClass)
+    : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+    Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+    // Add vcall and vbase offsets.
+    AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+  }
+
+  /// Methods for iterating over the components.
  // Components are built in reverse; reverse iterators hand them back in
  // final vtable order.
+  typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+  const_iterator components_begin() const { return Components.rbegin(); }
+  const_iterator components_end() const { return Components.rend(); }
+
+  const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+  const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+    return VBaseOffsetOffsets;
+  }
+};
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
// Emits vcall/vbase offset components for Base, recursing into the primary
// base first so derived-class additions precede base-class requirements.
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+                                                    bool BaseIsVirtual,
+                                                    CharUnits RealBaseOffset) {
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+  // Itanium C++ ABI 2.5.2:
+  //   ..in classes sharing a virtual table with a primary base class, the vcall
+  //   and vbase offsets added by the derived class all come before the vcall
+  //   and vbase offsets required by the base class, so that the latter may be
+  //   laid out as required by the base class without regard to additions from
+  //   the derived class(es).
+
+  // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+  // emit them for the primary base first).
+  if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+    bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+
+    CharUnits PrimaryBaseOffset;
+
+    // Get the base offset of the primary base.
+    if (PrimaryBaseIsVirtual) {
+      assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
+             "Primary vbase should have a zero offset!");
+
+      const ASTRecordLayout &MostDerivedClassLayout =
+        Context.getASTRecordLayout(MostDerivedClass);
+
+      PrimaryBaseOffset =
+        MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+    } else {
+      assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+             "Primary base should have a zero offset!");
+
+      PrimaryBaseOffset = Base.getBaseOffset();
+    }
+
+    AddVCallAndVBaseOffsets(
+      BaseSubobject(PrimaryBase,PrimaryBaseOffset),
+      PrimaryBaseIsVirtual, RealBaseOffset);
+  }
+
+  AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+  // We only want to add vcall offsets for virtual bases.
+  if (BaseIsVirtual)
+    AddVCallOffsets(Base, RealBaseOffset);
+}
+
// (Reconstructed from collapsed diff text; comments added only — vendored
// upstream clang code tokens are unchanged.)
+CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+  // OffsetIndex is the index of this vcall or vbase offset, relative to the
+  // vtable address point. (We subtract 3 to account for the information just
+  // above the address point, the RTTI info, the offset to top, and the
+  // vcall offset itself).
+  int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
  // Offsets are negative multiples of the target pointer width, counted
  // back from the address point.
+  CharUnits PointerWidth =
+    Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+  CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+  return OffsetOffset;
+}
+
// Emits one vcall offset per distinct virtual-method signature of Base and
// its non-virtual, non-primary bases.
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+                                                 CharUnits VBaseOffset) {
+  const CXXRecordDecl *RD = Base.getBase();
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+  // Handle the primary base first.
+  // We only want to add vcall offsets if the base is non-virtual; a virtual
+  // primary base will have its vcall and vbase offsets emitted already.
+  if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
+    // Get the base offset of the primary base.
+    assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
+           "Primary base should have a zero offset!");
+
+    AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+                    VBaseOffset);
+  }
+
+  // Add the vcall offsets.
+  for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+       E = RD->method_end(); I != E; ++I) {
+    const CXXMethodDecl *MD = *I;
+
+    if (!MD->isVirtual())
+      continue;
+
+    CharUnits OffsetOffset = getCurrentOffsetOffset();
+
+    // Don't add a vcall offset if we already have one for this member function
+    // signature.
+    if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+      continue;
+
+    CharUnits Offset = CharUnits::Zero();
+
+    if (Overriders) {
+      // Get the final overrider.
+      FinalOverriders::OverriderInfo Overrider =
+        Overriders->getOverrider(MD, Base.getBaseOffset());
+
+      /// The vcall offset is the offset from the virtual base to the object
+      /// where the function was overridden.
+      Offset = Overrider.Offset - VBaseOffset;
+    }
+
+    Components.push_back(
+      VTableComponent::MakeVCallOffset(Offset));
+  }
+
+  // And iterate over all non-virtual bases (ignoring the primary base).
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+
+    if (I->isVirtual())
+      continue;
+
+    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+    if (BaseDecl == PrimaryBase)
+      continue;
+
+    // Get the base offset of this base.
+    CharUnits BaseOffset = Base.getBaseOffset() +
+      Layout.getBaseClassOffset(BaseDecl);
+
+    AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
+                    VBaseOffset);
+  }
+}
+
// Emits one vbase offset component for every virtual base not yet visited,
// recording its offset-offset for later lookup.
+void
+VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+                                            CharUnits OffsetInLayoutClass) {
+  const ASTRecordLayout &LayoutClassLayout =
+    Context.getASTRecordLayout(LayoutClass);
+
+  // Add vbase offsets.
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+
+    // Check if this is a virtual base that we haven't visited before.
    // NOTE(review): insert() returning true on first insertion — same
    // SmallPtrSet version dependency as noted elsewhere in this file.
+    if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+      CharUnits Offset =
+        LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
+
+      // Add the vbase offset offset.
+      assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+             "vbase offset offset already exists!");
+
+      CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
+      VBaseOffsetOffsets.insert(
+          std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+      Components.push_back(
+          VTableComponent::MakeVBaseOffset(Offset));
+    }
+
+    // Check the base class looking for more vbase offsets.
+    AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+  }
+}
+
+/// ItaniumVTableBuilder - Class for building vtable layout information.
// (This class declaration continues past the end of this chunk; it is
// reproduced verbatim and incomplete here.)
+class ItaniumVTableBuilder {
+public:
+  /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+  /// primary bases.
+  typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+    PrimaryBasesSetVectorTy;
+
+  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+    VBaseOffsetOffsetsMapTy;
+
+  typedef llvm::DenseMap<BaseSubobject, uint64_t>
+    AddressPointsMapTy;
+
+  typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+
+private:
+  /// VTables - Global vtable information.
+  ItaniumVTableContext &VTables;
+
+  /// MostDerivedClass - The most derived class for which we're building this
+  /// vtable.
+  const CXXRecordDecl *MostDerivedClass;
+
+  /// MostDerivedClassOffset - If we're building a construction vtable, this
+  /// holds the offset from the layout class to the most derived class.
+  const CharUnits MostDerivedClassOffset;
+
+  /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+  /// base. (This only makes sense when building a construction vtable).
  bool MostDerivedClassIsVirtual;

  /// LayoutClass - The class we're using for layout information. Will be
  /// different than the most derived class if we're building a construction
  /// vtable.
  const CXXRecordDecl *LayoutClass;

  /// Context - The ASTContext which we will use for layout information.
  ASTContext &Context;

  /// Overriders - The final overriders of the most derived class.
  const FinalOverriders Overriders;

  /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
  /// bases in this vtable.
  llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;

  /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
  /// the most derived class.
  VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;

  /// Components - The components of the vtable being built.
  SmallVector<VTableComponent, 64> Components;

  /// AddressPoints - Address points for the vtable being built.
  AddressPointsMapTy AddressPoints;

  /// MethodInfo - Contains information about a method in a vtable.
  /// (Used for computing 'this' pointer adjustment thunks.)
  struct MethodInfo {
    /// BaseOffset - The base offset of this method.
    const CharUnits BaseOffset;

    /// BaseOffsetInLayoutClass - The base offset in the layout class of this
    /// method.
    const CharUnits BaseOffsetInLayoutClass;

    /// VTableIndex - The index in the vtable that this method has.
    /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex; + + MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass, + uint64_t VTableIndex) + : BaseOffset(BaseOffset), + BaseOffsetInLayoutClass(BaseOffsetInLayoutClass), + VTableIndex(VTableIndex) { } + + MethodInfo() + : BaseOffset(CharUnits::Zero()), + BaseOffsetInLayoutClass(CharUnits::Zero()), + VTableIndex(0) { } + }; + + typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy; + + /// MethodInfoMap - The information for all methods in the vtable we're + /// currently building. + MethodInfoMapTy MethodInfoMap; + + /// MethodVTableIndices - Contains the index (relative to the vtable address + /// point) where the function pointer for a virtual function is stored. + MethodVTableIndicesTy MethodVTableIndices; + + typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy; + + /// VTableThunks - The thunks by vtable index in the vtable currently being + /// built. + VTableThunksMapTy VTableThunks; + + typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy; + typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy; + + /// Thunks - A map that contains all the thunks needed for all methods in the + /// most derived class for which the vtable is currently being built. + ThunksMapTy Thunks; + + /// AddThunk - Add a thunk for the given method. + void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk); + + /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the + /// part of the vtable we're currently building. + void ComputeThisAdjustments(); + + typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy; + + /// PrimaryVirtualBases - All known virtual bases who are a primary base of + /// some other base. + VisitedVirtualBasesSetTy PrimaryVirtualBases; + + /// ComputeReturnAdjustment - Compute the return adjustment given a return + /// adjustment base offset. 
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset); + + /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting + /// the 'this' pointer from the base subobject to the derived subobject. + BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base, + BaseSubobject Derived) const; + + /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the + /// given virtual member function, its offset in the layout class and its + /// final overrider. + ThisAdjustment + ComputeThisAdjustment(const CXXMethodDecl *MD, + CharUnits BaseOffsetInLayoutClass, + FinalOverriders::OverriderInfo Overrider); + + /// AddMethod - Add a single virtual member function to the vtable + /// components vector. + void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment); + + /// IsOverriderUsed - Returns whether the overrider will ever be used in this + /// part of the vtable. + /// + /// Itanium C++ ABI 2.5.2: + /// + /// struct A { virtual void f(); }; + /// struct B : virtual public A { int i; }; + /// struct C : virtual public A { int j; }; + /// struct D : public B, public C {}; + /// + /// When B and C are declared, A is a primary base in each case, so although + /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this + /// adjustment is required and no thunk is generated. However, inside D + /// objects, A is no longer a primary base of C, so if we allowed calls to + /// C::f() to use the copy of A's vtable in the C subobject, we would need + /// to adjust this from C* to B::A*, which would require a third-party + /// thunk. Since we require that a call to C::f() first convert to A*, + /// C-in-D's copy of A's vtable is never referenced, so this is not + /// necessary. 
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider, + CharUnits BaseOffsetInLayoutClass, + const CXXRecordDecl *FirstBaseInPrimaryBaseChain, + CharUnits FirstBaseOffsetInLayoutClass) const; + + + /// AddMethods - Add the methods of this base subobject and all its + /// primary bases to the vtable components vector. + void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass, + const CXXRecordDecl *FirstBaseInPrimaryBaseChain, + CharUnits FirstBaseOffsetInLayoutClass, + PrimaryBasesSetVectorTy &PrimaryBases); + + // LayoutVTable - Layout the vtable for the given base class, including its + // secondary vtables and any vtables for virtual bases. + void LayoutVTable(); + + /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the + /// given base subobject, as well as all its secondary vtables. + /// + /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base + /// or a direct or indirect base of a virtual base. + /// + /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual + /// in the layout class. + void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base, + bool BaseIsMorallyVirtual, + bool BaseIsVirtualInLayoutClass, + CharUnits OffsetInLayoutClass); + + /// LayoutSecondaryVTables - Layout the secondary vtables for the given base + /// subobject. + /// + /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base + /// or a direct or indirect base of a virtual base. + void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual, + CharUnits OffsetInLayoutClass); + + /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this + /// class hierarchy. + void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD, + CharUnits OffsetInLayoutClass, + VisitedVirtualBasesSetTy &VBases); + + /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the + /// given base (excluding any primary bases). 
  void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
                                    VisitedVirtualBasesSetTy &VBases);

  /// isBuildingConstructorVTable - Return whether this vtable builder is
  /// building a construction vtable (i.e. the layout class differs from the
  /// most derived class).
  bool isBuildingConstructorVTable() const {
    return MostDerivedClass != LayoutClass;
  }

public:
  ItaniumVTableBuilder(ItaniumVTableContext &VTables,
                       const CXXRecordDecl *MostDerivedClass,
                       CharUnits MostDerivedClassOffset,
                       bool MostDerivedClassIsVirtual,
                       const CXXRecordDecl *LayoutClass)
      : VTables(VTables), MostDerivedClass(MostDerivedClass),
        MostDerivedClassOffset(MostDerivedClassOffset),
        MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
        LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
        Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
    // This builder implements the Itanium ABI only; Microsoft ABI targets use
    // a different builder.
    assert(!Context.getTargetInfo().getCXXABI().isMicrosoft());

    LayoutVTable();

    if (Context.getLangOpts().DumpVTableLayouts)
      dumpLayout(llvm::outs());
  }

  uint64_t getNumThunks() const {
    return Thunks.size();
  }

  ThunksMapTy::const_iterator thunks_begin() const {
    return Thunks.begin();
  }

  ThunksMapTy::const_iterator thunks_end() const {
    return Thunks.end();
  }

  const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
    return VBaseOffsetOffsets;
  }

  const AddressPointsMapTy &getAddressPoints() const {
    return AddressPoints;
  }

  MethodVTableIndicesTy::const_iterator vtable_indices_begin() const {
    return MethodVTableIndices.begin();
  }

  MethodVTableIndicesTy::const_iterator vtable_indices_end() const {
    return MethodVTableIndices.end();
  }

  /// getNumVTableComponents - Return the number of components in the vtable
  /// currently built.
+ uint64_t getNumVTableComponents() const { + return Components.size(); + } + + const VTableComponent *vtable_component_begin() const { + return Components.begin(); + } + + const VTableComponent *vtable_component_end() const { + return Components.end(); + } + + AddressPointsMapTy::const_iterator address_points_begin() const { + return AddressPoints.begin(); + } + + AddressPointsMapTy::const_iterator address_points_end() const { + return AddressPoints.end(); + } + + VTableThunksMapTy::const_iterator vtable_thunks_begin() const { + return VTableThunks.begin(); + } + + VTableThunksMapTy::const_iterator vtable_thunks_end() const { + return VTableThunks.end(); + } + + /// dumpLayout - Dump the vtable layout. + void dumpLayout(raw_ostream&); +}; + +void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk) { + assert(!isBuildingConstructorVTable() && + "Can't add thunks for construction vtable"); + + SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD]; + + // Check if we have this thunk already. + if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) != + ThunksVector.end()) + return; + + ThunksVector.push_back(Thunk); +} + +typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy; + +/// Visit all the methods overridden by the given method recursively, +/// in a depth-first pre-order. The Visitor's visitor method returns a bool +/// indicating whether to continue the recursion for the given overridden +/// method (i.e. returning false stops the iteration). 
template <class VisitorTy>
static void
visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
  assert(MD->isVirtual() && "Method is not virtual!");

  for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
       E = MD->end_overridden_methods(); I != E; ++I) {
    const CXXMethodDecl *OverriddenMD = *I;
    // The visitor returning false prunes the recursion below this overridden
    // method (but the sibling overridden methods are still visited).
    if (!Visitor.visit(OverriddenMD))
      continue;
    visitAllOverriddenMethods(OverriddenMD, Visitor);
  }
}

namespace {
  /// OverriddenMethodsCollector - Visitor for visitAllOverriddenMethods that
  /// accumulates every overridden method into *Methods.
  struct OverriddenMethodsCollector {
    OverriddenMethodsSetTy *Methods;

    bool visit(const CXXMethodDecl *MD) {
      // Don't recurse on this method if we've already collected it:
      // insert returns true only when MD was newly added to the set.
      return Methods->insert(MD);
    }
  };
}

/// ComputeAllOverriddenMethods - Given a method decl, will return a set of all
/// the overridden methods that the function decl overrides.
static void
ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
                            OverriddenMethodsSetTy& OverriddenMethods) {
  OverriddenMethodsCollector Collector = { &OverriddenMethods };
  visitAllOverriddenMethods(MD, Collector);
}

void ItaniumVTableBuilder::ComputeThisAdjustments() {
  // Now go through the method info map and see if any of the methods need
  // 'this' pointer adjustments.
  for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
       E = MethodInfoMap.end(); I != E; ++I) {
    const CXXMethodDecl *MD = I->first;
    const MethodInfo &MethodInfo = I->second;

    // Ignore adjustments for unused function pointers.
    uint64_t VTableIndex = MethodInfo.VTableIndex;
    if (Components[VTableIndex].getKind() ==
        VTableComponent::CK_UnusedFunctionPointer)
      continue;

    // Get the final overrider for this method.
    FinalOverriders::OverriderInfo Overrider =
      Overriders.getOverrider(MD, MethodInfo.BaseOffset);

    // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) { + // When a return thunk is needed by a derived class that overrides a + // virtual base, gcc uses a virtual 'this' adjustment as well. + // While the thunk itself might be needed by vtables in subclasses or + // in construction vtables, there doesn't seem to be a reason for using + // the thunk in this vtable. Still, we do so to match gcc. + if (VTableThunks.lookup(VTableIndex).Return.isEmpty()) + continue; + } + + ThisAdjustment ThisAdjustment = + ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider); + + if (ThisAdjustment.isEmpty()) + continue; + + // Add it. + VTableThunks[VTableIndex].This = ThisAdjustment; + + if (isa<CXXDestructorDecl>(MD)) { + // Add an adjustment for the deleting destructor as well. + VTableThunks[VTableIndex + 1].This = ThisAdjustment; + } + } + + /// Clear the method info map. + MethodInfoMap.clear(); + + if (isBuildingConstructorVTable()) { + // We don't need to store thunk information for construction vtables. + return; + } + + for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(), + E = VTableThunks.end(); I != E; ++I) { + const VTableComponent &Component = Components[I->first]; + const ThunkInfo &Thunk = I->second; + const CXXMethodDecl *MD; + + switch (Component.getKind()) { + default: + llvm_unreachable("Unexpected vtable component kind!"); + case VTableComponent::CK_FunctionPointer: + MD = Component.getFunctionDecl(); + break; + case VTableComponent::CK_CompleteDtorPointer: + MD = Component.getDestructorDecl(); + break; + case VTableComponent::CK_DeletingDtorPointer: + // We've already added the thunk when we saw the complete dtor pointer. 
+ continue; + } + + if (MD->getParent() == MostDerivedClass) + AddThunk(MD, Thunk); + } +} + +ReturnAdjustment +ItaniumVTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) { + ReturnAdjustment Adjustment; + + if (!Offset.isEmpty()) { + if (Offset.VirtualBase) { + // Get the virtual base offset offset. + if (Offset.DerivedClass == MostDerivedClass) { + // We can get the offset offset directly from our map. + Adjustment.Virtual.Itanium.VBaseOffsetOffset = + VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity(); + } else { + Adjustment.Virtual.Itanium.VBaseOffsetOffset = + VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass, + Offset.VirtualBase).getQuantity(); + } + } + + Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity(); + } + + return Adjustment; +} + +BaseOffset ItaniumVTableBuilder::ComputeThisAdjustmentBaseOffset( + BaseSubobject Base, BaseSubobject Derived) const { + const CXXRecordDecl *BaseRD = Base.getBase(); + const CXXRecordDecl *DerivedRD = Derived.getBase(); + + CXXBasePaths Paths(/*FindAmbiguities=*/true, + /*RecordPaths=*/true, /*DetectVirtual=*/true); + + if (!DerivedRD->isDerivedFrom(BaseRD, Paths)) + llvm_unreachable("Class must be derived from the passed in base class!"); + + // We have to go through all the paths, and see which one leads us to the + // right base subobject. + for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end(); + I != E; ++I) { + BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I); + + CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset; + + if (Offset.VirtualBase) { + // If we have a virtual base class, the non-virtual offset is relative + // to the virtual base class offset. + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + /// Get the virtual base offset, relative to the most derived class + /// layout. 
+ OffsetToBaseSubobject += + LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase); + } else { + // Otherwise, the non-virtual offset is relative to the derived class + // offset. + OffsetToBaseSubobject += Derived.getBaseOffset(); + } + + // Check if this path gives us the right base subobject. + if (OffsetToBaseSubobject == Base.getBaseOffset()) { + // Since we're going from the base class _to_ the derived class, we'll + // invert the non-virtual offset here. + Offset.NonVirtualOffset = -Offset.NonVirtualOffset; + return Offset; + } + } + + return BaseOffset(); +} + +ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment( + const CXXMethodDecl *MD, CharUnits BaseOffsetInLayoutClass, + FinalOverriders::OverriderInfo Overrider) { + // Ignore adjustments for pure virtual member functions. + if (Overrider.Method->isPure()) + return ThisAdjustment(); + + BaseSubobject OverriddenBaseSubobject(MD->getParent(), + BaseOffsetInLayoutClass); + + BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(), + Overrider.Offset); + + // Compute the adjustment offset. + BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject, + OverriderBaseSubobject); + if (Offset.isEmpty()) + return ThisAdjustment(); + + ThisAdjustment Adjustment; + + if (Offset.VirtualBase) { + // Get the vcall offset map for this virtual base. + VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase]; + + if (VCallOffsets.empty()) { + // We don't have vcall offsets for this virtual base, go ahead and + // build them. + VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass, + /*FinalOverriders=*/0, + BaseSubobject(Offset.VirtualBase, + CharUnits::Zero()), + /*BaseIsVirtual=*/true, + /*OffsetInLayoutClass=*/ + CharUnits::Zero()); + + VCallOffsets = Builder.getVCallOffsets(); + } + + Adjustment.Virtual.Itanium.VCallOffsetOffset = + VCallOffsets.getVCallOffsetOffset(MD).getQuantity(); + } + + // Set the non-virtual part of the adjustment. 
  Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();

  return Adjustment;
}

void ItaniumVTableBuilder::AddMethod(const CXXMethodDecl *MD,
                                     ReturnAdjustment ReturnAdjustment) {
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(ReturnAdjustment.isEmpty() &&
           "Destructor can't have return adjustment!");

    // Add both the complete destructor and the deleting destructor.
    Components.push_back(VTableComponent::MakeCompleteDtor(DD));
    Components.push_back(VTableComponent::MakeDeletingDtor(DD));
  } else {
    // Add the return adjustment if necessary.
    if (!ReturnAdjustment.isEmpty())
      VTableThunks[Components.size()].Return = ReturnAdjustment;

    // Add the function.
    Components.push_back(VTableComponent::MakeFunction(MD));
  }
}

/// OverridesIndirectMethodInBases - Return whether the given member function
/// overrides any methods in the set of given bases.
/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
/// For example, if we have:
///
/// struct A { virtual void f(); }
/// struct B : A { virtual void f(); }
/// struct C : B { virtual void f(); }
///
/// OverridesIndirectMethodInBases will return true if given C::f as the method
/// and { A } as the set of bases.
static bool OverridesIndirectMethodInBases(
    const CXXMethodDecl *MD,
    ItaniumVTableBuilder::PrimaryBasesSetVectorTy &Bases) {
  if (Bases.count(MD->getParent()))
    return true;

  for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
       E = MD->end_overridden_methods(); I != E; ++I) {
    const CXXMethodDecl *OverriddenMD = *I;

    // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases)) + return true; + } + + return false; +} + +bool ItaniumVTableBuilder::IsOverriderUsed( + const CXXMethodDecl *Overrider, CharUnits BaseOffsetInLayoutClass, + const CXXRecordDecl *FirstBaseInPrimaryBaseChain, + CharUnits FirstBaseOffsetInLayoutClass) const { + // If the base and the first base in the primary base chain have the same + // offsets, then this overrider will be used. + if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass) + return true; + + // We know now that Base (or a direct or indirect base of it) is a primary + // base in part of the class hierarchy, but not a primary base in the most + // derived class. + + // If the overrider is the first base in the primary base chain, we know + // that the overrider will be used. + if (Overrider->getParent() == FirstBaseInPrimaryBaseChain) + return true; + + ItaniumVTableBuilder::PrimaryBasesSetVectorTy PrimaryBases; + + const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain; + PrimaryBases.insert(RD); + + // Now traverse the base chain, starting with the first base, until we find + // the base that is no longer a primary base. + while (true) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); + + if (!PrimaryBase) + break; + + if (Layout.isPrimaryBaseVirtual()) { + assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() && + "Primary base should always be at offset 0!"); + + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + // Now check if this is the primary base that is not a primary base in the + // most derived class. + if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) != + FirstBaseOffsetInLayoutClass) { + // We found it, stop walking the chain. 
+ break; + } + } else { + assert(Layout.getBaseClassOffset(PrimaryBase).isZero() && + "Primary base should always be at offset 0!"); + } + + if (!PrimaryBases.insert(PrimaryBase)) + llvm_unreachable("Found a duplicate primary base!"); + + RD = PrimaryBase; + } + + // If the final overrider is an override of one of the primary bases, + // then we know that it will be used. + return OverridesIndirectMethodInBases(Overrider, PrimaryBases); +} + +typedef llvm::SmallSetVector<const CXXRecordDecl *, 8> BasesSetVectorTy; + +/// FindNearestOverriddenMethod - Given a method, returns the overridden method +/// from the nearest base. Returns null if no method was found. +/// The Bases are expected to be sorted in a base-to-derived order. +static const CXXMethodDecl * +FindNearestOverriddenMethod(const CXXMethodDecl *MD, + BasesSetVectorTy &Bases) { + OverriddenMethodsSetTy OverriddenMethods; + ComputeAllOverriddenMethods(MD, OverriddenMethods); + + for (int I = Bases.size(), E = 0; I != E; --I) { + const CXXRecordDecl *PrimaryBase = Bases[I - 1]; + + // Now check the overridden methods. + for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(), + E = OverriddenMethods.end(); I != E; ++I) { + const CXXMethodDecl *OverriddenMD = *I; + + // We found our overridden method. + if (OverriddenMD->getParent() == PrimaryBase) + return OverriddenMD; + } + } + + return 0; +} + +void ItaniumVTableBuilder::AddMethods( + BaseSubobject Base, CharUnits BaseOffsetInLayoutClass, + const CXXRecordDecl *FirstBaseInPrimaryBaseChain, + CharUnits FirstBaseOffsetInLayoutClass, + PrimaryBasesSetVectorTy &PrimaryBases) { + // Itanium C++ ABI 2.5.2: + // The order of the virtual function pointers in a virtual table is the + // order of declaration of the corresponding member functions in the class. 
+ // + // There is an entry for any virtual function declared in a class, + // whether it is a new function or overrides a base class function, + // unless it overrides a function from the primary base, and conversion + // between their return types does not require an adjustment. + + const CXXRecordDecl *RD = Base.getBase(); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) { + CharUnits PrimaryBaseOffset; + CharUnits PrimaryBaseOffsetInLayoutClass; + if (Layout.isPrimaryBaseVirtual()) { + assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() && + "Primary vbase should have a zero offset!"); + + const ASTRecordLayout &MostDerivedClassLayout = + Context.getASTRecordLayout(MostDerivedClass); + + PrimaryBaseOffset = + MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase); + + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + PrimaryBaseOffsetInLayoutClass = + LayoutClassLayout.getVBaseClassOffset(PrimaryBase); + } else { + assert(Layout.getBaseClassOffset(PrimaryBase).isZero() && + "Primary base should have a zero offset!"); + + PrimaryBaseOffset = Base.getBaseOffset(); + PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass; + } + + AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset), + PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain, + FirstBaseOffsetInLayoutClass, PrimaryBases); + + if (!PrimaryBases.insert(PrimaryBase)) + llvm_unreachable("Found a duplicate primary base!"); + } + + const CXXDestructorDecl *ImplicitVirtualDtor = 0; + + typedef llvm::SmallVector<const CXXMethodDecl *, 8> NewVirtualFunctionsTy; + NewVirtualFunctionsTy NewVirtualFunctions; + + // Now go through all virtual member functions and add them. + for (CXXRecordDecl::method_iterator I = RD->method_begin(), + E = RD->method_end(); I != E; ++I) { + const CXXMethodDecl *MD = *I; + + if (!MD->isVirtual()) + continue; + + // Get the final overrider. 
+ FinalOverriders::OverriderInfo Overrider = + Overriders.getOverrider(MD, Base.getBaseOffset()); + + // Check if this virtual member function overrides a method in a primary + // base. If this is the case, and the return type doesn't require adjustment + // then we can just use the member function from the primary base. + if (const CXXMethodDecl *OverriddenMD = + FindNearestOverriddenMethod(MD, PrimaryBases)) { + if (ComputeReturnAdjustmentBaseOffset(Context, MD, + OverriddenMD).isEmpty()) { + // Replace the method info of the overridden method with our own + // method. + assert(MethodInfoMap.count(OverriddenMD) && + "Did not find the overridden method!"); + MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD]; + + MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass, + OverriddenMethodInfo.VTableIndex); + + assert(!MethodInfoMap.count(MD) && + "Should not have method info for this method yet!"); + + MethodInfoMap.insert(std::make_pair(MD, MethodInfo)); + MethodInfoMap.erase(OverriddenMD); + + // If the overridden method exists in a virtual base class or a direct + // or indirect base class of a virtual base class, we need to emit a + // thunk if we ever have a class hierarchy where the base class is not + // a primary base in the complete object. + if (!isBuildingConstructorVTable() && OverriddenMD != MD) { + // Compute the this adjustment. + ThisAdjustment ThisAdjustment = + ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass, + Overrider); + + if (ThisAdjustment.Virtual.Itanium.VCallOffsetOffset && + Overrider.Method->getParent() == MostDerivedClass) { + + // There's no return adjustment from OverriddenMD and MD, + // but that doesn't mean there isn't one between MD and + // the final overrider. 
+ BaseOffset ReturnAdjustmentOffset = + ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD); + ReturnAdjustment ReturnAdjustment = + ComputeReturnAdjustment(ReturnAdjustmentOffset); + + // This is a virtual thunk for the most derived class, add it. + AddThunk(Overrider.Method, + ThunkInfo(ThisAdjustment, ReturnAdjustment)); + } + } + + continue; + } + } + + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + if (MD->isImplicit()) { + // Itanium C++ ABI 2.5.2: + // If a class has an implicitly-defined virtual destructor, + // its entries come after the declared virtual function pointers. + + assert(!ImplicitVirtualDtor && + "Did already see an implicit virtual dtor!"); + ImplicitVirtualDtor = DD; + continue; + } + } + + NewVirtualFunctions.push_back(MD); + } + + if (ImplicitVirtualDtor) + NewVirtualFunctions.push_back(ImplicitVirtualDtor); + + for (NewVirtualFunctionsTy::const_iterator I = NewVirtualFunctions.begin(), + E = NewVirtualFunctions.end(); I != E; ++I) { + const CXXMethodDecl *MD = *I; + + // Get the final overrider. + FinalOverriders::OverriderInfo Overrider = + Overriders.getOverrider(MD, Base.getBaseOffset()); + + // Insert the method info for this method. + MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass, + Components.size()); + + assert(!MethodInfoMap.count(MD) && + "Should not have method info for this method yet!"); + MethodInfoMap.insert(std::make_pair(MD, MethodInfo)); + + // Check if this overrider is going to be used. + const CXXMethodDecl *OverriderMD = Overrider.Method; + if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass, + FirstBaseInPrimaryBaseChain, + FirstBaseOffsetInLayoutClass)) { + Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD)); + continue; + } + + // Check if this overrider needs a return adjustment. + // We don't want to do this for pure virtual member functions. 
+ BaseOffset ReturnAdjustmentOffset; + if (!OverriderMD->isPure()) { + ReturnAdjustmentOffset = + ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD); + } + + ReturnAdjustment ReturnAdjustment = + ComputeReturnAdjustment(ReturnAdjustmentOffset); + + AddMethod(Overrider.Method, ReturnAdjustment); + } +} + +void ItaniumVTableBuilder::LayoutVTable() { + LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass, + CharUnits::Zero()), + /*BaseIsMorallyVirtual=*/false, + MostDerivedClassIsVirtual, + MostDerivedClassOffset); + + VisitedVirtualBasesSetTy VBases; + + // Determine the primary virtual bases. + DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset, + VBases); + VBases.clear(); + + LayoutVTablesForVirtualBases(MostDerivedClass, VBases); + + // -fapple-kext adds an extra entry at end of vtbl. + bool IsAppleKext = Context.getLangOpts().AppleKext; + if (IsAppleKext) + Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero())); +} + +void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables( + BaseSubobject Base, bool BaseIsMorallyVirtual, + bool BaseIsVirtualInLayoutClass, CharUnits OffsetInLayoutClass) { + assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!"); + + // Add vcall and vbase offsets for this vtable. + VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders, + Base, BaseIsVirtualInLayoutClass, + OffsetInLayoutClass); + Components.append(Builder.components_begin(), Builder.components_end()); + + // Check if we need to add these vcall offsets. + if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) { + VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()]; + + if (VCallOffsets.empty()) + VCallOffsets = Builder.getVCallOffsets(); + } + + // If we're laying out the most derived class we want to keep track of the + // virtual base class offset offsets. 
+ if (Base.getBase() == MostDerivedClass) + VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets(); + + // Add the offset to top. + CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass; + Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop)); + + // Next, add the RTTI. + Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass)); + + uint64_t AddressPoint = Components.size(); + + // Now go through all virtual member functions and add them. + PrimaryBasesSetVectorTy PrimaryBases; + AddMethods(Base, OffsetInLayoutClass, + Base.getBase(), OffsetInLayoutClass, + PrimaryBases); + + const CXXRecordDecl *RD = Base.getBase(); + if (RD == MostDerivedClass) { + assert(MethodVTableIndices.empty()); + for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(), + E = MethodInfoMap.end(); I != E; ++I) { + const CXXMethodDecl *MD = I->first; + const MethodInfo &MI = I->second; + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] + = MI.VTableIndex - AddressPoint; + MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] + = MI.VTableIndex + 1 - AddressPoint; + } else { + MethodVTableIndices[MD] = MI.VTableIndex - AddressPoint; + } + } + } + + // Compute 'this' pointer adjustments. + ComputeThisAdjustments(); + + // Add all address points. + while (true) { + AddressPoints.insert(std::make_pair( + BaseSubobject(RD, OffsetInLayoutClass), + AddressPoint)); + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); + + if (!PrimaryBase) + break; + + if (Layout.isPrimaryBaseVirtual()) { + // Check if this virtual primary base is a primary base in the layout + // class. If it's not, we don't want to add it. 
+ const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) != + OffsetInLayoutClass) { + // We don't want to add this class (or any of its primary bases). + break; + } + } + + RD = PrimaryBase; + } + + // Layout secondary vtables. + LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass); +} + +void +ItaniumVTableBuilder::LayoutSecondaryVTables(BaseSubobject Base, + bool BaseIsMorallyVirtual, + CharUnits OffsetInLayoutClass) { + // Itanium C++ ABI 2.5.2: + // Following the primary virtual table of a derived class are secondary + // virtual tables for each of its proper base classes, except any primary + // base(s) with which it shares its primary virtual table. + + const CXXRecordDecl *RD = Base.getBase(); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); + + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + // Ignore virtual bases, we'll emit them later. + if (I->isVirtual()) + continue; + + const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl(); + + // Ignore bases that don't have a vtable. + if (!BaseDecl->isDynamicClass()) + continue; + + if (isBuildingConstructorVTable()) { + // Itanium C++ ABI 2.6.4: + // Some of the base class subobjects may not need construction virtual + // tables, which will therefore not be present in the construction + // virtual table group, even though the subobject virtual tables are + // present in the main virtual table group for the complete object. + if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases()) + continue; + } + + // Get the base offset of this base. 
+ CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl); + CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset; + + CharUnits BaseOffsetInLayoutClass = + OffsetInLayoutClass + RelativeBaseOffset; + + // Don't emit a secondary vtable for a primary base. We might however want + // to emit secondary vtables for other bases of this base. + if (BaseDecl == PrimaryBase) { + LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset), + BaseIsMorallyVirtual, BaseOffsetInLayoutClass); + continue; + } + + // Layout the primary vtable (and any secondary vtables) for this base. + LayoutPrimaryAndSecondaryVTables( + BaseSubobject(BaseDecl, BaseOffset), + BaseIsMorallyVirtual, + /*BaseIsVirtualInLayoutClass=*/false, + BaseOffsetInLayoutClass); + } +} + +void ItaniumVTableBuilder::DeterminePrimaryVirtualBases( + const CXXRecordDecl *RD, CharUnits OffsetInLayoutClass, + VisitedVirtualBasesSetTy &VBases) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // Check if this base has a primary base. + if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) { + + // Check if it's virtual. + if (Layout.isPrimaryBaseVirtual()) { + bool IsPrimaryVirtualBase = true; + + if (isBuildingConstructorVTable()) { + // Check if the base is actually a primary base in the class we use for + // layout. + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + CharUnits PrimaryBaseOffsetInLayoutClass = + LayoutClassLayout.getVBaseClassOffset(PrimaryBase); + + // We know that the base is not a primary base in the layout class if + // the base offsets are different. + if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass) + IsPrimaryVirtualBase = false; + } + + if (IsPrimaryVirtualBase) + PrimaryVirtualBases.insert(PrimaryBase); + } + } + + // Traverse bases, looking for more primary virtual bases. 
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl(); + + CharUnits BaseOffsetInLayoutClass; + + if (I->isVirtual()) { + if (!VBases.insert(BaseDecl)) + continue; + + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + BaseOffsetInLayoutClass = + LayoutClassLayout.getVBaseClassOffset(BaseDecl); + } else { + BaseOffsetInLayoutClass = + OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl); + } + + DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases); + } +} + +void ItaniumVTableBuilder::LayoutVTablesForVirtualBases( + const CXXRecordDecl *RD, VisitedVirtualBasesSetTy &VBases) { + // Itanium C++ ABI 2.5.2: + // Then come the virtual base virtual tables, also in inheritance graph + // order, and again excluding primary bases (which share virtual tables with + // the classes for which they are primary). + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl(); + + // Check if this base needs a vtable. (If it's virtual, not a primary base + // of some other class, and we haven't visited it before). 
+ if (I->isVirtual() && BaseDecl->isDynamicClass() && + !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) { + const ASTRecordLayout &MostDerivedClassLayout = + Context.getASTRecordLayout(MostDerivedClass); + CharUnits BaseOffset = + MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); + + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + CharUnits BaseOffsetInLayoutClass = + LayoutClassLayout.getVBaseClassOffset(BaseDecl); + + LayoutPrimaryAndSecondaryVTables( + BaseSubobject(BaseDecl, BaseOffset), + /*BaseIsMorallyVirtual=*/true, + /*BaseIsVirtualInLayoutClass=*/true, + BaseOffsetInLayoutClass); + } + + // We only need to check the base for virtual base vtables if it actually + // has virtual bases. + if (BaseDecl->getNumVBases()) + LayoutVTablesForVirtualBases(BaseDecl, VBases); + } +} + +struct ItaniumThunkInfoComparator { + bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) { + assert(LHS.Method == 0); + assert(RHS.Method == 0); + + if (LHS.This != RHS.This) + return LHS.This < RHS.This; + + if (LHS.Return != RHS.Return) + return LHS.Return < RHS.Return; + + return false; + } +}; + +/// dumpLayout - Dump the vtable layout. +void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { + // FIXME: write more tests that actually use the dumpLayout output to prevent + // ItaniumVTableBuilder regressions. + + if (isBuildingConstructorVTable()) { + Out << "Construction vtable for ('"; + Out << MostDerivedClass->getQualifiedNameAsString() << "', "; + Out << MostDerivedClassOffset.getQuantity() << ") in '"; + Out << LayoutClass->getQualifiedNameAsString(); + } else { + Out << "Vtable for '"; + Out << MostDerivedClass->getQualifiedNameAsString(); + } + Out << "' (" << Components.size() << " entries).\n"; + + // Iterate through the address points and insert them into a new map where + // they are keyed by the index and not the base object. 
+ // Since an address point can be shared by multiple subobjects, we use an + // STL multimap. + std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex; + for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(), + E = AddressPoints.end(); I != E; ++I) { + const BaseSubobject& Base = I->first; + uint64_t Index = I->second; + + AddressPointsByIndex.insert(std::make_pair(Index, Base)); + } + + for (unsigned I = 0, E = Components.size(); I != E; ++I) { + uint64_t Index = I; + + Out << llvm::format("%4d | ", I); + + const VTableComponent &Component = Components[I]; + + // Dump the component. + switch (Component.getKind()) { + + case VTableComponent::CK_VCallOffset: + Out << "vcall_offset (" + << Component.getVCallOffset().getQuantity() + << ")"; + break; + + case VTableComponent::CK_VBaseOffset: + Out << "vbase_offset (" + << Component.getVBaseOffset().getQuantity() + << ")"; + break; + + case VTableComponent::CK_OffsetToTop: + Out << "offset_to_top (" + << Component.getOffsetToTop().getQuantity() + << ")"; + break; + + case VTableComponent::CK_RTTI: + Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI"; + break; + + case VTableComponent::CK_FunctionPointer: { + const CXXMethodDecl *MD = Component.getFunctionDecl(); + + std::string Str = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + MD); + Out << Str; + if (MD->isPure()) + Out << " [pure]"; + + if (MD->isDeleted()) + Out << " [deleted]"; + + ThunkInfo Thunk = VTableThunks.lookup(I); + if (!Thunk.isEmpty()) { + // If this function pointer has a return adjustment, dump it. + if (!Thunk.Return.isEmpty()) { + Out << "\n [return adjustment: "; + Out << Thunk.Return.NonVirtual << " non-virtual"; + + if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) { + Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset; + Out << " vbase offset offset"; + } + + Out << ']'; + } + + // If this function pointer has a 'this' pointer adjustment, dump it. 
+ if (!Thunk.This.isEmpty()) { + Out << "\n [this adjustment: "; + Out << Thunk.This.NonVirtual << " non-virtual"; + + if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) { + Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset; + Out << " vcall offset offset"; + } + + Out << ']'; + } + } + + break; + } + + case VTableComponent::CK_CompleteDtorPointer: + case VTableComponent::CK_DeletingDtorPointer: { + bool IsComplete = + Component.getKind() == VTableComponent::CK_CompleteDtorPointer; + + const CXXDestructorDecl *DD = Component.getDestructorDecl(); + + Out << DD->getQualifiedNameAsString(); + if (IsComplete) + Out << "() [complete]"; + else + Out << "() [deleting]"; + + if (DD->isPure()) + Out << " [pure]"; + + ThunkInfo Thunk = VTableThunks.lookup(I); + if (!Thunk.isEmpty()) { + // If this destructor has a 'this' pointer adjustment, dump it. + if (!Thunk.This.isEmpty()) { + Out << "\n [this adjustment: "; + Out << Thunk.This.NonVirtual << " non-virtual"; + + if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) { + Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset; + Out << " vcall offset offset"; + } + + Out << ']'; + } + } + + break; + } + + case VTableComponent::CK_UnusedFunctionPointer: { + const CXXMethodDecl *MD = Component.getUnusedFunctionDecl(); + + std::string Str = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + MD); + Out << "[unused] " << Str; + if (MD->isPure()) + Out << " [pure]"; + } + + } + + Out << '\n'; + + // Dump the next address point. 
+ uint64_t NextIndex = Index + 1; + if (AddressPointsByIndex.count(NextIndex)) { + if (AddressPointsByIndex.count(NextIndex) == 1) { + const BaseSubobject &Base = + AddressPointsByIndex.find(NextIndex)->second; + + Out << " -- (" << Base.getBase()->getQualifiedNameAsString(); + Out << ", " << Base.getBaseOffset().getQuantity(); + Out << ") vtable address --\n"; + } else { + CharUnits BaseOffset = + AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset(); + + // We store the class names in a set to get a stable order. + std::set<std::string> ClassNames; + for (std::multimap<uint64_t, BaseSubobject>::const_iterator I = + AddressPointsByIndex.lower_bound(NextIndex), E = + AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) { + assert(I->second.getBaseOffset() == BaseOffset && + "Invalid base offset!"); + const CXXRecordDecl *RD = I->second.getBase(); + ClassNames.insert(RD->getQualifiedNameAsString()); + } + + for (std::set<std::string>::const_iterator I = ClassNames.begin(), + E = ClassNames.end(); I != E; ++I) { + Out << " -- (" << *I; + Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n"; + } + } + } + } + + Out << '\n'; + + if (isBuildingConstructorVTable()) + return; + + if (MostDerivedClass->getNumVBases()) { + // We store the virtual base class names and their offsets in a map to get + // a stable order. + + std::map<std::string, CharUnits> ClassNamesAndOffsets; + for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(), + E = VBaseOffsetOffsets.end(); I != E; ++I) { + std::string ClassName = I->first->getQualifiedNameAsString(); + CharUnits OffsetOffset = I->second; + ClassNamesAndOffsets.insert( + std::make_pair(ClassName, OffsetOffset)); + } + + Out << "Virtual base offset offsets for '"; + Out << MostDerivedClass->getQualifiedNameAsString() << "' ("; + Out << ClassNamesAndOffsets.size(); + Out << (ClassNamesAndOffsets.size() == 1 ? 
" entry" : " entries") << ").\n"; + + for (std::map<std::string, CharUnits>::const_iterator I = + ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end(); + I != E; ++I) + Out << " " << I->first << " | " << I->second.getQuantity() << '\n'; + + Out << "\n"; + } + + if (!Thunks.empty()) { + // We store the method names in a map to get a stable order. + std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls; + + for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end(); + I != E; ++I) { + const CXXMethodDecl *MD = I->first; + std::string MethodName = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + MD); + + MethodNamesAndDecls.insert(std::make_pair(MethodName, MD)); + } + + for (std::map<std::string, const CXXMethodDecl *>::const_iterator I = + MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end(); + I != E; ++I) { + const std::string &MethodName = I->first; + const CXXMethodDecl *MD = I->second; + + ThunkInfoVectorTy ThunksVector = Thunks[MD]; + std::sort(ThunksVector.begin(), ThunksVector.end(), + ItaniumThunkInfoComparator()); + + Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size(); + Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n"; + + for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) { + const ThunkInfo &Thunk = ThunksVector[I]; + + Out << llvm::format("%4d | ", I); + + // If this function pointer has a return pointer adjustment, dump it. + if (!Thunk.Return.isEmpty()) { + Out << "return adjustment: " << Thunk.Return.NonVirtual; + Out << " non-virtual"; + if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) { + Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset; + Out << " vbase offset offset"; + } + + if (!Thunk.This.isEmpty()) + Out << "\n "; + } + + // If this function pointer has a 'this' pointer adjustment, dump it. 
+ if (!Thunk.This.isEmpty()) { + Out << "this adjustment: "; + Out << Thunk.This.NonVirtual << " non-virtual"; + + if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) { + Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset; + Out << " vcall offset offset"; + } + } + + Out << '\n'; + } + + Out << '\n'; + } + } + + // Compute the vtable indices for all the member functions. + // Store them in a map keyed by the index so we'll get a sorted table. + std::map<uint64_t, std::string> IndicesMap; + + for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(), + e = MostDerivedClass->method_end(); i != e; ++i) { + const CXXMethodDecl *MD = *i; + + // We only want virtual member functions. + if (!MD->isVirtual()) + continue; + + std::string MethodName = + PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, + MD); + + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + GlobalDecl GD(DD, Dtor_Complete); + assert(MethodVTableIndices.count(GD)); + uint64_t VTableIndex = MethodVTableIndices[GD]; + IndicesMap[VTableIndex] = MethodName + " [complete]"; + IndicesMap[VTableIndex + 1] = MethodName + " [deleting]"; + } else { + assert(MethodVTableIndices.count(MD)); + IndicesMap[MethodVTableIndices[MD]] = MethodName; + } + } + + // Print the vtable indices for all the member functions. 
+ if (!IndicesMap.empty()) { + Out << "VTable indices for '"; + Out << MostDerivedClass->getQualifiedNameAsString(); + Out << "' (" << IndicesMap.size() << " entries).\n"; + + for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(), + E = IndicesMap.end(); I != E; ++I) { + uint64_t VTableIndex = I->first; + const std::string &MethodName = I->second; + + Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName + << '\n'; + } + } + + Out << '\n'; +} + +struct VTableThunksComparator { + bool operator()(const VTableLayout::VTableThunkTy &LHS, + const VTableLayout::VTableThunkTy &RHS) { + if (LHS.first == RHS.first) { + assert(LHS.second == RHS.second && + "Different thunks should have unique indices!"); + } + return LHS.first < RHS.first; + } +}; +} + +VTableLayout::VTableLayout(uint64_t NumVTableComponents, + const VTableComponent *VTableComponents, + uint64_t NumVTableThunks, + const VTableThunkTy *VTableThunks, + const AddressPointsMapTy &AddressPoints, + bool IsMicrosoftABI) + : NumVTableComponents(NumVTableComponents), + VTableComponents(new VTableComponent[NumVTableComponents]), + NumVTableThunks(NumVTableThunks), + VTableThunks(new VTableThunkTy[NumVTableThunks]), + AddressPoints(AddressPoints), + IsMicrosoftABI(IsMicrosoftABI) { + std::copy(VTableComponents, VTableComponents+NumVTableComponents, + this->VTableComponents.get()); + std::copy(VTableThunks, VTableThunks+NumVTableThunks, + this->VTableThunks.get()); + std::sort(this->VTableThunks.get(), + this->VTableThunks.get() + NumVTableThunks, + VTableThunksComparator()); +} + +VTableLayout::~VTableLayout() { } + +ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context) + : IsMicrosoftABI(Context.getTargetInfo().getCXXABI().isMicrosoft()) { +} + +ItaniumVTableContext::~ItaniumVTableContext() { + llvm::DeleteContainerSeconds(VTableLayouts); +} + +uint64_t ItaniumVTableContext::getMethodVTableIndex(GlobalDecl GD) { + MethodVTableIndicesTy::iterator I = 
MethodVTableIndices.find(GD); + if (I != MethodVTableIndices.end()) + return I->second; + + const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent(); + + computeVTableRelatedInformation(RD); + + I = MethodVTableIndices.find(GD); + assert(I != MethodVTableIndices.end() && "Did not find index!"); + return I->second; +} + +CharUnits +ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, + const CXXRecordDecl *VBase) { + ClassPairTy ClassPair(RD, VBase); + + VirtualBaseClassOffsetOffsetsMapTy::iterator I = + VirtualBaseClassOffsetOffsets.find(ClassPair); + if (I != VirtualBaseClassOffsetOffsets.end()) + return I->second; + + VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0, + BaseSubobject(RD, CharUnits::Zero()), + /*BaseIsVirtual=*/false, + /*OffsetInLayoutClass=*/CharUnits::Zero()); + + for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I = + Builder.getVBaseOffsetOffsets().begin(), + E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) { + // Insert all types. 
+ ClassPairTy ClassPair(RD, I->first); + + VirtualBaseClassOffsetOffsets.insert( + std::make_pair(ClassPair, I->second)); + } + + I = VirtualBaseClassOffsetOffsets.find(ClassPair); + assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!"); + + return I->second; +} + +static VTableLayout *CreateVTableLayout(const ItaniumVTableBuilder &Builder) { + SmallVector<VTableLayout::VTableThunkTy, 1> + VTableThunks(Builder.vtable_thunks_begin(), Builder.vtable_thunks_end()); + + return new VTableLayout(Builder.getNumVTableComponents(), + Builder.vtable_component_begin(), + VTableThunks.size(), + VTableThunks.data(), + Builder.getAddressPoints(), + /*IsMicrosoftABI=*/false); +} + +void +ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) { + assert(!IsMicrosoftABI && "Shouldn't be called in this ABI!"); + + const VTableLayout *&Entry = VTableLayouts[RD]; + + // Check if we've computed this information before. + if (Entry) + return; + + ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(), + /*MostDerivedClassIsVirtual=*/0, RD); + Entry = CreateVTableLayout(Builder); + + MethodVTableIndices.insert(Builder.vtable_indices_begin(), + Builder.vtable_indices_end()); + + // Add the known thunks. + Thunks.insert(Builder.thunks_begin(), Builder.thunks_end()); + + // If we don't have the vbase information for this class, insert it. + // getVirtualBaseOffsetOffset will compute it separately without computing + // the rest of the vtable related information. + if (!RD->getNumVBases()) + return; + + const CXXRecordDecl *VBase = + RD->vbases_begin()->getType()->getAsCXXRecordDecl(); + + if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase))) + return; + + for (ItaniumVTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator + I = Builder.getVBaseOffsetOffsets().begin(), + E = Builder.getVBaseOffsetOffsets().end(); + I != E; ++I) { + // Insert all types. 
+ ClassPairTy ClassPair(RD, I->first); + + VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second)); + } +} + +VTableLayout *ItaniumVTableContext::createConstructionVTableLayout( + const CXXRecordDecl *MostDerivedClass, CharUnits MostDerivedClassOffset, + bool MostDerivedClassIsVirtual, const CXXRecordDecl *LayoutClass) { + ItaniumVTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset, + MostDerivedClassIsVirtual, LayoutClass); + return CreateVTableLayout(Builder); +} + +namespace { + +// Vtables in the Microsoft ABI are different from the Itanium ABI. +// +// The main differences are: +// 1. Separate vftable and vbtable. +// +// 2. Each subobject with a vfptr gets its own vftable rather than an address +// point in a single vtable shared between all the subobjects. +// Each vftable is represented by a separate section and virtual calls +// must be done using the vftable which has a slot for the function to be +// called. +// +// 3. Virtual method definitions expect their 'this' parameter to point to the +// first vfptr whose table provides a compatible overridden method. In many +// cases, this permits the original vf-table entry to directly call +// the method instead of passing through a thunk. +// +// A compatible overridden method is one which does not have a non-trivial +// covariant-return adjustment. +// +// The first vfptr is the one with the lowest offset in the complete-object +// layout of the defining class, and the method definition will subtract +// that constant offset from the parameter value to get the real 'this' +// value. Therefore, if the offset isn't really constant (e.g. if a virtual +// function defined in a virtual base is overridden in a more derived +// virtual base and these bases have a reverse order in the complete +// object), the vf-table may require a this-adjustment thunk. +// +// 4. vftables do not contain new entries for overrides that merely require +// this-adjustment. 
Together with #3, this keeps vf-tables smaller and +// eliminates the need for this-adjustment thunks in many cases, at the cost +// of often requiring redundant work to adjust the "this" pointer. +// +// 5. Instead of VTT and constructor vtables, vbtables and vtordisps are used. +// Vtordisps are emitted into the class layout if a class has +// a) a user-defined ctor/dtor +// and +// b) a method overriding a method in a virtual base. + +class VFTableBuilder { +public: + typedef MicrosoftVTableContext::MethodVFTableLocation MethodVFTableLocation; + + typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation> + MethodVFTableLocationsTy; + +private: + /// VTables - Global vtable information. + MicrosoftVTableContext &VTables; + + /// Context - The ASTContext which we will use for layout information. + ASTContext &Context; + + /// MostDerivedClass - The most derived class for which we're building this + /// vtable. + const CXXRecordDecl *MostDerivedClass; + + const ASTRecordLayout &MostDerivedClassLayout; + + VFPtrInfo WhichVFPtr; + + /// FinalOverriders - The final overriders of the most derived class. + const FinalOverriders Overriders; + + /// Components - The components of the vftable being built. + SmallVector<VTableComponent, 64> Components; + + MethodVFTableLocationsTy MethodVFTableLocations; + + /// MethodInfo - Contains information about a method in a vtable. + /// (Used for computing 'this' pointer adjustment thunks. + struct MethodInfo { + /// VBTableIndex - The nonzero index in the vbtable that + /// this method's base has, or zero. + const uint64_t VBTableIndex; + + /// VFTableIndex - The index in the vftable that this method has. + const uint64_t VFTableIndex; + + /// Shadowed - Indicates if this vftable slot is shadowed by + /// a slot for a covariant-return override. If so, it shouldn't be printed + /// or used for vcalls in the most derived class. 
+ bool Shadowed; + + MethodInfo(uint64_t VBTableIndex, uint64_t VFTableIndex) + : VBTableIndex(VBTableIndex), VFTableIndex(VFTableIndex), + Shadowed(false) {} + + MethodInfo() : VBTableIndex(0), VFTableIndex(0), Shadowed(false) {} + }; + + typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy; + + /// MethodInfoMap - The information for all methods in the vftable we're + /// currently building. + MethodInfoMapTy MethodInfoMap; + + typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy; + + /// VTableThunks - The thunks by vftable index in the vftable currently being + /// built. + VTableThunksMapTy VTableThunks; + + typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy; + typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy; + + /// Thunks - A map that contains all the thunks needed for all methods in the + /// most derived class for which the vftable is currently being built. + ThunksMapTy Thunks; + + /// AddThunk - Add a thunk for the given method. + void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) { + SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD]; + + // Check if we have this thunk already. + if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) != + ThunksVector.end()) + return; + + ThunksVector.push_back(Thunk); + } + + /// ComputeThisOffset - Returns the 'this' argument offset for the given + /// method in the given subobject, relative to the beginning of the + /// MostDerivedClass. + CharUnits ComputeThisOffset(const CXXMethodDecl *MD, + BaseSubobject Base, + FinalOverriders::OverriderInfo Overrider); + + void CalculateVtordispAdjustment(FinalOverriders::OverriderInfo Overrider, + CharUnits ThisOffset, ThisAdjustment &TA); + + /// AddMethod - Add a single virtual member function to the vftable + /// components vector. 
+ void AddMethod(const CXXMethodDecl *MD, ThunkInfo TI) { + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + assert(TI.Return.isEmpty() && + "Destructor can't have return adjustment!"); + Components.push_back(VTableComponent::MakeDeletingDtor(DD)); + } else { + if (!TI.isEmpty()) + VTableThunks[Components.size()] = TI; + Components.push_back(VTableComponent::MakeFunction(MD)); + } + } + + /// AddMethods - Add the methods of this base subobject and the relevant + /// subbases to the vftable we're currently laying out. + void AddMethods(BaseSubobject Base, unsigned BaseDepth, + const CXXRecordDecl *LastVBase, + BasesSetVectorTy &VisitedBases); + + void LayoutVFTable() { + // FIXME: add support for RTTI when we have proper LLVM support for symbols + // pointing to the middle of a section. + + BasesSetVectorTy VisitedBases; + AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, 0, + VisitedBases); + + assert(MethodVFTableLocations.empty()); + for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(), + E = MethodInfoMap.end(); I != E; ++I) { + const CXXMethodDecl *MD = I->first; + const MethodInfo &MI = I->second; + // Skip the methods that the MostDerivedClass didn't override + // and the entries shadowed by return adjusting thunks. 
+ if (MD->getParent() != MostDerivedClass || MI.Shadowed) + continue; + MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.LastVBase, + WhichVFPtr.VFPtrOffset, MI.VFTableIndex); + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc; + } else { + MethodVFTableLocations[MD] = Loc; + } + } + } + + void ErrorUnsupported(StringRef Feature, SourceLocation Location) { + clang::DiagnosticsEngine &Diags = Context.getDiagnostics(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, "v-table layout for %0 is not supported yet"); + Diags.Report(Context.getFullLoc(Location), DiagID) << Feature; + } + +public: + VFTableBuilder(MicrosoftVTableContext &VTables, + const CXXRecordDecl *MostDerivedClass, VFPtrInfo Which) + : VTables(VTables), + Context(MostDerivedClass->getASTContext()), + MostDerivedClass(MostDerivedClass), + MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)), + WhichVFPtr(Which), + Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) { + LayoutVFTable(); + + if (Context.getLangOpts().DumpVTableLayouts) + dumpLayout(llvm::outs()); + } + + uint64_t getNumThunks() const { return Thunks.size(); } + + ThunksMapTy::const_iterator thunks_begin() const { return Thunks.begin(); } + + ThunksMapTy::const_iterator thunks_end() const { return Thunks.end(); } + + MethodVFTableLocationsTy::const_iterator vtable_indices_begin() const { + return MethodVFTableLocations.begin(); + } + + MethodVFTableLocationsTy::const_iterator vtable_indices_end() const { + return MethodVFTableLocations.end(); + } + + uint64_t getNumVTableComponents() const { return Components.size(); } + + const VTableComponent *vtable_component_begin() const { + return Components.begin(); + } + + const VTableComponent *vtable_component_end() const { + return Components.end(); + } + + VTableThunksMapTy::const_iterator vtable_thunks_begin() const { + return VTableThunks.begin(); + } + + 
VTableThunksMapTy::const_iterator vtable_thunks_end() const { + return VTableThunks.end(); + } + + void dumpLayout(raw_ostream &); +}; + +/// InitialOverriddenDefinitionCollector - Finds the set of least derived bases +/// that define the given method. +struct InitialOverriddenDefinitionCollector { + BasesSetVectorTy Bases; + OverriddenMethodsSetTy VisitedOverriddenMethods; + + bool visit(const CXXMethodDecl *OverriddenMD) { + if (OverriddenMD->size_overridden_methods() == 0) + Bases.insert(OverriddenMD->getParent()); + // Don't recurse on this method if we've already collected it. + return VisitedOverriddenMethods.insert(OverriddenMD); + } +}; + +static bool BaseInSet(const CXXBaseSpecifier *Specifier, + CXXBasePath &Path, void *BasesSet) { + BasesSetVectorTy *Bases = (BasesSetVectorTy *)BasesSet; + return Bases->count(Specifier->getType()->getAsCXXRecordDecl()); +} + +CharUnits +VFTableBuilder::ComputeThisOffset(const CXXMethodDecl *MD, + BaseSubobject Base, + FinalOverriders::OverriderInfo Overrider) { + InitialOverriddenDefinitionCollector Collector; + visitAllOverriddenMethods(MD, Collector); + + CXXBasePaths Paths; + Base.getBase()->lookupInBases(BaseInSet, &Collector.Bases, Paths); + + // This will hold the smallest this offset among overridees of MD. + // This implies that an offset of a non-virtual base will dominate an offset + // of a virtual base to potentially reduce the number of thunks required + // in the derived classes that inherit this method. + CharUnits Ret; + bool First = true; + + for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end(); + I != E; ++I) { + const CXXBasePath &Path = (*I); + CharUnits ThisOffset = Base.getBaseOffset(); + CharUnits LastVBaseOffset; + + // For each path from the overrider to the parents of the overridden methods, + // traverse the path, calculating the this offset in the most derived class. 
+ for (int J = 0, F = Path.size(); J != F; ++J) { + const CXXBasePathElement &Element = Path[J]; + QualType CurTy = Element.Base->getType(); + const CXXRecordDecl *PrevRD = Element.Class, + *CurRD = CurTy->getAsCXXRecordDecl(); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(PrevRD); + + if (Element.Base->isVirtual()) { + LastVBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(CurRD); + if (Overrider.Method->getParent() == PrevRD) { + // This one's interesting. If the final overrider is in a vbase B of the + // most derived class and it overrides a method of the B's own vbase A, + // it uses A* as "this". In its prologue, it can cast A* to B* with + // a static offset. This offset is used regardless of the actual + // offset of A from B in the most derived class, requiring an + // this-adjusting thunk in the vftable if A and B are laid out + // differently in the most derived class. + ThisOffset += Layout.getVBaseClassOffset(CurRD); + } else { + ThisOffset = LastVBaseOffset; + } + } else { + ThisOffset += Layout.getBaseClassOffset(CurRD); + } + } + + if (isa<CXXDestructorDecl>(MD)) { + if (LastVBaseOffset.isZero()) { + // If a "Base" class has at least one non-virtual base with a virtual + // destructor, the "Base" virtual destructor will take the address + // of the "Base" subobject as the "this" argument. + return Base.getBaseOffset(); + } else { + // A virtual destructor of a virtual base takes the address of the + // virtual base subobject as the "this" argument. 
+ return LastVBaseOffset; + } + } + + if (Ret > ThisOffset || First) { + First = false; + Ret = ThisOffset; + } + } + + assert(!First && "Method not found in the given subobject?"); + return Ret; +} + +void VFTableBuilder::CalculateVtordispAdjustment( + FinalOverriders::OverriderInfo Overrider, CharUnits ThisOffset, + ThisAdjustment &TA) { + const ASTRecordLayout::VBaseOffsetsMapTy &VBaseMap = + MostDerivedClassLayout.getVBaseOffsetsMap(); + const ASTRecordLayout::VBaseOffsetsMapTy::const_iterator &VBaseMapEntry = + VBaseMap.find(WhichVFPtr.LastVBase); + assert(VBaseMapEntry != VBaseMap.end()); + + // Check if we need a vtordisp adjustment at all. + if (!VBaseMapEntry->second.hasVtorDisp()) + return; + + CharUnits VFPtrVBaseOffset = VBaseMapEntry->second.VBaseOffset; + // The implicit vtordisp field is located right before the vbase. + TA.Virtual.Microsoft.VtordispOffset = + (VFPtrVBaseOffset - WhichVFPtr.VFPtrFullOffset).getQuantity() - 4; + + // If the final overrider is defined in either: + // - the most derived class or its non-virtual base or + // - the same vbase as the initial declaration, + // a simple vtordisp thunk will suffice. + const CXXRecordDecl *OverriderRD = Overrider.Method->getParent(); + if (OverriderRD == MostDerivedClass) + return; + + const CXXRecordDecl *OverriderVBase = + ComputeBaseOffset(Context, OverriderRD, MostDerivedClass).VirtualBase; + if (!OverriderVBase || OverriderVBase == WhichVFPtr.LastVBase) + return; + + // Otherwise, we need to do use the dynamic offset of the final overrider + // in order to get "this" adjustment right. 
+ TA.Virtual.Microsoft.VBPtrOffset = + (VFPtrVBaseOffset + WhichVFPtr.VFPtrOffset - + MostDerivedClassLayout.getVBPtrOffset()).getQuantity(); + TA.Virtual.Microsoft.VBOffsetOffset = + Context.getTypeSizeInChars(Context.IntTy).getQuantity() * + VTables.getVBTableIndex(MostDerivedClass, OverriderVBase); + + TA.NonVirtual = (ThisOffset - Overrider.Offset).getQuantity(); +} + +static void GroupNewVirtualOverloads( + const CXXRecordDecl *RD, + SmallVector<const CXXMethodDecl *, 10> &VirtualMethods) { + // Put the virtual methods into VirtualMethods in the proper order: + // 1) Group overloads by declaration name. New groups are added to the + // vftable in the order of their first declarations in this class + // (including overrides). + // 2) In each group, new overloads appear in the reverse order of declaration. + typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup; + SmallVector<MethodGroup, 10> Groups; + typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy; + VisitedGroupIndicesTy VisitedGroupIndices; + for (CXXRecordDecl::method_iterator I = RD->method_begin(), + E = RD->method_end(); I != E; ++I) { + const CXXMethodDecl *MD = *I; + if (!MD->isVirtual()) + continue; + + VisitedGroupIndicesTy::iterator J; + bool Inserted; + llvm::tie(J, Inserted) = VisitedGroupIndices.insert( + std::make_pair(MD->getDeclName(), Groups.size())); + if (Inserted) + Groups.push_back(MethodGroup(1, MD)); + else + Groups[J->second].push_back(MD); + } + + for (unsigned I = 0, E = Groups.size(); I != E; ++I) + VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend()); +} + +void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth, + const CXXRecordDecl *LastVBase, + BasesSetVectorTy &VisitedBases) { + const CXXRecordDecl *RD = Base.getBase(); + if (!RD->isPolymorphic()) + return; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // See if this class expands a vftable of the base we look at, which is either + // the one defined 
by the vfptr base path or the primary base of the current class. + const CXXRecordDecl *NextBase = 0, *NextLastVBase = LastVBase; + CharUnits NextBaseOffset; + if (BaseDepth < WhichVFPtr.PathToBaseWithVFPtr.size()) { + NextBase = WhichVFPtr.PathToBaseWithVFPtr[BaseDepth]; + if (Layout.getVBaseOffsetsMap().count(NextBase)) { + NextLastVBase = NextBase; + NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase); + } else { + NextBaseOffset = + Base.getBaseOffset() + Layout.getBaseClassOffset(NextBase); + } + } else if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) { + assert(!Layout.isPrimaryBaseVirtual() && + "No primary virtual bases in this ABI"); + NextBase = PrimaryBase; + NextBaseOffset = Base.getBaseOffset(); + } + + if (NextBase) { + AddMethods(BaseSubobject(NextBase, NextBaseOffset), BaseDepth + 1, + NextLastVBase, VisitedBases); + if (!VisitedBases.insert(NextBase)) + llvm_unreachable("Found a duplicate primary base!"); + } + + SmallVector<const CXXMethodDecl*, 10> VirtualMethods; + // Put virtual methods in the proper order. + GroupNewVirtualOverloads(RD, VirtualMethods); + + // Now go through all virtual member functions and add them to the current + // vftable. This is done by + // - replacing overridden methods in their existing slots, as long as they + // don't require return adjustment; calculating This adjustment if needed. + // - adding new slots for methods of the current base not present in any + // sub-bases; + // - adding new slots for methods that require Return adjustment. + // We keep track of the methods visited in the sub-bases in MethodInfoMap. + for (unsigned I = 0, E = VirtualMethods.size(); I != E; ++I) { + const CXXMethodDecl *MD = VirtualMethods[I]; + + FinalOverriders::OverriderInfo Overrider = + Overriders.getOverrider(MD, Base.getBaseOffset()); + ThisAdjustment ThisAdjustmentOffset; + bool ForceThunk = false; + + // Check if this virtual member function overrides + // a method in one of the visited bases. 
+ if (const CXXMethodDecl *OverriddenMD = + FindNearestOverriddenMethod(MD, VisitedBases)) { + MethodInfoMapTy::iterator OverriddenMDIterator = + MethodInfoMap.find(OverriddenMD); + + // If the overridden method went to a different vftable, skip it. + if (OverriddenMDIterator == MethodInfoMap.end()) + continue; + + MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second; + + // Create a this-adjusting thunk if needed. + CharUnits TI = ComputeThisOffset(MD, Base, Overrider); + if (TI != WhichVFPtr.VFPtrFullOffset) { + ThisAdjustmentOffset.NonVirtual = + (TI - WhichVFPtr.VFPtrFullOffset).getQuantity(); + } + + if (WhichVFPtr.LastVBase) + CalculateVtordispAdjustment(Overrider, TI, ThisAdjustmentOffset); + + if (!ThisAdjustmentOffset.isEmpty()) { + VTableThunks[OverriddenMethodInfo.VFTableIndex].This = + ThisAdjustmentOffset; + AddThunk(MD, VTableThunks[OverriddenMethodInfo.VFTableIndex]); + } + + if (MD->getResultType() == OverriddenMD->getResultType()) { + // No return adjustment needed - just replace the overridden method info + // with the current info. + MethodInfo MI(OverriddenMethodInfo.VBTableIndex, + OverriddenMethodInfo.VFTableIndex); + MethodInfoMap.erase(OverriddenMDIterator); + + assert(!MethodInfoMap.count(MD) && + "Should not have method info for this method yet!"); + MethodInfoMap.insert(std::make_pair(MD, MI)); + continue; + } else { + // In case we need a return adjustment, we'll add a new slot for + // the overrider and put a return-adjusting thunk where the overridden + // method was in the vftable. + // For now, just mark the overriden method as shadowed by a new slot. + OverriddenMethodInfo.Shadowed = true; + ForceThunk = true; + + // Also apply this adjustment to the shadowed slots. + if (!ThisAdjustmentOffset.isEmpty()) { + // FIXME: this is O(N^2), can be O(N). 
+ const CXXMethodDecl *SubOverride = OverriddenMD; + while ((SubOverride = + FindNearestOverriddenMethod(SubOverride, VisitedBases))) { + MethodInfoMapTy::iterator SubOverrideIterator = + MethodInfoMap.find(SubOverride); + if (SubOverrideIterator == MethodInfoMap.end()) + break; + MethodInfo &SubOverrideMI = SubOverrideIterator->second; + assert(SubOverrideMI.Shadowed); + VTableThunks[SubOverrideMI.VFTableIndex].This = + ThisAdjustmentOffset; + AddThunk(MD, VTableThunks[SubOverrideMI.VFTableIndex]); + } + } + } + } else if (Base.getBaseOffset() != WhichVFPtr.VFPtrFullOffset || + MD->size_overridden_methods()) { + // Skip methods that don't belong to the vftable of the current class, + // e.g. each method that wasn't seen in any of the visited sub-bases + // but overrides multiple methods of other sub-bases. + continue; + } + + // If we got here, MD is a method not seen in any of the sub-bases or + // it requires return adjustment. Insert the method info for this method. + unsigned VBIndex = + LastVBase ? VTables.getVBTableIndex(MostDerivedClass, LastVBase) : 0; + MethodInfo MI(VBIndex, Components.size()); + + assert(!MethodInfoMap.count(MD) && + "Should not have method info for this method yet!"); + MethodInfoMap.insert(std::make_pair(MD, MI)); + + const CXXMethodDecl *OverriderMD = Overrider.Method; + + // Check if this overrider needs a return adjustment. + // We don't want to do this for pure virtual member functions. 
+ BaseOffset ReturnAdjustmentOffset; + ReturnAdjustment ReturnAdjustment; + if (!OverriderMD->isPure()) { + ReturnAdjustmentOffset = + ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD); + } + if (!ReturnAdjustmentOffset.isEmpty()) { + ForceThunk = true; + ReturnAdjustment.NonVirtual = + ReturnAdjustmentOffset.NonVirtualOffset.getQuantity(); + if (ReturnAdjustmentOffset.VirtualBase) { + const ASTRecordLayout &DerivedLayout = + Context.getASTRecordLayout(ReturnAdjustmentOffset.DerivedClass); + ReturnAdjustment.Virtual.Microsoft.VBPtrOffset = + DerivedLayout.getVBPtrOffset().getQuantity(); + ReturnAdjustment.Virtual.Microsoft.VBIndex = + VTables.getVBTableIndex(ReturnAdjustmentOffset.DerivedClass, + ReturnAdjustmentOffset.VirtualBase); + } + } + + AddMethod(OverriderMD, ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment, + ForceThunk ? MD : 0)); + } +} + +void PrintBasePath(const VFPtrInfo::BasePath &Path, raw_ostream &Out) { + for (VFPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(), + E = Path.rend(); I != E; ++I) { + Out << "'" << (*I)->getQualifiedNameAsString() << "' in "; + } +} + +struct MicrosoftThunkInfoStableSortComparator { + bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) { + if (LHS.This != RHS.This) + return LHS.This < RHS.This; + + if (LHS.Return != RHS.Return) + return LHS.Return < RHS.Return; + + // Keep different thunks with the same adjustments in the order they + // were put into the vector. 
+ return false; + } +}; + +static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out, + bool ContinueFirstLine) { + const ReturnAdjustment &R = TI.Return; + bool Multiline = false; + const char *LinePrefix = "\n "; + if (!R.isEmpty()) { + if (!ContinueFirstLine) + Out << LinePrefix; + Out << "[return adjustment: "; + if (R.Virtual.Microsoft.VBPtrOffset) + Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", "; + if (R.Virtual.Microsoft.VBIndex) + Out << "vbase #" << R.Virtual.Microsoft.VBIndex << ", "; + Out << R.NonVirtual << " non-virtual]"; + Multiline = true; + } + + const ThisAdjustment &T = TI.This; + if (!T.isEmpty()) { + if (Multiline || !ContinueFirstLine) + Out << LinePrefix; + Out << "[this adjustment: "; + if (!TI.This.Virtual.isEmpty()) { + assert(T.Virtual.Microsoft.VtordispOffset < 0); + Out << "vtordisp at " << T.Virtual.Microsoft.VtordispOffset << ", "; + if (T.Virtual.Microsoft.VBPtrOffset) { + Out << "vbptr at " << T.Virtual.Microsoft.VBPtrOffset + << " to the left, "; + assert(T.Virtual.Microsoft.VBOffsetOffset > 0); + Out << LinePrefix << " vboffset at " + << T.Virtual.Microsoft.VBOffsetOffset << " in the vbtable, "; + } + } + Out << T.NonVirtual << " non-virtual]"; + } +} + +void VFTableBuilder::dumpLayout(raw_ostream &Out) { + Out << "VFTable for "; + PrintBasePath(WhichVFPtr.PathToBaseWithVFPtr, Out); + Out << "'" << MostDerivedClass->getQualifiedNameAsString(); + Out << "' (" << Components.size() << " entries).\n"; + + for (unsigned I = 0, E = Components.size(); I != E; ++I) { + Out << llvm::format("%4d | ", I); + + const VTableComponent &Component = Components[I]; + + // Dump the component. 
+ switch (Component.getKind()) { + case VTableComponent::CK_RTTI: + Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI"; + break; + + case VTableComponent::CK_FunctionPointer: { + const CXXMethodDecl *MD = Component.getFunctionDecl(); + + std::string Str = PredefinedExpr::ComputeName( + PredefinedExpr::PrettyFunctionNoVirtual, MD); + Out << Str; + if (MD->isPure()) + Out << " [pure]"; + + if (MD->isDeleted()) { + ErrorUnsupported("deleted methods", MD->getLocation()); + Out << " [deleted]"; + } + + ThunkInfo Thunk = VTableThunks.lookup(I); + if (!Thunk.isEmpty()) + dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false); + + break; + } + + case VTableComponent::CK_DeletingDtorPointer: { + const CXXDestructorDecl *DD = Component.getDestructorDecl(); + + Out << DD->getQualifiedNameAsString(); + Out << "() [scalar deleting]"; + + if (DD->isPure()) + Out << " [pure]"; + + ThunkInfo Thunk = VTableThunks.lookup(I); + if (!Thunk.isEmpty()) { + assert(Thunk.Return.isEmpty() && + "No return adjustment needed for destructors!"); + dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false); + } + + break; + } + + default: + DiagnosticsEngine &Diags = Context.getDiagnostics(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, + "Unexpected vftable component type %0 for component number %1"); + Diags.Report(MostDerivedClass->getLocation(), DiagID) + << I << Component.getKind(); + } + + Out << '\n'; + } + + Out << '\n'; + + if (!Thunks.empty()) { + // We store the method names in a map to get a stable order. 
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls; + + for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end(); + I != E; ++I) { + const CXXMethodDecl *MD = I->first; + std::string MethodName = PredefinedExpr::ComputeName( + PredefinedExpr::PrettyFunctionNoVirtual, MD); + + MethodNamesAndDecls.insert(std::make_pair(MethodName, MD)); + } + + for (std::map<std::string, const CXXMethodDecl *>::const_iterator + I = MethodNamesAndDecls.begin(), + E = MethodNamesAndDecls.end(); + I != E; ++I) { + const std::string &MethodName = I->first; + const CXXMethodDecl *MD = I->second; + + ThunkInfoVectorTy ThunksVector = Thunks[MD]; + std::stable_sort(ThunksVector.begin(), ThunksVector.end(), + MicrosoftThunkInfoStableSortComparator()); + + Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size(); + Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n"; + + for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) { + const ThunkInfo &Thunk = ThunksVector[I]; + + Out << llvm::format("%4d | ", I); + dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/true); + Out << '\n'; + } + + Out << '\n'; + } + } +} +} + +void MicrosoftVTableContext::enumerateVFPtrs( + const CXXRecordDecl *MostDerivedClass, + const ASTRecordLayout &MostDerivedClassLayout, BaseSubobject Base, + const CXXRecordDecl *LastVBase, + const VFPtrInfo::BasePath &PathFromCompleteClass, + BasesSetVectorTy &VisitedVBases, + VFPtrListTy &Result) { + const CXXRecordDecl *CurrentClass = Base.getBase(); + CharUnits OffsetInCompleteClass = Base.getBaseOffset(); + const ASTRecordLayout &CurrentClassLayout = + Context.getASTRecordLayout(CurrentClass); + + if (CurrentClassLayout.hasOwnVFPtr()) { + if (LastVBase) { + uint64_t VBIndex = getVBTableIndex(MostDerivedClass, LastVBase); + assert(VBIndex > 0 && "vbases must have vbindex!"); + CharUnits VFPtrOffset = + OffsetInCompleteClass - + MostDerivedClassLayout.getVBaseClassOffset(LastVBase); + 
Result.push_back(VFPtrInfo(VBIndex, LastVBase, VFPtrOffset, + PathFromCompleteClass, OffsetInCompleteClass)); + } else { + Result.push_back(VFPtrInfo(OffsetInCompleteClass, PathFromCompleteClass)); + } + } + + for (CXXRecordDecl::base_class_const_iterator I = CurrentClass->bases_begin(), + E = CurrentClass->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl(); + + CharUnits NextBaseOffset; + const CXXRecordDecl *NextLastVBase; + if (I->isVirtual()) { + if (!VisitedVBases.insert(BaseDecl)) + continue; + NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); + NextLastVBase = BaseDecl; + } else { + NextBaseOffset = OffsetInCompleteClass + + CurrentClassLayout.getBaseClassOffset(BaseDecl); + NextLastVBase = LastVBase; + } + + VFPtrInfo::BasePath NewPath = PathFromCompleteClass; + NewPath.push_back(BaseDecl); + BaseSubobject NextBase(BaseDecl, NextBaseOffset); + + enumerateVFPtrs(MostDerivedClass, MostDerivedClassLayout, NextBase, + NextLastVBase, NewPath, VisitedVBases, Result); + } +} + +/// CalculatePathToMangle - Calculate the subset of records that should be used +/// to mangle the vftable for the given vfptr. +/// Should only be called if a class has multiple vftables. +static void +CalculatePathToMangle(const CXXRecordDecl *RD, VFPtrInfo &VFPtr) { + // FIXME: In some rare cases this code produces a slightly incorrect mangling. + // It's very likely that the vbtable mangling code can be adjusted to mangle + // both vftables and vbtables correctly. + + VFPtrInfo::BasePath &FullPath = VFPtr.PathToBaseWithVFPtr; + if (FullPath.empty()) { + // Mangle the class's own vftable. + assert(RD->getNumVBases() && + "Something's wrong: if the most derived " + "class has more than one vftable, it can only have its own " + "vftable if it has vbases"); + VFPtr.PathToMangle.push_back(RD); + return; + } + + unsigned Begin = 0; + + // First, skip all the bases before the vbase. 
+ if (VFPtr.LastVBase) { + while (FullPath[Begin] != VFPtr.LastVBase) { + Begin++; + assert(Begin < FullPath.size()); + } + } + + // Then, put the rest of the base path in the reverse order. + for (unsigned I = FullPath.size(); I != Begin; --I) { + const CXXRecordDecl *CurBase = FullPath[I - 1], + *ItsBase = (I == 1) ? RD : FullPath[I - 2]; + bool BaseIsVirtual = false; + for (CXXRecordDecl::base_class_const_iterator J = ItsBase->bases_begin(), + F = ItsBase->bases_end(); J != F; ++J) { + if (J->getType()->getAsCXXRecordDecl() == CurBase) { + BaseIsVirtual = J->isVirtual(); + break; + } + } + + // Should skip the current base if it is a non-virtual base with no siblings. + if (BaseIsVirtual || ItsBase->getNumBases() != 1) + VFPtr.PathToMangle.push_back(CurBase); + } +} + +void MicrosoftVTableContext::enumerateVFPtrs( + const CXXRecordDecl *ForClass, + MicrosoftVTableContext::VFPtrListTy &Result) { + Result.clear(); + const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(ForClass); + BasesSetVectorTy VisitedVBases; + enumerateVFPtrs(ForClass, ClassLayout, + BaseSubobject(ForClass, CharUnits::Zero()), 0, + VFPtrInfo::BasePath(), VisitedVBases, Result); + if (Result.size() > 1) { + for (unsigned I = 0, E = Result.size(); I != E; ++I) + CalculatePathToMangle(ForClass, Result[I]); + } +} + +void MicrosoftVTableContext::computeVTableRelatedInformation( + const CXXRecordDecl *RD) { + assert(RD->isDynamicClass()); + + // Check if we've computed this information before. 
+ if (VFPtrLocations.count(RD)) + return; + + const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap; + + VFPtrListTy &VFPtrs = VFPtrLocations[RD]; + enumerateVFPtrs(RD, VFPtrs); + + MethodVFTableLocationsTy NewMethodLocations; + for (VFPtrListTy::iterator I = VFPtrs.begin(), E = VFPtrs.end(); + I != E; ++I) { + VFTableBuilder Builder(*this, RD, *I); + + VFTableIdTy id(RD, I->VFPtrFullOffset); + assert(VFTableLayouts.count(id) == 0); + SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks( + Builder.vtable_thunks_begin(), Builder.vtable_thunks_end()); + VFTableLayouts[id] = new VTableLayout( + Builder.getNumVTableComponents(), Builder.vtable_component_begin(), + VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true); + NewMethodLocations.insert(Builder.vtable_indices_begin(), + Builder.vtable_indices_end()); + Thunks.insert(Builder.thunks_begin(), Builder.thunks_end()); + } + + MethodVFTableLocations.insert(NewMethodLocations.begin(), + NewMethodLocations.end()); + if (Context.getLangOpts().DumpVTableLayouts) + dumpMethodLocations(RD, NewMethodLocations, llvm::outs()); +} + +void MicrosoftVTableContext::dumpMethodLocations( + const CXXRecordDecl *RD, const MethodVFTableLocationsTy &NewMethods, + raw_ostream &Out) { + // Compute the vtable indices for all the member functions. + // Store them in a map keyed by the location so we'll get a sorted table. 
+ std::map<MethodVFTableLocation, std::string> IndicesMap; + bool HasNonzeroOffset = false; + + for (MethodVFTableLocationsTy::const_iterator I = NewMethods.begin(), + E = NewMethods.end(); I != E; ++I) { + const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I->first.getDecl()); + assert(MD->isVirtual()); + + std::string MethodName = PredefinedExpr::ComputeName( + PredefinedExpr::PrettyFunctionNoVirtual, MD); + + if (isa<CXXDestructorDecl>(MD)) { + IndicesMap[I->second] = MethodName + " [scalar deleting]"; + } else { + IndicesMap[I->second] = MethodName; + } + + if (!I->second.VFPtrOffset.isZero() || I->second.VBTableIndex != 0) + HasNonzeroOffset = true; + } + + // Print the vtable indices for all the member functions. + if (!IndicesMap.empty()) { + Out << "VFTable indices for "; + Out << "'" << RD->getQualifiedNameAsString(); + Out << "' (" << IndicesMap.size() << " entries).\n"; + + CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1); + uint64_t LastVBIndex = 0; + for (std::map<MethodVFTableLocation, std::string>::const_iterator + I = IndicesMap.begin(), + E = IndicesMap.end(); + I != E; ++I) { + CharUnits VFPtrOffset = I->first.VFPtrOffset; + uint64_t VBIndex = I->first.VBTableIndex; + if (HasNonzeroOffset && + (VFPtrOffset != LastVFPtrOffset || VBIndex != LastVBIndex)) { + assert(VBIndex > LastVBIndex || VFPtrOffset > LastVFPtrOffset); + Out << " -- accessible via "; + if (VBIndex) + Out << "vbtable index " << VBIndex << ", "; + Out << "vfptr at offset " << VFPtrOffset.getQuantity() << " --\n"; + LastVFPtrOffset = VFPtrOffset; + LastVBIndex = VBIndex; + } + + uint64_t VTableIndex = I->first.Index; + const std::string &MethodName = I->second; + Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName << '\n'; + } + Out << '\n'; + } +} + +void MicrosoftVTableContext::computeVBTableRelatedInformation( + const CXXRecordDecl *RD) { + if (ComputedVBTableIndices.count(RD)) + return; + ComputedVBTableIndices.insert(RD); + + const ASTRecordLayout &Layout = 
Context.getASTRecordLayout(RD); + BasesSetVectorTy VisitedBases; + + // First, see if the Derived class shared the vbptr with a non-virtual base. + if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) { + // If the Derived class shares the vbptr with a non-virtual base, + // it inherits its vbase indices. + computeVBTableRelatedInformation(VBPtrBase); + for (CXXRecordDecl::base_class_const_iterator I = VBPtrBase->vbases_begin(), + E = VBPtrBase->vbases_end(); I != E; ++I) { + const CXXRecordDecl *SubVBase = I->getType()->getAsCXXRecordDecl(); + assert(VBTableIndices.count(ClassPairTy(VBPtrBase, SubVBase))); + VBTableIndices[ClassPairTy(RD, SubVBase)] = + VBTableIndices[ClassPairTy(VBPtrBase, SubVBase)]; + VisitedBases.insert(SubVBase); + } + } + + // New vbases are added to the end of the vbtable. + // Skip the self entry and vbases visited in the non-virtual base, if any. + unsigned VBTableIndex = 1 + VisitedBases.size(); + for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), + E = RD->vbases_end(); I != E; ++I) { + const CXXRecordDecl *CurVBase = I->getType()->getAsCXXRecordDecl(); + if (VisitedBases.insert(CurVBase)) + VBTableIndices[ClassPairTy(RD, CurVBase)] = VBTableIndex++; + } +} + +const MicrosoftVTableContext::VFPtrListTy & +MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) { + computeVTableRelatedInformation(RD); + + assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations"); + return VFPtrLocations[RD]; +} + +const VTableLayout & +MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD, + CharUnits VFPtrOffset) { + computeVTableRelatedInformation(RD); + + VFTableIdTy id(RD, VFPtrOffset); + assert(VFTableLayouts.count(id) && "Couldn't find a VFTable at this offset"); + return *VFTableLayouts[id]; +} + +const MicrosoftVTableContext::MethodVFTableLocation & +MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) { + assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() && + "Only use 
this method for virtual methods or dtors"); + if (isa<CXXDestructorDecl>(GD.getDecl())) + assert(GD.getDtorType() == Dtor_Deleting); + + MethodVFTableLocationsTy::iterator I = MethodVFTableLocations.find(GD); + if (I != MethodVFTableLocations.end()) + return I->second; + + const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent(); + + computeVTableRelatedInformation(RD); + + I = MethodVFTableLocations.find(GD); + assert(I != MethodVFTableLocations.end() && "Did not find index!"); + return I->second; +} |